@ai-sdk/openai-compatible 1.0.0-canary.9 → 1.0.1

package/dist/index.mjs CHANGED
@@ -12,7 +12,7 @@ import {
   parseProviderOptions,
   postJsonToApi
 } from "@ai-sdk/provider-utils";
-import { z as z3 } from "zod";
+import { z as z3 } from "zod/v4";
 
 // src/convert-to-openai-compatible-chat-messages.ts
 import {
@@ -86,7 +86,7 @@ function convertToOpenAICompatibleChatMessages(prompt) {
             type: "function",
             function: {
               name: part.toolName,
-              arguments: JSON.stringify(part.args)
+              arguments: JSON.stringify(part.input)
             },
             ...partMetadata
           });
@@ -104,11 +104,24 @@ function convertToOpenAICompatibleChatMessages(prompt) {
       }
       case "tool": {
         for (const toolResponse of content) {
+          const output = toolResponse.output;
+          let contentValue;
+          switch (output.type) {
+            case "text":
+            case "error-text":
+              contentValue = output.value;
+              break;
+            case "content":
+            case "json":
+            case "error-json":
+              contentValue = JSON.stringify(output.value);
+              break;
+          }
           const toolResponseMetadata = getOpenAIMetadata(toolResponse);
           messages.push({
             role: "tool",
             tool_call_id: toolResponse.toolCallId,
-            content: JSON.stringify(toolResponse.result),
+            content: contentValue,
             ...toolResponseMetadata
           });
         }
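
Note: tool results now arrive as a typed `output` object instead of a raw `result` value. A minimal sketch of how the shapes map to the `content` string (the values here are hypothetical):

    // text-style outputs are passed through verbatim:
    { type: "text", value: "42" }            // -> content: "42"
    { type: "error-text", value: "boom" }    // -> content: "boom"
    // structured outputs are serialized:
    { type: "json", value: { answer: 42 } }  // -> content: '{"answer":42}'
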
@@ -154,17 +167,21 @@ function mapOpenAICompatibleFinishReason(finishReason) {
 }
 
 // src/openai-compatible-chat-options.ts
-import { z } from "zod";
+import { z } from "zod/v4";
 var openaiCompatibleProviderOptions = z.object({
   /**
    * A unique identifier representing your end-user, which can help the provider to
    * monitor and detect abuse.
    */
-  user: z.string().optional()
+  user: z.string().optional(),
+  /**
+   * Reasoning effort for reasoning models. Defaults to `medium`.
+   */
+  reasoningEffort: z.string().optional()
 });
 
 // src/openai-compatible-error.ts
-import { z as z2 } from "zod";
+import { z as z2 } from "zod/v4";
 var openaiCompatibleErrorDataSchema = z2.object({
   error: z2.object({
     message: z2.string(),
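
Note: the new `reasoningEffort` option is read from `providerOptions` and forwarded to the API as `reasoning_effort` (see the `getArgs` hunk further down). A sketch of setting it from the caller side; the provider name, base URL, and model id are placeholders:

    import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
    import { generateText } from "ai";

    const example = createOpenAICompatible({
      name: "example",
      baseURL: "https://api.example.com/v1",
    });

    const { text } = await generateText({
      model: example("example-reasoning-model"),
      prompt: "Summarize this diff.",
      providerOptions: {
        // also accepted under the provider's own name, e.g. "example"
        "openai-compatible": { reasoningEffort: "low" },
      },
    });
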
@@ -204,7 +221,7 @@ function prepareTools({
         function: {
           name: tool.name,
           description: tool.description,
-          parameters: tool.parameters
+          parameters: tool.inputSchema
         }
       });
     }
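
Note: this tracks the provider-spec rename from `parameters` to `inputSchema` on tool definitions. A sketch of a matching tool declaration on the caller side (the weather tool itself is a hypothetical stub):

    import { tool } from "ai";
    import { z } from "zod/v4";

    const weather = tool({
      description: "Get the weather for a city",
      inputSchema: z.object({ city: z.string() }), // formerly `parameters`
      execute: async ({ city }) => ({ city, tempC: 20 }), // stubbed result
    });
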
@@ -257,11 +274,11 @@ var OpenAICompatibleChatLanguageModel = class {
   get providerOptionsName() {
     return this.config.provider.split(".")[0].trim();
   }
-  async getSupportedUrls() {
+  get supportedUrls() {
     var _a, _b, _c;
-    return (_c = (_b = (_a = this.config).getSupportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
   }
-  getArgs({
+  async getArgs({
     prompt,
     maxOutputTokens,
     temperature,
@@ -279,12 +296,12 @@ var OpenAICompatibleChatLanguageModel = class {
     var _a, _b, _c;
     const warnings = [];
     const compatibleOptions = Object.assign(
-      (_a = parseProviderOptions({
+      (_a = await parseProviderOptions({
        provider: "openai-compatible",
        providerOptions,
        schema: openaiCompatibleProviderOptions
      })) != null ? _a : {},
-      (_b = parseProviderOptions({
+      (_b = await parseProviderOptions({
        provider: this.providerOptionsName,
        providerOptions,
        schema: openaiCompatibleProviderOptions
@@ -331,6 +348,7 @@ var OpenAICompatibleChatLanguageModel = class {
       stop: stopSequences,
       seed,
       ...providerOptions == null ? void 0 : providerOptions[this.providerOptionsName],
+      reasoning_effort: compatibleOptions.reasoningEffort,
       // messages:
       messages: convertToOpenAICompatibleChatMessages(prompt),
       // tools:
@@ -341,8 +359,8 @@ var OpenAICompatibleChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
-    const { args, warnings } = this.getArgs({ ...options });
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
+    const { args, warnings } = await this.getArgs({ ...options });
     const body = JSON.stringify(args);
     const {
       responseHeaders,
@@ -372,7 +390,6 @@ var OpenAICompatibleChatLanguageModel = class {
     if (reasoning != null && reasoning.length > 0) {
       content.push({
         type: "reasoning",
-        reasoningType: "text",
         text: reasoning
       });
     }
@@ -380,39 +397,34 @@ var OpenAICompatibleChatLanguageModel = class {
       for (const toolCall of choice.message.tool_calls) {
         content.push({
           type: "tool-call",
-          toolCallType: "function",
           toolCallId: (_a = toolCall.id) != null ? _a : generateId(),
           toolName: toolCall.function.name,
-          args: toolCall.function.arguments
+          input: toolCall.function.arguments
         });
       }
     }
     const providerMetadata = {
       [this.providerOptionsName]: {},
-      ...(_c = (_b = this.config.metadataExtractor) == null ? void 0 : _b.extractMetadata) == null ? void 0 : _c.call(_b, {
+      ...await ((_c = (_b = this.config.metadataExtractor) == null ? void 0 : _b.extractMetadata) == null ? void 0 : _c.call(_b, {
         parsedBody: rawResponse
-      })
+      }))
     };
     const completionTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.completion_tokens_details;
-    const promptTokenDetails = (_e = responseBody.usage) == null ? void 0 : _e.prompt_tokens_details;
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata[this.providerOptionsName].reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-    }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata[this.providerOptionsName].acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata[this.providerOptionsName].rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-      providerMetadata[this.providerOptionsName].cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
-    }
     return {
       content,
       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (_g = (_f = responseBody.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
+        inputTokens: (_f = (_e = responseBody.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+        outputTokens: (_h = (_g = responseBody.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0,
+        totalTokens: (_j = (_i = responseBody.usage) == null ? void 0 : _i.total_tokens) != null ? _j : void 0,
+        reasoningTokens: (_m = (_l = (_k = responseBody.usage) == null ? void 0 : _k.completion_tokens_details) == null ? void 0 : _l.reasoning_tokens) != null ? _m : void 0,
+        cachedInputTokens: (_p = (_o = (_n = responseBody.usage) == null ? void 0 : _n.prompt_tokens_details) == null ? void 0 : _o.cached_tokens) != null ? _p : void 0
       },
       providerMetadata,
       request: { body },
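
Note: token accounting moves from `providerMetadata` onto the standard `usage` object, which now carries `totalTokens`, `reasoningTokens`, and `cachedInputTokens` directly. A sketch of reading it from a `generateText` call (which fields are populated depends on what the upstream API reports):

    const { usage } = await generateText({ model, prompt: "hi" });
    console.log(usage.inputTokens, usage.outputTokens, usage.totalTokens);
    console.log(usage.reasoningTokens);   // undefined unless reported
    console.log(usage.cachedInputTokens); // undefined unless reported
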
@@ -426,8 +438,13 @@ var OpenAICompatibleChatLanguageModel = class {
   }
   async doStream(options) {
     var _a;
-    const { args, warnings } = this.getArgs({ ...options });
-    const body = JSON.stringify({ ...args, stream: true });
+    const { args, warnings } = await this.getArgs({ ...options });
+    const body = {
+      ...args,
+      stream: true,
+      // only include stream_options when in strict compatibility mode:
+      stream_options: this.config.includeUsage ? { include_usage: true } : void 0
+    };
     const metadataExtractor = (_a = this.config.metadataExtractor) == null ? void 0 : _a.createStreamExtractor();
     const { responseHeaders, value: response } = await postJsonToApi({
       url: this.config.url({
@@ -435,10 +452,7 @@ var OpenAICompatibleChatLanguageModel = class {
         modelId: this.modelId
       }),
       headers: combineHeaders(this.config.headers(), options.headers),
-      body: {
-        ...args,
-        stream: true
-      },
+      body,
       failedResponseHandler: this.failedResponseHandler,
       successfulResponseHandler: createEventSourceResponseHandler(
         this.chunkSchema
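
Note: `stream_options: { include_usage: true }` is only sent when the provider was created with the new `includeUsage` flag (see the `createOpenAICompatible` hunk at the end of this diff). Sketch, with placeholder name and URL:

    const example = createOpenAICompatible({
      name: "example",
      baseURL: "https://api.example.com/v1",
      includeUsage: true, // ask the server to emit usage on the final stream chunk
    });
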
@@ -448,7 +462,7 @@ var OpenAICompatibleChatLanguageModel = class {
     });
     const toolCalls = [];
     let finishReason = "unknown";
-    let usage = {
+    const usage = {
       completionTokens: void 0,
       completionTokensDetails: {
         reasoningTokens: void 0,
@@ -458,10 +472,13 @@ var OpenAICompatibleChatLanguageModel = class {
       promptTokens: void 0,
       promptTokensDetails: {
         cachedTokens: void 0
-      }
+      },
+      totalTokens: void 0
     };
     let isFirstChunk = true;
-    let providerOptionsName = this.providerOptionsName;
+    const providerOptionsName = this.providerOptionsName;
+    let isActiveReasoning = false;
+    let isActiveText = false;
     return {
       stream: response.pipeThrough(
         new TransformStream({
@@ -471,6 +488,9 @@ var OpenAICompatibleChatLanguageModel = class {
           // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
           transform(chunk, controller) {
             var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+            if (options.includeRawChunks) {
+              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+            }
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -494,11 +514,13 @@ var OpenAICompatibleChatLanguageModel = class {
               const {
                 prompt_tokens,
                 completion_tokens,
+                total_tokens,
                 prompt_tokens_details,
                 completion_tokens_details
               } = value.usage;
               usage.promptTokens = prompt_tokens != null ? prompt_tokens : void 0;
               usage.completionTokens = completion_tokens != null ? completion_tokens : void 0;
+              usage.totalTokens = total_tokens != null ? total_tokens : void 0;
               if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
                 usage.completionTokensDetails.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
               }
@@ -523,28 +545,34 @@ var OpenAICompatibleChatLanguageModel = class {
             }
             const delta = choice.delta;
             if (delta.reasoning_content != null) {
+              if (!isActiveReasoning) {
+                controller.enqueue({
+                  type: "reasoning-start",
+                  id: "reasoning-0"
+                });
+                isActiveReasoning = true;
+              }
               controller.enqueue({
-                type: "reasoning",
-                reasoningType: "text",
-                text: delta.reasoning_content
+                type: "reasoning-delta",
+                id: "reasoning-0",
+                delta: delta.reasoning_content
               });
             }
             if (delta.content != null) {
+              if (!isActiveText) {
+                controller.enqueue({ type: "text-start", id: "txt-0" });
+                isActiveText = true;
+              }
               controller.enqueue({
-                type: "text",
-                text: delta.content
+                type: "text-delta",
+                id: "txt-0",
+                delta: delta.content
               });
             }
             if (delta.tool_calls != null) {
               for (const toolCallDelta of delta.tool_calls) {
                 const index = toolCallDelta.index;
                 if (toolCalls[index] == null) {
-                  if (toolCallDelta.type !== "function") {
-                    throw new InvalidResponseDataError({
-                      data: toolCallDelta,
-                      message: `Expected 'function' type.`
-                    });
-                  }
                   if (toolCallDelta.id == null) {
                     throw new InvalidResponseDataError({
                       data: toolCallDelta,
@@ -557,6 +585,11 @@ var OpenAICompatibleChatLanguageModel = class {
                       message: `Expected 'function.name' to be a string.`
                     });
                   }
+                  controller.enqueue({
+                    type: "tool-input-start",
+                    id: toolCallDelta.id,
+                    toolName: toolCallDelta.function.name
+                  });
                   toolCalls[index] = {
                     id: toolCallDelta.id,
                     type: "function",
@@ -570,20 +603,21 @@ var OpenAICompatibleChatLanguageModel = class {
                   if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
                     if (toolCall2.function.arguments.length > 0) {
                       controller.enqueue({
-                        type: "tool-call-delta",
-                        toolCallType: "function",
-                        toolCallId: toolCall2.id,
-                        toolName: toolCall2.function.name,
-                        argsTextDelta: toolCall2.function.arguments
+                        type: "tool-input-start",
+                        id: toolCall2.id,
+                        toolName: toolCall2.function.name
                       });
                     }
                     if (isParsableJson(toolCall2.function.arguments)) {
+                      controller.enqueue({
+                        type: "tool-input-end",
+                        id: toolCall2.id
+                      });
                       controller.enqueue({
                         type: "tool-call",
-                        toolCallType: "function",
                         toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
                         toolName: toolCall2.function.name,
-                        args: toolCall2.function.arguments
+                        input: toolCall2.function.arguments
                       });
                       toolCall2.hasFinished = true;
                     }
@@ -598,19 +632,20 @@ var OpenAICompatibleChatLanguageModel = class {
                   toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
                 }
                 controller.enqueue({
-                  type: "tool-call-delta",
-                  toolCallType: "function",
-                  toolCallId: toolCall.id,
-                  toolName: toolCall.function.name,
-                  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+                  type: "tool-input-delta",
+                  id: toolCall.id,
+                  delta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
                });
                 if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+                  controller.enqueue({
+                    type: "tool-input-end",
+                    id: toolCall.id
+                  });
                   controller.enqueue({
                     type: "tool-call",
-                    toolCallType: "function",
                     toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
                     toolName: toolCall.function.name,
-                    args: toolCall.function.arguments
+                    input: toolCall.function.arguments
                   });
                   toolCall.hasFinished = true;
                 }
@@ -618,29 +653,46 @@ var OpenAICompatibleChatLanguageModel = class {
             }
           },
           flush(controller) {
-            var _a2, _b;
+            var _a2, _b, _c, _d, _e, _f;
+            if (isActiveReasoning) {
+              controller.enqueue({ type: "reasoning-end", id: "reasoning-0" });
+            }
+            if (isActiveText) {
+              controller.enqueue({ type: "text-end", id: "txt-0" });
+            }
+            for (const toolCall of toolCalls.filter(
+              (toolCall2) => !toolCall2.hasFinished
+            )) {
+              controller.enqueue({
+                type: "tool-input-end",
+                id: toolCall.id
+              });
+              controller.enqueue({
+                type: "tool-call",
+                toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
+                toolName: toolCall.function.name,
+                input: toolCall.function.arguments
+              });
+            }
             const providerMetadata = {
               [providerOptionsName]: {},
               ...metadataExtractor == null ? void 0 : metadataExtractor.buildMetadata()
             };
-            if (usage.completionTokensDetails.reasoningTokens != null) {
-              providerMetadata[providerOptionsName].reasoningTokens = usage.completionTokensDetails.reasoningTokens;
-            }
             if (usage.completionTokensDetails.acceptedPredictionTokens != null) {
               providerMetadata[providerOptionsName].acceptedPredictionTokens = usage.completionTokensDetails.acceptedPredictionTokens;
             }
             if (usage.completionTokensDetails.rejectedPredictionTokens != null) {
              providerMetadata[providerOptionsName].rejectedPredictionTokens = usage.completionTokensDetails.rejectedPredictionTokens;
             }
-            if (usage.promptTokensDetails.cachedTokens != null) {
-              providerMetadata[providerOptionsName].cachedPromptTokens = usage.promptTokensDetails.cachedTokens;
-            }
             controller.enqueue({
               type: "finish",
               finishReason,
               usage: {
-                inputTokens: (_a2 = usage.promptTokens) != null ? _a2 : void 0,
-                outputTokens: (_b = usage.completionTokens) != null ? _b : void 0
+                inputTokens: (_b = usage.promptTokens) != null ? _b : void 0,
+                outputTokens: (_c = usage.completionTokens) != null ? _c : void 0,
+                totalTokens: (_d = usage.totalTokens) != null ? _d : void 0,
+                reasoningTokens: (_e = usage.completionTokensDetails.reasoningTokens) != null ? _e : void 0,
+                cachedInputTokens: (_f = usage.promptTokensDetails.cachedTokens) != null ? _f : void 0
              },
              providerMetadata
            });
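
Note: streamed parts now follow a start/delta/end lifecycle with stable ids, and `flush` closes any blocks still open when the stream ends. The sequence a consumer sees for a typical chunked answer, sketched as comments:

    // { type: "reasoning-start", id: "reasoning-0" }
    // { type: "reasoning-delta", id: "reasoning-0", delta: "..." }  (repeated)
    // { type: "reasoning-end",   id: "reasoning-0" }
    // { type: "text-start", id: "txt-0" }
    // { type: "text-delta", id: "txt-0", delta: "..." }             (repeated)
    // { type: "text-end",   id: "txt-0" }
    // tool calls: "tool-input-start" / "tool-input-delta" / "tool-input-end", then "tool-call"
    // { type: "finish", finishReason, usage: { inputTokens, outputTokens, totalTokens, ... } }
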
@@ -655,6 +707,7 @@ var OpenAICompatibleChatLanguageModel = class {
 var openaiCompatibleTokenUsageSchema = z3.object({
   prompt_tokens: z3.number().nullish(),
   completion_tokens: z3.number().nullish(),
+  total_tokens: z3.number().nullish(),
   prompt_tokens_details: z3.object({
     cached_tokens: z3.number().nullish()
   }).nullish(),
@@ -677,7 +730,6 @@ var OpenAICompatibleChatResponseSchema = z3.object({
       tool_calls: z3.array(
         z3.object({
           id: z3.string().nullish(),
-          type: z3.literal("function"),
           function: z3.object({
             name: z3.string(),
             arguments: z3.string()
@@ -705,7 +757,6 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
       z3.object({
         index: z3.number(),
         id: z3.string().nullish(),
-        type: z3.literal("function").nullish(),
         function: z3.object({
           name: z3.string().nullish(),
           arguments: z3.string().nullish()
@@ -730,7 +781,7 @@ import {
   parseProviderOptions as parseProviderOptions2,
   postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
-import { z as z5 } from "zod";
+import { z as z5 } from "zod/v4";
 
 // src/convert-to-openai-compatible-completion-prompt.ts
 import {
@@ -739,13 +790,9 @@ import {
 } from "@ai-sdk/provider";
 function convertToOpenAICompatibleCompletionPrompt({
   prompt,
-  inputFormat,
   user = "user",
   assistant = "assistant"
 }) {
-  if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
-    return { prompt: prompt[0].content[0].text };
-  }
   let text = "";
   if (prompt[0].role === "system") {
     text += `${prompt[0].content}
@@ -815,7 +862,7 @@ ${user}:`]
 }
 
 // src/openai-compatible-completion-options.ts
-import { z as z4 } from "zod";
+import { z as z4 } from "zod/v4";
 var openaiCompatibleCompletionProviderOptions = z4.object({
   /**
    * Echo back the prompt in addition to the completion.
@@ -827,7 +874,7 @@ var openaiCompatibleCompletionProviderOptions = z4.object({
    * Accepts a JSON object that maps tokens (specified by their token ID in
    * the GPT tokenizer) to an associated bias value from -100 to 100.
    */
-  logitBias: z4.record(z4.number(), z4.number()).optional(),
+  logitBias: z4.record(z4.string(), z4.number()).optional(),
   /**
    * The suffix that comes after a completion of inserted text.
    */
@@ -859,12 +906,11 @@ var OpenAICompatibleCompletionLanguageModel = class {
   get providerOptionsName() {
     return this.config.provider.split(".")[0].trim();
   }
-  async getSupportedUrls() {
+  get supportedUrls() {
     var _a, _b, _c;
-    return (_c = (_b = (_a = this.config).getSupportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
   }
-  getArgs({
-    inputFormat,
+  async getArgs({
     prompt,
     maxOutputTokens,
     temperature,
@@ -881,7 +927,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
   }) {
     var _a;
     const warnings = [];
-    const completionOptions = (_a = parseProviderOptions2({
+    const completionOptions = (_a = await parseProviderOptions2({
       provider: this.providerOptionsName,
       providerOptions,
       schema: openaiCompatibleCompletionProviderOptions
@@ -902,7 +948,7 @@ var OpenAICompatibleCompletionLanguageModel = class {
         details: "JSON response format is not supported."
       });
     }
-    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompatibleCompletionPrompt({ prompt, inputFormat });
+    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompatibleCompletionPrompt({ prompt });
     const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
     return {
       args: {
@@ -930,8 +976,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d;
-    const { args, warnings } = this.getArgs(options);
+    var _a, _b, _c, _d, _e, _f;
+    const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -959,7 +1005,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
       content,
       usage: {
         inputTokens: (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : void 0,
-        outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0
+        outputTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : void 0,
+        totalTokens: (_f = (_e = response.usage) == null ? void 0 : _e.total_tokens) != null ? _f : void 0
       },
       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
       request: { body: args },
@@ -972,10 +1019,12 @@ var OpenAICompatibleCompletionLanguageModel = class {
     };
   }
   async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
-      stream: true
+      stream: true,
+      // only include stream_options when in strict compatibility mode:
+      stream_options: this.config.includeUsage ? { include_usage: true } : void 0
     };
     const { responseHeaders, value: response } = await postJsonToApi2({
       url: this.config.url({
@@ -994,7 +1043,8 @@ var OpenAICompatibleCompletionLanguageModel = class {
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     return {
@@ -1004,7 +1054,10 @@ var OpenAICompatibleCompletionLanguageModel = class {
           controller.enqueue({ type: "stream-start", warnings });
         },
         transform(chunk, controller) {
-          var _a, _b;
+          var _a, _b, _c;
+          if (options.includeRawChunks) {
+            controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+          }
           if (!chunk.success) {
            finishReason = "error";
            controller.enqueue({ type: "error", error: chunk.error });
@@ -1022,10 +1075,15 @@ var OpenAICompatibleCompletionLanguageModel = class {
               type: "response-metadata",
               ...getResponseMetadata(value)
             });
+            controller.enqueue({
+              type: "text-start",
+              id: "0"
+            });
           }
           if (value.usage != null) {
             usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
             usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+            usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1035,12 +1093,16 @@ var OpenAICompatibleCompletionLanguageModel = class {
           }
           if ((choice == null ? void 0 : choice.text) != null) {
             controller.enqueue({
-              type: "text",
-              text: choice.text
+              type: "text-delta",
+              id: "0",
+              delta: choice.text
             });
           }
         },
         flush(controller) {
+          if (!isFirstChunk) {
+            controller.enqueue({ type: "text-end", id: "0" });
+          }
           controller.enqueue({
             type: "finish",
             finishReason,
@@ -1054,6 +1116,11 @@ var OpenAICompatibleCompletionLanguageModel = class {
     };
   }
 };
+var usageSchema = z5.object({
+  prompt_tokens: z5.number(),
+  completion_tokens: z5.number(),
+  total_tokens: z5.number()
+});
 var openaiCompatibleCompletionResponseSchema = z5.object({
   id: z5.string().nullish(),
   created: z5.number().nullish(),
@@ -1064,10 +1131,7 @@ var openaiCompatibleCompletionResponseSchema = z5.object({
       finish_reason: z5.string()
     })
   ),
-  usage: z5.object({
-    prompt_tokens: z5.number(),
-    completion_tokens: z5.number()
-  }).nullish()
+  usage: usageSchema.nullish()
 });
 var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
   z5.object({
@@ -1081,10 +1145,7 @@ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
       index: z5.number()
     })
   ),
-  usage: z5.object({
-    prompt_tokens: z5.number(),
-    completion_tokens: z5.number()
-  }).nullish()
+  usage: usageSchema.nullish()
 }),
 errorSchema
 ]);
@@ -1100,10 +1161,10 @@ import {
   parseProviderOptions as parseProviderOptions3,
   postJsonToApi as postJsonToApi3
 } from "@ai-sdk/provider-utils";
-import { z as z7 } from "zod";
+import { z as z7 } from "zod/v4";
 
 // src/openai-compatible-embedding-options.ts
-import { z as z6 } from "zod";
+import { z as z6 } from "zod/v4";
 var openaiCompatibleEmbeddingProviderOptions = z6.object({
   /**
    * The number of dimensions the resulting output embeddings should have.
@@ -1146,12 +1207,12 @@ var OpenAICompatibleEmbeddingModel = class {
   }) {
     var _a, _b, _c;
     const compatibleOptions = Object.assign(
-      (_a = parseProviderOptions3({
+      (_a = await parseProviderOptions3({
        provider: "openai-compatible",
        providerOptions,
        schema: openaiCompatibleEmbeddingProviderOptions
      })) != null ? _a : {},
-      (_b = parseProviderOptions3({
+      (_b = await parseProviderOptions3({
        provider: this.providerOptionsName,
        providerOptions,
        schema: openaiCompatibleEmbeddingProviderOptions
@@ -1194,13 +1255,15 @@ var OpenAICompatibleEmbeddingModel = class {
     return {
       embeddings: response.data.map((item) => item.embedding),
       usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
+      providerMetadata: response.providerMetadata,
       response: { headers: responseHeaders, body: rawValue }
     };
   }
 };
 var openaiTextEmbeddingResponseSchema = z7.object({
   data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
-  usage: z7.object({ prompt_tokens: z7.number() }).nullish()
+  usage: z7.object({ prompt_tokens: z7.number() }).nullish(),
+  providerMetadata: z7.record(z7.string(), z7.record(z7.string(), z7.any())).optional()
 });
 
 // src/openai-compatible-image-model.ts
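
Note: embedding responses may now carry passthrough `providerMetadata`, validated as a record of provider-keyed records. A sketch of reading it via `embedMany` (whether anything is present depends on the server; provider and model names are placeholders):

    const { embeddings, providerMetadata } = await embedMany({
      model: example.textEmbeddingModel("example-embedding-model"),
      values: ["hello", "world"],
    });
    console.log(providerMetadata); // e.g. { example: { ... } } or undefined
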
@@ -1210,17 +1273,13 @@ import {
   createJsonResponseHandler as createJsonResponseHandler4,
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
-import { z as z8 } from "zod";
+import { z as z8 } from "zod/v4";
 var OpenAICompatibleImageModel = class {
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
-    this.specificationVersion = "v1";
-  }
-  get maxImagesPerCall() {
-    var _a;
-    return (_a = this.settings.maxImagesPerCall) != null ? _a : 10;
+    this.specificationVersion = "v2";
+    this.maxImagesPerCall = 10;
   }
   get provider() {
     return this.config.provider;
@@ -1260,8 +1319,7 @@ var OpenAICompatibleImageModel = class {
       n,
       size,
       ...(_d = providerOptions.openai) != null ? _d : {},
-      response_format: "b64_json",
-      ...this.settings.user ? { user: this.settings.user } : {}
+      response_format: "b64_json"
     },
     failedResponseHandler: createJsonErrorResponseHandler4(
       (_e = this.config.errorStructure) != null ? _e : defaultOpenAICompatibleErrorStructure
@@ -1309,22 +1367,18 @@ function createOpenAICompatible(options) {
     fetch: options.fetch
   });
   const createLanguageModel = (modelId) => createChatModel(modelId);
-  const createChatModel = (modelId) => new OpenAICompatibleChatLanguageModel(
-    modelId,
-    getCommonModelConfig("chat")
-  );
-  const createCompletionModel = (modelId) => new OpenAICompatibleCompletionLanguageModel(
-    modelId,
-    getCommonModelConfig("completion")
-  );
+  const createChatModel = (modelId) => new OpenAICompatibleChatLanguageModel(modelId, {
+    ...getCommonModelConfig("chat"),
+    includeUsage: options.includeUsage
+  });
+  const createCompletionModel = (modelId) => new OpenAICompatibleCompletionLanguageModel(modelId, {
+    ...getCommonModelConfig("completion"),
+    includeUsage: options.includeUsage
+  });
   const createEmbeddingModel = (modelId) => new OpenAICompatibleEmbeddingModel(modelId, {
     ...getCommonModelConfig("embedding")
   });
-  const createImageModel = (modelId, settings = {}) => new OpenAICompatibleImageModel(
-    modelId,
-    settings,
-    getCommonModelConfig("image")
-  );
+  const createImageModel = (modelId) => new OpenAICompatibleImageModel(modelId, getCommonModelConfig("image"));
   const provider = (modelId) => createLanguageModel(modelId);
   provider.languageModel = createLanguageModel;
   provider.chatModel = createChatModel;
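
Note: image models no longer take a per-model `settings` object; `maxImagesPerCall` is now fixed at 10 and the old `settings.user` escape hatch is gone (user identification travels through provider options instead). Sketch of the updated call shape, with a placeholder model id:

    const model = example.imageModel("example-image-model"); // no settings argument
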