@ai-sdk/openai 2.0.0-canary.15 → 2.0.0-canary.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -83,7 +83,6 @@ type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
 
 type OpenAIChatConfig = {
     provider: string;
-    compatibility: 'strict' | 'compatible';
    headers: () => Record<string, string | undefined>;
    url: (options: {
        modelId: string;
@@ -162,7 +161,6 @@ type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOp
 
 type OpenAICompletionConfig = {
     provider: string;
-    compatibility: 'strict' | 'compatible';
    headers: () => Record<string, string | undefined>;
    url: (options: {
        modelId: string;
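
Note: both the chat and the completion config types drop the `compatibility` field, so a provider instance can no longer be switched between 'strict' and 'compatible' behavior. A before/after sketch, assuming the v4-style `createOpenAI` factory (everything outside this diff is an assumption):

import { createOpenAI } from '@ai-sdk/openai';

// Before (canary.15): a compatibility mode gated OpenAI-only request fields.
// const openai = createOpenAI({ apiKey: '...', compatibility: 'strict' });

// After (canary.17): no compatibility option; the provider always behaves the
// way 'strict' mode did, e.g. it always sends stream_options (see below).
const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });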
@@ -258,37 +256,37 @@ declare const openAITranscriptionProviderOptions: z.ZodObject<{
     /**
      * Additional information to include in the transcription response.
      */
-    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodString, "many">>>;
+    include: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
     /**
      * The language of the input audio in ISO-639-1 format.
      */
-    language: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    language: z.ZodOptional<z.ZodString>;
     /**
      * An optional text to guide the model's style or continue a previous audio segment.
      */
-    prompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    prompt: z.ZodOptional<z.ZodString>;
     /**
      * The sampling temperature, between 0 and 1.
      * @default 0
      */
-    temperature: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
+    temperature: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
     /**
      * The timestamp granularities to populate for this transcription.
      * @default ['segment']
      */
-    timestampGranularities: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>>;
+    timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>;
 }, "strip", z.ZodTypeAny, {
-    prompt?: string | null | undefined;
-    temperature?: number | null | undefined;
-    include?: string[] | null | undefined;
-    language?: string | null | undefined;
-    timestampGranularities?: ("word" | "segment")[] | null | undefined;
+    prompt?: string | undefined;
+    temperature?: number | undefined;
+    include?: string[] | undefined;
+    language?: string | undefined;
+    timestampGranularities?: ("word" | "segment")[] | undefined;
 }, {
-    prompt?: string | null | undefined;
-    temperature?: number | null | undefined;
-    include?: string[] | null | undefined;
-    language?: string | null | undefined;
-    timestampGranularities?: ("word" | "segment")[] | null | undefined;
+    prompt?: string | undefined;
+    temperature?: number | undefined;
+    include?: string[] | undefined;
+    language?: string | undefined;
+    timestampGranularities?: ("word" | "segment")[] | undefined;
 }>;
 type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;
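
Note: each transcription provider option changes from nullable-and-optional to plain optional, so passing `null` no longer type-checks; omit the field instead. A usage sketch, assuming the v5 `experimental_transcribe` API from the `ai` package:

import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const { text } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('audio.mp3'),
  providerOptions: {
    openai: {
      language: 'en', // ok: a plain string
      // language: null, // no longer accepted as of canary.17; omit instead
      temperature: 0,
      timestampGranularities: ['segment'],
    },
  },
});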
 
@@ -553,7 +553,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -591,18 +591,12 @@ var OpenAIChatLanguageModel = class {
     const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
     const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
     const providerMetadata = { openai: {} };
-    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-    }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
-    }
     if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
       providerMetadata.openai.logprobs = choice.logprobs.content;
     }
@@ -611,7 +605,10 @@ var OpenAIChatLanguageModel = class {
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
         inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
       },
       request: { body },
       response: {
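
Note: `reasoningTokens` and `cachedPromptTokens` move out of `providerMetadata.openai` and into the standardized usage object as `usage.reasoningTokens` and `usage.cachedInputTokens`, joined by the new `usage.totalTokens`. A migration sketch against the v5 `generateText` surface (the call shape is an assumption; the field names are from this diff):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Hello!',
});

// canary.17: standardized usage fields.
console.log(usage.inputTokens, usage.outputTokens, usage.totalTokens);
console.log(usage.reasoningTokens, usage.cachedInputTokens);

// providerMetadata.openai now only carries OpenAI-specific extras
// (acceptedPredictionTokens, rejectedPredictionTokens, logprobs).
console.log(providerMetadata?.openai);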
@@ -628,8 +625,9 @@ var OpenAIChatLanguageModel = class {
     const body = {
       ...args,
       stream: true,
-      // only include stream_options when in strict compatibility mode:
-      stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
       url: this.config.url({
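
Note: with the compatibility switch gone, streaming requests unconditionally ask OpenAI for the final usage chunk. The resulting request body, in sketch form:

// POST {baseURL}/chat/completions
const body = {
  ...args,
  stream: true,
  stream_options: { include_usage: true }, // previously only when compatibility === 'strict'
};

OpenAI-compatible servers that reject `stream_options` will likely need a dedicated compatible provider from this release on.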
@@ -645,12 +643,12 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     const providerMetadata = { openai: {} };
@@ -661,7 +659,7 @@ var OpenAIChatLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
         if (!chunk.success) {
           finishReason = "error";
           controller.enqueue({ type: "error", error: chunk.error });
@@ -681,31 +679,25 @@ var OpenAIChatLanguageModel = class {
           });
         }
         if (value.usage != null) {
-          const {
-            prompt_tokens,
-            completion_tokens,
-            prompt_tokens_details,
-            completion_tokens_details
-          } = value.usage;
-          usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
-          usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
-          if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
-            providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
-          }
-          if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
-            providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+          usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+          usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+          usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+          usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+          usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+          if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+            providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
           }
-          if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
-            providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
-          }
-          if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
-            providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+          if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+            providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
           }
         }
         const choice = value.choices[0];
         if ((choice == null ? void 0 : choice.finish_reason) != null) {
           finishReason = mapOpenAIFinishReason(choice.finish_reason);
         }
+        if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+          providerMetadata.openai.logprobs = choice.logprobs.content;
+        }
         if ((choice == null ? void 0 : choice.delta) == null) {
           return;
         }
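
Note: the streaming path now fills the same standardized usage fields (`totalTokens`, `reasoningTokens`, `cachedInputTokens`) and copies streamed `logprobs` into `providerMetadata.openai`. A `streamText` sketch (assumed v5 call shape):

import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'Write a haiku.',
  onFinish({ usage }) {
    // Populated from the final chunk requested via stream_options.include_usage.
    console.log(usage.totalTokens, usage.reasoningTokens, usage.cachedInputTokens);
  },
});

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}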
@@ -732,7 +724,7 @@ var OpenAIChatLanguageModel = class {
                   message: `Expected 'id' to be a string.`
                 });
               }
-              if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+              if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
                 throw new import_provider3.InvalidResponseDataError({
                   data: toolCallDelta,
                   message: `Expected 'function.name' to be a string.`
@@ -743,12 +735,12 @@ var OpenAIChatLanguageModel = class {
                 type: "function",
                 function: {
                   name: toolCallDelta.function.name,
-                  arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+                  arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
                 },
                 hasFinished: false
               };
               const toolCall2 = toolCalls[index];
-              if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+              if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
                 if (toolCall2.function.arguments.length > 0) {
                   controller.enqueue({
                     type: "tool-call-delta",
@@ -762,7 +754,7 @@ var OpenAIChatLanguageModel = class {
                   controller.enqueue({
                     type: "tool-call",
                     toolCallType: "function",
-                    toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
+                    toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils3.generateId)(),
                     toolName: toolCall2.function.name,
                     args: toolCall2.function.arguments
                   });
@@ -775,21 +767,21 @@ var OpenAIChatLanguageModel = class {
               if (toolCall.hasFinished) {
                 continue;
               }
-              if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
-                toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+              if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+                toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
               }
               controller.enqueue({
                 type: "tool-call-delta",
                 toolCallType: "function",
                 toolCallId: toolCall.id,
                 toolName: toolCall.function.name,
-                argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+                argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
               });
-              if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+              if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
                 controller.enqueue({
                   type: "tool-call",
                   toolCallType: "function",
-                  toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
+                  toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils3.generateId)(),
                   toolName: toolCall.function.name,
                   args: toolCall.function.arguments
                 });
@@ -816,6 +808,7 @@ var OpenAIChatLanguageModel = class {
 var openaiTokenUsageSchema = import_zod3.z.object({
   prompt_tokens: import_zod3.z.number().nullish(),
   completion_tokens: import_zod3.z.number().nullish(),
+  total_tokens: import_zod3.z.number().nullish(),
   prompt_tokens_details: import_zod3.z.object({
     cached_tokens: import_zod3.z.number().nullish()
   }).nullish(),
@@ -887,6 +880,20 @@ var openaiChatChunkSchema = import_zod3.z.union([
           })
         ).nullish()
       }).nullish(),
+      logprobs: import_zod3.z.object({
+        content: import_zod3.z.array(
+          import_zod3.z.object({
+            token: import_zod3.z.string(),
+            logprob: import_zod3.z.number(),
+            top_logprobs: import_zod3.z.array(
+              import_zod3.z.object({
+                token: import_zod3.z.string(),
+                logprob: import_zod3.z.number()
+              })
+            )
+          })
+        ).nullish()
+      }).nullish(),
       finish_reason: import_zod3.z.string().nullish(),
       index: import_zod3.z.number()
     })
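
Note: the chunk schema now parses `logprobs` on streamed choices, which is what lets the transform above forward them to `providerMetadata.openai.logprobs`. The parsed shape, written out as a type:

type StreamedLogProb = {
  token: string;
  logprob: number;
  top_logprobs: Array<{ token: string; logprob: number }>;
};

How logprobs are switched on for a request is not part of this diff.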
@@ -1148,6 +1155,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
+    var _a, _b, _c;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1175,8 +1183,9 @@ var OpenAICompletionLanguageModel = class {
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
-        inputTokens: response.usage.prompt_tokens,
-        outputTokens: response.usage.completion_tokens
+        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       request: { body: args },
@@ -1194,8 +1203,9 @@ var OpenAICompletionLanguageModel = class {
     const body = {
       ...args,
       stream: true,
-      // only include stream_options when in strict compatibility mode:
-      stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+      stream_options: {
+        include_usage: true
+      }
     };
     const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
       url: this.config.url({
@@ -1215,7 +1225,8 @@ var OpenAICompletionLanguageModel = class {
     const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let isFirstChunk = true;
     return {
@@ -1246,6 +1257,7 @@ var OpenAICompletionLanguageModel = class {
           if (value.usage != null) {
             usage.inputTokens = value.usage.prompt_tokens;
             usage.outputTokens = value.usage.completion_tokens;
+            usage.totalTokens = value.usage.total_tokens;
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1276,6 +1288,11 @@ var OpenAICompletionLanguageModel = class {
     };
   }
 };
+var usageSchema = import_zod5.z.object({
+  prompt_tokens: import_zod5.z.number(),
+  completion_tokens: import_zod5.z.number(),
+  total_tokens: import_zod5.z.number()
+});
 var openaiCompletionResponseSchema = import_zod5.z.object({
   id: import_zod5.z.string().nullish(),
   created: import_zod5.z.number().nullish(),
@@ -1291,10 +1308,7 @@ var openaiCompletionResponseSchema = import_zod5.z.object({
       }).nullish()
     })
   ),
-  usage: import_zod5.z.object({
-    prompt_tokens: import_zod5.z.number(),
-    completion_tokens: import_zod5.z.number()
-  })
+  usage: usageSchema.nullish()
 });
 var openaiCompletionChunkSchema = import_zod5.z.union([
   import_zod5.z.object({
@@ -1313,10 +1327,7 @@ var openaiCompletionChunkSchema = import_zod5.z.union([
       }).nullish()
     })
   ),
-  usage: import_zod5.z.object({
-    prompt_tokens: import_zod5.z.number(),
-    completion_tokens: import_zod5.z.number()
-  }).nullish()
+  usage: usageSchema.nullish()
   }),
   openaiErrorDataSchema
 ]);
@@ -1516,25 +1527,25 @@ var openAITranscriptionProviderOptions = import_zod9.z.object({
   /**
    * Additional information to include in the transcription response.
    */
-  include: import_zod9.z.array(import_zod9.z.string()).nullish(),
+  include: import_zod9.z.array(import_zod9.z.string()).optional(),
   /**
    * The language of the input audio in ISO-639-1 format.
    */
-  language: import_zod9.z.string().nullish(),
+  language: import_zod9.z.string().optional(),
   /**
    * An optional text to guide the model's style or continue a previous audio segment.
    */
-  prompt: import_zod9.z.string().nullish(),
+  prompt: import_zod9.z.string().optional(),
   /**
    * The sampling temperature, between 0 and 1.
    * @default 0
    */
-  temperature: import_zod9.z.number().min(0).max(1).default(0).nullish(),
+  temperature: import_zod9.z.number().min(0).max(1).default(0).optional(),
   /**
    * The timestamp granularities to populate for this transcription.
    * @default ['segment']
    */
-  timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).default(["segment"]).nullish()
+  timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).default(["segment"]).optional()
 });
 
 // src/openai-transcription-model.ts
@@ -2204,7 +2215,7 @@ var OpenAIResponsesLanguageModel = class {
         ])
       ),
       incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
-      usage: usageSchema
+      usage: usageSchema2
     })
   ),
   abortSignal: options.abortSignal,
@@ -2258,7 +2269,10 @@ var OpenAIResponsesLanguageModel = class {
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens
+        outputTokens: response.usage.output_tokens,
+        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+        reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+        cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
       },
       request: { body },
       response: {
@@ -2270,9 +2284,7 @@ var OpenAIResponsesLanguageModel = class {
       },
       providerMetadata: {
         openai: {
-          responseId: response.id,
-          cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
-          reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+          responseId: response.id
         }
       },
       warnings
@@ -2301,10 +2313,9 @@ var OpenAIResponsesLanguageModel = class {
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
-      outputTokens: void 0
+      outputTokens: void 0,
+      totalTokens: void 0
     };
-    let cachedPromptTokens = null;
-    let reasoningTokens = null;
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
@@ -2382,8 +2393,9 @@ var OpenAIResponsesLanguageModel = class {
             });
             usage.inputTokens = value.response.usage.input_tokens;
             usage.outputTokens = value.response.usage.output_tokens;
-            cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
-            reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+            usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+            usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+            usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
           } else if (isResponseAnnotationAddedChunk(value)) {
             controller.enqueue({
               type: "source",
@@ -2399,13 +2411,9 @@ var OpenAIResponsesLanguageModel = class {
             type: "finish",
             finishReason,
             usage,
-            ...(cachedPromptTokens != null || reasoningTokens != null) && {
-              providerMetadata: {
-                openai: {
-                  responseId,
-                  cachedPromptTokens,
-                  reasoningTokens
-                }
+            providerMetadata: {
+              openai: {
+                responseId
               }
             }
           });
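
Note: the Responses API model receives the same migration: `cachedPromptTokens` and `reasoningTokens` leave `providerMetadata.openai` (which now only carries `responseId`) and surface as `usage.cachedInputTokens` and `usage.reasoningTokens`, while `totalTokens` is computed as input plus output tokens. A sketch (assumed v5 call shape):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'Hello!',
});

// Before: providerMetadata.openai.cachedPromptTokens / .reasoningTokens (null when absent).
// After: on usage, and undefined when absent.
console.log(usage.cachedInputTokens, usage.reasoningTokens, usage.totalTokens);
console.log(providerMetadata?.openai?.responseId);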
@@ -2417,7 +2425,7 @@ var OpenAIResponsesLanguageModel = class {
       };
     }
 };
-var usageSchema = import_zod12.z.object({
+var usageSchema2 = import_zod12.z.object({
   input_tokens: import_zod12.z.number(),
   input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
   output_tokens: import_zod12.z.number(),
@@ -2431,7 +2439,7 @@ var responseFinishedChunkSchema = import_zod12.z.object({
   type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
   response: import_zod12.z.object({
     incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
-    usage: usageSchema
+    usage: usageSchema2
   })
 });
 var responseCreatedChunkSchema = import_zod12.z.object({