@ai-sdk/openai 2.0.0-canary.15 → 2.0.0-canary.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -258,37 +258,37 @@ declare const openAITranscriptionProviderOptions: z.ZodObject<{
   /**
   * Additional information to include in the transcription response.
   */
- include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodString, "many">>>;
+ include: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
   /**
   * The language of the input audio in ISO-639-1 format.
   */
- language: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ language: z.ZodOptional<z.ZodString>;
   /**
   * An optional text to guide the model's style or continue a previous audio segment.
   */
- prompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ prompt: z.ZodOptional<z.ZodString>;
   /**
   * The sampling temperature, between 0 and 1.
   * @default 0
   */
- temperature: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
+ temperature: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
   /**
   * The timestamp granularities to populate for this transcription.
   * @default ['segment']
   */
- timestampGranularities: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>>;
+ timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>;
   }, "strip", z.ZodTypeAny, {
- prompt?: string | null | undefined;
- temperature?: number | null | undefined;
- include?: string[] | null | undefined;
- language?: string | null | undefined;
- timestampGranularities?: ("word" | "segment")[] | null | undefined;
+ prompt?: string | undefined;
+ temperature?: number | undefined;
+ include?: string[] | undefined;
+ language?: string | undefined;
+ timestampGranularities?: ("word" | "segment")[] | undefined;
   }, {
- prompt?: string | null | undefined;
- temperature?: number | null | undefined;
- include?: string[] | null | undefined;
- language?: string | null | undefined;
- timestampGranularities?: ("word" | "segment")[] | null | undefined;
+ prompt?: string | undefined;
+ temperature?: number | undefined;
+ include?: string[] | undefined;
+ language?: string | undefined;
+ timestampGranularities?: ("word" | "segment")[] | undefined;
   }>;
   type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;
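The change above drops ZodNullable from every transcription provider option: null is no longer a legal value, so callers either omit an option or pass a concrete value. A minimal usage sketch in TypeScript, assuming the experimental_transcribe entry point of the ai package (the entry point and model id are illustrative assumptions, not part of this diff):

import { readFile } from 'node:fs/promises';
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('audio.mp3'),
  providerOptions: {
    openai: {
      language: 'en', // ISO-639-1; omit the key instead of passing null
      temperature: 0.2, // 0..1, defaults to 0
      timestampGranularities: ['word'], // defaults to ['segment']
    },
  },
});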
@@ -553,7 +553,7 @@ var OpenAIChatLanguageModel = class {
   };
   }
   async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
   const { args: body, warnings } = await this.getArgs(options);
   const {
   responseHeaders,
@@ -591,18 +591,12 @@ var OpenAIChatLanguageModel = class {
   const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
   const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
   const providerMetadata = { openai: {} };
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
- }
   if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
   providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
   }
   if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
   providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
   }
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
- }
   if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
   providerMetadata.openai.logprobs = choice.logprobs.content;
   }
@@ -611,7 +605,10 @@ var OpenAIChatLanguageModel = class {
   finishReason: mapOpenAIFinishReason(choice.finish_reason),
   usage: {
   inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
- outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
   },
   request: { body },
   response: {
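The hunks above move token accounting in doGenerate out of providerMetadata.openai (reasoningTokens, cachedPromptTokens) and into the standardized usage object, which also gains totalTokens. On the consumer side that reads roughly as follows; a sketch assuming the generateText entry point of the ai package:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Summarize this changelog.',
});

console.log(usage.inputTokens, usage.outputTokens, usage.totalTokens);
console.log(usage.reasoningTokens); // previously providerMetadata.openai.reasoningTokens
console.log(usage.cachedInputTokens); // previously providerMetadata.openai.cachedPromptTokens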
@@ -645,12 +642,12 @@ var OpenAIChatLanguageModel = class {
   abortSignal: options.abortSignal,
   fetch: this.config.fetch
   });
- const { messages: rawPrompt, ...rawSettings } = args;
   const toolCalls = [];
   let finishReason = "unknown";
   const usage = {
   inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
   };
   let isFirstChunk = true;
   const providerMetadata = { openai: {} };
@@ -661,7 +658,7 @@ var OpenAIChatLanguageModel = class {
   controller.enqueue({ type: "stream-start", warnings });
   },
   transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
   if (!chunk.success) {
   finishReason = "error";
   controller.enqueue({ type: "error", error: chunk.error });
@@ -681,31 +678,25 @@ var OpenAIChatLanguageModel = class {
   });
   }
   if (value.usage != null) {
- const {
- prompt_tokens,
- completion_tokens,
- prompt_tokens_details,
- completion_tokens_details
- } = value.usage;
- usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
- usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
   }
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
   }
   }
   const choice = value.choices[0];
   if ((choice == null ? void 0 : choice.finish_reason) != null) {
   finishReason = mapOpenAIFinishReason(choice.finish_reason);
   }
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
+ }
   if ((choice == null ? void 0 : choice.delta) == null) {
   return;
   }
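The streaming path gets the same normalization: the destructured usage bookkeeping is replaced by direct assignments into the shared usage object (including the new totalTokens), and streamed logprobs are now surfaced via providerMetadata.openai.logprobs. A consumer-side sketch, again assuming the ai package's streamText entry point:

import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'Hello!',
});

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}

// Resolves once the stream has finished; now carries totalTokens,
// reasoningTokens, and cachedInputTokens as well.
console.log(await result.usage);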
@@ -732,7 +723,7 @@ var OpenAIChatLanguageModel = class {
   message: `Expected 'id' to be a string.`
   });
   }
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
   throw new import_provider3.InvalidResponseDataError({
   data: toolCallDelta,
   message: `Expected 'function.name' to be a string.`
@@ -743,12 +734,12 @@ var OpenAIChatLanguageModel = class {
   type: "function",
   function: {
   name: toolCallDelta.function.name,
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
   },
   hasFinished: false
   };
   const toolCall2 = toolCalls[index];
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
   if (toolCall2.function.arguments.length > 0) {
   controller.enqueue({
   type: "tool-call-delta",
@@ -762,7 +753,7 @@ var OpenAIChatLanguageModel = class {
   controller.enqueue({
   type: "tool-call",
   toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils3.generateId)(),
   toolName: toolCall2.function.name,
   args: toolCall2.function.arguments
   });
@@ -775,21 +766,21 @@ var OpenAIChatLanguageModel = class {
   if (toolCall.hasFinished) {
   continue;
   }
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
   }
   controller.enqueue({
   type: "tool-call-delta",
   toolCallType: "function",
   toolCallId: toolCall.id,
   toolName: toolCall.function.name,
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
   });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
   controller.enqueue({
   type: "tool-call",
   toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils3.generateId)(),
   toolName: toolCall.function.name,
   args: toolCall.function.arguments
   });
@@ -816,6 +807,7 @@ var OpenAIChatLanguageModel = class {
   var openaiTokenUsageSchema = import_zod3.z.object({
   prompt_tokens: import_zod3.z.number().nullish(),
   completion_tokens: import_zod3.z.number().nullish(),
+ total_tokens: import_zod3.z.number().nullish(),
   prompt_tokens_details: import_zod3.z.object({
   cached_tokens: import_zod3.z.number().nullish()
   }).nullish(),
@@ -887,6 +879,20 @@ var openaiChatChunkSchema = import_zod3.z.union([
   })
   ).nullish()
   }).nullish(),
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
+ })
+ )
+ })
+ ).nullish()
+ }).nullish(),
   finish_reason: import_zod3.z.string().nullish(),
   index: import_zod3.z.number()
   })
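For reference, an illustrative (hand-written, not captured) choice fragment that the extended openaiChatChunkSchema now accepts:

// Illustrative choice object from a streamed chunk:
const choice = {
  delta: { content: 'Hi' },
  logprobs: {
    content: [
      {
        token: 'Hi',
        logprob: -0.012,
        top_logprobs: [{ token: 'Hi', logprob: -0.012 }],
      },
    ],
  },
  finish_reason: null,
  index: 0,
};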
@@ -1148,6 +1154,7 @@ var OpenAICompletionLanguageModel = class {
   };
   }
   async doGenerate(options) {
+ var _a, _b, _c;
   const { args, warnings } = await this.getArgs(options);
   const {
   responseHeaders,
@@ -1175,8 +1182,9 @@ var OpenAICompletionLanguageModel = class {
   return {
   content: [{ type: "text", text: choice.text }],
   usage: {
- inputTokens: response.usage.prompt_tokens,
- outputTokens: response.usage.completion_tokens
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
   },
   finishReason: mapOpenAIFinishReason(choice.finish_reason),
   request: { body: args },
@@ -1215,7 +1223,8 @@ var OpenAICompletionLanguageModel = class {
   const providerMetadata = { openai: {} };
   const usage = {
   inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
   };
   let isFirstChunk = true;
   return {
@@ -1246,6 +1255,7 @@ var OpenAICompletionLanguageModel = class {
   if (value.usage != null) {
   usage.inputTokens = value.usage.prompt_tokens;
   usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.total_tokens;
   }
   const choice = value.choices[0];
   if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1276,6 +1286,11 @@ var OpenAICompletionLanguageModel = class {
   };
   }
   };
+ var usageSchema = import_zod5.z.object({
+ prompt_tokens: import_zod5.z.number(),
+ completion_tokens: import_zod5.z.number(),
+ total_tokens: import_zod5.z.number()
+ });
   var openaiCompletionResponseSchema = import_zod5.z.object({
   id: import_zod5.z.string().nullish(),
   created: import_zod5.z.number().nullish(),
@@ -1291,10 +1306,7 @@ var openaiCompletionResponseSchema = import_zod5.z.object({
   }).nullish()
   })
   ),
- usage: import_zod5.z.object({
- prompt_tokens: import_zod5.z.number(),
- completion_tokens: import_zod5.z.number()
- })
+ usage: usageSchema.nullish()
   });
   var openaiCompletionChunkSchema = import_zod5.z.union([
   import_zod5.z.object({
@@ -1313,10 +1325,7 @@ var openaiCompletionChunkSchema = import_zod5.z.union([
   }).nullish()
   })
   ),
- usage: import_zod5.z.object({
- prompt_tokens: import_zod5.z.number(),
- completion_tokens: import_zod5.z.number()
- }).nullish()
+ usage: usageSchema.nullish()
   }),
   openaiErrorDataSchema
   ]);
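In the completion model, the inline usage objects are replaced by a shared usageSchema (now including total_tokens) applied as .nullish() in both the response and chunk schemas, so a payload without a usage block no longer fails validation; the optional chaining added to doGenerate above then yields undefined token counts instead of throwing. A standalone zod sketch of the pattern:

import { z } from 'zod';

const usageSchema = z.object({
  prompt_tokens: z.number(),
  completion_tokens: z.number(),
  total_tokens: z.number(),
});

const responseSchema = z.object({ usage: usageSchema.nullish() });

responseSchema.parse({}); // ok: usage is undefined
responseSchema.parse({ usage: null }); // ok: usage is null
responseSchema.parse({
  usage: { prompt_tokens: 1, completion_tokens: 2, total_tokens: 3 },
}); // ok: usage is fully populated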
@@ -1516,25 +1525,25 @@ var openAITranscriptionProviderOptions = import_zod9.z.object({
   /**
   * Additional information to include in the transcription response.
   */
- include: import_zod9.z.array(import_zod9.z.string()).nullish(),
+ include: import_zod9.z.array(import_zod9.z.string()).optional(),
   /**
   * The language of the input audio in ISO-639-1 format.
   */
- language: import_zod9.z.string().nullish(),
+ language: import_zod9.z.string().optional(),
   /**
   * An optional text to guide the model's style or continue a previous audio segment.
   */
- prompt: import_zod9.z.string().nullish(),
+ prompt: import_zod9.z.string().optional(),
   /**
   * The sampling temperature, between 0 and 1.
   * @default 0
   */
- temperature: import_zod9.z.number().min(0).max(1).default(0).nullish(),
+ temperature: import_zod9.z.number().min(0).max(1).default(0).optional(),
   /**
   * The timestamp granularities to populate for this transcription.
   * @default ['segment']
   */
- timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).default(["segment"]).nullish()
+ timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).default(["segment"]).optional()
   });
 
   // src/openai-transcription-model.ts
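This runtime switch from .nullish() to .optional() is what produces the declaration changes in the first hunk: absence stays legal, explicit null is rejected. A minimal zod sketch of the distinction:

import { z } from 'zod';

const before = z.object({ language: z.string().nullish() }); // canary.15
const after = z.object({ language: z.string().optional() }); // canary.16

before.parse({ language: null }); // ok: nullish accepts null
after.parse({}); // ok: the key may simply be absent
after.parse({ language: null }); // throws: null is no longer accepted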
@@ -2204,7 +2213,7 @@ var OpenAIResponsesLanguageModel = class {
   ])
   ),
   incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
- usage: usageSchema
+ usage: usageSchema2
   })
   ),
   abortSignal: options.abortSignal,
@@ -2258,7 +2267,10 @@ var OpenAIResponsesLanguageModel = class {
   }),
   usage: {
   inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens
+ outputTokens: response.usage.output_tokens,
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+ cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
   },
   request: { body },
   response: {
@@ -2270,9 +2282,7 @@ var OpenAIResponsesLanguageModel = class {
   },
   providerMetadata: {
   openai: {
- responseId: response.id,
- cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
- reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+ responseId: response.id
   }
   },
   warnings
@@ -2301,10 +2311,9 @@ var OpenAIResponsesLanguageModel = class {
   let finishReason = "unknown";
   const usage = {
   inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
   };
- let cachedPromptTokens = null;
- let reasoningTokens = null;
   let responseId = null;
   const ongoingToolCalls = {};
   let hasToolCalls = false;
@@ -2382,8 +2391,9 @@ var OpenAIResponsesLanguageModel = class {
   });
   usage.inputTokens = value.response.usage.input_tokens;
   usage.outputTokens = value.response.usage.output_tokens;
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+ usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+ usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
   } else if (isResponseAnnotationAddedChunk(value)) {
   controller.enqueue({
   type: "source",
@@ -2399,13 +2409,9 @@ var OpenAIResponsesLanguageModel = class {
   type: "finish",
   finishReason,
   usage,
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
- providerMetadata: {
- openai: {
- responseId,
- cachedPromptTokens,
- reasoningTokens
- }
+ providerMetadata: {
+ openai: {
+ responseId
   }
   }
   });
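The Responses model follows suit: token details move into the standardized usage object (with totalTokens computed as input plus output tokens), and providerMetadata.openai shrinks to just responseId, which is now emitted unconditionally on finish. A consumer-side sketch, assuming generateText from the ai package:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'Hello!',
});

console.log(usage.totalTokens, usage.reasoningTokens, usage.cachedInputTokens);
console.log(providerMetadata?.openai?.responseId); // the only remaining openai field on finish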
@@ -2417,7 +2423,7 @@ var OpenAIResponsesLanguageModel = class {
   };
   }
   };
- var usageSchema = import_zod12.z.object({
+ var usageSchema2 = import_zod12.z.object({
   input_tokens: import_zod12.z.number(),
   input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
   output_tokens: import_zod12.z.number(),
@@ -2431,7 +2437,7 @@ var responseFinishedChunkSchema = import_zod12.z.object({
   type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
   response: import_zod12.z.object({
   incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
- usage: usageSchema
+ usage: usageSchema2
   })
   });
   var responseCreatedChunkSchema = import_zod12.z.object({