@ai-sdk/openai 2.0.0-canary.14 → 2.0.0-canary.16

This diff shows the changes between two publicly released versions of the package as they appear in the public registry. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -226,6 +226,16 @@ var openaiProviderOptions = z.object({
  * the GPT tokenizer) to an associated bias value from -100 to 100.
  */
  logitBias: z.record(z.coerce.number(), z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: z.union([z.boolean(), z.number()]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
@@ -402,6 +412,8 @@ var OpenAIChatLanguageModel = class {
  model: this.modelId,
  // model specific settings:
  logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
  user: openaiOptions.user,
  parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
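The two hunks above add a `logprobs` provider option (boolean or number) and map it onto the chat API's `logprobs`/`top_logprobs` request parameters. A minimal sketch of opting in from application code, assuming the usual AI SDK `generateText` call shape for this canary:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch only: `logprobs: true` requests logprobs for the generated tokens,
// a number additionally requests that many top alternatives per token.
const result = await generateText({
  model: openai.chat('gpt-4o'),
  prompt: 'Say hello.',
  providerOptions: {
    openai: { logprobs: 2 },
  },
});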
@@ -474,6 +486,20 @@ var OpenAIChatLanguageModel = class {
  message: "logitBias is not supported for reasoning models"
  });
  }
+ if (baseArgs.logprobs != null) {
+ baseArgs.logprobs = void 0;
+ warnings.push({
+ type: "other",
+ message: "logprobs is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.top_logprobs != null) {
+ baseArgs.top_logprobs = void 0;
+ warnings.push({
+ type: "other",
+ message: "topLogprobs is not supported for reasoning models"
+ });
+ }
  if (baseArgs.max_tokens != null) {
  if (baseArgs.max_completion_tokens == null) {
  baseArgs.max_completion_tokens = baseArgs.max_tokens;
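For reasoning models the option is stripped from the request and a warning is emitted instead of failing the call. A sketch of surfacing that warning, assuming the generateText result exposes warnings in this canary:

// Hypothetical model id; any reasoning (o-series) model takes this path.
const result = await generateText({
  model: openai.chat('o3-mini'),
  prompt: 'Think hard.',
  providerOptions: { openai: { logprobs: true } },
});

// Expect a warning such as "logprobs is not supported for reasoning models";
// the parameter is dropped from the request rather than causing an error.
console.log(result.warnings);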
@@ -509,7 +535,7 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
  const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -547,24 +573,24 @@ var OpenAIChatLanguageModel = class {
  const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
  const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
  const providerMetadata = { openai: {} };
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
- }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
  }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
  }
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+ if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
  }
  return {
  content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
- outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+ inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
  },
  request: { body },
  response: {
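With this change, reasoning and cached-token counts move off `providerMetadata.openai` onto the standard usage object, alongside a new `totalTokens` field, while token logprobs surface under `providerMetadata.openai.logprobs`. A rough sketch of reading them (field names per the hunk above; availability depends on what the API returns):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai.chat('gpt-4o'),
  prompt: 'Summarize this.',
  providerOptions: { openai: { logprobs: true } },
});

console.log(usage.inputTokens, usage.outputTokens, usage.totalTokens);
console.log(usage.reasoningTokens, usage.cachedInputTokens); // undefined when not reported
console.log(providerMetadata?.openai?.logprobs);             // per-token logprob entries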
@@ -598,12 +624,12 @@ var OpenAIChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
  let isFirstChunk = true;
  const providerMetadata = { openai: {} };
@@ -614,7 +640,7 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -634,31 +660,25 @@ var OpenAIChatLanguageModel = class {
  });
  }
  if (value.usage != null) {
- const {
- prompt_tokens,
- completion_tokens,
- prompt_tokens_details,
- completion_tokens_details
- } = value.usage;
- usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
- usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
  }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
- }
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
  }
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
+ }
  if ((choice == null ? void 0 : choice.delta) == null) {
  return;
  }
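The streaming path mirrors doGenerate: usage now carries totalTokens, reasoningTokens and cachedInputTokens directly, and chunk-level logprobs are copied into providerMetadata. Assuming the streamText result still exposes a usage promise in this canary, a sketch:

import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai.chat('gpt-4o'),
  prompt: 'Write a haiku.',
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}

// Resolves once the stream finishes; now includes totalTokens as well.
console.log(await result.usage);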
@@ -685,7 +705,7 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
  throw new InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -696,12 +716,12 @@ var OpenAIChatLanguageModel = class {
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
  type: "tool-call-delta",
@@ -715,7 +735,7 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
+ toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -728,21 +748,21 @@ var OpenAIChatLanguageModel = class {
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
  }
  controller.enqueue({
  type: "tool-call-delta",
  toolCallType: "function",
  toolCallId: toolCall.id,
  toolName: toolCall.function.name,
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
+ toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
@@ -769,6 +789,7 @@ var OpenAIChatLanguageModel = class {
  var openaiTokenUsageSchema = z3.object({
  prompt_tokens: z3.number().nullish(),
  completion_tokens: z3.number().nullish(),
+ total_tokens: z3.number().nullish(),
  prompt_tokens_details: z3.object({
  cached_tokens: z3.number().nullish()
  }).nullish(),
@@ -799,6 +820,20 @@ var openaiChatResponseSchema = z3.object({
  ).nullish()
  }),
  index: z3.number(),
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
+ })
+ )
+ })
+ ).nullish()
+ }).nullish(),
  finish_reason: z3.string().nullish()
  })
  ),
@@ -818,7 +853,7 @@ var openaiChatChunkSchema = z3.union([
  z3.object({
  index: z3.number(),
  id: z3.string().nullish(),
- type: z3.literal("function").optional(),
+ type: z3.literal("function").nullish(),
  function: z3.object({
  name: z3.string().nullish(),
  arguments: z3.string().nullish()
@@ -826,7 +861,21 @@ var openaiChatChunkSchema = z3.union([
  })
  ).nullish()
  }).nullish(),
- finish_reason: z3.string().nullable().optional(),
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
+ })
+ )
+ })
+ ).nullish()
+ }).nullish(),
+ finish_reason: z3.string().nullish(),
  index: z3.number()
  })
  ),
@@ -995,7 +1044,17 @@ var openaiCompletionProviderOptions = z4.object({
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: z4.string().optional()
+ user: z4.string().optional(),
+ /**
+ Return the log probabilities of the tokens. Including logprobs will increase
+ the response size and can slow down response times. However, it can
+ be useful to better understand how the model is behaving.
+ Setting to true will return the log probabilities of the tokens that
+ were generated.
+ Setting to a number will return the log probabilities of the top n
+ tokens that were generated.
+ */
+ logprobs: z4.union([z4.boolean(), z4.number()]).optional()
  });

  // src/openai-completion-language-model.ts
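The completions API gets the same provider option; per the request mapping in the following hunk, `true` is translated to the legacy `logprobs: 0` parameter and a number is passed through as the top-n count. A hypothetical call, assuming `openai.completion` is used as in earlier releases:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.completion('gpt-3.5-turbo-instruct'),
  prompt: 'Once upon a time',
  providerOptions: { openai: { logprobs: 3 } },
});

// tokens / token_logprobs / top_logprobs, as returned by the completions API.
console.log(result.providerMetadata?.openai?.logprobs);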
@@ -1067,6 +1126,7 @@ var OpenAICompletionLanguageModel = class {
  // model specific settings:
  echo: openaiOptions.echo,
  logit_bias: openaiOptions.logitBias,
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
  suffix: openaiOptions.suffix,
  user: openaiOptions.user,
  // standardized settings:
@@ -1085,6 +1145,7 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doGenerate(options) {
+ var _a, _b, _c;
  const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -1105,11 +1166,16 @@ var OpenAICompletionLanguageModel = class {
  fetch: this.config.fetch
  });
  const choice = response.choices[0];
+ const providerMetadata = { openai: {} };
+ if (choice.logprobs != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
  return {
  content: [{ type: "text", text: choice.text }],
  usage: {
- inputTokens: response.usage.prompt_tokens,
- outputTokens: response.usage.completion_tokens
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  request: { body: args },
@@ -1118,6 +1184,7 @@ var OpenAICompletionLanguageModel = class {
  headers: responseHeaders,
  body: rawResponse
  },
+ providerMetadata,
  warnings
  };
  }
@@ -1144,9 +1211,11 @@ var OpenAICompletionLanguageModel = class {
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
+ const providerMetadata = { openai: {} };
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
  let isFirstChunk = true;
  return {
@@ -1177,11 +1246,15 @@ var OpenAICompletionLanguageModel = class {
  if (value.usage != null) {
  usage.inputTokens = value.usage.prompt_tokens;
  usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.total_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
  type: "text",
@@ -1193,6 +1266,7 @@ var OpenAICompletionLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
+ providerMetadata,
  usage
  });
  }
@@ -1203,6 +1277,11 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
+ var usageSchema = z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number(),
+ total_tokens: z5.number()
+ });
  var openaiCompletionResponseSchema = z5.object({
  id: z5.string().nullish(),
  created: z5.number().nullish(),
@@ -1210,13 +1289,15 @@ var openaiCompletionResponseSchema = z5.object({
  choices: z5.array(
  z5.object({
  text: z5.string(),
- finish_reason: z5.string()
+ finish_reason: z5.string(),
+ logprobs: z5.object({
+ tokens: z5.array(z5.string()),
+ token_logprobs: z5.array(z5.number()),
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+ }).nullish()
  })
  ),
- usage: z5.object({
- prompt_tokens: z5.number(),
- completion_tokens: z5.number()
- })
+ usage: usageSchema.nullish()
  });
  var openaiCompletionChunkSchema = z5.union([
  z5.object({
@@ -1227,13 +1308,15 @@ var openaiCompletionChunkSchema = z5.union([
  z5.object({
  text: z5.string(),
  finish_reason: z5.string().nullish(),
- index: z5.number()
+ index: z5.number(),
+ logprobs: z5.object({
+ tokens: z5.array(z5.string()),
+ token_logprobs: z5.array(z5.number()),
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+ }).nullish()
  })
  ),
- usage: z5.object({
- prompt_tokens: z5.number(),
- completion_tokens: z5.number()
- }).nullish()
+ usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
  ]);
@@ -1415,12 +1498,23 @@ var OpenAIImageModel = class {
  timestamp: currentDate,
  modelId: this.modelId,
  headers: responseHeaders
+ },
+ providerMetadata: {
+ openai: {
+ images: response.data.map(
+ (item) => item.revised_prompt ? {
+ revisedPrompt: item.revised_prompt
+ } : null
+ )
+ }
  }
  };
  }
  };
  var openaiImageResponseSchema = z8.object({
- data: z8.array(z8.object({ b64_json: z8.string() }))
+ data: z8.array(
+ z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+ )
  });

  // src/openai-tools.ts
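dall-e-3 can rewrite prompts before generating; this hunk surfaces that rewritten text per image as `providerMetadata.openai.images[n].revisedPrompt` (entries are `null` for images without one). A sketch, assuming the image-generation result exposes `providerMetadata` in this canary:

import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { providerMetadata } = await generateImage({
  model: openai.image('dall-e-3'),
  prompt: 'A watercolor fox in the snow',
});

console.log(providerMetadata?.openai?.images?.[0]?.revisedPrompt);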
@@ -1460,25 +1554,25 @@ var openAITranscriptionProviderOptions = z10.object({
  /**
  * Additional information to include in the transcription response.
  */
- include: z10.array(z10.string()).nullish(),
+ include: z10.array(z10.string()).optional(),
  /**
  * The language of the input audio in ISO-639-1 format.
  */
- language: z10.string().nullish(),
+ language: z10.string().optional(),
  /**
  * An optional text to guide the model's style or continue a previous audio segment.
  */
- prompt: z10.string().nullish(),
+ prompt: z10.string().optional(),
  /**
  * The sampling temperature, between 0 and 1.
  * @default 0
  */
- temperature: z10.number().min(0).max(1).default(0).nullish(),
+ temperature: z10.number().min(0).max(1).default(0).optional(),
  /**
  * The timestamp granularities to populate for this transcription.
  * @default ['segment']
  */
- timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).nullish()
+ timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).optional()
  });

  // src/openai-transcription-model.ts
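The transcription options only switch from .nullish() to .optional(), so omitting them still validates but passing null no longer does. A hypothetical call with explicit options, assuming the experimental transcribe API shape and the whisper-1 transcription model:

import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const result = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('meeting.mp3'),
  providerOptions: {
    openai: { language: 'en', timestampGranularities: ['word'] },
  },
});

console.log(result.text);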
@@ -2060,7 +2154,7 @@ var OpenAIResponsesLanguageModel = class {
  ])
  ),
  incomplete_details: z12.object({ reason: z12.string() }).nullable(),
- usage: usageSchema
+ usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
@@ -2114,7 +2208,10 @@ var OpenAIResponsesLanguageModel = class {
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens
+ outputTokens: response.usage.output_tokens,
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+ cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
  },
  request: { body },
  response: {
@@ -2126,9 +2223,7 @@ var OpenAIResponsesLanguageModel = class {
  },
  providerMetadata: {
  openai: {
- responseId: response.id,
- cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
- reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+ responseId: response.id
  }
  },
  warnings
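For the Responses API the same consolidation applies: cached and reasoning token counts leave `providerMetadata.openai` and join the standard usage object, and the provider metadata keeps only `responseId`. A sketch:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'Explain the change.',
});

console.log(result.usage.reasoningTokens, result.usage.cachedInputTokens);
console.log(result.providerMetadata?.openai?.responseId);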
@@ -2157,10 +2252,9 @@ var OpenAIResponsesLanguageModel = class {
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let cachedPromptTokens = null;
- let reasoningTokens = null;
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
@@ -2238,8 +2332,9 @@ var OpenAIResponsesLanguageModel = class {
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+ usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+ usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
@@ -2255,13 +2350,9 @@ var OpenAIResponsesLanguageModel = class {
  type: "finish",
  finishReason,
  usage,
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
- providerMetadata: {
- openai: {
- responseId,
- cachedPromptTokens,
- reasoningTokens
- }
+ providerMetadata: {
+ openai: {
+ responseId
  }
  }
  });
@@ -2273,7 +2364,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema = z12.object({
+ var usageSchema2 = z12.object({
  input_tokens: z12.number(),
  input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
  output_tokens: z12.number(),
@@ -2287,7 +2378,7 @@ var responseFinishedChunkSchema = z12.object({
  type: z12.enum(["response.completed", "response.incomplete"]),
  response: z12.object({
  incomplete_details: z12.object({ reason: z12.string() }).nullish(),
- usage: usageSchema
+ usage: usageSchema2
  })
  });
  var responseCreatedChunkSchema = z12.object({