@ai-sdk/openai 2.0.0-canary.14 → 2.0.0-canary.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -220,6 +220,16 @@ var openaiProviderOptions = z.object({
   * the GPT tokenizer) to an associated bias value from -100 to 100.
   */
  logitBias: z.record(z.coerce.number(), z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: z.union([z.boolean(), z.number()]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
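
The new chat option above accepts either form. A minimal usage sketch (assuming the AI SDK v5 generateText API; the model id and prompt are illustrative):

    import { generateText } from 'ai';
    import { openai } from '@ai-sdk/openai';

    const result = await generateText({
      model: openai('gpt-4o-mini'), // example model id
      prompt: 'Answer with one word: is the sky blue?',
      providerOptions: {
        // true -> logprobs for the chosen tokens only;
        // a number n -> logprobs for the top n alternatives as well
        openai: { logprobs: 2 },
      },
    });

    // Raw logprobs are surfaced via provider metadata
    // (see the doGenerate/doStream hunks below).
    console.log(result.providerMetadata?.openai?.logprobs);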
@@ -396,6 +406,8 @@ var OpenAIChatLanguageModel = class {
   model: this.modelId,
   // model specific settings:
   logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
   user: openaiOptions.user,
   parallel_tool_calls: openaiOptions.parallelToolCalls,
   // standardized settings:
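
The two nested ternaries above map the single logprobs option onto OpenAI's pair of chat request fields. An equivalent, more readable restatement of that mapping (illustrative only):

    // { logprobs?: boolean | number } -> { logprobs?: boolean; top_logprobs?: number }
    function toChatLogprobsParams(option: boolean | number | undefined) {
      if (option === undefined || option === false) {
        return { logprobs: undefined, top_logprobs: undefined }; // feature off
      }
      if (option === true) {
        return { logprobs: true, top_logprobs: 0 }; // chosen tokens only
      }
      return { logprobs: true, top_logprobs: option }; // top-n alternatives
    }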
@@ -468,6 +480,20 @@ var OpenAIChatLanguageModel = class {
   message: "logitBias is not supported for reasoning models"
   });
   }
+ if (baseArgs.logprobs != null) {
+ baseArgs.logprobs = void 0;
+ warnings.push({
+ type: "other",
+ message: "logprobs is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.top_logprobs != null) {
+ baseArgs.top_logprobs = void 0;
+ warnings.push({
+ type: "other",
+ message: "topLogprobs is not supported for reasoning models"
+ });
+ }
   if (baseArgs.max_tokens != null) {
   if (baseArgs.max_completion_tokens == null) {
   baseArgs.max_completion_tokens = baseArgs.max_tokens;
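
Rather than letting the API reject the request, the provider strips the logprobs fields for reasoning models and reports that through warnings. A sketch of how callers observe this (assuming the result exposes provider warnings; the model id is illustrative):

    const { warnings } = await generateText({
      model: openai('o3-mini'), // reasoning model
      prompt: '...',
      providerOptions: { openai: { logprobs: true } },
    });

    // Expected entry:
    // { type: 'other', message: 'logprobs is not supported for reasoning models' }
    console.log(warnings);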
@@ -503,7 +529,7 @@ var OpenAIChatLanguageModel = class {
   };
   }
   async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
   const { args: body, warnings } = await this.getArgs(options);
   const {
   responseHeaders,
@@ -541,24 +567,24 @@ var OpenAIChatLanguageModel = class {
   const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
   const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
   const providerMetadata = { openai: {} };
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
- }
   if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
   providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
   }
   if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
   providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
   }
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+ if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
   }
   return {
   content,
   finishReason: mapOpenAIFinishReason(choice.finish_reason),
   usage: {
- inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
- outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+ inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
   },
   request: { body },
   response: {
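
Token accounting moves out of openai-specific provider metadata (reasoningTokens, cachedPromptTokens) and into the standardized usage object, alongside the new totalTokens field, so one read path works across providers. A sketch of the new shape:

    const { usage } = await generateText({
      model: openai('gpt-4o-mini'),
      prompt: '...',
    });

    usage.inputTokens;       // from prompt_tokens
    usage.outputTokens;      // from completion_tokens
    usage.totalTokens;       // new: from total_tokens
    usage.reasoningTokens;   // was providerMetadata.openai.reasoningTokens
    usage.cachedInputTokens; // was providerMetadata.openai.cachedPromptTokens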
@@ -592,12 +618,12 @@ var OpenAIChatLanguageModel = class {
   abortSignal: options.abortSignal,
   fetch: this.config.fetch
   });
- const { messages: rawPrompt, ...rawSettings } = args;
   const toolCalls = [];
   let finishReason = "unknown";
   const usage = {
   inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
   };
   let isFirstChunk = true;
   const providerMetadata = { openai: {} };
@@ -608,7 +634,7 @@ var OpenAIChatLanguageModel = class {
   controller.enqueue({ type: "stream-start", warnings });
   },
   transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
   if (!chunk.success) {
   finishReason = "error";
   controller.enqueue({ type: "error", error: chunk.error });
@@ -628,31 +654,25 @@ var OpenAIChatLanguageModel = class {
   });
   }
   if (value.usage != null) {
- const {
- prompt_tokens,
- completion_tokens,
- prompt_tokens_details,
- completion_tokens_details
- } = value.usage;
- usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
- usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
   }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
- }
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
   }
   }
   const choice = value.choices[0];
   if ((choice == null ? void 0 : choice.finish_reason) != null) {
   finishReason = mapOpenAIFinishReason(choice.finish_reason);
   }
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
+ }
   if ((choice == null ? void 0 : choice.delta) == null) {
   return;
   }
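
When streaming, per-choice logprobs are copied into the same providerMetadata.openai.logprobs slot as chunks arrive, so the final metadata carries them. A sketch (assuming streamText resolves providerMetadata after the stream ends):

    import { streamText } from 'ai';

    const result = streamText({
      model: openai('gpt-4o-mini'),
      prompt: '...',
      providerOptions: { openai: { logprobs: true } },
    });

    for await (const _ of result.textStream) {
      // drain the stream
    }
    console.log((await result.providerMetadata)?.openai?.logprobs);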
@@ -679,7 +699,7 @@ var OpenAIChatLanguageModel = class {
   message: `Expected 'id' to be a string.`
   });
   }
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
   throw new InvalidResponseDataError({
   data: toolCallDelta,
   message: `Expected 'function.name' to be a string.`
@@ -690,12 +710,12 @@ var OpenAIChatLanguageModel = class {
   type: "function",
   function: {
   name: toolCallDelta.function.name,
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
   },
   hasFinished: false
   };
   const toolCall2 = toolCalls[index];
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
   if (toolCall2.function.arguments.length > 0) {
   controller.enqueue({
   type: "tool-call-delta",
@@ -709,7 +729,7 @@ var OpenAIChatLanguageModel = class {
   controller.enqueue({
   type: "tool-call",
   toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
+ toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
   toolName: toolCall2.function.name,
   args: toolCall2.function.arguments
   });
@@ -722,21 +742,21 @@ var OpenAIChatLanguageModel = class {
   if (toolCall.hasFinished) {
   continue;
   }
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
   }
   controller.enqueue({
   type: "tool-call-delta",
   toolCallType: "function",
   toolCallId: toolCall.id,
   toolName: toolCall.function.name,
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
   });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
   controller.enqueue({
   type: "tool-call",
   toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
+ toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
   toolName: toolCall.function.name,
   args: toolCall.function.arguments
   });
@@ -763,6 +783,7 @@ var OpenAIChatLanguageModel = class {
  var openaiTokenUsageSchema = z3.object({
   prompt_tokens: z3.number().nullish(),
   completion_tokens: z3.number().nullish(),
+ total_tokens: z3.number().nullish(),
   prompt_tokens_details: z3.object({
   cached_tokens: z3.number().nullish()
   }).nullish(),
@@ -793,6 +814,20 @@ var openaiChatResponseSchema = z3.object({
   ).nullish()
   }),
   index: z3.number(),
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
+ })
+ )
+ })
+ ).nullish()
+ }).nullish(),
   finish_reason: z3.string().nullish()
   })
   ),
@@ -812,7 +847,7 @@ var openaiChatChunkSchema = z3.union([
  z3.object({
   index: z3.number(),
   id: z3.string().nullish(),
- type: z3.literal("function").optional(),
+ type: z3.literal("function").nullish(),
   function: z3.object({
   name: z3.string().nullish(),
   arguments: z3.string().nullish()
@@ -820,7 +855,21 @@ var openaiChatChunkSchema = z3.union([
   })
   ).nullish()
   }).nullish(),
- finish_reason: z3.string().nullable().optional(),
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
+ })
+ )
+ })
+ ).nullish()
+ }).nullish(),
+ finish_reason: z3.string().nullish(),
   index: z3.number()
   })
   ),
@@ -989,7 +1038,17 @@ var openaiCompletionProviderOptions = z4.object({
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
   */
- user: z4.string().optional()
+ user: z4.string().optional(),
+ /**
+ Return the log probabilities of the tokens. Including logprobs will increase
+ the response size and can slow down response times. However, it can
+ be useful to better understand how the model is behaving.
+ Setting to true will return the log probabilities of the tokens that
+ were generated.
+ Setting to a number will return the log probabilities of the top n
+ tokens that were generated.
+ */
+ logprobs: z4.union([z4.boolean(), z4.number()]).optional()
  });

  // src/openai-completion-language-model.ts
@@ -1061,6 +1120,7 @@ var OpenAICompletionLanguageModel = class {
   // model specific settings:
   echo: openaiOptions.echo,
   logit_bias: openaiOptions.logitBias,
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
   suffix: openaiOptions.suffix,
   user: openaiOptions.user,
   // standardized settings:
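
Note the different wire formats: the chat API takes a boolean logprobs plus an integer top_logprobs, while the legacy completions API takes a single integer where 0 means "chosen token only". The ternary chain above encodes that; restated (illustrative only):

    // boolean | number | undefined -> integer logprobs param for /completions
    function toCompletionLogprobsParam(option: boolean | number | undefined) {
      if (option === true) return 0;          // chosen tokens only
      if (option === false) return undefined; // disabled
      return option;                          // top-n count, or undefined
    }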
@@ -1079,6 +1139,7 @@ var OpenAICompletionLanguageModel = class {
   };
   }
   async doGenerate(options) {
+ var _a, _b, _c;
   const { args, warnings } = await this.getArgs(options);
   const {
   responseHeaders,
@@ -1099,11 +1160,16 @@ var OpenAICompletionLanguageModel = class {
   fetch: this.config.fetch
   });
   const choice = response.choices[0];
+ const providerMetadata = { openai: {} };
+ if (choice.logprobs != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
   return {
   content: [{ type: "text", text: choice.text }],
   usage: {
- inputTokens: response.usage.prompt_tokens,
- outputTokens: response.usage.completion_tokens
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
   },
   finishReason: mapOpenAIFinishReason(choice.finish_reason),
   request: { body: args },
@@ -1112,6 +1178,7 @@ var OpenAICompletionLanguageModel = class {
   headers: responseHeaders,
   body: rawResponse
   },
+ providerMetadata,
   warnings
   };
   }
@@ -1138,9 +1205,11 @@ var OpenAICompletionLanguageModel = class {
   fetch: this.config.fetch
   });
   let finishReason = "unknown";
+ const providerMetadata = { openai: {} };
   const usage = {
   inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
   };
   let isFirstChunk = true;
   return {
@@ -1171,11 +1240,15 @@ var OpenAICompletionLanguageModel = class {
   if (value.usage != null) {
   usage.inputTokens = value.usage.prompt_tokens;
   usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.total_tokens;
   }
   const choice = value.choices[0];
   if ((choice == null ? void 0 : choice.finish_reason) != null) {
   finishReason = mapOpenAIFinishReason(choice.finish_reason);
   }
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
   if ((choice == null ? void 0 : choice.text) != null) {
   controller.enqueue({
   type: "text",
@@ -1187,6 +1260,7 @@ var OpenAICompletionLanguageModel = class {
   controller.enqueue({
   type: "finish",
   finishReason,
+ providerMetadata,
   usage
   });
   }
@@ -1197,6 +1271,11 @@ var OpenAICompletionLanguageModel = class {
   };
   }
  };
+ var usageSchema = z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number(),
+ total_tokens: z5.number()
+ });
  var openaiCompletionResponseSchema = z5.object({
   id: z5.string().nullish(),
   created: z5.number().nullish(),
@@ -1204,13 +1283,15 @@ var openaiCompletionResponseSchema = z5.object({
   choices: z5.array(
   z5.object({
   text: z5.string(),
- finish_reason: z5.string()
+ finish_reason: z5.string(),
+ logprobs: z5.object({
+ tokens: z5.array(z5.string()),
+ token_logprobs: z5.array(z5.number()),
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+ }).nullish()
   })
   ),
- usage: z5.object({
- prompt_tokens: z5.number(),
- completion_tokens: z5.number()
- })
+ usage: usageSchema.nullish()
  });
  var openaiCompletionChunkSchema = z5.union([
  z5.object({
@@ -1221,13 +1302,15 @@ var openaiCompletionChunkSchema = z5.union([
  z5.object({
   text: z5.string(),
   finish_reason: z5.string().nullish(),
- index: z5.number()
+ index: z5.number(),
+ logprobs: z5.object({
+ tokens: z5.array(z5.string()),
+ token_logprobs: z5.array(z5.number()),
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+ }).nullish()
   })
   ),
- usage: z5.object({
- prompt_tokens: z5.number(),
- completion_tokens: z5.number()
- }).nullish()
+ usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
 ]);
@@ -1409,12 +1492,23 @@ var OpenAIImageModel = class {
   timestamp: currentDate,
   modelId: this.modelId,
   headers: responseHeaders
+ },
+ providerMetadata: {
+ openai: {
+ images: response.data.map(
+ (item) => item.revised_prompt ? {
+ revisedPrompt: item.revised_prompt
+ } : null
+ )
+ }
   }
   };
   }
  };
  var openaiImageResponseSchema = z8.object({
- data: z8.array(z8.object({ b64_json: z8.string() }))
+ data: z8.array(
+ z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+ )
  });

  // src/openai-transcription-model.ts
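
When DALL-E rewrites the prompt, the rewritten text now surfaces per image through provider metadata, with null placeholders keeping the array aligned with response.data. A read sketch (assuming the AI SDK experimental_generateImage API):

    import { experimental_generateImage as generateImage } from 'ai';
    import { openai } from '@ai-sdk/openai';

    const { providerMetadata } = await generateImage({
      model: openai.image('dall-e-3'),
      prompt: 'A watercolor fox in the snow',
    });

    // One entry per image; null where no revised_prompt was returned.
    console.log(providerMetadata?.openai?.images);
    // e.g. [{ revisedPrompt: 'A watercolor painting of a fox ...' }]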
@@ -1433,25 +1527,25 @@ var openAITranscriptionProviderOptions = z9.object({
   /**
   * Additional information to include in the transcription response.
   */
- include: z9.array(z9.string()).nullish(),
+ include: z9.array(z9.string()).optional(),
   /**
   * The language of the input audio in ISO-639-1 format.
   */
- language: z9.string().nullish(),
+ language: z9.string().optional(),
   /**
   * An optional text to guide the model's style or continue a previous audio segment.
   */
- prompt: z9.string().nullish(),
+ prompt: z9.string().optional(),
   /**
   * The sampling temperature, between 0 and 1.
   * @default 0
   */
- temperature: z9.number().min(0).max(1).default(0).nullish(),
+ temperature: z9.number().min(0).max(1).default(0).optional(),
   /**
   * The timestamp granularities to populate for this transcription.
   * @default ['segment']
   */
- timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).nullish()
+ timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).optional()
  });

  // src/openai-transcription-model.ts
@@ -2137,7 +2231,7 @@ var OpenAIResponsesLanguageModel = class {
   ])
   ),
   incomplete_details: z12.object({ reason: z12.string() }).nullable(),
- usage: usageSchema
+ usage: usageSchema2
   })
   ),
   abortSignal: options.abortSignal,
@@ -2191,7 +2285,10 @@ var OpenAIResponsesLanguageModel = class {
   }),
   usage: {
   inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens
+ outputTokens: response.usage.output_tokens,
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+ cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
   },
   request: { body },
   response: {
@@ -2203,9 +2300,7 @@ var OpenAIResponsesLanguageModel = class {
   },
   providerMetadata: {
   openai: {
- responseId: response.id,
- cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
- reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+ responseId: response.id
   }
   },
   warnings
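
The Responses API model gets the same usage migration: cachedPromptTokens and reasoningTokens leave providerMetadata.openai (only responseId stays), reappearing as usage.cachedInputTokens and usage.reasoningTokens, and totalTokens is computed as input plus output tokens. A migration sketch:

    const result = await generateText({
      model: openai.responses('gpt-4o'),
      prompt: '...',
    });

    // Before: result.providerMetadata?.openai?.cachedPromptTokens / .reasoningTokens
    result.usage.cachedInputTokens;
    result.usage.reasoningTokens;
    result.usage.totalTokens; // input_tokens + output_tokens
    result.providerMetadata?.openai?.responseId; // still provider-specific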
@@ -2234,10 +2329,9 @@ var OpenAIResponsesLanguageModel = class {
   let finishReason = "unknown";
   const usage = {
   inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
   };
- let cachedPromptTokens = null;
- let reasoningTokens = null;
   let responseId = null;
   const ongoingToolCalls = {};
   let hasToolCalls = false;
@@ -2315,8 +2409,9 @@ var OpenAIResponsesLanguageModel = class {
   });
   usage.inputTokens = value.response.usage.input_tokens;
   usage.outputTokens = value.response.usage.output_tokens;
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+ usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+ usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
   } else if (isResponseAnnotationAddedChunk(value)) {
   controller.enqueue({
   type: "source",
@@ -2332,13 +2427,9 @@ var OpenAIResponsesLanguageModel = class {
   type: "finish",
   finishReason,
   usage,
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
- providerMetadata: {
- openai: {
- responseId,
- cachedPromptTokens,
- reasoningTokens
- }
+ providerMetadata: {
+ openai: {
+ responseId
   }
   }
   });
@@ -2350,7 +2441,7 @@ var OpenAIResponsesLanguageModel = class {
   };
   }
  };
- var usageSchema = z12.object({
+ var usageSchema2 = z12.object({
   input_tokens: z12.number(),
   input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
   output_tokens: z12.number(),
@@ -2364,7 +2455,7 @@ var responseFinishedChunkSchema = z12.object({
   type: z12.enum(["response.completed", "response.incomplete"]),
   response: z12.object({
   incomplete_details: z12.object({ reason: z12.string() }).nullish(),
- usage: usageSchema
+ usage: usageSchema2
   })
   });
   var responseCreatedChunkSchema = z12.object({