@ai-sdk/openai 2.0.0-canary.14 → 2.0.0-canary.16

This diff shows the changes between the two published versions of the package as they appear in the public npm registry, and is provided for informational purposes only. The headline changes: `logprobs` support for the chat and completion models, token-usage details (`totalTokens`, `reasoningTokens`, `cachedInputTokens`) moved onto the standardized `usage` object, revised prompts surfaced from the image model, and several zod schemas tightened.
@@ -246,6 +246,16 @@ var openaiProviderOptions = import_zod.z.object({
   * the GPT tokenizer) to an associated bias value from -100 to 100.
   */
  logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
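The new `logprobs` option accepts either a boolean or a number. A minimal consumer-side sketch, assuming the AI SDK v5 canary's `generateText` and its `providerOptions` pass-through (everything outside this diff is an assumption):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-4o-mini'),
  prompt: 'Reply with a single word: hello',
  providerOptions: {
    openai: {
      // true => logprobs for the chosen tokens; a number n => also the top-n alternatives
      logprobs: 3,
    },
  },
});

// Raw logprob entries are surfaced via provider metadata (see the doGenerate hunks below):
console.log(result.providerMetadata?.openai?.logprobs);
```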
@@ -420,6 +430,8 @@ var OpenAIChatLanguageModel = class {
  model: this.modelId,
  // model specific settings:
  logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
  user: openaiOptions.user,
  parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
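The two ternary chains above encode a small mapping onto OpenAI's wire parameters. Restated as a standalone helper for readability (hypothetical, not part of the package):

```ts
// logprobs: true        -> { logprobs: true, top_logprobs: 0 }
// logprobs: n (number)  -> { logprobs: true, top_logprobs: n }
// logprobs: false/unset -> both parameters omitted
function toChatLogprobParams(
  logprobs: boolean | number | undefined,
): { logprobs?: true; top_logprobs?: number } {
  if (logprobs === undefined || logprobs === false) return {};
  if (logprobs === true) return { logprobs: true, top_logprobs: 0 };
  return { logprobs: true, top_logprobs: logprobs };
}
```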
@@ -492,6 +504,20 @@ var OpenAIChatLanguageModel = class {
  message: "logitBias is not supported for reasoning models"
  });
  }
+ if (baseArgs.logprobs != null) {
+ baseArgs.logprobs = void 0;
+ warnings.push({
+ type: "other",
+ message: "logprobs is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.top_logprobs != null) {
+ baseArgs.top_logprobs = void 0;
+ warnings.push({
+ type: "other",
+ message: "topLogprobs is not supported for reasoning models"
+ });
+ }
  if (baseArgs.max_tokens != null) {
  if (baseArgs.max_completion_tokens == null) {
  baseArgs.max_completion_tokens = baseArgs.max_tokens;
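For reasoning models, the provider now strips both parameters and emits warnings instead of sending an invalid request. A sketch of what a caller would observe (the model id and the `warnings` surface are AI SDK assumptions):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { warnings } = await generateText({
  model: openai('o3-mini'), // reasoning model
  prompt: 'Hello',
  providerOptions: { openai: { logprobs: true } },
});
// warnings would contain:
// { type: 'other', message: 'logprobs is not supported for reasoning models' }
```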
@@ -527,7 +553,7 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
  const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -565,24 +591,24 @@ var OpenAIChatLanguageModel = class {
  const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
  const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
  const providerMetadata = { openai: {} };
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
- }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
  }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
  }
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+ if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
  }
  return {
  content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
- outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+ inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
  },
  request: { body },
  response: {
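This hunk moves reasoning and cached-token counts off `providerMetadata.openai` and onto the standardized `usage` object (renaming `cachedPromptTokens` to `cachedInputTokens`), and stores the parsed logprobs in the metadata instead. Reading the new fields might look like this (a sketch under the same AI SDK assumptions as above):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Summarize the plot of Hamlet in one sentence.',
});

console.log(usage.inputTokens, usage.outputTokens, usage.totalTokens);
console.log(usage.reasoningTokens, usage.cachedInputTokens); // previously providerMetadata.openai.*
console.log(providerMetadata?.openai?.logprobs);             // present when logprobs were requested
```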
@@ -616,12 +642,12 @@ var OpenAIChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
  let isFirstChunk = true;
  const providerMetadata = { openai: {} };
@@ -632,7 +658,7 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -652,31 +678,25 @@ var OpenAIChatLanguageModel = class {
  });
  }
  if (value.usage != null) {
- const {
- prompt_tokens,
- completion_tokens,
- prompt_tokens_details,
- completion_tokens_details
- } = value.usage;
- usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
- usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
  }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
- }
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
  }
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
+ }
  if ((choice == null ? void 0 : choice.delta) == null) {
  return;
  }
@@ -703,7 +723,7 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
  throw new import_provider3.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -714,12 +734,12 @@ var OpenAIChatLanguageModel = class {
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
  type: "tool-call-delta",
@@ -733,7 +753,7 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils3.generateId)(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -746,21 +766,21 @@ var OpenAIChatLanguageModel = class {
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
  }
  controller.enqueue({
  type: "tool-call-delta",
  toolCallType: "function",
  toolCallId: toolCall.id,
  toolName: toolCall.function.name,
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
@@ -787,6 +807,7 @@ var OpenAIChatLanguageModel = class {
  var openaiTokenUsageSchema = import_zod3.z.object({
  prompt_tokens: import_zod3.z.number().nullish(),
  completion_tokens: import_zod3.z.number().nullish(),
+ total_tokens: import_zod3.z.number().nullish(),
  prompt_tokens_details: import_zod3.z.object({
  cached_tokens: import_zod3.z.number().nullish()
  }).nullish(),
@@ -817,6 +838,20 @@ var openaiChatResponseSchema = import_zod3.z.object({
  ).nullish()
  }),
  index: import_zod3.z.number(),
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
+ })
+ )
+ })
+ ).nullish()
+ }).nullish(),
  finish_reason: import_zod3.z.string().nullish()
  })
  ),
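The schema accepts the Chat Completions logprobs payload verbatim. Expressed as a TypeScript type, the entries stored in `providerMetadata.openai.logprobs` for chat models look like:

```ts
// Per the zod schema above (chat models):
type ChatLogprobEntry = {
  token: string;
  logprob: number; // natural log of the token's probability
  top_logprobs: Array<{ token: string; logprob: number }>;
};
// providerMetadata.openai.logprobs: ChatLogprobEntry[]
```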
@@ -836,7 +871,7 @@ var openaiChatChunkSchema = import_zod3.z.union([
  import_zod3.z.object({
  index: import_zod3.z.number(),
  id: import_zod3.z.string().nullish(),
- type: import_zod3.z.literal("function").optional(),
+ type: import_zod3.z.literal("function").nullish(),
  function: import_zod3.z.object({
  name: import_zod3.z.string().nullish(),
  arguments: import_zod3.z.string().nullish()
@@ -844,7 +879,21 @@ var openaiChatChunkSchema = import_zod3.z.union([
  })
  ).nullish()
  }).nullish(),
- finish_reason: import_zod3.z.string().nullable().optional(),
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
+ })
+ )
+ })
+ ).nullish()
+ }).nullish(),
+ finish_reason: import_zod3.z.string().nullish(),
  index: import_zod3.z.number()
  })
  ),
@@ -1004,7 +1053,17 @@ var openaiCompletionProviderOptions = import_zod4.z.object({
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: import_zod4.z.string().optional()
+ user: import_zod4.z.string().optional(),
+ /**
+ Return the log probabilities of the tokens. Including logprobs will increase
+ the response size and can slow down response times. However, it can
+ be useful to better understand how the model is behaving.
+ Setting to true will return the log probabilities of the tokens that
+ were generated.
+ Setting to a number will return the log probabilities of the top n
+ tokens that were generated.
+ */
+ logprobs: import_zod4.z.union([import_zod4.z.boolean(), import_zod4.z.number()]).optional()
  });

  // src/openai-completion-language-model.ts
@@ -1076,6 +1135,7 @@ var OpenAICompletionLanguageModel = class {
  // model specific settings:
  echo: openaiOptions.echo,
  logit_bias: openaiOptions.logitBias,
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
  suffix: openaiOptions.suffix,
  user: openaiOptions.user,
  // standardized settings:
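The legacy completions endpoint takes `logprobs` as a single integer, so the boolean form is translated: `true` becomes `0` (chosen-token logprobs only), `false` is dropped, and numbers pass through. As a standalone helper (hypothetical, for readability):

```ts
function toCompletionLogprobs(
  logprobs: boolean | number | undefined,
): number | undefined {
  if (logprobs === true) return 0;  // logprobs of the chosen tokens only
  if (logprobs === false) return undefined;
  return logprobs;                  // top-n count, or undefined when unset
}
```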
@@ -1094,6 +1154,7 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doGenerate(options) {
+ var _a, _b, _c;
  const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -1114,11 +1175,16 @@ var OpenAICompletionLanguageModel = class {
  fetch: this.config.fetch
  });
  const choice = response.choices[0];
+ const providerMetadata = { openai: {} };
+ if (choice.logprobs != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
  return {
  content: [{ type: "text", text: choice.text }],
  usage: {
- inputTokens: response.usage.prompt_tokens,
- outputTokens: response.usage.completion_tokens
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  request: { body: args },
@@ -1127,6 +1193,7 @@ var OpenAICompletionLanguageModel = class {
  headers: responseHeaders,
  body: rawResponse
  },
+ providerMetadata,
  warnings
  };
  }
@@ -1153,9 +1220,11 @@ var OpenAICompletionLanguageModel = class {
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
+ const providerMetadata = { openai: {} };
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
  let isFirstChunk = true;
  return {
@@ -1186,11 +1255,15 @@ var OpenAICompletionLanguageModel = class {
  if (value.usage != null) {
  usage.inputTokens = value.usage.prompt_tokens;
  usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.total_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
  type: "text",
@@ -1202,6 +1275,7 @@ var OpenAICompletionLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
+ providerMetadata,
  usage
  });
  }
@@ -1212,6 +1286,11 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
+ var usageSchema = import_zod5.z.object({
+ prompt_tokens: import_zod5.z.number(),
+ completion_tokens: import_zod5.z.number(),
+ total_tokens: import_zod5.z.number()
+ });
  var openaiCompletionResponseSchema = import_zod5.z.object({
  id: import_zod5.z.string().nullish(),
  created: import_zod5.z.number().nullish(),
@@ -1219,13 +1298,15 @@ var openaiCompletionResponseSchema = import_zod5.z.object({
  choices: import_zod5.z.array(
  import_zod5.z.object({
  text: import_zod5.z.string(),
- finish_reason: import_zod5.z.string()
+ finish_reason: import_zod5.z.string(),
+ logprobs: import_zod5.z.object({
+ tokens: import_zod5.z.array(import_zod5.z.string()),
+ token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+ top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+ }).nullish()
  })
  ),
- usage: import_zod5.z.object({
- prompt_tokens: import_zod5.z.number(),
- completion_tokens: import_zod5.z.number()
- })
+ usage: usageSchema.nullish()
  });
  var openaiCompletionChunkSchema = import_zod5.z.union([
  import_zod5.z.object({
@@ -1236,13 +1317,15 @@ var openaiCompletionChunkSchema = import_zod5.z.union([
  import_zod5.z.object({
  text: import_zod5.z.string(),
  finish_reason: import_zod5.z.string().nullish(),
- index: import_zod5.z.number()
+ index: import_zod5.z.number(),
+ logprobs: import_zod5.z.object({
+ tokens: import_zod5.z.array(import_zod5.z.string()),
+ token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+ top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+ }).nullish()
  })
  ),
- usage: import_zod5.z.object({
- prompt_tokens: import_zod5.z.number(),
- completion_tokens: import_zod5.z.number()
- }).nullish()
+ usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
  ]);
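Unlike the chat shape, completion logprobs arrive as parallel arrays. Typed per the schema above, the payload stored in `providerMetadata.openai.logprobs` for completion models has this form:

```ts
type CompletionLogprobs = {
  tokens: string[];                                    // tokens[i] ...
  token_logprobs: number[];                            // ... has logprob token_logprobs[i]
  top_logprobs?: Array<Record<string, number>> | null; // per-position alternatives
};
```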
@@ -1413,12 +1496,23 @@ var OpenAIImageModel = class {
  timestamp: currentDate,
  modelId: this.modelId,
  headers: responseHeaders
+ },
+ providerMetadata: {
+ openai: {
+ images: response.data.map(
+ (item) => item.revised_prompt ? {
+ revisedPrompt: item.revised_prompt
+ } : null
+ )
+ }
  }
  };
  }
  };
  var openaiImageResponseSchema = import_zod8.z.object({
- data: import_zod8.z.array(import_zod8.z.object({ b64_json: import_zod8.z.string() }))
+ data: import_zod8.z.array(
+ import_zod8.z.object({ b64_json: import_zod8.z.string(), revised_prompt: import_zod8.z.string().optional() })
+ )
  });

  // src/openai-transcription-model.ts
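The image model now reports revised prompts through provider metadata, one entry per image (null when the API returned none). A consumer sketch, assuming the AI SDK's `experimental_generateImage` and that the image result exposes `providerMetadata` in this canary:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { providerMetadata } = await generateImage({
  model: openai.image('dall-e-3'),
  prompt: 'A watercolor painting of a fox in the snow',
});

// e.g. [{ revisedPrompt: 'A detailed watercolor painting of ...' }] or [null]
console.log(providerMetadata?.openai?.images);
```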
@@ -1431,25 +1525,25 @@ var openAITranscriptionProviderOptions = import_zod9.z.object({
  /**
  * Additional information to include in the transcription response.
  */
- include: import_zod9.z.array(import_zod9.z.string()).nullish(),
+ include: import_zod9.z.array(import_zod9.z.string()).optional(),
  /**
  * The language of the input audio in ISO-639-1 format.
  */
- language: import_zod9.z.string().nullish(),
+ language: import_zod9.z.string().optional(),
  /**
  * An optional text to guide the model's style or continue a previous audio segment.
  */
- prompt: import_zod9.z.string().nullish(),
+ prompt: import_zod9.z.string().optional(),
  /**
  * The sampling temperature, between 0 and 1.
  * @default 0
  */
- temperature: import_zod9.z.number().min(0).max(1).default(0).nullish(),
+ temperature: import_zod9.z.number().min(0).max(1).default(0).optional(),
  /**
  * The timestamp granularities to populate for this transcription.
  * @default ['segment']
  */
- timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).default(["segment"]).nullish()
+ timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).default(["segment"]).optional()
  });

  // src/openai-transcription-model.ts
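Switching these options from `.nullish()` to `.optional()` is a subtle breaking change for anyone passing explicit `null`s: zod's `.nullish()` accepts `null | undefined`, while `.optional()` accepts only `undefined`. For example:

```ts
import { z } from 'zod';

z.string().nullish().parse(null);  // ok: null was accepted before this change
z.string().optional().parse(null); // throws ZodError: omit the key (or pass undefined) instead
```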
@@ -2119,7 +2213,7 @@ var OpenAIResponsesLanguageModel = class {
  ])
  ),
  incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
- usage: usageSchema
+ usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
@@ -2173,7 +2267,10 @@ var OpenAIResponsesLanguageModel = class {
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens
+ outputTokens: response.usage.output_tokens,
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+ cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
  },
  request: { body },
  response: {
@@ -2185,9 +2282,7 @@ var OpenAIResponsesLanguageModel = class {
  },
  providerMetadata: {
  openai: {
- responseId: response.id,
- cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
- reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+ responseId: response.id
  }
  },
  warnings
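The Responses API model follows the same pattern: `totalTokens` is derived as `input_tokens + output_tokens`, reasoning and cached token counts move into `usage`, and `providerMetadata.openai` is reduced to the response id. A reading sketch (assuming the `openai.responses(...)` factory from this package and the AI SDK surfaces used above):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'Name three prime numbers.',
});

console.log(usage.totalTokens);                    // input_tokens + output_tokens
console.log(usage.reasoningTokens, usage.cachedInputTokens);
console.log(providerMetadata?.openai?.responseId); // only remaining metadata field
```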
@@ -2216,10 +2311,9 @@ var OpenAIResponsesLanguageModel = class {
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let cachedPromptTokens = null;
- let reasoningTokens = null;
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
@@ -2297,8 +2391,9 @@ var OpenAIResponsesLanguageModel = class {
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+ usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+ usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
@@ -2314,13 +2409,9 @@ var OpenAIResponsesLanguageModel = class {
  type: "finish",
  finishReason,
  usage,
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
- providerMetadata: {
- openai: {
- responseId,
- cachedPromptTokens,
- reasoningTokens
- }
+ providerMetadata: {
+ openai: {
+ responseId
  }
  }
  });
@@ -2332,7 +2423,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema = import_zod12.z.object({
+ var usageSchema2 = import_zod12.z.object({
  input_tokens: import_zod12.z.number(),
  input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
  output_tokens: import_zod12.z.number(),
@@ -2346,7 +2437,7 @@ var responseFinishedChunkSchema = import_zod12.z.object({
  type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
  response: import_zod12.z.object({
  incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
- usage: usageSchema
+ usage: usageSchema2
  })
  });
  var responseCreatedChunkSchema = import_zod12.z.object({