@ai-sdk/openai 2.0.0-canary.15 → 2.0.0-canary.16

This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
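The substantive change in this canary: token accounting moves off `providerMetadata.openai` and onto the standard `usage` result. `reasoningTokens` becomes a first-class usage field, `cachedPromptTokens` is renamed and moved to `usage.cachedInputTokens`, and a `totalTokens` field is added across the chat, completion, and responses models; streamed chat chunks additionally gain `logprobs` parsing. A minimal consumer-side sketch of the difference (the `generateText` call shape is assumed from the AI SDK v5 canary, not taken from this diff):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { usage, providerMetadata } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Hello!',
});

// canary.15: reasoning/cache counts were provider metadata:
//   providerMetadata?.openai?.reasoningTokens
//   providerMetadata?.openai?.cachedPromptTokens
// canary.16: they are standard usage fields, plus the new total:
console.log(usage.totalTokens, usage.reasoningTokens, usage.cachedInputTokens);
```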
@@ -529,7 +529,7 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
  const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -567,18 +567,12 @@ var OpenAIChatLanguageModel = class {
  const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
  const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
  const providerMetadata = { openai: {} };
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
- }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
  }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
  }
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
- }
  if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
  providerMetadata.openai.logprobs = choice.logprobs.content;
  }
@@ -587,7 +581,10 @@ var OpenAIChatLanguageModel = class {
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
  inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
- outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
  },
  request: { body },
  response: {
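The non-streaming chat result now reports five usage fields, each degrading to `undefined` when OpenAI omits the corresponding count. The compiled output above corresponds to roughly this source (a de-transpilation sketch; esbuild lowers `?.` and `??` into the `_a` through `_m` temporaries seen here):

```ts
// Stand-ins for the surrounding function's locals:
declare const response: {
  usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
};
declare const completionTokenDetails: { reasoning_tokens?: number } | undefined;
declare const promptTokenDetails: { cached_tokens?: number } | undefined;

const usage = {
  inputTokens: response.usage?.prompt_tokens ?? undefined,
  outputTokens: response.usage?.completion_tokens ?? undefined,
  totalTokens: response.usage?.total_tokens ?? undefined,
  reasoningTokens: completionTokenDetails?.reasoning_tokens ?? undefined,
  cachedInputTokens: promptTokenDetails?.cached_tokens ?? undefined,
};
```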
@@ -621,12 +618,12 @@ var OpenAIChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
  let isFirstChunk = true;
  const providerMetadata = { openai: {} };
@@ -637,7 +634,7 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -657,31 +654,25 @@ var OpenAIChatLanguageModel = class {
  });
  }
  if (value.usage != null) {
- const {
- prompt_tokens,
- completion_tokens,
- prompt_tokens_details,
- completion_tokens_details
- } = value.usage;
- usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
- usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
  }
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
  }
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
+ }
  if ((choice == null ? void 0 : choice.delta) == null) {
  return;
  }
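The streaming path is brought in line with `doGenerate`: per-chunk usage is written straight onto the `usage` accumulator, including the new `totalTokens`, `reasoningTokens`, and `cachedInputTokens`, while the prediction-token counters and the newly forwarded `logprobs` stay in provider metadata. Observed from the outside it looks roughly like this (assuming the v5 canary `streamText` API, which is not part of this diff):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({ model: openai('gpt-4o'), prompt: 'Hi' });
for await (const _ of result.textStream) {
  // drain the stream; usage arrives with the finish event
}
// now includes totalTokens, reasoningTokens, cachedInputTokens
console.log(await result.usage);
```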
@@ -708,7 +699,7 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
  throw new InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -719,12 +710,12 @@ var OpenAIChatLanguageModel = class {
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
  type: "tool-call-delta",
@@ -738,7 +729,7 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
+ toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -751,21 +742,21 @@ var OpenAIChatLanguageModel = class {
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
  }
  controller.enqueue({
  type: "tool-call-delta",
  toolCallType: "function",
  toolCallId: toolCall.id,
  toolName: toolCall.function.name,
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
+ toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
@@ -792,6 +783,7 @@ var OpenAIChatLanguageModel = class {
  var openaiTokenUsageSchema = z3.object({
  prompt_tokens: z3.number().nullish(),
  completion_tokens: z3.number().nullish(),
+ total_tokens: z3.number().nullish(),
  prompt_tokens_details: z3.object({
  cached_tokens: z3.number().nullish()
  }).nullish(),
@@ -863,6 +855,20 @@ var openaiChatChunkSchema = z3.union([
  })
  ).nullish()
  }).nullish(),
+ logprobs: z3.object({
+ content: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number(),
+ top_logprobs: z3.array(
+ z3.object({
+ token: z3.string(),
+ logprob: z3.number()
+ })
+ )
+ })
+ ).nullish()
+ }).nullish(),
  finish_reason: z3.string().nullish(),
  index: z3.number()
  })
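This block makes the chunk schema match what `openaiChatResponseSchema` already declared for non-streaming responses. Zod strips unknown keys by default, so before this addition any `logprobs` on a streamed choice were silently dropped during parsing; now they survive and are forwarded by the transform above, surfacing as `providerMetadata.openai.logprobs`. The shape, as a standalone type:

```ts
// Shape of the streamed logprobs content, per the schema above.
type LogprobEntry = {
  token: string;
  logprob: number;
  top_logprobs: Array<{ token: string; logprob: number }>;
};
type Logprobs = { content?: LogprobEntry[] | null } | null | undefined;
```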
@@ -1133,6 +1139,7 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doGenerate(options) {
+ var _a, _b, _c;
  const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -1160,8 +1167,9 @@ var OpenAICompletionLanguageModel = class {
  return {
  content: [{ type: "text", text: choice.text }],
  usage: {
- inputTokens: response.usage.prompt_tokens,
- outputTokens: response.usage.completion_tokens
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  request: { body: args },
@@ -1200,7 +1208,8 @@ var OpenAICompletionLanguageModel = class {
  const providerMetadata = { openai: {} };
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
  let isFirstChunk = true;
  return {
@@ -1231,6 +1240,7 @@ var OpenAICompletionLanguageModel = class {
  if (value.usage != null) {
  usage.inputTokens = value.usage.prompt_tokens;
  usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.total_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1261,6 +1271,11 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
+ var usageSchema = z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number(),
+ total_tokens: z5.number()
+ });
  var openaiCompletionResponseSchema = z5.object({
  id: z5.string().nullish(),
  created: z5.number().nullish(),
@@ -1276,10 +1291,7 @@ var openaiCompletionResponseSchema = z5.object({
  }).nullish()
  })
  ),
- usage: z5.object({
- prompt_tokens: z5.number(),
- completion_tokens: z5.number()
- })
+ usage: usageSchema.nullish()
  });
  var openaiCompletionChunkSchema = z5.union([
  z5.object({
@@ -1298,10 +1310,7 @@ var openaiCompletionChunkSchema = z5.union([
  }).nullish()
  })
  ),
- usage: z5.object({
- prompt_tokens: z5.number(),
- completion_tokens: z5.number()
- }).nullish()
+ usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
  ]);
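The two copies of the completion usage schema are deduplicated into one `usageSchema` that now also requires `total_tokens`, and the response schema's `usage` becomes `.nullish()`. That last change is why `doGenerate` grew the `_a`/`_b`/`_c` guards in the hunk above: a response without a usage block now parses to `usage: undefined` instead of being rejected, so the fields have to be read defensively. A self-contained illustration:

```ts
import { z } from 'zod';

const usageSchema = z.object({
  prompt_tokens: z.number(),
  completion_tokens: z.number(),
  total_tokens: z.number(),
});

const responseSchema = z.object({ usage: usageSchema.nullish() });

// A response with no usage block now parses instead of throwing:
const parsed = responseSchema.parse({});
console.log(parsed.usage?.prompt_tokens); // undefined, hence the new guards
```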
@@ -1518,25 +1527,25 @@ var openAITranscriptionProviderOptions = z9.object({
  /**
  * Additional information to include in the transcription response.
  */
- include: z9.array(z9.string()).nullish(),
+ include: z9.array(z9.string()).optional(),
  /**
  * The language of the input audio in ISO-639-1 format.
  */
- language: z9.string().nullish(),
+ language: z9.string().optional(),
  /**
  * An optional text to guide the model's style or continue a previous audio segment.
  */
- prompt: z9.string().nullish(),
+ prompt: z9.string().optional(),
  /**
  * The sampling temperature, between 0 and 1.
  * @default 0
  */
- temperature: z9.number().min(0).max(1).default(0).nullish(),
+ temperature: z9.number().min(0).max(1).default(0).optional(),
  /**
  * The timestamp granularities to populate for this transcription.
  * @default ['segment']
  */
- timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).nullish()
+ timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).optional()
  });
 
  // src/openai-transcription-model.ts
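The transcription provider options move from `.nullish()` to `.optional()`. These are not interchangeable in zod: `.optional()` admits only `undefined`, while `.nullish()` also admits `null`, so an explicit `null` for any of these options now fails validation:

```ts
import { z } from 'zod';

console.log(z.string().nullish().parse(null));              // null: accepted (canary.15 behavior)
console.log(z.string().optional().safeParse(null).success); // false: rejected (canary.16 behavior)
```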
@@ -2222,7 +2231,7 @@ var OpenAIResponsesLanguageModel = class {
  ])
  ),
  incomplete_details: z12.object({ reason: z12.string() }).nullable(),
- usage: usageSchema
+ usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
@@ -2276,7 +2285,10 @@ var OpenAIResponsesLanguageModel = class {
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens
+ outputTokens: response.usage.output_tokens,
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+ cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
  },
  request: { body },
  response: {
@@ -2288,9 +2300,7 @@ var OpenAIResponsesLanguageModel = class {
  },
  providerMetadata: {
  openai: {
- responseId: response.id,
- cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
- reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
+ responseId: response.id
  }
  },
  warnings
@@ -2319,10 +2329,9 @@ var OpenAIResponsesLanguageModel = class {
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let cachedPromptTokens = null;
- let reasoningTokens = null;
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
@@ -2400,8 +2409,9 @@ var OpenAIResponsesLanguageModel = class {
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+ usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+ usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
@@ -2417,13 +2427,9 @@ var OpenAIResponsesLanguageModel = class {
  type: "finish",
  finishReason,
  usage,
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
- providerMetadata: {
- openai: {
- responseId,
- cachedPromptTokens,
- reasoningTokens
- }
+ providerMetadata: {
+ openai: {
+ responseId
  }
  }
  });
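For the Responses API, the `finish` stream part now always carries `providerMetadata.openai.responseId`; previously the whole metadata object was only attached when cached or reasoning counts were present, and those counts now live on `usage` instead. Note that `totalTokens` is computed client-side as `input_tokens + output_tokens` rather than read from the response, and the `usageSchema` → `usageSchema2` rename below is presumably just the bundler disambiguating against the completion model's new `usageSchema`. A hypothetical consumer of the provider-level stream parts, matching the finish part enqueued above (type name and import path are assumptions, not part of this diff):

```ts
import type { LanguageModelV2StreamPart } from '@ai-sdk/provider';

// Logs the fields the finish part carries as of canary.16.
async function logFinish(stream: AsyncIterable<LanguageModelV2StreamPart>) {
  for await (const part of stream) {
    if (part.type === 'finish') {
      console.log(part.providerMetadata?.openai?.responseId);
      console.log(part.usage.reasoningTokens, part.usage.cachedInputTokens);
    }
  }
}
```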
@@ -2435,7 +2441,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema = z12.object({
+ var usageSchema2 = z12.object({
  input_tokens: z12.number(),
  input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
  output_tokens: z12.number(),
@@ -2449,7 +2455,7 @@ var responseFinishedChunkSchema = z12.object({
  type: z12.enum(["response.completed", "response.incomplete"]),
  response: z12.object({
  incomplete_details: z12.object({ reason: z12.string() }).nullish(),
- usage: usageSchema
+ usage: usageSchema2
  })
  });
  var responseCreatedChunkSchema = z12.object({