@ai-sdk/openai 1.0.12 → 1.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in the public registry.
package/dist/index.mjs CHANGED
@@ -514,7 +514,7 @@ var OpenAIChatLanguageModel = class {
     }
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
+    var _a, _b, _c, _d, _e, _f, _g, _h;
     const { args: body, warnings } = this.getArgs(options);
     const { responseHeaders, value: response } = await postJsonToApi({
       url: this.config.url({
@@ -532,18 +532,23 @@ var OpenAIChatLanguageModel = class {
     });
     const { messages: rawPrompt, ...rawSettings } = body;
     const choice = response.choices[0];
-    let providerMetadata;
-    if (((_b = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null || ((_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens_details) == null ? void 0 : _d.cached_tokens) != null) {
-      providerMetadata = { openai: {} };
-      if (((_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null) {
-        providerMetadata.openai.reasoningTokens = (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens_details) == null ? void 0 : _h.reasoning_tokens;
-      }
-      if (((_j = (_i = response.usage) == null ? void 0 : _i.prompt_tokens_details) == null ? void 0 : _j.cached_tokens) != null) {
-        providerMetadata.openai.cachedPromptTokens = (_l = (_k = response.usage) == null ? void 0 : _k.prompt_tokens_details) == null ? void 0 : _l.cached_tokens;
-      }
+    const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
+    const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+    const providerMetadata = { openai: {} };
+    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
+      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
+    }
+    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
+      providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
+    }
+    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
+      providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
+    }
+    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
+      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
     }
     return {
-      text: (_m = choice.message.content) != null ? _m : void 0,
+      text: (_c = choice.message.content) != null ? _c : void 0,
       toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
         {
           toolCallType: "function",
@@ -551,7 +556,7 @@ var OpenAIChatLanguageModel = class {
           toolName: choice.message.function_call.name,
           args: choice.message.function_call.arguments
         }
-      ] : (_n = choice.message.tool_calls) == null ? void 0 : _n.map((toolCall) => {
+      ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
         var _a2;
         return {
           toolCallType: "function",
@@ -562,8 +567,8 @@ var OpenAIChatLanguageModel = class {
       }),
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: (_p = (_o = response.usage) == null ? void 0 : _o.prompt_tokens) != null ? _p : NaN,
-        completionTokens: (_r = (_q = response.usage) == null ? void 0 : _q.completion_tokens) != null ? _r : NaN
+        promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
+        completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
       },
       rawCall: { rawPrompt, rawSettings },
       rawResponse: { headers: responseHeaders },
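
Net effect of the doGenerate hunks above: providerMetadata is now always initialized to { openai: {} } rather than left undefined when no token details arrive, and two new fields, acceptedPredictionTokens and rejectedPredictionTokens, are copied out of OpenAI's completion_tokens_details (reported for Predicted Outputs requests). A minimal consumer-side sketch of where these fields surface, assuming the AI SDK 4.0-era generateText API that pairs with this provider version; the model id and prompt are placeholders:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text, experimental_providerMetadata } = await generateText({
  model: openai("gpt-4o"), // placeholder model id
  prompt: "Hello!"
});

// Keys written by the code above, when the API reports them:
const meta = experimental_providerMetadata?.openai;
console.log(meta?.reasoningTokens);
console.log(meta?.cachedPromptTokens);
console.log(meta?.acceptedPredictionTokens); // new in 1.0.13
console.log(meta?.rejectedPredictionTokens); // new in 1.0.13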
@@ -642,12 +647,12 @@ var OpenAIChatLanguageModel = class {
     let logprobs;
     let isFirstChunk = true;
     const { useLegacyFunctionCalling } = this.settings;
-    let providerMetadata;
+    const providerMetadata = { openai: {} };
     return {
       stream: response.pipeThrough(
         new TransformStream({
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -667,22 +672,27 @@ var OpenAIChatLanguageModel = class {
               });
             }
             if (value.usage != null) {
-              usage = {
-                promptTokens: (_a = value.usage.prompt_tokens) != null ? _a : void 0,
-                completionTokens: (_b = value.usage.completion_tokens) != null ? _b : void 0
-              };
               const {
-                completion_tokens_details: completionTokenDetails,
-                prompt_tokens_details: promptTokenDetails
+                prompt_tokens,
+                completion_tokens,
+                prompt_tokens_details,
+                completion_tokens_details
               } = value.usage;
-              if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null || (promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-                providerMetadata = { openai: {} };
-                if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
-                  providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
-                }
-                if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
-                  providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
-                }
+              usage = {
+                promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
+                completionTokens: completion_tokens != null ? completion_tokens : void 0
+              };
+              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
+                providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+              }
+              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
+                providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+              }
+              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
+                providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+              }
+              if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
+                providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
               }
             }
             const choice = value.choices[0];
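
The streaming path gets the matching change: providerMetadata now starts as { openai: {} } outside the TransformStream and is filled in when the usage chunk arrives, and the raw usage fields are destructured directly instead of being read through the minifier's _a/_b temporaries. A sketch of reading the same metadata once a stream finishes, again assuming the AI SDK 4.0-era API in which experimental_providerMetadata is a promise that resolves after the finish event; model id and prompt are placeholders:

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({
  model: openai("gpt-4o"), // placeholder model id
  prompt: "Hello!"
});

// Drain the stream; token usage arrives with the final chunk.
for await (const delta of result.textStream) {
  process.stdout.write(delta);
}

// Resolves once the stream has finished:
console.log(await result.experimental_providerMetadata);
// e.g. { openai: { acceptedPredictionTokens: 12, rejectedPredictionTokens: 3 } }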
@@ -730,7 +740,7 @@ var OpenAIChatLanguageModel = class {
                       message: `Expected 'id' to be a string.`
                     });
                   }
-                  if (((_c = toolCallDelta.function) == null ? void 0 : _c.name) == null) {
+                  if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
                     throw new InvalidResponseDataError({
                       data: toolCallDelta,
                       message: `Expected 'function.name' to be a string.`
@@ -741,12 +751,12 @@ var OpenAIChatLanguageModel = class {
                     type: "function",
                     function: {
                       name: toolCallDelta.function.name,
-                      arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
+                      arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
                     },
                     hasFinished: false
                   };
                   const toolCall2 = toolCalls[index];
-                  if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null) {
+                  if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
                     if (toolCall2.function.arguments.length > 0) {
                       controller.enqueue({
                         type: "tool-call-delta",
@@ -760,7 +770,7 @@ var OpenAIChatLanguageModel = class {
                       controller.enqueue({
                         type: "tool-call",
                         toolCallType: "function",
-                        toolCallId: (_g = toolCall2.id) != null ? _g : generateId(),
+                        toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
                         toolName: toolCall2.function.name,
                         args: toolCall2.function.arguments
                       });
@@ -773,21 +783,21 @@ var OpenAIChatLanguageModel = class {
                 if (toolCall.hasFinished) {
                   continue;
                 }
-                if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
-                  toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
+                if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
+                  toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
                 }
                 controller.enqueue({
                   type: "tool-call-delta",
                   toolCallType: "function",
                   toolCallId: toolCall.id,
                   toolName: toolCall.function.name,
-                  argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
+                  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
                 });
-                if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+                if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                   controller.enqueue({
                     type: "tool-call",
                     toolCallType: "function",
-                    toolCallId: (_n = toolCall.id) != null ? _n : generateId(),
+                    toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
                     toolName: toolCall.function.name,
                     args: toolCall.function.arguments
                   });
@@ -825,7 +835,9 @@ var openaiTokenUsageSchema = z2.object({
     cached_tokens: z2.number().nullish()
   }).nullish(),
   completion_tokens_details: z2.object({
-    reasoning_tokens: z2.number().nullish()
+    reasoning_tokens: z2.number().nullish(),
+    accepted_prediction_tokens: z2.number().nullish(),
+    rejected_prediction_tokens: z2.number().nullish()
   }).nullish()
 }).nullish();
 var openaiChatResponseSchema = z2.object({
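
The schema hunk widens openaiTokenUsageSchema so the new completion_tokens_details fields survive parsing (zod strips unknown keys by default, so 1.0.12 silently dropped them). A standalone sketch of the extended shape, using zod's usual import name in place of the bundle's z2 alias and assuming the surrounding prompt_tokens / completion_tokens fields from the unchanged part of the schema:

import { z } from "zod";

const tokenUsageSchema = z.object({
  prompt_tokens: z.number().nullish(),
  completion_tokens: z.number().nullish(),
  prompt_tokens_details: z.object({
    cached_tokens: z.number().nullish()
  }).nullish(),
  completion_tokens_details: z.object({
    reasoning_tokens: z.number().nullish(),
    accepted_prediction_tokens: z.number().nullish(), // new in 1.0.13
    rejected_prediction_tokens: z.number().nullish() // new in 1.0.13
  }).nullish()
}).nullish();

// A usage payload from a Predicted Outputs response now parses cleanly:
tokenUsageSchema.parse({
  prompt_tokens: 100,
  completion_tokens: 50,
  completion_tokens_details: {
    accepted_prediction_tokens: 40,
    rejected_prediction_tokens: 10
  }
});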