@ai-sdk/openai 2.0.1 → 2.0.3

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -692,7 +692,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -726,8 +726,17 @@ var OpenAIChatLanguageModel = class {
           input: toolCall.function.arguments
         });
       }
-    const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
-    const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+    for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
+      content.push({
+        type: "source",
+        sourceType: "url",
+        id: generateId(),
+        url: annotation.url,
+        title: annotation.title
+      });
+    }
+    const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
+    const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
     const providerMetadata = { openai: {} };
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
@@ -735,18 +744,18 @@ var OpenAIChatLanguageModel = class {
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+    if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
       providerMetadata.openai.logprobs = choice.logprobs.content;
     }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
-        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
-        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
-        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+        inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
+        outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
+        totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
+        reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
+        cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
       },
       request: { body },
       response: {
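
The hunks above extend doGenerate to map url_citation annotations on the assistant message into "source" content parts. A minimal sketch of the part shape produced, with fields taken from the content.push() call in the diff (the type name UrlSourcePart is illustrative, not part of the package):

// Illustrative name; the fields mirror the content.push() call above.
type UrlSourcePart = {
  type: "source";
  sourceType: "url";
  id: string;    // freshly generated via generateId()
  url: string;   // annotation.url from the OpenAI response
  title: string; // annotation.title from the OpenAI response
};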
@@ -943,6 +952,17 @@ var OpenAIChatLanguageModel = class {
           }
         }
       }
+      if (delta.annotations != null) {
+        for (const annotation of delta.annotations) {
+          controller.enqueue({
+            type: "source",
+            sourceType: "url",
+            id: generateId(),
+            url: annotation.url,
+            title: annotation.title
+          });
+        }
+      }
     },
     flush(controller) {
       if (isActiveText) {
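
The streaming path mirrors the non-streaming change: each url_citation annotation arriving on a delta is enqueued as a "source" stream part. A minimal consumption sketch, assuming the AI SDK v5 streamText API; the model id and prompt are illustrative:

import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";

const result = streamText({
  model: openai("gpt-4o-search-preview"), // illustrative model id
  prompt: "Summarize recent TypeScript news and cite your sources.",
});

for await (const part of result.fullStream) {
  if (part.type === "source") {
    // Each url_citation annotation surfaces here as it streams in.
    console.log(part.url, part.title);
  }
}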
@@ -993,6 +1013,15 @@ var openaiChatResponseSchema = z5.object({
           arguments: z5.string()
         })
       })
+      ).nullish(),
+      annotations: z5.array(
+        z5.object({
+          type: z5.literal("url_citation"),
+          start_index: z5.number(),
+          end_index: z5.number(),
+          url: z5.string(),
+          title: z5.string()
+        })
       ).nullish()
     }),
     index: z5.number(),
@@ -1035,6 +1064,15 @@ var openaiChatChunkSchema = z5.union([
           arguments: z5.string().nullish()
         })
       })
+      ).nullish(),
+      annotations: z5.array(
+        z5.object({
+          type: z5.literal("url_citation"),
+          start_index: z5.number(),
+          end_index: z5.number(),
+          url: z5.string(),
+          title: z5.string()
+        })
       ).nullish()
     }).nullish(),
     logprobs: z5.object({
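
Both the response schema and the chunk schema gain the same optional annotations array, validating the raw url_citation payload from the Chat Completions API; start_index and end_index are accepted by the schema, but only url and title are propagated into source parts. A minimal end-to-end sketch, assuming the AI SDK v5 generateText API (model id and prompt are illustrative):

import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const { sources } = await generateText({
  model: openai("gpt-4o-search-preview"), // illustrative model id
  prompt: "What is new in the AI SDK? Cite your sources.",
});

for (const source of sources) {
  if (source.sourceType === "url") {
    console.log(source.title, source.url);
  }
}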