@ai-sdk/openai 2.0.1 → 2.0.2

This diff compares publicly available package versions as published to their public registry. It is provided for informational purposes only.
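Judging from the hunks below, the substantive change in 2.0.2 is support for OpenAI `url_citation` annotations: `doGenerate` now maps annotations on the response message to `source` content parts, `doStream` forwards annotations on each delta as `source` stream parts, and the response and chunk zod schemas gain a matching optional `annotations` field. The remaining +/- lines are the minifier renumbering its `_a`-style temporaries to make room for one more.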
```diff
@@ -716,7 +716,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -750,8 +750,17 @@ var OpenAIChatLanguageModel = class {
         input: toolCall.function.arguments
       });
     }
-    const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
-    const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+    for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
+      content.push({
+        type: "source",
+        sourceType: "url",
+        id: (0, import_provider_utils5.generateId)(),
+        url: annotation.url,
+        title: annotation.title
+      });
+    }
+    const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
+    const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
     const providerMetadata = { openai: {} };
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
```
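Here `doGenerate` picks up the actual feature: every `url_citation` annotation on the chosen message becomes a `source` content part. De-minified, the new loop amounts to the following sketch (the type aliases are illustrative, not the SDK's real type names; `generateId` is the same helper the bundle imports from `@ai-sdk/provider-utils`):

```ts
import { generateId } from '@ai-sdk/provider-utils';

// Illustrative shapes; the SDK defines its own types for these.
type UrlCitationAnnotation = {
  type: 'url_citation';
  start_index: number;
  end_index: number;
  url: string;
  title: string;
};

type SourcePart = {
  type: 'source';
  sourceType: 'url';
  id: string;
  url: string;
  title: string;
};

// Mirror of the new doGenerate loop: one `source` content part per
// annotation, each with a freshly generated id.
function annotationsToSources(
  annotations: UrlCitationAnnotation[] | null | undefined,
): SourcePart[] {
  return (annotations ?? []).map((annotation) => ({
    type: 'source',
    sourceType: 'url',
    id: generateId(),
    url: annotation.url,
    title: annotation.title,
  }));
}
```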
```diff
@@ -759,18 +768,18 @@ var OpenAIChatLanguageModel = class {
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+    if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
       providerMetadata.openai.logprobs = choice.logprobs.content;
     }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
-        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
-        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
-        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+        inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
+        outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
+        totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
+        reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
+        cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
       },
       request: { body },
       response: {
```
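The rest of this hunk is mechanical fallout: the new `_c` temporary for the annotations shifts every later minified placeholder up by one letter (`_e` → `_f` for logprobs, `_g`…`_m` → `_h`…`_n` for usage), which is also why the `var` declaration in the first hunk gains `_n`. The usage mapping itself is unchanged; the guard chains are just compiled optional chaining, as in this sketch (the `ChatUsage` interface is illustrative):

```ts
// Illustrative shape covering the fields the bundle reads.
interface ChatUsage {
  prompt_tokens?: number;
  completion_tokens?: number;
  total_tokens?: number;
  completion_tokens_details?: { reasoning_tokens?: number };
  prompt_tokens_details?: { cached_tokens?: number };
}

// (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0
// is what `usage?.prompt_tokens ?? undefined` compiles to.
function mapUsage(usage: ChatUsage | undefined) {
  return {
    inputTokens: usage?.prompt_tokens ?? undefined,
    outputTokens: usage?.completion_tokens ?? undefined,
    totalTokens: usage?.total_tokens ?? undefined,
    reasoningTokens: usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
    cachedInputTokens: usage?.prompt_tokens_details?.cached_tokens ?? undefined,
  };
}
```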
```diff
@@ -967,6 +976,17 @@ var OpenAIChatLanguageModel = class {
             }
           }
         }
+        if (delta.annotations != null) {
+          for (const annotation of delta.annotations) {
+            controller.enqueue({
+              type: "source",
+              sourceType: "url",
+              id: (0, import_provider_utils5.generateId)(),
+              url: annotation.url,
+              title: annotation.title
+            });
+          }
+        }
       },
       flush(controller) {
         if (isActiveText) {
```
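`doStream` gets the mirror-image change: annotations arriving on a streamed delta are forwarded immediately, each as its own `source` stream part. A minimal sketch of the equivalent transform step, with an illustrative controller type:

```ts
import { generateId } from '@ai-sdk/provider-utils';

type UrlCitationAnnotation = {
  type: 'url_citation';
  start_index: number;
  end_index: number;
  url: string;
  title: string;
};

// Mirror of the new doStream branch: unlike doGenerate, parts are pushed
// onto the stream as deltas arrive rather than collected into `content`.
function enqueueAnnotationSources(
  controller: { enqueue: (part: unknown) => void },
  annotations: UrlCitationAnnotation[] | null | undefined,
): void {
  for (const annotation of annotations ?? []) {
    controller.enqueue({
      type: 'source',
      sourceType: 'url',
      id: generateId(),
      url: annotation.url,
      title: annotation.title,
    });
  }
}
```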
```diff
@@ -1017,6 +1037,15 @@ var openaiChatResponseSchema = import_v45.z.object({
           arguments: import_v45.z.string()
         })
       })
+    ).nullish(),
+    annotations: import_v45.z.array(
+      import_v45.z.object({
+        type: import_v45.z.literal("url_citation"),
+        start_index: import_v45.z.number(),
+        end_index: import_v45.z.number(),
+        url: import_v45.z.string(),
+        title: import_v45.z.string()
+      })
     ).nullish()
   }),
   index: import_v45.z.number(),
```
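Un-bundled, the schema addition is plain zod (`import_v45` is just the bundler's alias for the zod module). The same fragment is added to both the response schema here and the chunk schema below:

```ts
import { z } from 'zod';

// Standalone version of the fragment added to both schemas.
const urlCitationAnnotationSchema = z.object({
  type: z.literal('url_citation'),
  start_index: z.number(),
  end_index: z.number(),
  url: z.string(),
  title: z.string(),
});

// On the message (and on streamed deltas): an optional annotations array.
const annotationsField = z.array(urlCitationAnnotationSchema).nullish();
```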
```diff
@@ -1059,6 +1088,15 @@ var openaiChatChunkSchema = import_v45.z.union([
           arguments: import_v45.z.string().nullish()
         })
       })
+    ).nullish(),
+    annotations: import_v45.z.array(
+      import_v45.z.object({
+        type: import_v45.z.literal("url_citation"),
+        start_index: import_v45.z.number(),
+        end_index: import_v45.z.number(),
+        url: import_v45.z.string(),
+        title: import_v45.z.string()
+      })
     ).nullish()
   }).nullish(),
   logprobs: import_v45.z.object({
```
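For SDK users the net effect is that URL citations from web-search-capable chat models should now surface as sources. A hedged end-to-end sketch; the model id and the `sources` field on the `generateText` result are assumptions about the public AI SDK surface, not something this diff shows:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text, sources } = await generateText({
  model: openai('gpt-4o-search-preview'), // assumed: a web-search chat model
  prompt: 'What changed in the TypeScript 5.5 release? Cite sources.',
});

console.log(text);
for (const source of sources) {
  // Each url_citation annotation arrives as a url source part.
  if (source.sourceType === 'url') {
    console.log(`${source.title}: ${source.url}`);
  }
}
```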