@ai-sdk/openai 2.0.1 → 2.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
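
In short: these versions add handling for OpenAI url_citation message annotations. In both the non-streaming (doGenerate) and streaming paths, annotations are now mapped to "source" content parts carrying the cited URL and title, and the chat response and chunk Zod schemas are extended to accept the new annotations field.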
package/dist/index.mjs CHANGED
@@ -698,7 +698,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -732,8 +732,17 @@ var OpenAIChatLanguageModel = class {
         input: toolCall.function.arguments
       });
     }
-    const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
-    const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+    for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
+      content.push({
+        type: "source",
+        sourceType: "url",
+        id: generateId(),
+        url: annotation.url,
+        title: annotation.title
+      });
+    }
+    const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
+    const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
     const providerMetadata = { openai: {} };
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
@@ -741,18 +750,18 @@ var OpenAIChatLanguageModel = class {
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
     }
-    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+    if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
       providerMetadata.openai.logprobs = choice.logprobs.content;
     }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
-        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
-        totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
-        reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
-        cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+        inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
+        outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
+        totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
+        reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
+        cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
       },
       request: { body },
       response: {
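
As a sketch of what this enables on the consumer side (not part of the diff, and assuming the public AI SDK v5 generateText API with its sources accessor; the model id below is only an illustration):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Hypothetical usage: a web-search-capable model that returns
// url_citation annotations, which the provider now maps to
// source content parts on the result.
const result = await generateText({
  model: openai("gpt-4o-search-preview"), // example model id, an assumption
  prompt: "What changed in the latest AI SDK release?"
});

for (const source of result.sources) {
  if (source.sourceType === "url") {
    console.log(source.title, source.url);
  }
}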
@@ -949,6 +958,17 @@ var OpenAIChatLanguageModel = class {
             }
           }
         }
+        if (delta.annotations != null) {
+          for (const annotation of delta.annotations) {
+            controller.enqueue({
+              type: "source",
+              sourceType: "url",
+              id: generateId(),
+              url: annotation.url,
+              title: annotation.title
+            });
+          }
+        }
       },
       flush(controller) {
         if (isActiveText) {
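
The streaming path enqueues the same source parts, so they should surface on the stream as well. A minimal sketch, assuming the AI SDK v5 streamText API and its fullStream iterator (neither is shown in this diff):

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({
  model: openai("gpt-4o-search-preview"), // example model id, an assumption
  prompt: "Summarize today's headlines with citations."
});

// Source parts arrive interleaved with text deltas as annotations
// stream in; each carries the cited url and title.
for await (const part of result.fullStream) {
  if (part.type === "source" && part.sourceType === "url") {
    console.log("cited:", part.title, part.url);
  }
}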
@@ -999,6 +1019,15 @@ var openaiChatResponseSchema = z5.object({
               arguments: z5.string()
             })
           })
+        ).nullish(),
+        annotations: z5.array(
+          z5.object({
+            type: z5.literal("url_citation"),
+            start_index: z5.number(),
+            end_index: z5.number(),
+            url: z5.string(),
+            title: z5.string()
+          })
         ).nullish()
       }),
       index: z5.number(),
@@ -1041,6 +1070,15 @@ var openaiChatChunkSchema = z5.union([
                 arguments: z5.string().nullish()
               })
             })
+          ).nullish(),
+          annotations: z5.array(
+            z5.object({
+              type: z5.literal("url_citation"),
+              start_index: z5.number(),
+              end_index: z5.number(),
+              url: z5.string(),
+              title: z5.string()
+            })
           ).nullish()
         }).nullish(),
         logprobs: z5.object({
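
For reference, the annotation shape both schemas now accept, written as a standalone sketch (plain zod import instead of the bundled z5 alias; the sample payload values are illustrative):

import { z } from "zod";

// Mirrors the annotations entry added to openaiChatResponseSchema
// and openaiChatChunkSchema above.
const urlCitationAnnotation = z.object({
  type: z.literal("url_citation"),
  start_index: z.number(),
  end_index: z.number(),
  url: z.string(),
  title: z.string()
});

// Example payload as it appears on choice.message.annotations:
console.log(
  urlCitationAnnotation.parse({
    type: "url_citation",
    start_index: 0,
    end_index: 42,
    url: "https://example.com/article",
    title: "Example article"
  })
);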