@ai-sdk/openai 2.0.0 → 2.0.2

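The changes fall into two groups: the chat model now maps OpenAI url_citation annotations to source content parts (in both the generate and stream paths, with matching schema additions), and the Responses API model now recognizes provider-executed file_search_call output items.
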
@@ -716,7 +716,7 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
  const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
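
The only functional change in this hunk is one extra temporary, _n. The _a.._n variables are scratch slots generated when the bundler lowers optional chaining (?.) and nullish coalescing (??) for older targets, so adding one more lowered expression inside doGenerate renumbers every temporary after it; that renumbering accounts for most of the -/+ churn in the hunks below. A minimal illustration of the lowering (not code from this package):

// source:
//   const cached = response.usage?.prompt_tokens ?? undefined;
// lowered output:
var _a, _b;
const cached = (_b = (_a = response.usage) == null ? void 0 : _a.prompt_tokens) != null ? _b : void 0;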
@@ -750,8 +750,17 @@ var OpenAIChatLanguageModel = class {
  input: toolCall.function.arguments
  });
  }
- const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
- const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+ for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
+ content.push({
+ type: "source",
+ sourceType: "url",
+ id: (0, import_provider_utils5.generateId)(),
+ url: annotation.url,
+ title: annotation.title
+ });
+ }
+ const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
+ const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
  const providerMetadata = { openai: {} };
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
@@ -759,18 +768,18 @@ var OpenAIChatLanguageModel = class {
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
  }
- if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+ if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
  providerMetadata.openai.logprobs = choice.logprobs.content;
  }
  return {
  content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
- outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
- totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
- reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
- cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+ inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
+ outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
+ totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
+ reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
+ cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
  },
  request: { body },
  response: {
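
Taken together, the two hunks above are the substantive chat-model change: url_citation annotations on the completion message are now mapped to source content parts (generated id, cited url, title), and the usage temporaries shift by one letter as a result. A sketch of consuming the mapped sources; the model id and prompt are placeholders, and this assumes the AI SDK v5 generateText API:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  // placeholder: any chat model that emits url_citation annotations,
  // e.g. the web-search preview models
  model: openai.chat('gpt-4o-search-preview'),
  prompt: 'What changed in the AI SDK this week? Cite sources.',
});

// each url_citation annotation surfaces as a source part
for (const source of result.sources) {
  if (source.sourceType === 'url') {
    console.log(source.title, source.url);
  }
}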
@@ -967,6 +976,17 @@ var OpenAIChatLanguageModel = class {
  }
  }
  }
+ if (delta.annotations != null) {
+ for (const annotation of delta.annotations) {
+ controller.enqueue({
+ type: "source",
+ sourceType: "url",
+ id: (0, import_provider_utils5.generateId)(),
+ url: annotation.url,
+ title: annotation.title
+ });
+ }
+ }
  },
  flush(controller) {
  if (isActiveText) {
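
The streaming path gets the same treatment: delta.annotations entries are enqueued as source stream parts as they arrive. A matching sketch for the streaming side, under the same assumptions as above; the exact part shape is defined by the ai package's TextStreamPart type, and here mirrors what the provider enqueues:

import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.chat('gpt-4o-search-preview'), // placeholder model id
  prompt: 'Summarize recent AI news with citations.',
});

for await (const part of result.fullStream) {
  if (part.type === 'source' && part.sourceType === 'url') {
    console.log(part.title, part.url);
  }
}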
@@ -1017,6 +1037,15 @@ var openaiChatResponseSchema = import_v45.z.object({
  arguments: import_v45.z.string()
  })
  })
+ ).nullish(),
+ annotations: import_v45.z.array(
+ import_v45.z.object({
+ type: import_v45.z.literal("url_citation"),
+ start_index: import_v45.z.number(),
+ end_index: import_v45.z.number(),
+ url: import_v45.z.string(),
+ title: import_v45.z.string()
+ })
  ).nullish()
  }),
  index: import_v45.z.number(),
@@ -1059,6 +1088,15 @@ var openaiChatChunkSchema = import_v45.z.union([
  arguments: import_v45.z.string().nullish()
  })
  })
+ ).nullish(),
+ annotations: import_v45.z.array(
+ import_v45.z.object({
+ type: import_v45.z.literal("url_citation"),
+ start_index: import_v45.z.number(),
+ end_index: import_v45.z.number(),
+ url: import_v45.z.string(),
+ title: import_v45.z.string()
+ })
  ).nullish()
  }).nullish(),
  logprobs: import_v45.z.object({
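
The same annotations validator is added to both the response schema (non-streaming) and the chunk schema (streaming). Its standalone equivalent, with a plain zod import in place of the bundled import_v45 alias:

import { z } from 'zod';

const urlCitationSchema = z.object({
  type: z.literal('url_citation'),
  start_index: z.number(), // character offsets into the message text
  end_index: z.number(),
  url: z.string(),
  title: z.string(),
});

// nullish: the annotations array may be absent or null
const annotationsSchema = z.array(urlCitationSchema).nullish();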
@@ -2531,6 +2569,11 @@ var OpenAIResponsesLanguageModel = class {
  id: import_v415.z.string(),
  status: import_v415.z.string().optional()
  }),
+ import_v415.z.object({
+ type: import_v415.z.literal("file_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string().optional()
+ }),
  import_v415.z.object({
  type: import_v415.z.literal("reasoning"),
  id: import_v415.z.string(),
@@ -2657,6 +2700,26 @@ var OpenAIResponsesLanguageModel = class {
  });
  break;
  }
+ case "file_search_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "file_search",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "file_search",
+ result: {
+ type: "file_search_tool_result",
+ status: part.status || "completed"
+ },
+ providerExecuted: true
+ });
+ break;
+ }
  }
  }
  return {
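
This new case maps provider-executed file_search_call output items to a paired tool-call/tool-result in the content, so file search appears in results like any other tool invocation (with providerExecuted: true and a minimal result carrying the call status). A sketch of how this might be exercised; it assumes the openai.tools.fileSearch helper with a pre-created vector store id, and option names may differ in your version:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.responses('gpt-4o-mini'),
  prompt: 'What do my documents say about quarterly revenue?',
  tools: {
    // provider-executed tool; 'vs_123' is a placeholder vector store id
    file_search: openai.tools.fileSearch({ vectorStoreIds: ['vs_123'] }),
  },
});

// file_search shows up in toolCalls/toolResults with providerExecuted: true
console.log(result.toolCalls, result.toolResults);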
@@ -3033,6 +3096,11 @@ var responseOutputItemAddedSchema = import_v415.z.object({
  type: import_v415.z.literal("computer_call"),
  id: import_v415.z.string(),
  status: import_v415.z.string()
+ }),
+ import_v415.z.object({
+ type: import_v415.z.literal("file_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.string()
  })
  ])
  });
@@ -3066,6 +3134,11 @@ var responseOutputItemDoneSchema = import_v415.z.object({
  type: import_v415.z.literal("computer_call"),
  id: import_v415.z.string(),
  status: import_v415.z.literal("completed")
+ }),
+ import_v415.z.object({
+ type: import_v415.z.literal("file_search_call"),
+ id: import_v415.z.string(),
+ status: import_v415.z.literal("completed")
  })
  ])
  });
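
Finally, the response.output_item.added and response.output_item.done event schemas accept the same file_search_call item shape, so the streaming parser recognizes provider-executed file search items at both the start and the end of their lifecycle.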