@ai-sdk/openai 2.0.14 → 2.0.16

This diff compares two publicly available versions of the package as published to its public registry. It is provided for informational purposes only and reflects the changes between the published versions.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
  # @ai-sdk/openai

+ ## 2.0.16
+
+ ### Patch Changes
+
+ - Updated dependencies [68751f9]
+   - @ai-sdk/provider-utils@3.0.4
+
+ ## 2.0.15
+
+ ### Patch Changes
+
+ - a4bef93: feat(provider/openai): expose web search queries in responses api
+ - 6ed34cb: refactor(openai): consolidate model config into `getResponsesModelConfig()`
+
+   https://github.com/vercel/ai/pull/8038
+
  ## 2.0.14

  ### Patch Changes
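
The headline change in 2.0.15 is that provider-executed web searches in the Responses API now report the query they ran. A hedged consumer-side sketch of what that means for users (`generateText` and the `webSearchPreview` tool helper are the AI SDK's documented API; the model id is only an example):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-4o-mini'),
  tools: { web_search_preview: openai.tools.webSearchPreview({}) },
  prompt: 'What happened in tech news today?',
});

// Before 2.0.15, provider-executed web search tool calls always had input: "".
// Now the executed query is passed through when OpenAI reports it.
for (const part of result.content) {
  if (part.type === 'tool-call' && part.toolName === 'web_search_preview') {
    console.log('searched for:', part.input);
  }
}
```

The diff below shows where this lands in `dist/index.js`, along with the `getResponsesModelConfig()` consolidation from 6ed34cb.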
package/dist/index.js CHANGED
@@ -2287,7 +2287,7 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  }
- if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
@@ -2295,7 +2295,7 @@ var OpenAIResponsesLanguageModel = class {
  });
  delete baseArgs.service_tier;
  }
- if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
  warnings.push({
  type: "unsupported-setting",
  setting: "serviceTier",
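
Both hunks above are mechanical fallout of the 6ed34cb refactor: the serviceTier guards now read flags precomputed by `getResponsesModelConfig()` instead of calling standalone helpers with the model id. Caller-visible behavior is unchanged: an unsupported tier is deleted from the request body and reported as a warning. A hedged sketch of triggering that path (the model id is just an example of a model without flex support per the predicate in this diff):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// gpt-5-chat models do not support flex processing, so the serviceTier
// option is dropped from the request and a warning is emitted instead
// of an error.
const { warnings } = await generateText({
  model: openai.responses('gpt-5-chat-latest'),
  prompt: 'Hello!',
  providerOptions: {
    openai: { serviceTier: 'flex' },
  },
});

console.log(warnings);
// → [{ type: 'unsupported-setting', setting: 'serviceTier', ... }]
```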
@@ -2322,7 +2322,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
  const { args: body, warnings } = await this.getArgs(options);
  const url = this.config.url({
  path: "/responses",
@@ -2388,7 +2388,11 @@ var OpenAIResponsesLanguageModel = class {
  import_v413.z.object({
  type: import_v413.z.literal("web_search_call"),
  id: import_v413.z.string(),
- status: import_v413.z.string().optional()
+ status: import_v413.z.string().optional(),
+ action: import_v413.z.object({
+ type: import_v413.z.literal("search"),
+ query: import_v413.z.string().optional()
+ }).nullish()
  }),
  import_v413.z.object({
  type: import_v413.z.literal("computer_call"),
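
Standalone, with the bundler alias `import_v413` unrolled to plain `zod`, the `web_search_call` item shape accepted after this change is roughly:

```ts
import { z } from 'zod';

// Sketch of the web_search_call output item accepted after this change.
const webSearchCallSchema = z.object({
  type: z.literal('web_search_call'),
  id: z.string(),
  status: z.string().optional(),
  // New: the Responses API may attach the executed search action,
  // including the query. nullish() keeps older payloads (no action,
  // or action: null) parsing cleanly.
  action: z
    .object({
      type: z.literal('search'),
      query: z.string().optional(),
    })
    .nullish(),
});

// Example payload that now parses with the query intact:
webSearchCallSchema.parse({
  type: 'web_search_call',
  id: 'ws_123',
  status: 'completed',
  action: { type: 'search', query: 'node.js lts schedule' },
});
```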
@@ -2520,14 +2524,17 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: part.id,
  toolName: "web_search_preview",
- input: "",
+ input: (_k = (_j = part.action) == null ? void 0 : _j.query) != null ? _k : "",
  providerExecuted: true
  });
  content.push({
  type: "tool-result",
  toolCallId: part.id,
  toolName: "web_search_preview",
- result: { status: part.status || "completed" },
+ result: {
+ status: part.status || "completed",
+ ...((_l = part.action) == null ? void 0 : _l.query) && { query: part.action.query }
+ },
  providerExecuted: true
  });
  break;
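
De-sugared, the transpiled optional-chaining output above corresponds to source along these lines (a reconstruction, not the package's actual TypeScript; the item type is trimmed to what the fragment touches):

```ts
// Assumed shape of a parsed web_search_call item, per the schema above.
type WebSearchCallItem = {
  id: string;
  status?: string;
  action?: { type: 'search'; query?: string } | null;
};

// doGenerate mapping for a completed web_search_call item.
function mapWebSearchCall(part: WebSearchCallItem): unknown[] {
  return [
    {
      type: 'tool-call',
      toolCallId: part.id,
      toolName: 'web_search_preview',
      input: part.action?.query ?? '', // previously hard-coded to ''
      providerExecuted: true,
    },
    {
      type: 'tool-result',
      toolCallId: part.id,
      toolName: 'web_search_preview',
      result: {
        status: part.status || 'completed',
        // Attach the query only when the API reported one.
        ...(part.action?.query && { query: part.action.query }),
      },
      providerExecuted: true,
    },
  ];
}
```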
@@ -2585,15 +2592,15 @@ var OpenAIResponsesLanguageModel = class {
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_j = response.incomplete_details) == null ? void 0 : _j.reason,
+ finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
  hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_l = (_k = response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0,
- cachedInputTokens: (_n = (_m = response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0
+ reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
+ cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
  },
  request: { body },
  response: {
@@ -2645,7 +2652,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -2752,7 +2759,7 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: value.item.id,
  toolName: "web_search_preview",
- input: "",
+ input: (_c = (_b = value.item.action) == null ? void 0 : _b.query) != null ? _c : "",
  providerExecuted: true
  });
  controller.enqueue({
@@ -2761,7 +2768,10 @@ var OpenAIResponsesLanguageModel = class {
  toolName: "web_search_preview",
  result: {
  type: "web_search_tool_result",
- status: value.item.status || "completed"
+ status: value.item.status || "completed",
+ ...((_d = value.item.action) == null ? void 0 : _d.query) && {
+ query: value.item.action.query
+ }
  },
  providerExecuted: true
  });
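
The streaming path mirrors the doGenerate change, so the executed query also flows through `streamText`. A hedged consumer-side sketch (tool registration follows the AI SDK docs; exact stream part shapes may differ by SDK version):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai.responses('gpt-4o-mini'),
  tools: { web_search_preview: openai.tools.webSearchPreview({}) },
  prompt: 'Search for the latest Node.js LTS version.',
});

for await (const part of result.fullStream) {
  // With 2.0.15+, provider-executed web search calls stream through
  // with the executed query as the tool input (previously always '').
  if (part.type === 'tool-call' && part.toolName === 'web_search_preview') {
    console.log('query:', part.input);
  }
}
```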
@@ -2829,7 +2839,7 @@ var OpenAIResponsesLanguageModel = class {
  providerMetadata: {
  openai: {
  itemId: value.item.id,
- reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+ reasoningEncryptedContent: (_e = value.item.encrypted_content) != null ? _e : null
  }
  }
  });
@@ -2864,7 +2874,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
  if (value.summary_index > 0) {
- (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+ (_f = activeReasoning[value.item_id]) == null ? void 0 : _f.summaryParts.push(
  value.summary_index
  );
  controller.enqueue({
@@ -2873,7 +2883,7 @@ var OpenAIResponsesLanguageModel = class {
  providerMetadata: {
  openai: {
  itemId: value.item_id,
- reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+ reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
  }
  }
  });
@@ -2891,20 +2901,20 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
+ finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
  hasToolCalls
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
- usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
- usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
+ usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
+ usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  if (value.annotation.type === "url_citation") {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils12.generateId)(),
+ id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : (0, import_provider_utils12.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  });
@@ -2912,7 +2922,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "document",
- id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : (0, import_provider_utils12.generateId)(),
+ id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : (0, import_provider_utils12.generateId)(),
  mediaType: "text/plain",
  title: value.annotation.quote,
  filename: value.annotation.file_id
@@ -3002,7 +3012,11 @@ var responseOutputItemAddedSchema = import_v413.z.object({
  import_v413.z.object({
  type: import_v413.z.literal("web_search_call"),
  id: import_v413.z.string(),
- status: import_v413.z.string()
+ status: import_v413.z.string(),
+ action: import_v413.z.object({
+ type: import_v413.z.literal("search"),
+ query: import_v413.z.string().optional()
+ }).nullish()
  }),
  import_v413.z.object({
  type: import_v413.z.literal("computer_call"),
@@ -3051,7 +3065,11 @@ var responseOutputItemDoneSchema = import_v413.z.object({
  import_v413.z.object({
  type: import_v413.z.literal("web_search_call"),
  id: import_v413.z.string(),
- status: import_v413.z.literal("completed")
+ status: import_v413.z.literal("completed"),
+ action: import_v413.z.object({
+ type: import_v413.z.literal("search"),
+ query: import_v413.z.string().optional()
+ }).nullish()
  }),
  import_v413.z.object({
  type: import_v413.z.literal("computer_call"),
@@ -3159,39 +3177,39 @@ function isErrorChunk(chunk) {
  return chunk.type === "error";
  }
  function getResponsesModelConfig(modelId) {
+ const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
+ const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ const defaults = {
+ requiredAutoTruncation: false,
+ systemMessageMode: "system",
+ supportsFlexProcessing: supportsFlexProcessing2,
+ supportsPriorityProcessing: supportsPriorityProcessing2
+ };
  if (modelId.startsWith("gpt-5-chat")) {
  return {
- isReasoningModel: false,
- systemMessageMode: "system",
- requiredAutoTruncation: false
+ ...defaults,
+ isReasoningModel: false
  };
  }
  if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
  return {
+ ...defaults,
  isReasoningModel: true,
- systemMessageMode: "remove",
- requiredAutoTruncation: false
+ systemMessageMode: "remove"
  };
  }
  return {
+ ...defaults,
  isReasoningModel: true,
- systemMessageMode: "developer",
- requiredAutoTruncation: false
+ systemMessageMode: "developer"
  };
  }
  return {
- isReasoningModel: false,
- systemMessageMode: "system",
- requiredAutoTruncation: false
+ ...defaults,
+ isReasoningModel: false
  };
  }
- function supportsFlexProcessing2(modelId) {
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
- }
- function supportsPriorityProcessing2(modelId) {
- return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
- }
  var openaiResponsesProviderOptionsSchema = import_v413.z.object({
  metadata: import_v413.z.any().nullish(),
  parallelToolCalls: import_v413.z.boolean().nullish(),
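
Net effect of this last hunk: the standalone `supportsFlexProcessing2`/`supportsPriorityProcessing2` functions are deleted and their predicates are evaluated once into a `defaults` object that every branch spreads, which is what lets the earlier hunks read `modelConfig.supportsFlexProcessing` directly. De-minified into standalone TypeScript, the consolidated function reads roughly as follows (the interface and type names are assumptions; the shipped dist output is untyped JavaScript):

```ts
type SystemMessageMode = 'system' | 'developer' | 'remove';

// Assumed shape; inferred from the fields returned in the diff.
interface ResponsesModelConfig {
  isReasoningModel: boolean;
  systemMessageMode: SystemMessageMode;
  requiredAutoTruncation: boolean;
  supportsFlexProcessing: boolean;
  supportsPriorityProcessing: boolean;
}

function getResponsesModelConfig(modelId: string): ResponsesModelConfig {
  // Flex: o3, o4-mini, and gpt-5 (except the non-reasoning gpt-5-chat).
  const supportsFlexProcessing =
    modelId.startsWith('o3') ||
    modelId.startsWith('o4-mini') ||
    (modelId.startsWith('gpt-5') && !modelId.startsWith('gpt-5-chat'));

  // Priority: gpt-4*, o3, o4-mini, and gpt-5 except gpt-5-nano/gpt-5-chat.
  const supportsPriorityProcessing =
    modelId.startsWith('gpt-4') ||
    modelId.startsWith('gpt-5-mini') ||
    (modelId.startsWith('gpt-5') &&
      !modelId.startsWith('gpt-5-nano') &&
      !modelId.startsWith('gpt-5-chat')) ||
    modelId.startsWith('o3') ||
    modelId.startsWith('o4-mini');

  // Computed once; each branch only overrides what differs.
  const defaults = {
    requiredAutoTruncation: false,
    systemMessageMode: 'system' as SystemMessageMode,
    supportsFlexProcessing,
    supportsPriorityProcessing,
  };

  if (modelId.startsWith('gpt-5-chat')) {
    return { ...defaults, isReasoningModel: false };
  }

  if (
    modelId.startsWith('o') ||
    modelId.startsWith('gpt-5') ||
    modelId.startsWith('codex-') ||
    modelId.startsWith('computer-use')
  ) {
    // o1-mini/o1-preview accept no system or developer messages at all.
    if (modelId.startsWith('o1-mini') || modelId.startsWith('o1-preview')) {
      return { ...defaults, isReasoningModel: true, systemMessageMode: 'remove' };
    }
    return { ...defaults, isReasoningModel: true, systemMessageMode: 'developer' };
  }

  return { ...defaults, isReasoningModel: false };
}
```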