@ai-sdk/openai 2.0.26 → 2.0.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,18 @@
  # @ai-sdk/openai
 
+ ## 2.0.28
+
+ ### Patch Changes
+
+ - 4c2bb77: fix (provider/openai): send sources action as include
+ - 561e8b0: fix (provider/openai): fix code interpreter tool in doGenerate
+
+ ## 2.0.27
+
+ ### Patch Changes
+
+ - 2338c79: feat (provider/openai): add jsdoc for openai tools
+
  ## 2.0.26
 
  ### Patch Changes
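
The two 2.0.28 patches surface in the Responses API path: code interpreter tool calls now work in `doGenerate`, and `web_search_call.action.sources` is requested automatically whenever a web search tool is configured, so source citations come back without extra options. Below is a minimal usage sketch (TypeScript); the `generateText` entry point from the `ai` package, the `openai.responses()` model helper, and the `sources` result field are assumptions not shown in this diff.

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text, sources } = await generateText({
  model: openai.responses('gpt-5'),
  prompt: 'Compute the 50th Fibonacci number and cite a source for the formula.',
  tools: {
    // tool key must be `code_interpreter` (fixed for doGenerate in 2.0.28)
    code_interpreter: openai.tools.codeInterpreter(),
    // tool key must be `web_search`; sources are now requested automatically
    web_search: openai.tools.webSearch(),
  },
});

console.log(text);
console.log(sources); // populated from web_search_call.action.sources
```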
package/dist/index.d.mts CHANGED
@@ -11,7 +11,7 @@ type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large
 
  type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 
- declare const factory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
+ declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
  /**
  * Filters for the search.
  */
@@ -57,12 +57,40 @@ declare const factory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
  };
  }>;
 
+ declare const codeInterpreterToolFactory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
+ /**
+ * The code interpreter container.
+ * Can be a container ID
+ * or an object that specifies uploaded file IDs to make available to your code.
+ */
+ container?: string | {
+ fileIds?: string[];
+ };
+ }>;
+
  declare const openaiTools: {
- codeInterpreter: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
- container?: string | {
- fileIds?: string[];
- };
- }>;
+ /**
+ * The Code Interpreter tool allows models to write and run Python code in a
+ * sandboxed environment to solve complex problems in domains like data analysis,
+ * coding, and math.
+ *
+ * @param container - The container to use for the code interpreter.
+ *
+ * Must have name `code_interpreter`.
+ */
+ codeInterpreter: (args?: Parameters<typeof codeInterpreterToolFactory>[0]) => _ai_sdk_provider_utils.Tool<{}, unknown>;
+ /**
+ * File search is a tool available in the Responses API. It enables models to
+ * retrieve information in a knowledge base of previously uploaded files through
+ * semantic and keyword search.
+ *
+ * Must have name `file_search`.
+ *
+ * @param vectorStoreIds - The vector store IDs to use for the file search.
+ * @param maxNumResults - The maximum number of results to return.
+ * @param ranking - The ranking options to use for the file search.
+ * @param filters - The filters to use for the file search.
+ */
  fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{
  query: string;
  }, {
@@ -80,6 +108,17 @@ declare const openaiTools: {
  filters: any[];
  };
  }>;
+ /**
+ * Web search allows models to access up-to-date information from the internet
+ * and provide answers with sourced citations.
+ *
+ * Must have name `web_search_preview`.
+ *
+ * @param searchContextSize - The search context size to use for the web search.
+ * @param userLocation - The user location to use for the web search.
+ *
+ * @deprecated Use `webSearch` instead.
+ */
  webSearchPreview: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
  searchContextSize?: "low" | "medium" | "high";
  userLocation?: {
@@ -90,7 +129,17 @@ declare const openaiTools: {
  timezone?: string;
  };
  }>;
- webSearch: (args?: Parameters<typeof factory>[0]) => _ai_sdk_provider_utils.Tool<{}, unknown>;
+ /**
+ * Web search allows models to access up-to-date information from the internet
+ * and provide answers with sourced citations.
+ *
+ * Must have name `web_search`.
+ *
+ * @param filters - The filters to use for the web search.
+ * @param searchContextSize - The search context size to use for the web search.
+ * @param userLocation - The user location to use for the web search.
+ */
+ webSearch: (args?: Parameters<typeof webSearchToolFactory>[0]) => _ai_sdk_provider_utils.Tool<{}, unknown>;
  };
 
  type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
@@ -206,9 +255,9 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  priority: "priority";
  }>>>;
  include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
- "reasoning.encrypted_content": "reasoning.encrypted_content";
  "file_search_call.results": "file_search_call.results";
  "message.output_text.logprobs": "message.output_text.logprobs";
+ "reasoning.encrypted_content": "reasoning.encrypted_content";
  }>>>>;
  textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
  low: "low";
package/dist/index.d.ts CHANGED
@@ -11,7 +11,7 @@ type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large
 
  type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 
- declare const factory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
+ declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
  /**
  * Filters for the search.
  */
@@ -57,12 +57,40 @@ declare const factory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
  };
  }>;
 
+ declare const codeInterpreterToolFactory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
+ /**
+ * The code interpreter container.
+ * Can be a container ID
+ * or an object that specifies uploaded file IDs to make available to your code.
+ */
+ container?: string | {
+ fileIds?: string[];
+ };
+ }>;
+
  declare const openaiTools: {
- codeInterpreter: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
- container?: string | {
- fileIds?: string[];
- };
- }>;
+ /**
+ * The Code Interpreter tool allows models to write and run Python code in a
+ * sandboxed environment to solve complex problems in domains like data analysis,
+ * coding, and math.
+ *
+ * @param container - The container to use for the code interpreter.
+ *
+ * Must have name `code_interpreter`.
+ */
+ codeInterpreter: (args?: Parameters<typeof codeInterpreterToolFactory>[0]) => _ai_sdk_provider_utils.Tool<{}, unknown>;
+ /**
+ * File search is a tool available in the Responses API. It enables models to
+ * retrieve information in a knowledge base of previously uploaded files through
+ * semantic and keyword search.
+ *
+ * Must have name `file_search`.
+ *
+ * @param vectorStoreIds - The vector store IDs to use for the file search.
+ * @param maxNumResults - The maximum number of results to return.
+ * @param ranking - The ranking options to use for the file search.
+ * @param filters - The filters to use for the file search.
+ */
  fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{
  query: string;
  }, {
@@ -80,6 +108,17 @@ declare const openaiTools: {
  filters: any[];
  };
  }>;
+ /**
+ * Web search allows models to access up-to-date information from the internet
+ * and provide answers with sourced citations.
+ *
+ * Must have name `web_search_preview`.
+ *
+ * @param searchContextSize - The search context size to use for the web search.
+ * @param userLocation - The user location to use for the web search.
+ *
+ * @deprecated Use `webSearch` instead.
+ */
  webSearchPreview: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
  searchContextSize?: "low" | "medium" | "high";
  userLocation?: {
@@ -90,7 +129,17 @@ declare const openaiTools: {
  timezone?: string;
  };
  }>;
- webSearch: (args?: Parameters<typeof factory>[0]) => _ai_sdk_provider_utils.Tool<{}, unknown>;
+ /**
+ * Web search allows models to access up-to-date information from the internet
+ * and provide answers with sourced citations.
+ *
+ * Must have name `web_search`.
+ *
+ * @param filters - The filters to use for the web search.
+ * @param searchContextSize - The search context size to use for the web search.
+ * @param userLocation - The user location to use for the web search.
+ */
+ webSearch: (args?: Parameters<typeof webSearchToolFactory>[0]) => _ai_sdk_provider_utils.Tool<{}, unknown>;
  };
 
  type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
@@ -206,9 +255,9 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  priority: "priority";
  }>>>;
  include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
- "reasoning.encrypted_content": "reasoning.encrypted_content";
  "file_search_call.results": "file_search_call.results";
  "message.output_text.logprobs": "message.output_text.logprobs";
+ "reasoning.encrypted_content": "reasoning.encrypted_content";
  }>>>>;
  textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
  low: "low";
package/dist/index.js CHANGED
@@ -369,23 +369,11 @@ var compoundFilterSchema = import_v43.z.object({
  });
  var filtersSchema = import_v43.z.union([comparisonFilterSchema, compoundFilterSchema]);
  var fileSearchArgsSchema = import_v43.z.object({
- /**
- * List of vector store IDs to search through. If not provided, searches all available vector stores.
- */
  vectorStoreIds: import_v43.z.array(import_v43.z.string()).optional(),
- /**
- * Maximum number of search results to return. Defaults to 10.
- */
  maxNumResults: import_v43.z.number().optional(),
- /**
- * Ranking options for the search.
- */
  ranking: import_v43.z.object({
  ranker: import_v43.z.enum(["auto", "default-2024-08-21"]).optional()
  }).optional(),
- /**
- * A filter to apply based on file attributes.
- */
  filters: filtersSchema.optional()
  });
  var fileSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
@@ -1819,11 +1807,14 @@ var codeInterpreterArgsSchema = import_v411.z.object({
  })
  ]).optional()
  });
- var codeInterpreter = (0, import_provider_utils9.createProviderDefinedToolFactory)({
+ var codeInterpreterToolFactory = (0, import_provider_utils9.createProviderDefinedToolFactory)({
  id: "openai.code_interpreter",
  name: "code_interpreter",
  inputSchema: import_v411.z.object({})
  });
+ var codeInterpreter = (args = {}) => {
+ return codeInterpreterToolFactory(args);
+ };
 
  // src/tool/web-search.ts
  var import_provider_utils10 = require("@ai-sdk/provider-utils");
@@ -1841,7 +1832,7 @@ var webSearchArgsSchema = import_v412.z.object({
  timezone: import_v412.z.string().optional()
  }).optional()
  });
- var factory = (0, import_provider_utils10.createProviderDefinedToolFactory)({
+ var webSearchToolFactory = (0, import_provider_utils10.createProviderDefinedToolFactory)({
  id: "openai.web_search",
  name: "web_search",
  inputSchema: import_v412.z.object({
@@ -1863,14 +1854,56 @@ var factory = (0, import_provider_utils10.createProviderDefinedToolFactory)({
  })
  });
  var webSearch = (args = {}) => {
- return factory(args);
+ return webSearchToolFactory(args);
  };
 
  // src/openai-tools.ts
  var openaiTools = {
+ /**
+ * The Code Interpreter tool allows models to write and run Python code in a
+ * sandboxed environment to solve complex problems in domains like data analysis,
+ * coding, and math.
+ *
+ * @param container - The container to use for the code interpreter.
+ *
+ * Must have name `code_interpreter`.
+ */
  codeInterpreter,
+ /**
+ * File search is a tool available in the Responses API. It enables models to
+ * retrieve information in a knowledge base of previously uploaded files through
+ * semantic and keyword search.
+ *
+ * Must have name `file_search`.
+ *
+ * @param vectorStoreIds - The vector store IDs to use for the file search.
+ * @param maxNumResults - The maximum number of results to return.
+ * @param ranking - The ranking options to use for the file search.
+ * @param filters - The filters to use for the file search.
+ */
  fileSearch,
+ /**
+ * Web search allows models to access up-to-date information from the internet
+ * and provide answers with sourced citations.
+ *
+ * Must have name `web_search_preview`.
+ *
+ * @param searchContextSize - The search context size to use for the web search.
+ * @param userLocation - The user location to use for the web search.
+ *
+ * @deprecated Use `webSearch` instead.
+ */
  webSearchPreview,
+ /**
+ * Web search allows models to access up-to-date information from the internet
+ * and provide answers with sourced citations.
+ *
+ * Must have name `web_search`.
+ *
+ * @param filters - The filters to use for the web search.
+ * @param searchContextSize - The search context size to use for the web search.
+ * @param userLocation - The user location to use for the web search.
+ */
  webSearch
  };
 
@@ -2257,7 +2290,7 @@ var OpenAIResponsesLanguageModel = class {
  toolChoice,
  responseFormat
  }) {
- var _a, _b;
+ var _a, _b, _c;
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
  if (topK != null) {
@@ -2293,8 +2326,13 @@ var OpenAIResponsesLanguageModel = class {
  schema: openaiResponsesProviderOptionsSchema
  });
  const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+ let include = openaiOptions == null ? void 0 : openaiOptions.include;
  const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
- const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
+ include = topLogprobs ? Array.isArray(include) ? [...include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : include;
+ const webSearchToolName = (_b = tools == null ? void 0 : tools.find(
+ (tool) => tool.type === "provider-defined" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
+ )) == null ? void 0 : _b.name;
+ include = webSearchToolName ? Array.isArray(include) ? [...include, "web_search_call.action.sources"] : ["web_search_call.action.sources"] : include;
  const baseArgs = {
  model: this.modelId,
  input: messages,
@@ -2307,7 +2345,7 @@ var OpenAIResponsesLanguageModel = class {
  format: responseFormat.schema != null ? {
  type: "json_schema",
  strict: strictJsonSchema,
- name: (_b = responseFormat.name) != null ? _b : "response",
+ name: (_c = responseFormat.name) != null ? _c : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
  } : { type: "json_object" }
@@ -2325,7 +2363,7 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
- include: openaiOptionsInclude,
+ include,
  prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
  safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
  top_logprobs: topLogprobs,
@@ -2403,6 +2441,7 @@ var OpenAIResponsesLanguageModel = class {
  strictJsonSchema
  });
  return {
+ webSearchToolName,
  args: {
  ...baseArgs,
  tools: openaiTools2,
@@ -2413,7 +2452,11 @@ var OpenAIResponsesLanguageModel = class {
  }
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
- const { args: body, warnings } = await this.getArgs(options);
+ const {
+ args: body,
+ warnings,
+ webSearchToolName
+ } = await this.getArgs(options);
  const url = this.config.url({
  path: "/responses",
  modelId: this.modelId
@@ -2464,12 +2507,18 @@ var OpenAIResponsesLanguageModel = class {
  start_index: import_v414.z.number().nullish(),
  end_index: import_v414.z.number().nullish(),
  quote: import_v414.z.string().nullish()
+ }),
+ import_v414.z.object({
+ type: import_v414.z.literal("container_file_citation")
  })
  ])
  )
  })
  )
  }),
+ import_v414.z.object({
+ type: import_v414.z.literal("code_interpreter_call")
+ }),
  import_v414.z.object({
  type: import_v414.z.literal("function_call"),
  call_id: import_v414.z.string(),
@@ -2610,14 +2659,14 @@ var OpenAIResponsesLanguageModel = class {
  content.push({
  type: "tool-call",
  toolCallId: part.id,
- toolName: "web_search_preview",
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
  input: JSON.stringify({ action: part.action }),
  providerExecuted: true
  });
  content.push({
  type: "tool-result",
  toolCallId: part.id,
- toolName: "web_search_preview",
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
  result: { status: part.status },
  providerExecuted: true
  });
@@ -2702,7 +2751,11 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doStream(options) {
- const { args: body, warnings } = await this.getArgs(options);
+ const {
+ args: body,
+ warnings,
+ webSearchToolName
+ } = await this.getArgs(options);
  const { responseHeaders, value: response } = await (0, import_provider_utils13.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
@@ -2763,13 +2816,13 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (value.item.type === "web_search_call") {
  ongoingToolCalls[value.output_index] = {
- toolName: "web_search_preview",
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
  toolCallId: value.item.id
  };
  controller.enqueue({
  type: "tool-input-start",
  id: value.item.id,
- toolName: "web_search_preview"
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search"
  });
  } else if (value.item.type === "computer_call") {
  ongoingToolCalls[value.output_index] = {
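
The core behavioral change in `getArgs` is that the user-supplied `include` array is now extended in two steps, and the detected web search tool name is threaded through to `doGenerate`/`doStream` so tool calls and results are reported under the configured name. The sketch below is an illustrative-only reimplementation of that include handling, not an exported API; the function name `resolveInclude` is made up, while the string values and branching mirror the code above.

```ts
// Mirrors the include handling inside getArgs: start from the user-supplied
// include array and append the entries that the requested features need
// before the request body is built.
function resolveInclude(
  include: string[] | null | undefined,
  topLogprobs: number | undefined,
  webSearchToolName: string | undefined,
): string[] | null | undefined {
  // logprobs requested -> ask the Responses API to return them
  if (topLogprobs) {
    include = Array.isArray(include)
      ? [...include, 'message.output_text.logprobs']
      : ['message.output_text.logprobs'];
  }
  // a web_search / web_search_preview tool is configured -> request the sources
  // of each search action ("send sources action as include", changeset 4c2bb77)
  if (webSearchToolName) {
    include = Array.isArray(include)
      ? [...include, 'web_search_call.action.sources']
      : ['web_search_call.action.sources'];
  }
  return include;
}
```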