@ai-sdk/openai 3.0.45 → 3.0.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -958,6 +958,145 @@ Your execute function must return:
958
958
  - **status** _'completed' | 'failed'_ - Whether the patch was applied successfully
959
959
  - **output** _string_ (optional) - Human-readable log text (e.g., results or error messages)
960
960
 
961
+ #### Tool Search
962
+
963
+ Tool search allows the model to dynamically search for and load tools into context as needed,
964
+ rather than loading all tool definitions up front. This can reduce token usage, cost, and latency
965
+ when you have many tools. Mark the tools you want to make searchable with `deferLoading: true`
966
+ in their `providerOptions`.
967
+
968
+ There are two execution modes:
969
+
970
+ - **Server-executed (hosted):** OpenAI searches across the deferred tools declared in the request and returns the loaded subset in the same response. No extra round-trip is needed.
971
+ - **Client-executed:** The model emits a `tool_search_call`, your application performs the lookup, and you return the matching tools via the `execute` callback.
972
+
973
+ ##### Server-Executed (Hosted) Tool Search
974
+
975
+ Use hosted tool search when the candidate tools are already known at request time.
976
+ Add `openai.tools.toolSearch()` with no arguments and mark your tools with `deferLoading: true`:
977
+
978
+ ```ts
979
+ import { openai } from '@ai-sdk/openai';
980
+ import { generateText, tool, stepCountIs } from 'ai';
981
+ import { z } from 'zod';
982
+
983
+ const result = await generateText({
984
+ model: openai.responses('gpt-5.4'),
985
+ prompt: 'What is the weather in San Francisco?',
986
+ stopWhen: stepCountIs(10),
987
+ tools: {
988
+ toolSearch: openai.tools.toolSearch(),
989
+
990
+ get_weather: tool({
991
+ description: 'Get the current weather at a specific location',
992
+ inputSchema: z.object({
993
+ location: z.string(),
994
+ unit: z.enum(['celsius', 'fahrenheit']),
995
+ }),
996
+ execute: async ({ location, unit }) => ({
997
+ location,
998
+ temperature: unit === 'celsius' ? 18 : 64,
999
+ }),
1000
+ providerOptions: {
1001
+ openai: { deferLoading: true },
1002
+ },
1003
+ }),
1004
+
1005
+ search_files: tool({
1006
+ description: 'Search through files in the workspace',
1007
+ inputSchema: z.object({ query: z.string() }),
1008
+ execute: async ({ query }) => ({
1009
+ results: [`Found 3 files matching "${query}"`],
1010
+ }),
1011
+ providerOptions: {
1012
+ openai: { deferLoading: true },
1013
+ },
1014
+ }),
1015
+ },
1016
+ });
1017
+ ```
1018
+
1019
+ In hosted mode, the model internally searches the deferred tools, loads the relevant ones, and
1020
+ proceeds to call them — all within a single response. The `tool_search_call` and
1021
+ `tool_search_output` items appear in the response with `execution: 'server'` and `call_id: null`.
1022
+
1023
+ ##### Client-Executed Tool Search
1024
+
1025
+ Use client-executed tool search when tool discovery depends on runtime state — for example,
1026
+ tools that vary per tenant, project, or external system. Pass `execution: 'client'` along with
1027
+ a `description`, `parameters` schema, and an `execute` callback:
1028
+
1029
+ ```ts
1030
+ import { openai } from '@ai-sdk/openai';
1031
+ import { generateText, tool, stepCountIs } from 'ai';
1032
+ import { z } from 'zod';
1033
+
1034
+ const result = await generateText({
1035
+ model: openai.responses('gpt-5.4'),
1036
+ prompt: 'What is the weather in San Francisco?',
1037
+ stopWhen: stepCountIs(10),
1038
+ tools: {
1039
+ toolSearch: openai.tools.toolSearch({
1040
+ execution: 'client',
1041
+ description: 'Search for available tools based on what the user needs.',
1042
+ parameters: {
1043
+ type: 'object',
1044
+ properties: {
1045
+ goal: {
1046
+ type: 'string',
1047
+ description: 'What the user is trying to accomplish',
1048
+ },
1049
+ },
1050
+ required: ['goal'],
1051
+ additionalProperties: false,
1052
+ },
1053
+ execute: async ({ arguments: args }) => {
1054
+ // Your custom tool discovery logic here.
1055
+ // Return the tools that match the search goal.
1056
+ return {
1057
+ tools: [
1058
+ {
1059
+ type: 'function',
1060
+ name: 'get_weather',
1061
+ description: 'Get the current weather at a specific location',
1062
+ deferLoading: true,
1063
+ parameters: {
1064
+ type: 'object',
1065
+ properties: {
1066
+ location: { type: 'string' },
1067
+ },
1068
+ required: ['location'],
1069
+ additionalProperties: false,
1070
+ },
1071
+ },
1072
+ ],
1073
+ };
1074
+ },
1075
+ }),
1076
+
1077
+ get_weather: tool({
1078
+ description: 'Get the current weather at a specific location',
1079
+ inputSchema: z.object({ location: z.string() }),
1080
+ execute: async ({ location }) => ({
1081
+ location,
1082
+ temperature: 64,
1083
+ condition: 'Partly cloudy',
1084
+ }),
1085
+ providerOptions: {
1086
+ openai: { deferLoading: true },
1087
+ },
1088
+ }),
1089
+ },
1090
+ });
1091
+ ```
1092
+
1093
+ In client mode, the flow spans two steps:
1094
+
1095
+ 1. **Step 1:** The model emits a `tool_search_call` with `execution: 'client'` and a non-null `call_id`. The SDK calls your `execute` callback with the search arguments. Your callback returns the discovered tools.
1096
+ 2. **Step 2:** The SDK sends the `tool_search_output` (with the matching `call_id`) back to the model. The model can now call the loaded tools as normal function calls.
1097
+
1098
+ For more details, see the [OpenAI Tool Search documentation](https://platform.openai.com/docs/guides/tools-tool-search).
1099
+
961
1100
  #### Custom Tool
962
1101
 
963
1102
  The OpenAI Responses API supports
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/openai",
3
- "version": "3.0.45",
3
+ "version": "3.0.47",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -37,7 +37,7 @@
37
37
  },
38
38
  "dependencies": {
39
39
  "@ai-sdk/provider": "3.0.8",
40
- "@ai-sdk/provider-utils": "4.0.20"
40
+ "@ai-sdk/provider-utils": "4.0.21"
41
41
  },
42
42
  "devDependencies": {
43
43
  "@types/node": "20.17.24",
@@ -5,6 +5,7 @@ import { fileSearch } from './tool/file-search';
5
5
  import { imageGeneration } from './tool/image-generation';
6
6
  import { localShell } from './tool/local-shell';
7
7
  import { shell } from './tool/shell';
8
+ import { toolSearch } from './tool/tool-search';
8
9
  import { webSearch } from './tool/web-search';
9
10
  import { webSearchPreview } from './tool/web-search-preview';
10
11
  import { mcp } from './tool/mcp';
@@ -123,4 +124,15 @@ export const openaiTools = {
123
124
  * @param serverUrl - URL for the MCP server.
124
125
  */
125
126
  mcp,
127
+
128
+ /**
129
+ * Tool search allows the model to dynamically search for and load deferred
130
+ * tools into the model's context as needed. This helps reduce overall token
131
+ * usage, cost, and latency by only loading tools when the model needs them.
132
+ *
133
+ * To use tool search, mark tools with `deferLoading: true` in their
134
+ * `providerOptions`. The model will use tool search to load these tools
135
+ * when it determines they are needed.
136
+ */
137
+ toolSearch,
126
138
  };
@@ -7,6 +7,7 @@ import {
7
7
  import {
8
8
  convertToBase64,
9
9
  isNonNullable,
10
+ parseJSON,
10
11
  parseProviderOptions,
11
12
  ToolNameMapping,
12
13
  validateTypes,
@@ -21,6 +22,10 @@ import {
21
22
  localShellOutputSchema,
22
23
  } from '../tool/local-shell';
23
24
  import { shellInputSchema, shellOutputSchema } from '../tool/shell';
25
+ import {
26
+ toolSearchInputSchema,
27
+ toolSearchOutputSchema,
28
+ } from '../tool/tool-search';
24
29
  import {
25
30
  OpenAIResponsesCustomToolCallOutput,
26
31
  OpenAIResponsesFunctionCallOutput,
@@ -206,6 +211,41 @@ export async function convertToOpenAIResponsesInput({
206
211
  break;
207
212
  }
208
213
 
214
+ const resolvedToolName = toolNameMapping.toProviderToolName(
215
+ part.toolName,
216
+ );
217
+
218
+ if (resolvedToolName === 'tool_search') {
219
+ if (store && id != null) {
220
+ input.push({ type: 'item_reference', id });
221
+ break;
222
+ }
223
+
224
+ const parsedInput =
225
+ typeof part.input === 'string'
226
+ ? await parseJSON({
227
+ text: part.input,
228
+ schema: toolSearchInputSchema,
229
+ })
230
+ : await validateTypes({
231
+ value: part.input,
232
+ schema: toolSearchInputSchema,
233
+ });
234
+
235
+ const execution =
236
+ parsedInput.call_id != null ? 'client' : 'server';
237
+
238
+ input.push({
239
+ type: 'tool_search_call',
240
+ id: id ?? part.toolCallId,
241
+ execution,
242
+ call_id: parsedInput.call_id ?? null,
243
+ status: 'completed',
244
+ arguments: parsedInput.arguments,
245
+ });
246
+ break;
247
+ }
248
+
209
249
  if (part.providerExecuted) {
210
250
  if (store && id != null) {
211
251
  input.push({ type: 'item_reference', id });
@@ -218,10 +258,6 @@ export async function convertToOpenAIResponsesInput({
218
258
  break;
219
259
  }
220
260
 
221
- const resolvedToolName = toolNameMapping.toProviderToolName(
222
- part.toolName,
223
- );
224
-
225
261
  if (hasLocalShellTool && resolvedToolName === 'local_shell') {
226
262
  const parsedInput = await validateTypes({
227
263
  value: part.input,
@@ -328,6 +364,35 @@ export async function convertToOpenAIResponsesInput({
328
364
  part.toolName,
329
365
  );
330
366
 
367
+ if (resolvedResultToolName === 'tool_search') {
368
+ const itemId =
369
+ (
370
+ part.providerOptions?.[providerOptionsName] as
371
+ | { itemId?: string }
372
+ | undefined
373
+ )?.itemId ?? part.toolCallId;
374
+
375
+ if (store) {
376
+ input.push({ type: 'item_reference', id: itemId });
377
+ } else if (part.output.type === 'json') {
378
+ const parsedOutput = await validateTypes({
379
+ value: part.output.value,
380
+ schema: toolSearchOutputSchema,
381
+ });
382
+
383
+ input.push({
384
+ type: 'tool_search_output',
385
+ id: itemId,
386
+ execution: 'server',
387
+ call_id: null,
388
+ status: 'completed',
389
+ tools: parsedOutput.tools,
390
+ });
391
+ }
392
+
393
+ break;
394
+ }
395
+
331
396
  /*
332
397
  * Shell tool results are separate output items (shell_call_output)
333
398
  * with their own item IDs distinct from the shell_call's item ID.
@@ -527,6 +592,22 @@ export async function convertToOpenAIResponsesInput({
527
592
  part.toolName,
528
593
  );
529
594
 
595
+ if (resolvedToolName === 'tool_search' && output.type === 'json') {
596
+ const parsedOutput = await validateTypes({
597
+ value: output.value,
598
+ schema: toolSearchOutputSchema,
599
+ });
600
+
601
+ input.push({
602
+ type: 'tool_search_output',
603
+ execution: 'client',
604
+ call_id: part.toolCallId,
605
+ status: 'completed',
606
+ tools: parsedOutput.tools,
607
+ });
608
+ continue;
609
+ }
610
+
530
611
  if (
531
612
  hasLocalShellTool &&
532
613
  resolvedToolName === 'local_shell' &&
@@ -1,7 +1,18 @@
1
- import { JSONSchema7 } from '@ai-sdk/provider';
1
+ import { JSONObject, JSONSchema7, JSONValue } from '@ai-sdk/provider';
2
2
  import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
3
3
  import { z } from 'zod/v4';
4
4
 
5
+ const jsonValueSchema: z.ZodType<JSONValue> = z.lazy(() =>
6
+ z.union([
7
+ z.string(),
8
+ z.number(),
9
+ z.boolean(),
10
+ z.null(),
11
+ z.array(jsonValueSchema),
12
+ z.record(z.string(), jsonValueSchema.optional()),
13
+ ]),
14
+ );
15
+
5
16
  export type OpenAIResponsesInput = Array<OpenAIResponsesInputItem>;
6
17
 
7
18
  export type OpenAIResponsesInputItem =
@@ -20,6 +31,8 @@ export type OpenAIResponsesInputItem =
20
31
  | OpenAIResponsesShellCallOutput
21
32
  | OpenAIResponsesApplyPatchCall
22
33
  | OpenAIResponsesApplyPatchCallOutput
34
+ | OpenAIResponsesToolSearchCall
35
+ | OpenAIResponsesToolSearchOutput
23
36
  | OpenAIResponsesReasoning
24
37
  | OpenAIResponsesItemReference;
25
38
 
@@ -199,6 +212,24 @@ export type OpenAIResponsesApplyPatchCallOutput = {
199
212
  output?: string;
200
213
  };
201
214
 
215
+ export type OpenAIResponsesToolSearchCall = {
216
+ type: 'tool_search_call';
217
+ id: string;
218
+ execution: 'server' | 'client';
219
+ call_id: string | null;
220
+ status: 'in_progress' | 'completed' | 'incomplete';
221
+ arguments: unknown;
222
+ };
223
+
224
+ export type OpenAIResponsesToolSearchOutput = {
225
+ type: 'tool_search_output';
226
+ id?: string;
227
+ execution: 'server' | 'client';
228
+ call_id: string | null;
229
+ status: 'in_progress' | 'completed' | 'incomplete';
230
+ tools: Array<JSONObject>;
231
+ };
232
+
202
233
  export type OpenAIResponsesItemReference = {
203
234
  type: 'item_reference';
204
235
  id: string;
@@ -249,6 +280,7 @@ export type OpenAIResponsesTool =
249
280
  description: string | undefined;
250
281
  parameters: JSONSchema7;
251
282
  strict?: boolean;
283
+ defer_loading?: boolean;
252
284
  }
253
285
  | {
254
286
  type: 'apply_patch';
@@ -407,6 +439,12 @@ export type OpenAIResponsesTool =
407
439
  path: string;
408
440
  }>;
409
441
  };
442
+ }
443
+ | {
444
+ type: 'tool_search';
445
+ execution?: 'server' | 'client';
446
+ description?: string;
447
+ parameters?: Record<string, unknown>;
410
448
  };
411
449
 
412
450
  export type OpenAIResponsesReasoning = {
@@ -592,6 +630,22 @@ export const openaiResponsesChunkSchema = lazySchema(() =>
592
630
  }),
593
631
  ),
594
632
  }),
633
+ z.object({
634
+ type: z.literal('tool_search_call'),
635
+ id: z.string(),
636
+ execution: z.enum(['server', 'client']),
637
+ call_id: z.string().nullable(),
638
+ status: z.enum(['in_progress', 'completed', 'incomplete']),
639
+ arguments: z.unknown(),
640
+ }),
641
+ z.object({
642
+ type: z.literal('tool_search_output'),
643
+ id: z.string(),
644
+ execution: z.enum(['server', 'client']),
645
+ call_id: z.string().nullable(),
646
+ status: z.enum(['in_progress', 'completed', 'incomplete']),
647
+ tools: z.array(z.record(z.string(), jsonValueSchema.optional())),
648
+ }),
595
649
  ]),
596
650
  }),
597
651
  z.object({
@@ -815,6 +869,22 @@ export const openaiResponsesChunkSchema = lazySchema(() =>
815
869
  }),
816
870
  ),
817
871
  }),
872
+ z.object({
873
+ type: z.literal('tool_search_call'),
874
+ id: z.string(),
875
+ execution: z.enum(['server', 'client']),
876
+ call_id: z.string().nullable(),
877
+ status: z.enum(['in_progress', 'completed', 'incomplete']),
878
+ arguments: z.unknown(),
879
+ }),
880
+ z.object({
881
+ type: z.literal('tool_search_output'),
882
+ id: z.string(),
883
+ execution: z.enum(['server', 'client']),
884
+ call_id: z.string().nullable(),
885
+ status: z.enum(['in_progress', 'completed', 'incomplete']),
886
+ tools: z.array(z.record(z.string(), jsonValueSchema.optional())),
887
+ }),
818
888
  ]),
819
889
  }),
820
890
  z.object({
@@ -1238,6 +1308,22 @@ export const openaiResponsesResponseSchema = lazySchema(() =>
1238
1308
  }),
1239
1309
  ),
1240
1310
  }),
1311
+ z.object({
1312
+ type: z.literal('tool_search_call'),
1313
+ id: z.string(),
1314
+ execution: z.enum(['server', 'client']),
1315
+ call_id: z.string().nullable(),
1316
+ status: z.enum(['in_progress', 'completed', 'incomplete']),
1317
+ arguments: z.unknown(),
1318
+ }),
1319
+ z.object({
1320
+ type: z.literal('tool_search_output'),
1321
+ id: z.string(),
1322
+ execution: z.enum(['server', 'client']),
1323
+ call_id: z.string().nullable(),
1324
+ status: z.enum(['in_progress', 'completed', 'incomplete']),
1325
+ tools: z.array(z.record(z.string(), jsonValueSchema.optional())),
1326
+ }),
1241
1327
  ]),
1242
1328
  )
1243
1329
  .optional(),
@@ -38,6 +38,10 @@ import { imageGenerationOutputSchema } from '../tool/image-generation';
38
38
  import { localShellInputSchema } from '../tool/local-shell';
39
39
  import { mcpOutputSchema } from '../tool/mcp';
40
40
  import { shellInputSchema, shellOutputSchema } from '../tool/shell';
41
+ import {
42
+ toolSearchInputSchema,
43
+ toolSearchOutputSchema,
44
+ } from '../tool/tool-search';
41
45
  import { webSearchOutputSchema } from '../tool/web-search';
42
46
  import {
43
47
  convertOpenAIResponsesUsage,
@@ -193,6 +197,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
193
197
  'openai.web_search_preview': 'web_search_preview',
194
198
  'openai.mcp': 'mcp',
195
199
  'openai.apply_patch': 'apply_patch',
200
+ 'openai.tool_search': 'tool_search',
196
201
  },
197
202
  resolveProviderToolName: tool =>
198
203
  tool.id === 'openai.custom'
@@ -505,6 +510,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
505
510
 
506
511
  // flag that checks if there have been client-side tool calls (not executed by openai)
507
512
  let hasFunctionCall = false;
513
+ const hostedToolSearchCallIds: string[] = [];
508
514
 
509
515
  // map response content to content array (defined when there is no error)
510
516
  for (const part of response.output!) {
@@ -551,6 +557,54 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
551
557
  break;
552
558
  }
553
559
 
560
+ case 'tool_search_call': {
561
+ const toolCallId = part.call_id ?? part.id;
562
+ const isHosted = part.execution === 'server';
563
+
564
+ if (isHosted) {
565
+ hostedToolSearchCallIds.push(toolCallId);
566
+ }
567
+
568
+ content.push({
569
+ type: 'tool-call',
570
+ toolCallId,
571
+ toolName: toolNameMapping.toCustomToolName('tool_search'),
572
+ input: JSON.stringify({
573
+ arguments: part.arguments,
574
+ call_id: part.call_id,
575
+ } satisfies InferSchema<typeof toolSearchInputSchema>),
576
+ ...(isHosted ? { providerExecuted: true } : {}),
577
+ providerMetadata: {
578
+ [providerOptionsName]: {
579
+ itemId: part.id,
580
+ },
581
+ },
582
+ });
583
+
584
+ break;
585
+ }
586
+
587
+ case 'tool_search_output': {
588
+ const toolCallId =
589
+ part.call_id ?? hostedToolSearchCallIds.shift() ?? part.id;
590
+
591
+ content.push({
592
+ type: 'tool-result',
593
+ toolCallId,
594
+ toolName: toolNameMapping.toCustomToolName('tool_search'),
595
+ result: {
596
+ tools: part.tools,
597
+ } satisfies InferSchema<typeof toolSearchOutputSchema>,
598
+ providerMetadata: {
599
+ [providerOptionsName]: {
600
+ itemId: part.id,
601
+ },
602
+ },
603
+ });
604
+
605
+ break;
606
+ }
607
+
554
608
  case 'local_shell_call': {
555
609
  content.push({
556
610
  type: 'tool-call',
@@ -1026,6 +1080,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1026
1080
  hasDiff: boolean;
1027
1081
  endEmitted: boolean;
1028
1082
  };
1083
+ toolSearchExecution?: 'server' | 'client';
1029
1084
  }
1030
1085
  | undefined
1031
1086
  > = {};
@@ -1054,6 +1109,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1054
1109
  > = {};
1055
1110
 
1056
1111
  let serviceTier: string | undefined;
1112
+ const hostedToolSearchCallIds: string[] = [];
1057
1113
 
1058
1114
  return {
1059
1115
  stream: response.pipeThrough(
@@ -1188,6 +1244,28 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1188
1244
  input: '{}',
1189
1245
  providerExecuted: true,
1190
1246
  });
1247
+ } else if (value.item.type === 'tool_search_call') {
1248
+ const toolCallId = value.item.id;
1249
+ const toolName =
1250
+ toolNameMapping.toCustomToolName('tool_search');
1251
+ const isHosted = value.item.execution === 'server';
1252
+
1253
+ ongoingToolCalls[value.output_index] = {
1254
+ toolName,
1255
+ toolCallId,
1256
+ toolSearchExecution: value.item.execution ?? 'server',
1257
+ };
1258
+
1259
+ if (isHosted) {
1260
+ controller.enqueue({
1261
+ type: 'tool-input-start',
1262
+ id: toolCallId,
1263
+ toolName,
1264
+ providerExecuted: true,
1265
+ });
1266
+ }
1267
+ } else if (value.item.type === 'tool_search_output') {
1268
+ // handled on output_item.done so we can pair it with the call
1191
1269
  } else if (
1192
1270
  value.item.type === 'mcp_call' ||
1193
1271
  value.item.type === 'mcp_list_tools' ||
@@ -1418,6 +1496,67 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1418
1496
  result: value.item.result,
1419
1497
  } satisfies InferSchema<typeof imageGenerationOutputSchema>,
1420
1498
  });
1499
+ } else if (value.item.type === 'tool_search_call') {
1500
+ const toolCall = ongoingToolCalls[value.output_index];
1501
+ const isHosted = value.item.execution === 'server';
1502
+
1503
+ if (toolCall != null) {
1504
+ const toolCallId = isHosted
1505
+ ? toolCall.toolCallId
1506
+ : (value.item.call_id ?? value.item.id);
1507
+
1508
+ if (isHosted) {
1509
+ hostedToolSearchCallIds.push(toolCallId);
1510
+ } else {
1511
+ controller.enqueue({
1512
+ type: 'tool-input-start',
1513
+ id: toolCallId,
1514
+ toolName: toolCall.toolName,
1515
+ });
1516
+ }
1517
+
1518
+ controller.enqueue({
1519
+ type: 'tool-input-end',
1520
+ id: toolCallId,
1521
+ });
1522
+
1523
+ controller.enqueue({
1524
+ type: 'tool-call',
1525
+ toolCallId,
1526
+ toolName: toolCall.toolName,
1527
+ input: JSON.stringify({
1528
+ arguments: value.item.arguments,
1529
+ call_id: isHosted ? null : toolCallId,
1530
+ } satisfies InferSchema<typeof toolSearchInputSchema>),
1531
+ ...(isHosted ? { providerExecuted: true } : {}),
1532
+ providerMetadata: {
1533
+ [providerOptionsName]: {
1534
+ itemId: value.item.id,
1535
+ },
1536
+ },
1537
+ });
1538
+ }
1539
+
1540
+ ongoingToolCalls[value.output_index] = undefined;
1541
+ } else if (value.item.type === 'tool_search_output') {
1542
+ const toolCallId =
1543
+ value.item.call_id ??
1544
+ hostedToolSearchCallIds.shift() ??
1545
+ value.item.id;
1546
+
1547
+ controller.enqueue({
1548
+ type: 'tool-result',
1549
+ toolCallId,
1550
+ toolName: toolNameMapping.toCustomToolName('tool_search'),
1551
+ result: {
1552
+ tools: value.item.tools,
1553
+ } satisfies InferSchema<typeof toolSearchOutputSchema>,
1554
+ providerMetadata: {
1555
+ [providerOptionsName]: {
1556
+ itemId: value.item.id,
1557
+ },
1558
+ },
1559
+ });
1421
1560
  } else if (value.item.type === 'mcp_call') {
1422
1561
  ongoingToolCalls[value.output_index] = undefined;
1423
1562