@ai-sdk/openai 3.0.35 → 3.0.37

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -958,6 +958,84 @@ Your execute function must return:
958
958
  - **status** _'completed' | 'failed'_ - Whether the patch was applied successfully
959
959
  - **output** _string_ (optional) - Human-readable log text (e.g., results or error messages)
960
960
 
961
+ #### Custom Tool
962
+
963
+ The OpenAI Responses API supports
964
+ [custom tools](https://developers.openai.com/api/docs/guides/function-calling/#custom-tools)
965
+ through the `openai.tools.customTool` factory.
966
+ Custom tools return a raw string instead of JSON, optionally constrained to a grammar
967
+ (regex or Lark syntax). This makes them useful for generating structured text like
968
+ SQL queries, code snippets, or any output that must match a specific pattern.
969
+
970
+ ```ts
971
+ import { openai } from '@ai-sdk/openai';
972
+ import { generateText, stepCountIs } from 'ai';
973
+
974
+ const result = await generateText({
975
+ model: openai.responses('gpt-5.2-codex'),
976
+ tools: {
977
+ write_sql: openai.tools.customTool({
978
+ name: 'write_sql',
979
+ description: 'Write a SQL SELECT query to answer the user question.',
980
+ format: {
981
+ type: 'grammar',
982
+ syntax: 'regex',
983
+ definition: 'SELECT .+',
984
+ },
985
+ execute: async input => {
986
+ // input is a raw string matching the grammar, e.g. "SELECT * FROM users WHERE age > 25"
987
+ const rows = await db.query(input);
988
+ return JSON.stringify(rows);
989
+ },
990
+ }),
991
+ },
992
+ toolChoice: 'required',
993
+ prompt: 'Write a SQL query to get all users older than 25.',
994
+ stopWhen: stepCountIs(3),
995
+ });
996
+ ```
997
+
998
+ Custom tools also work with `streamText`:
999
+
1000
+ ```ts
1001
+ import { openai } from '@ai-sdk/openai';
1002
+ import { streamText } from 'ai';
1003
+
1004
+ const result = streamText({
1005
+ model: openai.responses('gpt-5.2-codex'),
1006
+ tools: {
1007
+ write_sql: openai.tools.customTool({
1008
+ name: 'write_sql',
1009
+ description: 'Write a SQL SELECT query to answer the user question.',
1010
+ format: {
1011
+ type: 'grammar',
1012
+ syntax: 'regex',
1013
+ definition: 'SELECT .+',
1014
+ },
1015
+ }),
1016
+ },
1017
+ toolChoice: 'required',
1018
+ prompt: 'Write a SQL query to get all users older than 25.',
1019
+ });
1020
+
1021
+ for await (const chunk of result.fullStream) {
1022
+ if (chunk.type === 'tool-call') {
1023
+ console.log(`Tool: ${chunk.toolName}`);
1024
+ console.log(`Input: ${chunk.input}`);
1025
+ }
1026
+ }
1027
+ ```
1028
+
1029
+ The custom tool can be configured with:
1030
+
1031
+ - **name** _string_ (required) - The name of the custom tool. Used to identify the tool in tool calls.
1032
+ - **description** _string_ (optional) - A description of what the tool does, to help the model understand when to use it.
1033
+ - **format** _object_ (optional) - The output format constraint. Omit for unconstrained text output.
1034
+ - **type** _'grammar' | 'text'_ - The format type. Use `'grammar'` for constrained output or `'text'` for explicit unconstrained text.
1035
+ - **syntax** _'regex' | 'lark'_ - (grammar only) The grammar syntax. Use `'regex'` for regular expression patterns or `'lark'` for [Lark parser grammar](https://lark-parser.readthedocs.io/).
1036
+ - **definition** _string_ - (grammar only) The grammar definition string (a regex pattern or Lark grammar).
1037
+ - **execute** _function_ (optional) - An async function that receives the raw string input and returns a string result. Enables multi-turn tool calling.
1038
+
961
1039
  #### Image Inputs
962
1040
 
963
1041
  The OpenAI Responses API supports Image inputs for appropriate models.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/openai",
3
- "version": "3.0.35",
3
+ "version": "3.0.37",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -37,7 +37,7 @@
37
37
  },
38
38
  "dependencies": {
39
39
  "@ai-sdk/provider": "3.0.8",
40
- "@ai-sdk/provider-utils": "4.0.15"
40
+ "@ai-sdk/provider-utils": "4.0.16"
41
41
  },
42
42
  "devDependencies": {
43
43
  "@types/node": "20.17.24",
@@ -551,7 +551,10 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
551
551
 
552
552
  // Tool call start. OpenAI returns all information except the arguments in the first chunk.
553
553
  if (toolCalls[index] == null) {
554
- if (toolCallDelta.type !== 'function') {
554
+ if (
555
+ toolCallDelta.type != null &&
556
+ toolCallDelta.type !== 'function'
557
+ ) {
555
558
  throw new InvalidResponseDataError({
556
559
  data: toolCallDelta,
557
560
  message: `Expected 'function' type.`,
@@ -1,5 +1,6 @@
1
1
  import { applyPatch } from './tool/apply-patch';
2
2
  import { codeInterpreter } from './tool/code-interpreter';
3
+ import { customTool } from './tool/custom';
3
4
  import { fileSearch } from './tool/file-search';
4
5
  import { imageGeneration } from './tool/image-generation';
5
6
  import { localShell } from './tool/local-shell';
@@ -18,6 +19,17 @@ export const openaiTools = {
18
19
  */
19
20
  applyPatch,
20
21
 
22
+ /**
23
+ * Custom tools let callers constrain model output to a grammar (regex or
24
+ * Lark syntax). The model returns a `custom_tool_call` output item whose
25
+ * `input` field is a string matching the specified grammar.
26
+ *
27
+ * @param name - The name of the custom tool.
28
+ * @param description - An optional description of the tool.
29
+ * @param format - The output format constraint (grammar type, syntax, and definition).
30
+ */
31
+ customTool,
32
+
21
33
  /**
22
34
  * The Code Interpreter tool allows models to write and run Python code in a
23
35
  * sandboxed environment to solve complex problems in domains like data analysis,
@@ -22,6 +22,7 @@ import {
22
22
  } from '../tool/local-shell';
23
23
  import { shellInputSchema, shellOutputSchema } from '../tool/shell';
24
24
  import {
25
+ OpenAIResponsesCustomToolCallOutput,
25
26
  OpenAIResponsesFunctionCallOutput,
26
27
  OpenAIResponsesInput,
27
28
  OpenAIResponsesReasoning,
@@ -47,6 +48,7 @@ export async function convertToOpenAIResponsesInput({
47
48
  hasLocalShellTool = false,
48
49
  hasShellTool = false,
49
50
  hasApplyPatchTool = false,
51
+ customProviderToolNames,
50
52
  }: {
51
53
  prompt: LanguageModelV3Prompt;
52
54
  toolNameMapping: ToolNameMapping;
@@ -58,6 +60,7 @@ export async function convertToOpenAIResponsesInput({
58
60
  hasLocalShellTool?: boolean;
59
61
  hasShellTool?: boolean;
60
62
  hasApplyPatchTool?: boolean;
63
+ customProviderToolNames?: Set<string>;
61
64
  }): Promise<{
62
65
  input: OpenAIResponsesInput;
63
66
  warnings: Array<SharedV3Warning>;
@@ -277,6 +280,20 @@ export async function convertToOpenAIResponsesInput({
277
280
  break;
278
281
  }
279
282
 
283
+ if (customProviderToolNames?.has(resolvedToolName)) {
284
+ input.push({
285
+ type: 'custom_tool_call',
286
+ call_id: part.toolCallId,
287
+ name: resolvedToolName,
288
+ input:
289
+ typeof part.input === 'string'
290
+ ? part.input
291
+ : JSON.stringify(part.input),
292
+ id,
293
+ });
294
+ break;
295
+ }
296
+
280
297
  input.push({
281
298
  type: 'function_call',
282
299
  call_id: part.toolCallId,
@@ -575,6 +592,63 @@ export async function convertToOpenAIResponsesInput({
575
592
  continue;
576
593
  }
577
594
 
595
+ if (customProviderToolNames?.has(resolvedToolName)) {
596
+ let outputValue: OpenAIResponsesCustomToolCallOutput['output'];
597
+ switch (output.type) {
598
+ case 'text':
599
+ case 'error-text':
600
+ outputValue = output.value;
601
+ break;
602
+ case 'execution-denied':
603
+ outputValue = output.reason ?? 'Tool execution denied.';
604
+ break;
605
+ case 'json':
606
+ case 'error-json':
607
+ outputValue = JSON.stringify(output.value);
608
+ break;
609
+ case 'content':
610
+ outputValue = output.value
611
+ .map(item => {
612
+ switch (item.type) {
613
+ case 'text':
614
+ return { type: 'input_text' as const, text: item.text };
615
+ case 'image-data':
616
+ return {
617
+ type: 'input_image' as const,
618
+ image_url: `data:${item.mediaType};base64,${item.data}`,
619
+ };
620
+ case 'image-url':
621
+ return {
622
+ type: 'input_image' as const,
623
+ image_url: item.url,
624
+ };
625
+ case 'file-data':
626
+ return {
627
+ type: 'input_file' as const,
628
+ filename: item.filename ?? 'data',
629
+ file_data: `data:${item.mediaType};base64,${item.data}`,
630
+ };
631
+ default:
632
+ warnings.push({
633
+ type: 'other',
634
+ message: `unsupported custom tool content part type: ${item.type}`,
635
+ });
636
+ return undefined;
637
+ }
638
+ })
639
+ .filter(isNonNullable);
640
+ break;
641
+ default:
642
+ outputValue = '';
643
+ }
644
+ input.push({
645
+ type: 'custom_tool_call_output',
646
+ call_id: part.toolCallId,
647
+ output: outputValue,
648
+ } satisfies OpenAIResponsesCustomToolCallOutput);
649
+ continue;
650
+ }
651
+
578
652
  let contentValue: OpenAIResponsesFunctionCallOutput['output'];
579
653
  switch (output.type) {
580
654
  case 'text':
@@ -10,6 +10,8 @@ export type OpenAIResponsesInputItem =
10
10
  | OpenAIResponsesAssistantMessage
11
11
  | OpenAIResponsesFunctionCall
12
12
  | OpenAIResponsesFunctionCallOutput
13
+ | OpenAIResponsesCustomToolCall
14
+ | OpenAIResponsesCustomToolCallOutput
13
15
  | OpenAIResponsesMcpApprovalResponse
14
16
  | OpenAIResponsesComputerCall
15
17
  | OpenAIResponsesLocalShellCall
@@ -94,6 +96,20 @@ export type OpenAIResponsesFunctionCallOutput = {
94
96
  >;
95
97
  };
96
98
 
99
+ export type OpenAIResponsesCustomToolCall = {
100
+ type: 'custom_tool_call';
101
+ id?: string;
102
+ call_id: string;
103
+ name: string;
104
+ input: string;
105
+ };
106
+
107
+ export type OpenAIResponsesCustomToolCallOutput = {
108
+ type: 'custom_tool_call_output';
109
+ call_id: string;
110
+ output: OpenAIResponsesFunctionCallOutput['output'];
111
+ };
112
+
97
113
  export type OpenAIResponsesMcpApprovalResponse = {
98
114
  type: 'mcp_approval_response';
99
115
  approval_request_id: string;
@@ -326,6 +342,20 @@ export type OpenAIResponsesTool =
326
342
  server_description: string | undefined;
327
343
  server_url: string | undefined;
328
344
  }
345
+ | {
346
+ type: 'custom';
347
+ name: string;
348
+ description?: string;
349
+ format?:
350
+ | {
351
+ type: 'grammar';
352
+ syntax: 'regex' | 'lark';
353
+ definition: string;
354
+ }
355
+ | {
356
+ type: 'text';
357
+ };
358
+ }
329
359
  | {
330
360
  type: 'local_shell';
331
361
  }
@@ -527,6 +557,13 @@ export const openaiResponsesChunkSchema = lazySchema(() =>
527
557
  }),
528
558
  ]),
529
559
  }),
560
+ z.object({
561
+ type: z.literal('custom_tool_call'),
562
+ id: z.string(),
563
+ call_id: z.string(),
564
+ name: z.string(),
565
+ input: z.string(),
566
+ }),
530
567
  z.object({
531
568
  type: z.literal('shell_call'),
532
569
  id: z.string(),
@@ -579,6 +616,14 @@ export const openaiResponsesChunkSchema = lazySchema(() =>
579
616
  arguments: z.string(),
580
617
  status: z.literal('completed'),
581
618
  }),
619
+ z.object({
620
+ type: z.literal('custom_tool_call'),
621
+ id: z.string(),
622
+ call_id: z.string(),
623
+ name: z.string(),
624
+ input: z.string(),
625
+ status: z.literal('completed'),
626
+ }),
582
627
  z.object({
583
628
  type: z.literal('code_interpreter_call'),
584
629
  id: z.string(),
@@ -778,6 +823,12 @@ export const openaiResponsesChunkSchema = lazySchema(() =>
778
823
  output_index: z.number(),
779
824
  delta: z.string(),
780
825
  }),
826
+ z.object({
827
+ type: z.literal('response.custom_tool_call_input.delta'),
828
+ item_id: z.string(),
829
+ output_index: z.number(),
830
+ delta: z.string(),
831
+ }),
781
832
  z.object({
782
833
  type: z.literal('response.image_generation_call.partial_image'),
783
834
  item_id: z.string(),
@@ -1059,6 +1110,13 @@ export const openaiResponsesResponseSchema = lazySchema(() =>
1059
1110
  arguments: z.string(),
1060
1111
  id: z.string(),
1061
1112
  }),
1113
+ z.object({
1114
+ type: z.literal('custom_tool_call'),
1115
+ call_id: z.string(),
1116
+ name: z.string(),
1117
+ input: z.string(),
1118
+ id: z.string(),
1119
+ }),
1062
1120
  z.object({
1063
1121
  type: z.literal('computer_call'),
1064
1122
  id: z.string(),
@@ -194,6 +194,22 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
194
194
  'openai.mcp': 'mcp',
195
195
  'openai.apply_patch': 'apply_patch',
196
196
  },
197
+ resolveProviderToolName: tool =>
198
+ tool.id === 'openai.custom'
199
+ ? (tool.args as { name?: string }).name
200
+ : undefined,
201
+ });
202
+
203
+ const customProviderToolNames = new Set<string>();
204
+ const {
205
+ tools: openaiTools,
206
+ toolChoice: openaiToolChoice,
207
+ toolWarnings,
208
+ } = await prepareResponsesTools({
209
+ tools,
210
+ toolChoice,
211
+ toolNameMapping,
212
+ customProviderToolNames,
197
213
  });
198
214
 
199
215
  const { input, warnings: inputWarnings } =
@@ -212,6 +228,10 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
212
228
  hasLocalShellTool: hasOpenAITool('openai.local_shell'),
213
229
  hasShellTool: hasOpenAITool('openai.shell'),
214
230
  hasApplyPatchTool: hasOpenAITool('openai.apply_patch'),
231
+ customProviderToolNames:
232
+ customProviderToolNames.size > 0
233
+ ? customProviderToolNames
234
+ : undefined,
215
235
  });
216
236
 
217
237
  warnings.push(...inputWarnings);
@@ -408,15 +428,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
408
428
  delete (baseArgs as any).service_tier;
409
429
  }
410
430
 
411
- const {
412
- tools: openaiTools,
413
- toolChoice: openaiToolChoice,
414
- toolWarnings,
415
- } = await prepareResponsesTools({
416
- tools,
417
- toolChoice,
418
- });
419
-
420
431
  const shellToolEnvType = (
421
432
  tools?.find(
422
433
  tool => tool.type === 'provider' && tool.id === 'openai.shell',
@@ -716,6 +727,24 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
716
727
  break;
717
728
  }
718
729
 
730
+ case 'custom_tool_call': {
731
+ hasFunctionCall = true;
732
+ const toolName = toolNameMapping.toCustomToolName(part.name);
733
+
734
+ content.push({
735
+ type: 'tool-call',
736
+ toolCallId: part.call_id,
737
+ toolName,
738
+ input: JSON.stringify(part.input),
739
+ providerMetadata: {
740
+ [providerOptionsName]: {
741
+ itemId: part.id,
742
+ },
743
+ },
744
+ });
745
+ break;
746
+ }
747
+
719
748
  case 'web_search_call': {
720
749
  content.push({
721
750
  type: 'tool-call',
@@ -1062,6 +1091,20 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1062
1091
  id: value.item.call_id,
1063
1092
  toolName: value.item.name,
1064
1093
  });
1094
+ } else if (value.item.type === 'custom_tool_call') {
1095
+ const toolName = toolNameMapping.toCustomToolName(
1096
+ value.item.name,
1097
+ );
1098
+ ongoingToolCalls[value.output_index] = {
1099
+ toolName,
1100
+ toolCallId: value.item.call_id,
1101
+ };
1102
+
1103
+ controller.enqueue({
1104
+ type: 'tool-input-start',
1105
+ id: value.item.call_id,
1106
+ toolName,
1107
+ });
1065
1108
  } else if (value.item.type === 'web_search_call') {
1066
1109
  ongoingToolCalls[value.output_index] = {
1067
1110
  toolName: toolNameMapping.toCustomToolName(
@@ -1275,6 +1318,29 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1275
1318
  },
1276
1319
  },
1277
1320
  });
1321
+ } else if (value.item.type === 'custom_tool_call') {
1322
+ ongoingToolCalls[value.output_index] = undefined;
1323
+ hasFunctionCall = true;
1324
+ const toolName = toolNameMapping.toCustomToolName(
1325
+ value.item.name,
1326
+ );
1327
+
1328
+ controller.enqueue({
1329
+ type: 'tool-input-end',
1330
+ id: value.item.call_id,
1331
+ });
1332
+
1333
+ controller.enqueue({
1334
+ type: 'tool-call',
1335
+ toolCallId: value.item.call_id,
1336
+ toolName,
1337
+ input: JSON.stringify(value.item.input),
1338
+ providerMetadata: {
1339
+ [providerOptionsName]: {
1340
+ itemId: value.item.id,
1341
+ },
1342
+ },
1343
+ });
1278
1344
  } else if (value.item.type === 'web_search_call') {
1279
1345
  ongoingToolCalls[value.output_index] = undefined;
1280
1346
 
@@ -1585,6 +1651,16 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1585
1651
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
1586
1652
  const toolCall = ongoingToolCalls[value.output_index];
1587
1653
 
1654
+ if (toolCall != null) {
1655
+ controller.enqueue({
1656
+ type: 'tool-input-delta',
1657
+ id: toolCall.toolCallId,
1658
+ delta: value.delta,
1659
+ });
1660
+ }
1661
+ } else if (isResponseCustomToolCallInputDeltaChunk(value)) {
1662
+ const toolCall = ongoingToolCalls[value.output_index];
1663
+
1588
1664
  if (toolCall != null) {
1589
1665
  controller.enqueue({
1590
1666
  type: 'tool-input-delta',
@@ -1923,6 +1999,15 @@ function isResponseFunctionCallArgumentsDeltaChunk(
1923
1999
  } {
1924
2000
  return chunk.type === 'response.function_call_arguments.delta';
1925
2001
  }
2002
+
2003
+ function isResponseCustomToolCallInputDeltaChunk(
2004
+ chunk: OpenAIResponsesChunk,
2005
+ ): chunk is OpenAIResponsesChunk & {
2006
+ type: 'response.custom_tool_call_input.delta';
2007
+ } {
2008
+ return chunk.type === 'response.custom_tool_call_input.delta';
2009
+ }
2010
+
1926
2011
  function isResponseImageGenerationCallPartialImageChunk(
1927
2012
  chunk: OpenAIResponsesChunk,
1928
2013
  ): chunk is OpenAIResponsesChunk & {
@@ -3,10 +3,11 @@ import {
3
3
  SharedV3Warning,
4
4
  UnsupportedFunctionalityError,
5
5
  } from '@ai-sdk/provider';
6
- import { validateTypes } from '@ai-sdk/provider-utils';
6
+ import { ToolNameMapping, validateTypes } from '@ai-sdk/provider-utils';
7
7
  import { codeInterpreterArgsSchema } from '../tool/code-interpreter';
8
8
  import { fileSearchArgsSchema } from '../tool/file-search';
9
9
  import { imageGenerationArgsSchema } from '../tool/image-generation';
10
+ import { customArgsSchema } from '../tool/custom';
10
11
  import { mcpArgsSchema } from '../tool/mcp';
11
12
  import { shellArgsSchema } from '../tool/shell';
12
13
  import { webSearchArgsSchema } from '../tool/web-search';
@@ -16,9 +17,13 @@ import { OpenAIResponsesTool } from './openai-responses-api';
16
17
  export async function prepareResponsesTools({
17
18
  tools,
18
19
  toolChoice,
20
+ toolNameMapping,
21
+ customProviderToolNames,
19
22
  }: {
20
23
  tools: LanguageModelV3CallOptions['tools'];
21
24
  toolChoice: LanguageModelV3CallOptions['toolChoice'] | undefined;
25
+ toolNameMapping?: ToolNameMapping;
26
+ customProviderToolNames?: Set<string>;
22
27
  }): Promise<{
23
28
  tools?: Array<OpenAIResponsesTool>;
24
29
  toolChoice?:
@@ -29,6 +34,7 @@ export async function prepareResponsesTools({
29
34
  | { type: 'web_search_preview' }
30
35
  | { type: 'web_search' }
31
36
  | { type: 'function'; name: string }
37
+ | { type: 'custom'; name: string }
32
38
  | { type: 'code_interpreter' }
33
39
  | { type: 'mcp' }
34
40
  | { type: 'image_generation' }
@@ -45,6 +51,8 @@ export async function prepareResponsesTools({
45
51
  }
46
52
 
47
53
  const openaiTools: Array<OpenAIResponsesTool> = [];
54
+ const resolvedCustomProviderToolNames =
55
+ customProviderToolNames ?? new Set<string>();
48
56
 
49
57
  for (const tool of tools) {
50
58
  switch (tool.type) {
@@ -225,6 +233,21 @@ export async function prepareResponsesTools({
225
233
 
226
234
  break;
227
235
  }
236
+ case 'openai.custom': {
237
+ const args = await validateTypes({
238
+ value: tool.args,
239
+ schema: customArgsSchema,
240
+ });
241
+
242
+ openaiTools.push({
243
+ type: 'custom',
244
+ name: args.name,
245
+ description: args.description,
246
+ format: args.format,
247
+ });
248
+ resolvedCustomProviderToolNames.add(args.name);
249
+ break;
250
+ }
228
251
  }
229
252
  break;
230
253
  }
@@ -248,21 +271,28 @@ export async function prepareResponsesTools({
248
271
  case 'none':
249
272
  case 'required':
250
273
  return { tools: openaiTools, toolChoice: type, toolWarnings };
251
- case 'tool':
274
+ case 'tool': {
275
+ const resolvedToolName =
276
+ toolNameMapping?.toProviderToolName(toolChoice.toolName) ??
277
+ toolChoice.toolName;
278
+
252
279
  return {
253
280
  tools: openaiTools,
254
281
  toolChoice:
255
- toolChoice.toolName === 'code_interpreter' ||
256
- toolChoice.toolName === 'file_search' ||
257
- toolChoice.toolName === 'image_generation' ||
258
- toolChoice.toolName === 'web_search_preview' ||
259
- toolChoice.toolName === 'web_search' ||
260
- toolChoice.toolName === 'mcp' ||
261
- toolChoice.toolName === 'apply_patch'
262
- ? { type: toolChoice.toolName }
263
- : { type: 'function', name: toolChoice.toolName },
282
+ resolvedToolName === 'code_interpreter' ||
283
+ resolvedToolName === 'file_search' ||
284
+ resolvedToolName === 'image_generation' ||
285
+ resolvedToolName === 'web_search_preview' ||
286
+ resolvedToolName === 'web_search' ||
287
+ resolvedToolName === 'mcp' ||
288
+ resolvedToolName === 'apply_patch'
289
+ ? { type: resolvedToolName }
290
+ : resolvedCustomProviderToolNames.has(resolvedToolName)
291
+ ? { type: 'custom', name: resolvedToolName }
292
+ : { type: 'function', name: resolvedToolName },
264
293
  toolWarnings,
265
294
  };
295
+ }
266
296
  default: {
267
297
  const _exhaustiveCheck: never = type;
268
298
  throw new UnsupportedFunctionalityError({
@@ -0,0 +1,64 @@
1
+ import {
2
+ createProviderToolFactory,
3
+ lazySchema,
4
+ zodSchema,
5
+ } from '@ai-sdk/provider-utils';
6
+ import { z } from 'zod/v4';
7
+
8
+ export const customArgsSchema = lazySchema(() =>
9
+ zodSchema(
10
+ z.object({
11
+ name: z.string(),
12
+ description: z.string().optional(),
13
+ format: z
14
+ .union([
15
+ z.object({
16
+ type: z.literal('grammar'),
17
+ syntax: z.enum(['regex', 'lark']),
18
+ definition: z.string(),
19
+ }),
20
+ z.object({
21
+ type: z.literal('text'),
22
+ }),
23
+ ])
24
+ .optional(),
25
+ }),
26
+ ),
27
+ );
28
+
29
+ const customInputSchema = lazySchema(() => zodSchema(z.string()));
30
+
31
+ export const customToolFactory = createProviderToolFactory<
32
+ string,
33
+ {
34
+ /**
35
+ * The name of the custom tool, used to identify it in the API.
36
+ */
37
+ name: string;
38
+
39
+ /**
40
+ * An optional description of what the tool does.
41
+ */
42
+ description?: string;
43
+
44
+ /**
45
+ * The output format specification for the tool.
46
+ * Omit for unconstrained text output.
47
+ */
48
+ format?:
49
+ | {
50
+ type: 'grammar';
51
+ syntax: 'regex' | 'lark';
52
+ definition: string;
53
+ }
54
+ | {
55
+ type: 'text';
56
+ };
57
+ }
58
+ >({
59
+ id: 'openai.custom',
60
+ inputSchema: customInputSchema,
61
+ });
62
+
63
+ export const customTool = (args: Parameters<typeof customToolFactory>[0]) =>
64
+ customToolFactory(args);