@ai-sdk/openai 3.0.36 → 3.0.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -958,6 +958,84 @@ Your execute function must return:
958
958
  - **status** _'completed' | 'failed'_ - Whether the patch was applied successfully
959
959
  - **output** _string_ (optional) - Human-readable log text (e.g., results or error messages)
960
960
 
961
+ #### Custom Tool
962
+
963
+ The OpenAI Responses API supports
964
+ [custom tools](https://developers.openai.com/api/docs/guides/function-calling/#custom-tools)
965
+ through the `openai.tools.customTool` tool.
966
+ Custom tools return a raw string instead of JSON, optionally constrained to a grammar
967
+ (regex or Lark syntax). This makes them useful for generating structured text like
968
+ SQL queries, code snippets, or any output that must match a specific pattern.
969
+
970
+ ```ts
971
+ import { openai } from '@ai-sdk/openai';
972
+ import { generateText, stepCountIs } from 'ai';
973
+
974
+ const result = await generateText({
975
+ model: openai.responses('gpt-5.2-codex'),
976
+ tools: {
977
+ write_sql: openai.tools.customTool({
978
+ name: 'write_sql',
979
+ description: 'Write a SQL SELECT query to answer the user question.',
980
+ format: {
981
+ type: 'grammar',
982
+ syntax: 'regex',
983
+ definition: 'SELECT .+',
984
+ },
985
+ execute: async input => {
986
+ // input is a raw string matching the grammar, e.g. "SELECT * FROM users WHERE age > 25"
987
+ const rows = await db.query(input);
988
+ return JSON.stringify(rows);
989
+ },
990
+ }),
991
+ },
992
+ toolChoice: 'required',
993
+ prompt: 'Write a SQL query to get all users older than 25.',
994
+ stopWhen: stepCountIs(3),
995
+ });
996
+ ```
997
+
998
+ Custom tools also work with `streamText`:
999
+
1000
+ ```ts
1001
+ import { openai } from '@ai-sdk/openai';
1002
+ import { streamText } from 'ai';
1003
+
1004
+ const result = streamText({
1005
+ model: openai.responses('gpt-5.2-codex'),
1006
+ tools: {
1007
+ write_sql: openai.tools.customTool({
1008
+ name: 'write_sql',
1009
+ description: 'Write a SQL SELECT query to answer the user question.',
1010
+ format: {
1011
+ type: 'grammar',
1012
+ syntax: 'regex',
1013
+ definition: 'SELECT .+',
1014
+ },
1015
+ }),
1016
+ },
1017
+ toolChoice: 'required',
1018
+ prompt: 'Write a SQL query to get all users older than 25.',
1019
+ });
1020
+
1021
+ for await (const chunk of result.fullStream) {
1022
+ if (chunk.type === 'tool-call') {
1023
+ console.log(`Tool: ${chunk.toolName}`);
1024
+ console.log(`Input: ${chunk.input}`);
1025
+ }
1026
+ }
1027
+ ```
1028
+
1029
+ The custom tool can be configured with:
1030
+
1031
+ - **name** _string_ (required) - The name of the custom tool. Used to identify the tool in tool calls.
1032
+ - **description** _string_ (optional) - A description of what the tool does, to help the model understand when to use it.
1033
+ - **format** _object_ (optional) - The output format constraint. Omit for unconstrained text output.
1034
+ - **type** _'grammar' | 'text'_ - The format type. Use `'grammar'` for constrained output or `'text'` for explicit unconstrained text.
1035
+ - **syntax** _'regex' | 'lark'_ - (grammar only) The grammar syntax. Use `'regex'` for regular expression patterns or `'lark'` for [Lark parser grammar](https://lark-parser.readthedocs.io/).
1036
+ - **definition** _string_ - (grammar only) The grammar definition string (a regex pattern or Lark grammar).
1037
+ - **execute** _function_ (optional) - An async function that receives the raw string input and returns a string result. Enables multi-turn tool calling.
1038
+
961
1039
  #### Image Inputs
962
1040
 
963
1041
  The OpenAI Responses API supports Image inputs for appropriate models.
@@ -1429,7 +1507,7 @@ The following optional provider options are available for OpenAI chat models:
1429
1507
 
1430
1508
  OpenAI has introduced the `o1`, `o3`, and `o4` series of [reasoning models](https://platform.openai.com/docs/guides/reasoning).
1431
1509
  Currently, `o4-mini`, `o3`, `o3-mini`, and `o1` are available via both the chat and responses APIs. The
1432
- models `codex-mini-latest` and `computer-use-preview` are available only via the [responses API](#responses-models).
1510
+ model `gpt-5.1-codex-mini` is available only via the [responses API](#responses-models).
1433
1511
 
1434
1512
  Reasoning models currently only generate text, have several limitations, and are only supported using `generateText` and `streamText`.
1435
1513
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/openai",
3
- "version": "3.0.36",
3
+ "version": "3.0.38",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -37,7 +37,7 @@
37
37
  },
38
38
  "dependencies": {
39
39
  "@ai-sdk/provider": "3.0.8",
40
- "@ai-sdk/provider-utils": "4.0.15"
40
+ "@ai-sdk/provider-utils": "4.0.16"
41
41
  },
42
42
  "devDependencies": {
43
43
  "@types/node": "20.17.24",
@@ -32,15 +32,10 @@ export type OpenAIChatModelId =
32
32
  | 'gpt-4o-search-preview-2025-03-11'
33
33
  | 'gpt-4o-mini-search-preview'
34
34
  | 'gpt-4o-mini-search-preview-2025-03-11'
35
- | 'gpt-4-turbo'
36
- | 'gpt-4-turbo-2024-04-09'
37
- | 'gpt-4'
38
- | 'gpt-4-0613'
39
35
  | 'gpt-3.5-turbo-0125'
40
36
  | 'gpt-3.5-turbo'
41
37
  | 'gpt-3.5-turbo-1106'
42
38
  | 'gpt-3.5-turbo-16k'
43
- | 'chatgpt-4o-latest'
44
39
  | 'gpt-5'
45
40
  | 'gpt-5-2025-08-07'
46
41
  | 'gpt-5-mini'
@@ -33,8 +33,6 @@ export function getOpenAILanguageModelCapabilities(
33
33
  modelId.startsWith('o1') ||
34
34
  modelId.startsWith('o3') ||
35
35
  modelId.startsWith('o4-mini') ||
36
- modelId.startsWith('codex-mini') ||
37
- modelId.startsWith('computer-use-preview') ||
38
36
  (modelId.startsWith('gpt-5') && !modelId.startsWith('gpt-5-chat'));
39
37
 
40
38
  // https://platform.openai.com/docs/guides/latest-model#gpt-5-1-parameter-compatibility
@@ -1,5 +1,6 @@
1
1
  import { applyPatch } from './tool/apply-patch';
2
2
  import { codeInterpreter } from './tool/code-interpreter';
3
+ import { customTool } from './tool/custom';
3
4
  import { fileSearch } from './tool/file-search';
4
5
  import { imageGeneration } from './tool/image-generation';
5
6
  import { localShell } from './tool/local-shell';
@@ -18,6 +19,17 @@ export const openaiTools = {
18
19
  */
19
20
  applyPatch,
20
21
 
22
+ /**
23
+ * Custom tools let callers constrain model output to a grammar (regex or
24
+ * Lark syntax). The model returns a `custom_tool_call` output item whose
25
+ * `input` field is a string matching the specified grammar.
26
+ *
27
+ * @param name - The name of the custom tool.
28
+ * @param description - An optional description of the tool.
29
+ * @param format - The output format constraint (grammar type, syntax, and definition).
30
+ */
31
+ customTool,
32
+
21
33
  /**
22
34
  * The Code Interpreter tool allows models to write and run Python code in a
23
35
  * sandboxed environment to solve complex problems in domains like data analysis,
@@ -61,7 +73,7 @@ export const openaiTools = {
61
73
  * Local shell is a tool that allows agents to run shell commands locally
62
74
  * on a machine you or the user provides.
63
75
  *
64
- * Supported models: `gpt-5-codex` and `codex-mini-latest`
76
+ * Supported models: `gpt-5-codex`
65
77
  */
66
78
  localShell,
67
79
 
@@ -22,6 +22,7 @@ import {
22
22
  } from '../tool/local-shell';
23
23
  import { shellInputSchema, shellOutputSchema } from '../tool/shell';
24
24
  import {
25
+ OpenAIResponsesCustomToolCallOutput,
25
26
  OpenAIResponsesFunctionCallOutput,
26
27
  OpenAIResponsesInput,
27
28
  OpenAIResponsesReasoning,
@@ -47,6 +48,7 @@ export async function convertToOpenAIResponsesInput({
47
48
  hasLocalShellTool = false,
48
49
  hasShellTool = false,
49
50
  hasApplyPatchTool = false,
51
+ customProviderToolNames,
50
52
  }: {
51
53
  prompt: LanguageModelV3Prompt;
52
54
  toolNameMapping: ToolNameMapping;
@@ -58,6 +60,7 @@ export async function convertToOpenAIResponsesInput({
58
60
  hasLocalShellTool?: boolean;
59
61
  hasShellTool?: boolean;
60
62
  hasApplyPatchTool?: boolean;
63
+ customProviderToolNames?: Set<string>;
61
64
  }): Promise<{
62
65
  input: OpenAIResponsesInput;
63
66
  warnings: Array<SharedV3Warning>;
@@ -277,6 +280,20 @@ export async function convertToOpenAIResponsesInput({
277
280
  break;
278
281
  }
279
282
 
283
+ if (customProviderToolNames?.has(resolvedToolName)) {
284
+ input.push({
285
+ type: 'custom_tool_call',
286
+ call_id: part.toolCallId,
287
+ name: resolvedToolName,
288
+ input:
289
+ typeof part.input === 'string'
290
+ ? part.input
291
+ : JSON.stringify(part.input),
292
+ id,
293
+ });
294
+ break;
295
+ }
296
+
280
297
  input.push({
281
298
  type: 'function_call',
282
299
  call_id: part.toolCallId,
@@ -575,6 +592,63 @@ export async function convertToOpenAIResponsesInput({
575
592
  continue;
576
593
  }
577
594
 
595
+ if (customProviderToolNames?.has(resolvedToolName)) {
596
+ let outputValue: OpenAIResponsesCustomToolCallOutput['output'];
597
+ switch (output.type) {
598
+ case 'text':
599
+ case 'error-text':
600
+ outputValue = output.value;
601
+ break;
602
+ case 'execution-denied':
603
+ outputValue = output.reason ?? 'Tool execution denied.';
604
+ break;
605
+ case 'json':
606
+ case 'error-json':
607
+ outputValue = JSON.stringify(output.value);
608
+ break;
609
+ case 'content':
610
+ outputValue = output.value
611
+ .map(item => {
612
+ switch (item.type) {
613
+ case 'text':
614
+ return { type: 'input_text' as const, text: item.text };
615
+ case 'image-data':
616
+ return {
617
+ type: 'input_image' as const,
618
+ image_url: `data:${item.mediaType};base64,${item.data}`,
619
+ };
620
+ case 'image-url':
621
+ return {
622
+ type: 'input_image' as const,
623
+ image_url: item.url,
624
+ };
625
+ case 'file-data':
626
+ return {
627
+ type: 'input_file' as const,
628
+ filename: item.filename ?? 'data',
629
+ file_data: `data:${item.mediaType};base64,${item.data}`,
630
+ };
631
+ default:
632
+ warnings.push({
633
+ type: 'other',
634
+ message: `unsupported custom tool content part type: ${item.type}`,
635
+ });
636
+ return undefined;
637
+ }
638
+ })
639
+ .filter(isNonNullable);
640
+ break;
641
+ default:
642
+ outputValue = '';
643
+ }
644
+ input.push({
645
+ type: 'custom_tool_call_output',
646
+ call_id: part.toolCallId,
647
+ output: outputValue,
648
+ } satisfies OpenAIResponsesCustomToolCallOutput);
649
+ continue;
650
+ }
651
+
578
652
  let contentValue: OpenAIResponsesFunctionCallOutput['output'];
579
653
  switch (output.type) {
580
654
  case 'text':
@@ -10,6 +10,8 @@ export type OpenAIResponsesInputItem =
10
10
  | OpenAIResponsesAssistantMessage
11
11
  | OpenAIResponsesFunctionCall
12
12
  | OpenAIResponsesFunctionCallOutput
13
+ | OpenAIResponsesCustomToolCall
14
+ | OpenAIResponsesCustomToolCallOutput
13
15
  | OpenAIResponsesMcpApprovalResponse
14
16
  | OpenAIResponsesComputerCall
15
17
  | OpenAIResponsesLocalShellCall
@@ -94,6 +96,20 @@ export type OpenAIResponsesFunctionCallOutput = {
94
96
  >;
95
97
  };
96
98
 
99
+ export type OpenAIResponsesCustomToolCall = {
100
+ type: 'custom_tool_call';
101
+ id?: string;
102
+ call_id: string;
103
+ name: string;
104
+ input: string;
105
+ };
106
+
107
+ export type OpenAIResponsesCustomToolCallOutput = {
108
+ type: 'custom_tool_call_output';
109
+ call_id: string;
110
+ output: OpenAIResponsesFunctionCallOutput['output'];
111
+ };
112
+
97
113
  export type OpenAIResponsesMcpApprovalResponse = {
98
114
  type: 'mcp_approval_response';
99
115
  approval_request_id: string;
@@ -326,6 +342,20 @@ export type OpenAIResponsesTool =
326
342
  server_description: string | undefined;
327
343
  server_url: string | undefined;
328
344
  }
345
+ | {
346
+ type: 'custom';
347
+ name: string;
348
+ description?: string;
349
+ format?:
350
+ | {
351
+ type: 'grammar';
352
+ syntax: 'regex' | 'lark';
353
+ definition: string;
354
+ }
355
+ | {
356
+ type: 'text';
357
+ };
358
+ }
329
359
  | {
330
360
  type: 'local_shell';
331
361
  }
@@ -527,6 +557,13 @@ export const openaiResponsesChunkSchema = lazySchema(() =>
527
557
  }),
528
558
  ]),
529
559
  }),
560
+ z.object({
561
+ type: z.literal('custom_tool_call'),
562
+ id: z.string(),
563
+ call_id: z.string(),
564
+ name: z.string(),
565
+ input: z.string(),
566
+ }),
530
567
  z.object({
531
568
  type: z.literal('shell_call'),
532
569
  id: z.string(),
@@ -579,6 +616,14 @@ export const openaiResponsesChunkSchema = lazySchema(() =>
579
616
  arguments: z.string(),
580
617
  status: z.literal('completed'),
581
618
  }),
619
+ z.object({
620
+ type: z.literal('custom_tool_call'),
621
+ id: z.string(),
622
+ call_id: z.string(),
623
+ name: z.string(),
624
+ input: z.string(),
625
+ status: z.literal('completed'),
626
+ }),
582
627
  z.object({
583
628
  type: z.literal('code_interpreter_call'),
584
629
  id: z.string(),
@@ -778,6 +823,12 @@ export const openaiResponsesChunkSchema = lazySchema(() =>
778
823
  output_index: z.number(),
779
824
  delta: z.string(),
780
825
  }),
826
+ z.object({
827
+ type: z.literal('response.custom_tool_call_input.delta'),
828
+ item_id: z.string(),
829
+ output_index: z.number(),
830
+ delta: z.string(),
831
+ }),
781
832
  z.object({
782
833
  type: z.literal('response.image_generation_call.partial_image'),
783
834
  item_id: z.string(),
@@ -1059,6 +1110,13 @@ export const openaiResponsesResponseSchema = lazySchema(() =>
1059
1110
  arguments: z.string(),
1060
1111
  id: z.string(),
1061
1112
  }),
1113
+ z.object({
1114
+ type: z.literal('custom_tool_call'),
1115
+ call_id: z.string(),
1116
+ name: z.string(),
1117
+ input: z.string(),
1118
+ id: z.string(),
1119
+ }),
1062
1120
  z.object({
1063
1121
  type: z.literal('computer_call'),
1064
1122
  id: z.string(),
@@ -194,6 +194,22 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
194
194
  'openai.mcp': 'mcp',
195
195
  'openai.apply_patch': 'apply_patch',
196
196
  },
197
+ resolveProviderToolName: tool =>
198
+ tool.id === 'openai.custom'
199
+ ? (tool.args as { name?: string }).name
200
+ : undefined,
201
+ });
202
+
203
+ const customProviderToolNames = new Set<string>();
204
+ const {
205
+ tools: openaiTools,
206
+ toolChoice: openaiToolChoice,
207
+ toolWarnings,
208
+ } = await prepareResponsesTools({
209
+ tools,
210
+ toolChoice,
211
+ toolNameMapping,
212
+ customProviderToolNames,
197
213
  });
198
214
 
199
215
  const { input, warnings: inputWarnings } =
@@ -212,6 +228,10 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
212
228
  hasLocalShellTool: hasOpenAITool('openai.local_shell'),
213
229
  hasShellTool: hasOpenAITool('openai.shell'),
214
230
  hasApplyPatchTool: hasOpenAITool('openai.apply_patch'),
231
+ customProviderToolNames:
232
+ customProviderToolNames.size > 0
233
+ ? customProviderToolNames
234
+ : undefined,
215
235
  });
216
236
 
217
237
  warnings.push(...inputWarnings);
@@ -408,15 +428,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
408
428
  delete (baseArgs as any).service_tier;
409
429
  }
410
430
 
411
- const {
412
- tools: openaiTools,
413
- toolChoice: openaiToolChoice,
414
- toolWarnings,
415
- } = await prepareResponsesTools({
416
- tools,
417
- toolChoice,
418
- });
419
-
420
431
  const shellToolEnvType = (
421
432
  tools?.find(
422
433
  tool => tool.type === 'provider' && tool.id === 'openai.shell',
@@ -716,6 +727,24 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
716
727
  break;
717
728
  }
718
729
 
730
+ case 'custom_tool_call': {
731
+ hasFunctionCall = true;
732
+ const toolName = toolNameMapping.toCustomToolName(part.name);
733
+
734
+ content.push({
735
+ type: 'tool-call',
736
+ toolCallId: part.call_id,
737
+ toolName,
738
+ input: JSON.stringify(part.input),
739
+ providerMetadata: {
740
+ [providerOptionsName]: {
741
+ itemId: part.id,
742
+ },
743
+ },
744
+ });
745
+ break;
746
+ }
747
+
719
748
  case 'web_search_call': {
720
749
  content.push({
721
750
  type: 'tool-call',
@@ -1062,6 +1091,20 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1062
1091
  id: value.item.call_id,
1063
1092
  toolName: value.item.name,
1064
1093
  });
1094
+ } else if (value.item.type === 'custom_tool_call') {
1095
+ const toolName = toolNameMapping.toCustomToolName(
1096
+ value.item.name,
1097
+ );
1098
+ ongoingToolCalls[value.output_index] = {
1099
+ toolName,
1100
+ toolCallId: value.item.call_id,
1101
+ };
1102
+
1103
+ controller.enqueue({
1104
+ type: 'tool-input-start',
1105
+ id: value.item.call_id,
1106
+ toolName,
1107
+ });
1065
1108
  } else if (value.item.type === 'web_search_call') {
1066
1109
  ongoingToolCalls[value.output_index] = {
1067
1110
  toolName: toolNameMapping.toCustomToolName(
@@ -1275,6 +1318,29 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1275
1318
  },
1276
1319
  },
1277
1320
  });
1321
+ } else if (value.item.type === 'custom_tool_call') {
1322
+ ongoingToolCalls[value.output_index] = undefined;
1323
+ hasFunctionCall = true;
1324
+ const toolName = toolNameMapping.toCustomToolName(
1325
+ value.item.name,
1326
+ );
1327
+
1328
+ controller.enqueue({
1329
+ type: 'tool-input-end',
1330
+ id: value.item.call_id,
1331
+ });
1332
+
1333
+ controller.enqueue({
1334
+ type: 'tool-call',
1335
+ toolCallId: value.item.call_id,
1336
+ toolName,
1337
+ input: JSON.stringify(value.item.input),
1338
+ providerMetadata: {
1339
+ [providerOptionsName]: {
1340
+ itemId: value.item.id,
1341
+ },
1342
+ },
1343
+ });
1278
1344
  } else if (value.item.type === 'web_search_call') {
1279
1345
  ongoingToolCalls[value.output_index] = undefined;
1280
1346
 
@@ -1585,6 +1651,16 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
1585
1651
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
1586
1652
  const toolCall = ongoingToolCalls[value.output_index];
1587
1653
 
1654
+ if (toolCall != null) {
1655
+ controller.enqueue({
1656
+ type: 'tool-input-delta',
1657
+ id: toolCall.toolCallId,
1658
+ delta: value.delta,
1659
+ });
1660
+ }
1661
+ } else if (isResponseCustomToolCallInputDeltaChunk(value)) {
1662
+ const toolCall = ongoingToolCalls[value.output_index];
1663
+
1588
1664
  if (toolCall != null) {
1589
1665
  controller.enqueue({
1590
1666
  type: 'tool-input-delta',
@@ -1923,6 +1999,15 @@ function isResponseFunctionCallArgumentsDeltaChunk(
1923
1999
  } {
1924
2000
  return chunk.type === 'response.function_call_arguments.delta';
1925
2001
  }
2002
+
2003
+ function isResponseCustomToolCallInputDeltaChunk(
2004
+ chunk: OpenAIResponsesChunk,
2005
+ ): chunk is OpenAIResponsesChunk & {
2006
+ type: 'response.custom_tool_call_input.delta';
2007
+ } {
2008
+ return chunk.type === 'response.custom_tool_call_input.delta';
2009
+ }
2010
+
1926
2011
  function isResponseImageGenerationCallPartialImageChunk(
1927
2012
  chunk: OpenAIResponsesChunk,
1928
2013
  ): chunk is OpenAIResponsesChunk & {
@@ -15,16 +15,10 @@ export const openaiResponsesReasoningModelIds = [
15
15
  'o1-2024-12-17',
16
16
  'o3',
17
17
  'o3-2025-04-16',
18
- 'o3-deep-research',
19
- 'o3-deep-research-2025-06-26',
20
18
  'o3-mini',
21
19
  'o3-mini-2025-01-31',
22
20
  'o4-mini',
23
21
  'o4-mini-2025-04-16',
24
- 'o4-mini-deep-research',
25
- 'o4-mini-deep-research-2025-06-26',
26
- 'codex-mini-latest',
27
- 'computer-use-preview',
28
22
  'gpt-5',
29
23
  'gpt-5-2025-08-07',
30
24
  'gpt-5-codex',
@@ -58,7 +52,6 @@ export const openaiResponsesModelIds = [
58
52
  'gpt-4o-2024-08-06',
59
53
  'gpt-4o-2024-11-20',
60
54
  'gpt-4o-audio-preview',
61
- 'gpt-4o-audio-preview-2024-10-01',
62
55
  'gpt-4o-audio-preview-2024-12-17',
63
56
  'gpt-4o-search-preview',
64
57
  'gpt-4o-search-preview-2025-03-11',
@@ -66,38 +59,23 @@ export const openaiResponsesModelIds = [
66
59
  'gpt-4o-mini-search-preview-2025-03-11',
67
60
  'gpt-4o-mini',
68
61
  'gpt-4o-mini-2024-07-18',
69
- 'gpt-4-turbo',
70
- 'gpt-4-turbo-2024-04-09',
71
- 'gpt-4-turbo-preview',
72
- 'gpt-4-0125-preview',
73
- 'gpt-4-1106-preview',
74
- 'gpt-4',
75
- 'gpt-4-0613',
76
- 'gpt-4.5-preview',
77
- 'gpt-4.5-preview-2025-02-27',
78
62
  'gpt-3.5-turbo-0125',
79
63
  'gpt-3.5-turbo',
80
64
  'gpt-3.5-turbo-1106',
81
- 'chatgpt-4o-latest',
82
65
  'gpt-5-chat-latest',
83
66
  ...openaiResponsesReasoningModelIds,
84
67
  ] as const;
85
68
 
86
69
  export type OpenAIResponsesModelId =
87
- | 'chatgpt-4o-latest'
88
70
  | 'gpt-3.5-turbo-0125'
89
71
  | 'gpt-3.5-turbo-1106'
90
72
  | 'gpt-3.5-turbo'
91
- | 'gpt-4-0613'
92
- | 'gpt-4-turbo-2024-04-09'
93
- | 'gpt-4-turbo'
94
73
  | 'gpt-4.1-2025-04-14'
95
74
  | 'gpt-4.1-mini-2025-04-14'
96
75
  | 'gpt-4.1-mini'
97
76
  | 'gpt-4.1-nano-2025-04-14'
98
77
  | 'gpt-4.1-nano'
99
78
  | 'gpt-4.1'
100
- | 'gpt-4'
101
79
  | 'gpt-4o-2024-05-13'
102
80
  | 'gpt-4o-2024-08-06'
103
81
  | 'gpt-4o-2024-11-20'