@ai-sdk/openai 2.0.9 → 2.0.11

This diff shows the contents of the publicly released package versions as published to their registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,30 @@
  # @ai-sdk/openai
 
+ ## 2.0.11
+
+ ### Patch Changes
+
+ - 097b452: feat(openai, azure): add configurable file ID prefixes for Responses API
+
+   - Added `fileIdPrefixes` option to OpenAI Responses API configuration
+   - Azure OpenAI now supports `assistant-` prefixed file IDs (replacing previous `file-` prefix support)
+   - OpenAI maintains backward compatibility with default `file-` prefix
+   - File ID detection is disabled when `fileIdPrefixes` is undefined, gracefully falling back to base64 processing
+
+ - 87cf954: feat(provider/openai): add support for prompt_cache_key
+ - a3d98a9: feat(provider/openai): add support for safety_identifier
+ - 110d167: fix(openai): add missing file_search_call handlers in responses streaming
+ - 8d3c747: chore(openai): remove deprecated GPT-4.5-preview models and improve autocomplete control
+ - Updated dependencies [38ac190]
+   - @ai-sdk/provider-utils@3.0.2
+
+ ## 2.0.10
+
+ ### Patch Changes
+
+ - a274b01: refactor(provider/openai): restructure files
+ - b48e0ff: feat(provider/openai): add code interpreter tool (responses api)
+
  ## 2.0.9
 
  ### Patch Changes
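The 2.0.11 entries for `prompt_cache_key` and `safety_identifier` surface in the Responses provider options as `promptCacheKey` and `safetyIdentifier` (see the schema additions in the .d.ts diffs below). A minimal sketch of passing them per call, assuming the AI SDK core's `generateText` and its `providerOptions` plumbing:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch only: option names mirror promptCacheKey / safetyIdentifier from
// openaiResponsesProviderOptionsSchema; the concrete values are hypothetical.
const { text } = await generateText({
  model: openai('gpt-5'),
  prompt: 'Summarize the 2.0.11 changes.',
  providerOptions: {
    openai: {
      promptCacheKey: 'changelog-summary-v1', // hypothetical prompt cache key
      safetyIdentifier: 'user-1234',          // hypothetical stable end-user identifier
    },
  },
});
```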
package/dist/index.d.mts CHANGED
@@ -1,9 +1,9 @@
- import { LanguageModelV2, ProviderV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
+ import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
  import { FetchFunction } from '@ai-sdk/provider-utils';
  import { z } from 'zod/v4';
 
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
 
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
@@ -12,6 +12,11 @@ type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large
  type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 
  declare const openaiTools: {
+     codeInterpreter: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
+         container?: string | {
+             fileIds?: string[];
+         };
+     }>;
      fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{
          query: string;
      }, {
@@ -41,68 +46,18 @@ declare const openaiTools: {
      }>;
  };
 
- type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
-
- type OpenAIConfig = {
-     provider: string;
-     url: (options: {
-         modelId: string;
-         path: string;
-     }) => string;
-     headers: () => Record<string, string | undefined>;
-     fetch?: FetchFunction;
-     generateId?: () => string;
- };
-
- declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview", "gpt-5", "gpt-5-2025-08-07", "gpt-5-mini", "gpt-5-mini-2025-08-07", "gpt-5-nano", "gpt-5-nano-2025-08-07", "gpt-5-chat-latest"];
- type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});
-
- declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
-     readonly specificationVersion = "v2";
-     readonly modelId: OpenAIResponsesModelId;
-     private readonly config;
-     constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
-     readonly supportedUrls: Record<string, RegExp[]>;
-     get provider(): string;
-     private getArgs;
-     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
-     doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
- }
- declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
-     metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-     parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         auto: "auto";
-         flex: "flex";
-         priority: "priority";
-     }>>>;
-     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
-         "reasoning.encrypted_content": "reasoning.encrypted_content";
-         "file_search_call.results": "file_search_call.results";
-     }>>>>;
-     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>>;
- }, z.core.$strip>;
- type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
 
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
+ type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
+
  interface OpenAIProvider extends ProviderV2 {
      (modelId: OpenAIResponsesModelId): LanguageModelV2;
      /**
      Creates an OpenAI model for text generation.
      */
-     languageModel(modelId: OpenAIResponsesModelId): OpenAIResponsesLanguageModel;
+     languageModel(modelId: OpenAIResponsesModelId): LanguageModelV2;
      /**
      Creates an OpenAI chat model for text generation.
      */
@@ -188,4 +143,33 @@ Default OpenAI provider instance.
  */
  declare const openai: OpenAIProvider;
 
+ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
+     metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+     parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+     previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+     user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+         auto: "auto";
+         flex: "flex";
+         priority: "priority";
+     }>>>;
+     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
+         "reasoning.encrypted_content": "reasoning.encrypted_content";
+         "file_search_call.results": "file_search_call.results";
+     }>>>>;
+     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+         low: "low";
+         medium: "medium";
+         high: "high";
+     }>>>;
+     promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ }, z.core.$strip>;
+ type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
+
  export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };
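The new `codeInterpreter` entry in `openaiTools` above is a provider-defined tool factory whose creation argument is an optional `container` (a container ID string or `{ fileIds }`). A minimal sketch of wiring it into a call, assuming the provider exposes the factories as `openai.tools` and that `generateText` comes from the AI SDK core:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch only: the tool key and container values are illustrative; the factory
// shape mirrors the ProviderDefinedToolFactory declared in the types above.
const result = await generateText({
  model: openai('gpt-5'),
  prompt: 'Run a quick calculation: what is 2^20?',
  tools: {
    code_interpreter: openai.tools.codeInterpreter({
      container: { fileIds: [] }, // or pass an existing container id as a string
    }),
  },
});
```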
package/dist/index.d.ts CHANGED
@@ -1,9 +1,9 @@
- import { LanguageModelV2, ProviderV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
+ import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
  import { FetchFunction } from '@ai-sdk/provider-utils';
  import { z } from 'zod/v4';
 
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
 
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
@@ -12,6 +12,11 @@ type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large
  type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 
  declare const openaiTools: {
+     codeInterpreter: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
+         container?: string | {
+             fileIds?: string[];
+         };
+     }>;
      fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{
          query: string;
      }, {
@@ -41,68 +46,18 @@ declare const openaiTools: {
      }>;
  };
 
- type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
-
- type OpenAIConfig = {
-     provider: string;
-     url: (options: {
-         modelId: string;
-         path: string;
-     }) => string;
-     headers: () => Record<string, string | undefined>;
-     fetch?: FetchFunction;
-     generateId?: () => string;
- };
-
- declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview", "gpt-5", "gpt-5-2025-08-07", "gpt-5-mini", "gpt-5-mini-2025-08-07", "gpt-5-nano", "gpt-5-nano-2025-08-07", "gpt-5-chat-latest"];
- type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});
-
- declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
-     readonly specificationVersion = "v2";
-     readonly modelId: OpenAIResponsesModelId;
-     private readonly config;
-     constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
-     readonly supportedUrls: Record<string, RegExp[]>;
-     get provider(): string;
-     private getArgs;
-     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
-     doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
- }
- declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
-     metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-     parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-     serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         auto: "auto";
-         flex: "flex";
-         priority: "priority";
-     }>>>;
-     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
-         "reasoning.encrypted_content": "reasoning.encrypted_content";
-         "file_search_call.results": "file_search_call.results";
-     }>>>>;
-     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-         low: "low";
-         medium: "medium";
-         high: "high";
-     }>>>;
- }, z.core.$strip>;
- type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
 
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
+ type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
+
  interface OpenAIProvider extends ProviderV2 {
      (modelId: OpenAIResponsesModelId): LanguageModelV2;
      /**
      Creates an OpenAI model for text generation.
      */
-     languageModel(modelId: OpenAIResponsesModelId): OpenAIResponsesLanguageModel;
+     languageModel(modelId: OpenAIResponsesModelId): LanguageModelV2;
      /**
      Creates an OpenAI chat model for text generation.
      */
@@ -188,4 +143,33 @@ Default OpenAI provider instance.
  */
  declare const openai: OpenAIProvider;
 
+ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
+     metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+     parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+     previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+     user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+         auto: "auto";
+         flex: "flex";
+         priority: "priority";
+     }>>>;
+     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
+         "reasoning.encrypted_content": "reasoning.encrypted_content";
+         "file_search_call.results": "file_search_call.results";
+     }>>>>;
+     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+         low: "low";
+         medium: "medium";
+         high: "high";
+     }>>>;
+     promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ }, z.core.$strip>;
+ type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
+
  export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };
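For the `fileIdPrefixes` behavior described in the 2.0.11 changelog (prefix-based file ID detection that is disabled when the option is undefined, falling back to base64 processing), here is a hypothetical sketch of the decision logic; the helper name and shape are illustrative, not the package's actual implementation:

```ts
// Hypothetical illustration of the documented behavior: when `fileIdPrefixes`
// is undefined, nothing is treated as a file ID and content takes the base64 path.
function isFileId(value: string, fileIdPrefixes?: readonly string[]): boolean {
  if (fileIdPrefixes === undefined) return false; // detection disabled
  return fileIdPrefixes.some(prefix => value.startsWith(prefix));
}

// e.g. OpenAI's default prefix would be 'file-'; Azure OpenAI now uses 'assistant-'.
isFileId('file-abc123', ['file-']);        // true  -> treated as a file ID
isFileId('assistant-abc123', ['file-']);   // false -> base64 fallback
isFileId('assistant-abc123', undefined);   // false -> base64 fallback
```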