@ai-sdk/openai 2.1.0-beta.1 → 2.1.0-beta.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,104 @@
  # @ai-sdk/openai
 
+ ## 2.1.0-beta.11
+
+ ### Patch Changes
+
+ - 0adc679: feat(provider): shared spec v3
+ - 2b0caef: feat(provider/openai): preview image generation results
+ - Updated dependencies [0adc679]
+ - Updated dependencies [2b0caef]
+   - @ai-sdk/provider-utils@3.1.0-beta.6
+   - @ai-sdk/provider@2.1.0-beta.4
+
+ ## 2.1.0-beta.10
+
+ ### Patch Changes
+
+ - d64ece9: enables image_generation capabilities in the Azure provider through the Responses API.
+
+ ## 2.1.0-beta.9
+
+ ### Patch Changes
+
+ - 9a51b92: support OPENAI_BASE_URL env
+
+ ## 2.1.0-beta.8
+
+ ### Patch Changes
+
+ - 4122d2a: feat(provider/openai): add gpt-5-codex model id
+ - 3997a42: feat(provider/openai): local shell tool
+ - cb4d238: The built-in Code Interpreter tool input code is streamed in `tool-input-<start/delta/end>` chunks.
+
+ ## 2.1.0-beta.7
+
+ ### Patch Changes
+
+ - 77f2b20: enables code_interpreter and file_search capabilities in the Azure provider through the Responses API
+ - 8dac895: feat: `LanguageModelV3`
+ - 10c1322: fix: moved dependency `@ai-sdk/test-server` to devDependencies
+ - Updated dependencies [8dac895]
+   - @ai-sdk/provider-utils@3.1.0-beta.5
+   - @ai-sdk/provider@2.1.0-beta.3
+
+ ## 2.1.0-beta.6
+
+ ### Patch Changes
+
+ - fe49278: feat(provider/openai): only send item references for reasoning when store: true
+
+ ## 2.1.0-beta.5
+
+ ### Patch Changes
+
+ - 4616b86: chore: update zod peer dependency version
+ - Updated dependencies [4616b86]
+   - @ai-sdk/provider-utils@3.1.0-beta.4
+
+ ## 2.1.0-beta.4
+
+ ### Patch Changes
+
+ - ed329cb: feat: `ProviderV3`
+ - 522f6b8: feat: `ImageModelV3`
+ - Updated dependencies [ed329cb]
+ - Updated dependencies [522f6b8]
+   - @ai-sdk/provider@2.1.0-beta.2
+   - @ai-sdk/provider-utils@3.1.0-beta.3
+
+ ## 2.1.0-beta.3
+
+ ### Patch Changes
+
+ - 2e86082: feat(provider/openai): `OpenAIChatLanguageModelOptions` type
+
+   ```ts
+   import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
+   import { generateText } from 'ai';
+
+   await generateText({
+     model: openai.chat('gpt-4o'),
+     prompt: 'Invent a new holiday and describe its traditions.',
+     providerOptions: {
+       openai: {
+         user: 'user-123',
+       } satisfies OpenAIChatLanguageModelOptions,
+     },
+   });
+   ```
+
+ ## 2.1.0-beta.2
+
+ ### Patch Changes
+
+ - 4920119: fix the "incomplete_details" key from nullable to nullish for openai compatibility
+ - 0c4822d: feat: `EmbeddingModelV3`
+ - 1cad0ab: feat: add provider version to user-agent header
+ - Updated dependencies [0c4822d]
+   - @ai-sdk/provider@2.1.0-beta.1
+   - @ai-sdk/provider-utils@3.1.0-beta.2
+
  ## 2.1.0-beta.1
 
  ### Patch Changes
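The changelog entries above are terse, so here is a minimal usage sketch (not part of the package diff) of two of them: the `OPENAI_BASE_URL` environment variable support from beta.9 and the `gpt-5-codex` model id from beta.8. It assumes the existing `createOpenAI` provider settings and a hypothetical gateway URL; the exact precedence between the env var and an explicit `baseURL` is not spelled out in this diff.

```ts
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

// As of beta.9 the provider can read OPENAI_BASE_URL from the environment
// (alongside OPENAI_API_KEY), e.g. for a proxy or gateway:
//   OPENAI_BASE_URL=https://my-gateway.example.com/v1   (hypothetical URL)
// Passing baseURL explicitly in the settings object is the pre-existing alternative.
const openai = createOpenAI();

const { text } = await generateText({
  // 'gpt-5-codex' was added to OpenAIResponsesModelId in beta.8.
  model: openai('gpt-5-codex'),
  prompt: 'Summarize the changes in this release in one sentence.',
});

console.log(text);
```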
package/dist/index.d.mts CHANGED
@@ -1,9 +1,40 @@
- import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
+ import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
  import { FetchFunction } from '@ai-sdk/provider-utils';
  import { z } from 'zod/v4';
 
  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
+ declare const openaiChatLanguageModelOptions: z.ZodObject<{
+ logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
+ logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
+ parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
+ user: z.ZodOptional<z.ZodString>;
+ reasoningEffort: z.ZodOptional<z.ZodEnum<{
+ minimal: "minimal";
+ low: "low";
+ medium: "medium";
+ high: "high";
+ }>>;
+ maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
+ store: z.ZodOptional<z.ZodBoolean>;
+ metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
+ prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
+ structuredOutputs: z.ZodOptional<z.ZodBoolean>;
+ serviceTier: z.ZodOptional<z.ZodEnum<{
+ auto: "auto";
+ flex: "flex";
+ priority: "priority";
+ }>>;
+ strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
+ textVerbosity: z.ZodOptional<z.ZodEnum<{
+ low: "low";
+ medium: "medium";
+ high: "high";
+ }>>;
+ promptCacheKey: z.ZodOptional<z.ZodString>;
+ safetyIdentifier: z.ZodOptional<z.ZodString>;
+ }, z.core.$strip>;
+ type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
 
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
@@ -151,11 +182,16 @@ declare const openaiTools: {
  *
  * Must have name `image_generation`.
  *
- * @param size - Image dimensions (e.g., 1024x1024, 1024x1536)
- * @param quality - Rendering quality (e.g. low, medium, high)
- * @param format - File output format
- * @param compression - Compression level (0-100%) for JPEG and WebP formats
- * @param background - Transparent or opaque
+ * @param background - Background type for the generated image. One of 'auto', 'opaque', or 'transparent'.
+ * @param inputFidelity - Input fidelity for the generated image. One of 'low' or 'high'.
+ * @param inputImageMask - Optional mask for inpainting. Contains fileId and/or imageUrl.
+ * @param model - The image generation model to use. Default: gpt-image-1.
+ * @param moderation - Moderation level for the generated image. Default: 'auto'.
+ * @param outputCompression - Compression level for the output image (0-100).
+ * @param outputFormat - The output format of the generated image. One of 'png', 'jpeg', or 'webp'.
+ * @param partialImages - Number of partial images to generate in streaming mode (0-3).
+ * @param quality - The quality of the generated image. One of 'auto', 'low', 'medium', or 'high'.
+ * @param size - The size of the generated image. One of 'auto', '1024x1024', '1024x1536', or '1536x1024'.
  */
  imageGeneration: (args?: {
  background?: "auto" | "opaque" | "transparent";
@@ -168,11 +204,32 @@ declare const openaiTools: {
  moderation?: "auto";
  outputCompression?: number;
  outputFormat?: "png" | "jpeg" | "webp";
+ partialImages?: number;
  quality?: "auto" | "low" | "medium" | "high";
  size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024";
  }) => _ai_sdk_provider_utils.Tool<{}, {
  result: string;
  }>;
+ /**
+  * Local shell is a tool that allows agents to run shell commands locally
+  * on a machine you or the user provides.
+  *
+  * Supported models: `gpt-5-codex` and `codex-mini-latest`
+  *
+  * Must have name `local_shell`.
+  */
+ localShell: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{
+ action: {
+ type: "exec";
+ command: string[];
+ timeoutMs?: number;
+ user?: string;
+ workingDirectory?: string;
+ env?: Record<string, string>;
+ };
+ }, {
+ output: string;
+ }, {}>;
  /**
  * Web search allows models to access up-to-date information from the internet
  * and provide answers with sourced citations.
@@ -207,50 +264,50 @@ declare const openaiTools: {
  webSearch: (args?: Parameters<typeof webSearchToolFactory>[0]) => _ai_sdk_provider_utils.Tool<{}, unknown>;
  };
 
- type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
 
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
 
- interface OpenAIProvider extends ProviderV2 {
- (modelId: OpenAIResponsesModelId): LanguageModelV2;
+ interface OpenAIProvider extends ProviderV3 {
+ (modelId: OpenAIResponsesModelId): LanguageModelV3;
  /**
  Creates an OpenAI model for text generation.
  */
- languageModel(modelId: OpenAIResponsesModelId): LanguageModelV2;
+ languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
  /**
  Creates an OpenAI chat model for text generation.
  */
- chat(modelId: OpenAIChatModelId): LanguageModelV2;
+ chat(modelId: OpenAIChatModelId): LanguageModelV3;
  /**
  Creates an OpenAI responses API model for text generation.
  */
- responses(modelId: OpenAIResponsesModelId): LanguageModelV2;
+ responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
  /**
  Creates an OpenAI completion model for text generation.
  */
- completion(modelId: OpenAICompletionModelId): LanguageModelV2;
+ completion(modelId: OpenAICompletionModelId): LanguageModelV3;
  /**
  Creates a model for text embeddings.
  */
- embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
+ embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3<string>;
  /**
  Creates a model for text embeddings.
  */
- textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3<string>;
  /**
  Creates a model for text embeddings.
  */
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3<string>;
  /**
  Creates a model for image generation.
  */
- image(modelId: OpenAIImageModelId): ImageModelV2;
+ image(modelId: OpenAIImageModelId): ImageModelV3;
  /**
  Creates a model for image generation.
  */
- imageModel(modelId: OpenAIImageModelId): ImageModelV2;
+ imageModel(modelId: OpenAIImageModelId): ImageModelV3;
  /**
  Creates a model for transcription.
  */
@@ -336,4 +393,6 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
- export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };
+ declare const VERSION: string;
+
+ export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, VERSION, createOpenAI, openai };
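The `localShell` declaration above only shows the tool's input shape (an `action` with an `exec` command) and output shape (`output`). The following rough sketch shows how it might be wired up. It assumes the `openaiTools` factories are exposed as `openai.tools` (as the existing web search and image generation tools are) and that, like other provider-defined tools that run on your machine, the factory accepts an `execute` callback; the command-running helper is illustrative only.

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';
import { execFile } from 'node:child_process';
import { promisify } from 'node:util';

const run = promisify(execFile);

await generateText({
  // Per the doc comment above, local shell is supported on gpt-5-codex and codex-mini-latest.
  model: openai('gpt-5-codex'),
  prompt: 'List the files in the current working directory.',
  tools: {
    // The doc comment requires the tool name `local_shell`.
    local_shell: openai.tools.localShell({
      // Input/output shapes follow the declaration: { action: { command, ... } } -> { output }.
      execute: async ({ action }) => {
        const [cmd, ...args] = action.command;
        const { stdout } = await run(cmd, args, {
          cwd: action.workingDirectory,
          env: action.env,
          timeout: action.timeoutMs,
        });
        return { output: stdout };
      },
    }),
  },
});
```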
package/dist/index.d.ts CHANGED
@@ -1,9 +1,40 @@
- import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
+ import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
  import { FetchFunction } from '@ai-sdk/provider-utils';
  import { z } from 'zod/v4';
 
  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
+ declare const openaiChatLanguageModelOptions: z.ZodObject<{
+ logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
+ logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
+ parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
+ user: z.ZodOptional<z.ZodString>;
+ reasoningEffort: z.ZodOptional<z.ZodEnum<{
+ minimal: "minimal";
+ low: "low";
+ medium: "medium";
+ high: "high";
+ }>>;
+ maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
+ store: z.ZodOptional<z.ZodBoolean>;
+ metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
+ prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
+ structuredOutputs: z.ZodOptional<z.ZodBoolean>;
+ serviceTier: z.ZodOptional<z.ZodEnum<{
+ auto: "auto";
+ flex: "flex";
+ priority: "priority";
+ }>>;
+ strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
+ textVerbosity: z.ZodOptional<z.ZodEnum<{
+ low: "low";
+ medium: "medium";
+ high: "high";
+ }>>;
+ promptCacheKey: z.ZodOptional<z.ZodString>;
+ safetyIdentifier: z.ZodOptional<z.ZodString>;
+ }, z.core.$strip>;
+ type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
 
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
@@ -151,11 +182,16 @@ declare const openaiTools: {
  *
  * Must have name `image_generation`.
  *
- * @param size - Image dimensions (e.g., 1024x1024, 1024x1536)
- * @param quality - Rendering quality (e.g. low, medium, high)
- * @param format - File output format
- * @param compression - Compression level (0-100%) for JPEG and WebP formats
- * @param background - Transparent or opaque
+ * @param background - Background type for the generated image. One of 'auto', 'opaque', or 'transparent'.
+ * @param inputFidelity - Input fidelity for the generated image. One of 'low' or 'high'.
+ * @param inputImageMask - Optional mask for inpainting. Contains fileId and/or imageUrl.
+ * @param model - The image generation model to use. Default: gpt-image-1.
+ * @param moderation - Moderation level for the generated image. Default: 'auto'.
+ * @param outputCompression - Compression level for the output image (0-100).
+ * @param outputFormat - The output format of the generated image. One of 'png', 'jpeg', or 'webp'.
+ * @param partialImages - Number of partial images to generate in streaming mode (0-3).
+ * @param quality - The quality of the generated image. One of 'auto', 'low', 'medium', or 'high'.
+ * @param size - The size of the generated image. One of 'auto', '1024x1024', '1024x1536', or '1536x1024'.
  */
  imageGeneration: (args?: {
  background?: "auto" | "opaque" | "transparent";
@@ -168,11 +204,32 @@ declare const openaiTools: {
  moderation?: "auto";
  outputCompression?: number;
  outputFormat?: "png" | "jpeg" | "webp";
+ partialImages?: number;
  quality?: "auto" | "low" | "medium" | "high";
  size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024";
  }) => _ai_sdk_provider_utils.Tool<{}, {
  result: string;
  }>;
+ /**
+  * Local shell is a tool that allows agents to run shell commands locally
+  * on a machine you or the user provides.
+  *
+  * Supported models: `gpt-5-codex` and `codex-mini-latest`
+  *
+  * Must have name `local_shell`.
+  */
+ localShell: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{
+ action: {
+ type: "exec";
+ command: string[];
+ timeoutMs?: number;
+ user?: string;
+ workingDirectory?: string;
+ env?: Record<string, string>;
+ };
+ }, {
+ output: string;
+ }, {}>;
  /**
  * Web search allows models to access up-to-date information from the internet
  * and provide answers with sourced citations.
@@ -207,50 +264,50 @@ declare const openaiTools: {
  webSearch: (args?: Parameters<typeof webSearchToolFactory>[0]) => _ai_sdk_provider_utils.Tool<{}, unknown>;
  };
 
- type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
 
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
 
- interface OpenAIProvider extends ProviderV2 {
- (modelId: OpenAIResponsesModelId): LanguageModelV2;
+ interface OpenAIProvider extends ProviderV3 {
+ (modelId: OpenAIResponsesModelId): LanguageModelV3;
  /**
  Creates an OpenAI model for text generation.
  */
- languageModel(modelId: OpenAIResponsesModelId): LanguageModelV2;
+ languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
  /**
  Creates an OpenAI chat model for text generation.
  */
- chat(modelId: OpenAIChatModelId): LanguageModelV2;
+ chat(modelId: OpenAIChatModelId): LanguageModelV3;
  /**
  Creates an OpenAI responses API model for text generation.
  */
- responses(modelId: OpenAIResponsesModelId): LanguageModelV2;
+ responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
  /**
  Creates an OpenAI completion model for text generation.
  */
- completion(modelId: OpenAICompletionModelId): LanguageModelV2;
+ completion(modelId: OpenAICompletionModelId): LanguageModelV3;
  /**
  Creates a model for text embeddings.
  */
- embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
+ embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3<string>;
  /**
  Creates a model for text embeddings.
  */
- textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3<string>;
  /**
  Creates a model for text embeddings.
  */
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3<string>;
  /**
  Creates a model for image generation.
  */
- image(modelId: OpenAIImageModelId): ImageModelV2;
+ image(modelId: OpenAIImageModelId): ImageModelV3;
  /**
  Creates a model for image generation.
  */
- imageModel(modelId: OpenAIImageModelId): ImageModelV2;
+ imageModel(modelId: OpenAIImageModelId): ImageModelV3;
  /**
  Creates a model for transcription.
  */
@@ -336,4 +393,6 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
- export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };
+ declare const VERSION: string;
+
+ export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, VERSION, createOpenAI, openai };
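Both declaration files now also export a `VERSION` constant and surface the new `partialImages` option on the image generation tool. A small sketch of both follows, again assuming the tool factories are reachable as `openai.tools` and that the beta.11 "preview image generation results" arrive as parts on the full stream; the exact chunk types are not visible in this diff.

```ts
import { openai, VERSION } from '@ai-sdk/openai';
import { streamText } from 'ai';

console.log(`using @ai-sdk/openai ${VERSION}`); // VERSION is a new export in this range

const result = streamText({
  model: openai('gpt-5'),
  prompt: 'Generate an image of a lighthouse at dusk.',
  tools: {
    // Must be named `image_generation` per the doc comment in the declarations.
    image_generation: openai.tools.imageGeneration({
      size: '1024x1024',
      quality: 'low',
      partialImages: 2, // new option: 0-3 streamed preview images
    }),
  },
});

for await (const part of result.fullStream) {
  // Log every stream part type; preview image chunks are not typed in this diff.
  console.log(part.type);
}
```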