@ai-sdk/openai 2.1.0-beta.9 → 3.0.0-beta.18
This diff compares the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +77 -0
- package/dist/index.d.mts +54 -75
- package/dist/index.d.ts +54 -75
- package/dist/index.js +1391 -1043
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1345 -952
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +174 -152
- package/dist/internal/index.d.ts +174 -152
- package/dist/internal/index.js +1387 -1033
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1353 -958
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,82 @@
 # @ai-sdk/openai
 
+## 3.0.0-beta.18
+
+### Patch Changes
+
+- 95f65c2: chore: use import \* from zod/v4
+- 95f65c2: chore: load zod schemas lazily
+- Updated dependencies [95f65c2]
+- Updated dependencies [95f65c2]
+  - @ai-sdk/provider-utils@4.0.0-beta.11
+
+## 3.0.0-beta.17
+
+### Major Changes
+
+- dee8b05: ai SDK 6 beta
+
+### Patch Changes
+
+- Updated dependencies [dee8b05]
+  - @ai-sdk/provider@3.0.0-beta.6
+  - @ai-sdk/provider-utils@4.0.0-beta.10
+
+## 2.1.0-beta.16
+
+### Patch Changes
+
+- Updated dependencies [521c537]
+  - @ai-sdk/provider-utils@3.1.0-beta.9
+
+## 2.1.0-beta.15
+
+### Patch Changes
+
+- Updated dependencies [e06565c]
+  - @ai-sdk/provider-utils@3.1.0-beta.8
+
+## 2.1.0-beta.14
+
+### Patch Changes
+
+- 000e87b: fix(provider/openai): add providerExecuted flag to tool start chunks
+
+## 2.1.0-beta.13
+
+### Patch Changes
+
+- 357cfd7: feat(provider/openai): add new model IDs `gpt-image-1-mini`, `gpt-5-pro`, `gpt-5-pro-2025-10-06`
+
+## 2.1.0-beta.12
+
+### Patch Changes
+
+- 046aa3b: feat(provider): speech model v3 spec
+- e8109d3: feat: tool execution approval
+- 21e20c0: feat(provider): transcription model v3 spec
+- Updated dependencies [046aa3b]
+- Updated dependencies [e8109d3]
+  - @ai-sdk/provider@2.1.0-beta.5
+  - @ai-sdk/provider-utils@3.1.0-beta.7
+
+## 2.1.0-beta.11
+
+### Patch Changes
+
+- 0adc679: feat(provider): shared spec v3
+- 2b0caef: feat(provider/openai): preview image generation results
+- Updated dependencies [0adc679]
+- Updated dependencies [2b0caef]
+  - @ai-sdk/provider-utils@3.1.0-beta.6
+  - @ai-sdk/provider@2.1.0-beta.4
+
+## 2.1.0-beta.10
+
+### Patch Changes
+
+- d64ece9: enables image_generation capabilities in the Azure provider through the Responses API.
+
 ## 2.1.0-beta.9
 
 ### Patch Changes
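For orientation, the model IDs added in 2.1.0-beta.13 (`gpt-image-1-mini`, `gpt-5-pro`, `gpt-5-pro-2025-10-06`) reappear in the type unions in the declaration diffs below. A minimal usage sketch, assuming the AI SDK's `generateText` call and the provider's `responses()` factory keep their current shape in these betas (the prompt is illustrative):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  // 'gpt-5-pro' is one of the IDs added in 2.1.0-beta.13; it is part of the
  // OpenAIResponsesModelId union, so openai.responses() accepts it with autocomplete.
  const { text } = await generateText({
    model: openai.responses('gpt-5-pro'),
    prompt: 'Say hello.',
  });
  console.log(text);
}

main().catch(console.error);
```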
package/dist/index.d.mts
CHANGED
@@ -1,46 +1,32 @@
-import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3,
+import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
-import { FetchFunction } from '@ai-sdk/provider-utils';
-import { z } from 'zod/v4';
+import { InferValidator, FetchFunction } from '@ai-sdk/provider-utils';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
-declare const openaiChatLanguageModelOptions:
-    logitBias
-    logprobs
-    parallelToolCalls
-    user
-    reasoningEffort
-
-
-
-
-
-
-
-
-
-
-
-
-        flex: "flex";
-        priority: "priority";
-    }>>;
-    strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
-    textVerbosity: z.ZodOptional<z.ZodEnum<{
-        low: "low";
-        medium: "medium";
-        high: "high";
-    }>>;
-    promptCacheKey: z.ZodOptional<z.ZodString>;
-    safetyIdentifier: z.ZodOptional<z.ZodString>;
-}, z.core.$strip>;
-type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
+declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidator<{
+    logitBias?: Record<number, number> | undefined;
+    logprobs?: number | boolean | undefined;
+    parallelToolCalls?: boolean | undefined;
+    user?: string | undefined;
+    reasoningEffort?: "minimal" | "low" | "medium" | "high" | undefined;
+    maxCompletionTokens?: number | undefined;
+    store?: boolean | undefined;
+    metadata?: Record<string, string> | undefined;
+    prediction?: Record<string, any> | undefined;
+    structuredOutputs?: boolean | undefined;
+    serviceTier?: "auto" | "flex" | "priority" | undefined;
+    strictJsonSchema?: boolean | undefined;
+    textVerbosity?: "low" | "medium" | "high" | undefined;
+    promptCacheKey?: string | undefined;
+    safetyIdentifier?: string | undefined;
+}>;
+type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;
 
 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
 
-type OpenAIImageModelId = '
+type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | (string & {});
 
 declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
     /**
@@ -182,11 +168,16 @@ declare const openaiTools: {
      *
      * Must have name `image_generation`.
      *
-     * @param
-     * @param
-     * @param
-     * @param
-     * @param
+     * @param background - Background type for the generated image. One of 'auto', 'opaque', or 'transparent'.
+     * @param inputFidelity - Input fidelity for the generated image. One of 'low' or 'high'.
+     * @param inputImageMask - Optional mask for inpainting. Contains fileId and/or imageUrl.
+     * @param model - The image generation model to use. Default: gpt-image-1.
+     * @param moderation - Moderation level for the generated image. Default: 'auto'.
+     * @param outputCompression - Compression level for the output image (0-100).
+     * @param outputFormat - The output format of the generated image. One of 'png', 'jpeg', or 'webp'.
+     * @param partialImages - Number of partial images to generate in streaming mode (0-3).
+     * @param quality - The quality of the generated image. One of 'auto', 'low', 'medium', or 'high'.
+     * @param size - The size of the generated image. One of 'auto', '1024x1024', '1024x1536', or '1536x1024'.
      */
     imageGeneration: (args?: {
         background?: "auto" | "opaque" | "transparent";
@@ -199,6 +190,7 @@ declare const openaiTools: {
         moderation?: "auto";
         outputCompression?: number;
         outputFormat?: "png" | "jpeg" | "webp";
+        partialImages?: number;
         quality?: "auto" | "low" | "medium" | "high";
         size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024";
     }) => _ai_sdk_provider_utils.Tool<{}, {
@@ -258,7 +250,26 @@ declare const openaiTools: {
     webSearch: (args?: Parameters<typeof webSearchToolFactory>[0]) => _ai_sdk_provider_utils.Tool<{}, unknown>;
 };
 
-type OpenAIResponsesModelId = '
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+    include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
+    instructions?: string | null | undefined;
+    logprobs?: number | boolean | undefined;
+    maxToolCalls?: number | null | undefined;
+    metadata?: any;
+    parallelToolCalls?: boolean | null | undefined;
+    previousResponseId?: string | null | undefined;
+    promptCacheKey?: string | null | undefined;
+    reasoningEffort?: string | null | undefined;
+    reasoningSummary?: string | null | undefined;
+    safetyIdentifier?: string | null | undefined;
+    serviceTier?: "auto" | "flex" | "priority" | null | undefined;
+    store?: boolean | null | undefined;
+    strictJsonSchema?: boolean | null | undefined;
+    textVerbosity?: "low" | "medium" | "high" | null | undefined;
+    user?: string | null | undefined;
+}>;
+type OpenAIResponsesProviderOptions = InferValidator<typeof openaiResponsesProviderOptionsSchema>;
 
 type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
@@ -305,11 +316,11 @@ interface OpenAIProvider extends ProviderV3 {
     /**
    Creates a model for transcription.
     */
-    transcription(modelId: OpenAITranscriptionModelId):
+    transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV3;
     /**
    Creates a model for speech generation.
     */
-    speech(modelId: OpenAISpeechModelId):
+    speech(modelId: OpenAISpeechModelId): SpeechModelV3;
     /**
    OpenAI-specific tools.
     */
@@ -355,38 +366,6 @@ Default OpenAI provider instance.
 */
 declare const openai: OpenAIProvider;
 
-declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
-    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
-        "file_search_call.results": "file_search_call.results";
-        "message.output_text.logprobs": "message.output_text.logprobs";
-        "reasoning.encrypted_content": "reasoning.encrypted_content";
-    }>>>>;
-    instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
-    maxToolCalls: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
-    metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-    parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-        auto: "auto";
-        flex: "flex";
-        priority: "priority";
-    }>>>;
-    store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-        low: "low";
-        medium: "medium";
-        high: "high";
-    }>>>;
-    user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-}, z.core.$strip>;
-type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
-
 declare const VERSION: string;
 
 export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, VERSION, createOpenAI, openai };
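The declarations above swap the Zod-based schema exports for `LazyValidator` wrappers (with `InferValidator` replacing `z.infer`) while keeping the inferred option shapes, and add a `partialImages` argument to the `imageGeneration` tool. A minimal sketch of how the exported `OpenAIResponsesProviderOptions` type and the new tool argument might be used together, assuming `providerOptions.openai`, `openai.tools`, and the required tool name `image_generation` behave as in the current betas:

```ts
import { generateText } from 'ai';
import { openai, type OpenAIResponsesProviderOptions } from '@ai-sdk/openai';

// Option keys mirror openaiResponsesProviderOptionsSchema above.
const responsesOptions: OpenAIResponsesProviderOptions = {
  reasoningEffort: 'low',
  serviceTier: 'auto',
  textVerbosity: 'medium',
};

async function main() {
  const { text } = await generateText({
    model: openai.responses('gpt-5'),
    prompt: 'Draw a small logo and describe it.',
    tools: {
      // partialImages (0-3) requests preview images while streaming; added in this diff.
      image_generation: openai.tools.imageGeneration({ partialImages: 2 }),
    },
    providerOptions: { openai: responsesOptions },
  });
  console.log(text);
}

main().catch(console.error);
```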
package/dist/index.d.ts
CHANGED
@@ -1,46 +1,32 @@
-import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3,
+import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
-import { FetchFunction } from '@ai-sdk/provider-utils';
-import { z } from 'zod/v4';
+import { InferValidator, FetchFunction } from '@ai-sdk/provider-utils';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
-declare const openaiChatLanguageModelOptions:
-    logitBias
-    logprobs
-    parallelToolCalls
-    user
-    reasoningEffort
-
-
-
-
-
-
-
-
-
-
-
-
-        flex: "flex";
-        priority: "priority";
-    }>>;
-    strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
-    textVerbosity: z.ZodOptional<z.ZodEnum<{
-        low: "low";
-        medium: "medium";
-        high: "high";
-    }>>;
-    promptCacheKey: z.ZodOptional<z.ZodString>;
-    safetyIdentifier: z.ZodOptional<z.ZodString>;
-}, z.core.$strip>;
-type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
+declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidator<{
+    logitBias?: Record<number, number> | undefined;
+    logprobs?: number | boolean | undefined;
+    parallelToolCalls?: boolean | undefined;
+    user?: string | undefined;
+    reasoningEffort?: "minimal" | "low" | "medium" | "high" | undefined;
+    maxCompletionTokens?: number | undefined;
+    store?: boolean | undefined;
+    metadata?: Record<string, string> | undefined;
+    prediction?: Record<string, any> | undefined;
+    structuredOutputs?: boolean | undefined;
+    serviceTier?: "auto" | "flex" | "priority" | undefined;
+    strictJsonSchema?: boolean | undefined;
+    textVerbosity?: "low" | "medium" | "high" | undefined;
+    promptCacheKey?: string | undefined;
+    safetyIdentifier?: string | undefined;
+}>;
+type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;
 
 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
 
-type OpenAIImageModelId = '
+type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | (string & {});
 
 declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
     /**
@@ -182,11 +168,16 @@ declare const openaiTools: {
      *
      * Must have name `image_generation`.
      *
-     * @param
-     * @param
-     * @param
-     * @param
-     * @param
+     * @param background - Background type for the generated image. One of 'auto', 'opaque', or 'transparent'.
+     * @param inputFidelity - Input fidelity for the generated image. One of 'low' or 'high'.
+     * @param inputImageMask - Optional mask for inpainting. Contains fileId and/or imageUrl.
+     * @param model - The image generation model to use. Default: gpt-image-1.
+     * @param moderation - Moderation level for the generated image. Default: 'auto'.
+     * @param outputCompression - Compression level for the output image (0-100).
+     * @param outputFormat - The output format of the generated image. One of 'png', 'jpeg', or 'webp'.
+     * @param partialImages - Number of partial images to generate in streaming mode (0-3).
+     * @param quality - The quality of the generated image. One of 'auto', 'low', 'medium', or 'high'.
+     * @param size - The size of the generated image. One of 'auto', '1024x1024', '1024x1536', or '1536x1024'.
     */
     imageGeneration: (args?: {
         background?: "auto" | "opaque" | "transparent";
@@ -199,6 +190,7 @@ declare const openaiTools: {
         moderation?: "auto";
         outputCompression?: number;
         outputFormat?: "png" | "jpeg" | "webp";
+        partialImages?: number;
        quality?: "auto" | "low" | "medium" | "high";
         size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024";
     }) => _ai_sdk_provider_utils.Tool<{}, {
@@ -258,7 +250,26 @@ declare const openaiTools: {
     webSearch: (args?: Parameters<typeof webSearchToolFactory>[0]) => _ai_sdk_provider_utils.Tool<{}, unknown>;
 };
 
-type OpenAIResponsesModelId = '
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+    include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
+    instructions?: string | null | undefined;
+    logprobs?: number | boolean | undefined;
+    maxToolCalls?: number | null | undefined;
+    metadata?: any;
+    parallelToolCalls?: boolean | null | undefined;
+    previousResponseId?: string | null | undefined;
+    promptCacheKey?: string | null | undefined;
+    reasoningEffort?: string | null | undefined;
+    reasoningSummary?: string | null | undefined;
+    safetyIdentifier?: string | null | undefined;
+    serviceTier?: "auto" | "flex" | "priority" | null | undefined;
+    store?: boolean | null | undefined;
+    strictJsonSchema?: boolean | null | undefined;
+    textVerbosity?: "low" | "medium" | "high" | null | undefined;
+    user?: string | null | undefined;
+}>;
+type OpenAIResponsesProviderOptions = InferValidator<typeof openaiResponsesProviderOptionsSchema>;
 
 type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
@@ -305,11 +316,11 @@ interface OpenAIProvider extends ProviderV3 {
     /**
    Creates a model for transcription.
     */
-    transcription(modelId: OpenAITranscriptionModelId):
+    transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV3;
     /**
    Creates a model for speech generation.
    */
-    speech(modelId: OpenAISpeechModelId):
+    speech(modelId: OpenAISpeechModelId): SpeechModelV3;
     /**
    OpenAI-specific tools.
    */
@@ -355,38 +366,6 @@ Default OpenAI provider instance.
 */
 declare const openai: OpenAIProvider;
 
-declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
-    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
-        "file_search_call.results": "file_search_call.results";
-        "message.output_text.logprobs": "message.output_text.logprobs";
-        "reasoning.encrypted_content": "reasoning.encrypted_content";
-    }>>>>;
-    instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
-    maxToolCalls: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
-    metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-    parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-        auto: "auto";
-        flex: "flex";
-        priority: "priority";
-    }>>>;
-    store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-        low: "low";
-        medium: "medium";
-        high: "high";
-    }>>>;
-    user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-}, z.core.$strip>;
-type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
-
 declare const VERSION: string;
 
 export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, VERSION, createOpenAI, openai };