@ai-sdk/openai 4.0.0-beta.4 → 4.0.0-beta.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/index.d.mts +17 -17
  3. package/dist/index.d.ts +17 -17
  4. package/dist/index.js +13 -12
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +13 -12
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +28 -28
  9. package/dist/internal/index.d.ts +28 -28
  10. package/dist/internal/index.js +11 -10
  11. package/dist/internal/index.js.map +1 -1
  12. package/dist/internal/index.mjs +11 -10
  13. package/dist/internal/index.mjs.map +1 -1
  14. package/docs/03-openai.mdx +3 -0
  15. package/package.json +5 -5
  16. package/src/chat/convert-openai-chat-usage.ts +2 -2
  17. package/src/chat/convert-to-openai-chat-messages.ts +5 -5
  18. package/src/chat/map-openai-finish-reason.ts +2 -2
  19. package/src/chat/openai-chat-language-model.ts +22 -22
  20. package/src/chat/openai-chat-options.ts +1 -0
  21. package/src/chat/openai-chat-prepare-tools.ts +6 -6
  22. package/src/completion/convert-openai-completion-usage.ts +2 -2
  23. package/src/completion/convert-to-openai-completion-prompt.ts +2 -2
  24. package/src/completion/map-openai-finish-reason.ts +2 -2
  25. package/src/completion/openai-completion-language-model.ts +20 -20
  26. package/src/embedding/openai-embedding-model.ts +5 -5
  27. package/src/image/openai-image-model.ts +9 -9
  28. package/src/openai-language-model-capabilities.ts +1 -0
  29. package/src/openai-provider.ts +21 -21
  30. package/src/responses/convert-openai-responses-usage.ts +2 -2
  31. package/src/responses/convert-to-openai-responses-input.ts +7 -7
  32. package/src/responses/map-openai-responses-finish-reason.ts +2 -2
  33. package/src/responses/openai-responses-language-model.ts +29 -29
  34. package/src/responses/openai-responses-options.ts +4 -2
  35. package/src/responses/openai-responses-prepare-tools.ts +6 -6
  36. package/src/speech/openai-speech-model.ts +7 -7
  37. package/src/transcription/openai-transcription-model.ts +8 -8
@@ -1,8 +1,8 @@
1
- import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3CallOptions, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
1
+ import { LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, TranscriptionModelV4CallOptions, TranscriptionModelV4, SpeechModelV4 } from '@ai-sdk/provider';
2
2
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
3
3
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
4
4
 
5
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
5
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
6
6
  declare const openaiLanguageModelChatOptions: _ai_sdk_provider_utils.LazySchema<{
7
7
  logitBias?: Record<number, number> | undefined;
8
8
  logprobs?: number | boolean | undefined;
@@ -33,8 +33,8 @@ type OpenAIChatConfig = {
33
33
  }) => string;
34
34
  fetch?: FetchFunction;
35
35
  };
36
- declare class OpenAIChatLanguageModel implements LanguageModelV3 {
37
- readonly specificationVersion = "v3";
36
+ declare class OpenAIChatLanguageModel implements LanguageModelV4 {
37
+ readonly specificationVersion = "v4";
38
38
  readonly modelId: OpenAIChatModelId;
39
39
  readonly supportedUrls: {
40
40
  'image/*': RegExp[];
@@ -43,8 +43,8 @@ declare class OpenAIChatLanguageModel implements LanguageModelV3 {
43
43
  constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
44
44
  get provider(): string;
45
45
  private getArgs;
46
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
47
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
46
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
47
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
48
48
  }
49
49
 
50
50
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | 'gpt-3.5-turbo-instruct-0914' | (string & {});
@@ -66,8 +66,8 @@ type OpenAICompletionConfig = {
66
66
  }) => string;
67
67
  fetch?: FetchFunction;
68
68
  };
69
- declare class OpenAICompletionLanguageModel implements LanguageModelV3 {
70
- readonly specificationVersion = "v3";
69
+ declare class OpenAICompletionLanguageModel implements LanguageModelV4 {
70
+ readonly specificationVersion = "v4";
71
71
  readonly modelId: OpenAICompletionModelId;
72
72
  private readonly config;
73
73
  private get providerOptionsName();
@@ -75,8 +75,8 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV3 {
75
75
  get provider(): string;
76
76
  readonly supportedUrls: Record<string, RegExp[]>;
77
77
  private getArgs;
78
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
79
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
78
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
79
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
80
80
  }
81
81
 
82
82
  type OpenAIConfig = {
@@ -106,15 +106,15 @@ declare const openaiEmbeddingModelOptions: _ai_sdk_provider_utils.LazySchema<{
106
106
  }>;
107
107
  type OpenAIEmbeddingModelOptions = InferSchema<typeof openaiEmbeddingModelOptions>;
108
108
 
109
- declare class OpenAIEmbeddingModel implements EmbeddingModelV3 {
110
- readonly specificationVersion = "v3";
109
+ declare class OpenAIEmbeddingModel implements EmbeddingModelV4 {
110
+ readonly specificationVersion = "v4";
111
111
  readonly modelId: OpenAIEmbeddingModelId;
112
112
  readonly maxEmbeddingsPerCall = 2048;
113
113
  readonly supportsParallelCalls = true;
114
114
  private readonly config;
115
115
  get provider(): string;
116
116
  constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
117
- doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
117
+ doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
118
118
  }
119
119
 
120
120
  type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | 'chatgpt-image-latest' | (string & {});
@@ -126,14 +126,14 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
126
126
  currentDate?: () => Date;
127
127
  };
128
128
  }
129
- declare class OpenAIImageModel implements ImageModelV3 {
129
+ declare class OpenAIImageModel implements ImageModelV4 {
130
130
  readonly modelId: OpenAIImageModelId;
131
131
  private readonly config;
132
- readonly specificationVersion = "v3";
132
+ readonly specificationVersion = "v4";
133
133
  get maxImagesPerCall(): number;
134
134
  get provider(): string;
135
135
  constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
136
- doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
136
+ doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
137
137
  }
138
138
 
139
139
  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-mini-transcribe-2025-03-20' | 'gpt-4o-mini-transcribe-2025-12-15' | 'gpt-4o-transcribe' | 'gpt-4o-transcribe-diarize' | (string & {});
@@ -146,7 +146,7 @@ declare const openAITranscriptionModelOptions: _ai_sdk_provider_utils.LazySchema
146
146
  }>;
147
147
  type OpenAITranscriptionModelOptions = InferSchema<typeof openAITranscriptionModelOptions>;
148
148
 
149
- type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV3CallOptions, 'providerOptions'> & {
149
+ type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV4CallOptions, 'providerOptions'> & {
150
150
  providerOptions?: {
151
151
  openai?: OpenAITranscriptionModelOptions;
152
152
  };
@@ -156,14 +156,14 @@ interface OpenAITranscriptionModelConfig extends OpenAIConfig {
156
156
  currentDate?: () => Date;
157
157
  };
158
158
  }
159
- declare class OpenAITranscriptionModel implements TranscriptionModelV3 {
159
+ declare class OpenAITranscriptionModel implements TranscriptionModelV4 {
160
160
  readonly modelId: OpenAITranscriptionModelId;
161
161
  private readonly config;
162
- readonly specificationVersion = "v3";
162
+ readonly specificationVersion = "v4";
163
163
  get provider(): string;
164
164
  constructor(modelId: OpenAITranscriptionModelId, config: OpenAITranscriptionModelConfig);
165
165
  private getArgs;
166
- doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV3['doGenerate']>>>;
166
+ doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV4['doGenerate']>>>;
167
167
  }
168
168
 
169
169
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-1106' | 'tts-1-hd' | 'tts-1-hd-1106' | 'gpt-4o-mini-tts' | 'gpt-4o-mini-tts-2025-03-20' | 'gpt-4o-mini-tts-2025-12-15' | (string & {});
@@ -178,28 +178,28 @@ interface OpenAISpeechModelConfig extends OpenAIConfig {
178
178
  currentDate?: () => Date;
179
179
  };
180
180
  }
181
- declare class OpenAISpeechModel implements SpeechModelV3 {
181
+ declare class OpenAISpeechModel implements SpeechModelV4 {
182
182
  readonly modelId: OpenAISpeechModelId;
183
183
  private readonly config;
184
- readonly specificationVersion = "v3";
184
+ readonly specificationVersion = "v4";
185
185
  get provider(): string;
186
186
  constructor(modelId: OpenAISpeechModelId, config: OpenAISpeechModelConfig);
187
187
  private getArgs;
188
- doGenerate(options: Parameters<SpeechModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV3['doGenerate']>>>;
188
+ doGenerate(options: Parameters<SpeechModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV4['doGenerate']>>>;
189
189
  }
190
190
 
191
- type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5.3-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
191
+ type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
192
192
 
193
- declare class OpenAIResponsesLanguageModel implements LanguageModelV3 {
194
- readonly specificationVersion = "v3";
193
+ declare class OpenAIResponsesLanguageModel implements LanguageModelV4 {
194
+ readonly specificationVersion = "v4";
195
195
  readonly modelId: OpenAIResponsesModelId;
196
196
  private readonly config;
197
197
  constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
198
198
  readonly supportedUrls: Record<string, RegExp[]>;
199
199
  get provider(): string;
200
200
  private getArgs;
201
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
202
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
201
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
202
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
203
203
  }
204
204
 
205
205
  /**
@@ -1,8 +1,8 @@
1
- import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3CallOptions, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
1
+ import { LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, TranscriptionModelV4CallOptions, TranscriptionModelV4, SpeechModelV4 } from '@ai-sdk/provider';
2
2
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
3
3
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
4
4
 
5
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
5
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
6
6
  declare const openaiLanguageModelChatOptions: _ai_sdk_provider_utils.LazySchema<{
7
7
  logitBias?: Record<number, number> | undefined;
8
8
  logprobs?: number | boolean | undefined;
@@ -33,8 +33,8 @@ type OpenAIChatConfig = {
33
33
  }) => string;
34
34
  fetch?: FetchFunction;
35
35
  };
36
- declare class OpenAIChatLanguageModel implements LanguageModelV3 {
37
- readonly specificationVersion = "v3";
36
+ declare class OpenAIChatLanguageModel implements LanguageModelV4 {
37
+ readonly specificationVersion = "v4";
38
38
  readonly modelId: OpenAIChatModelId;
39
39
  readonly supportedUrls: {
40
40
  'image/*': RegExp[];
@@ -43,8 +43,8 @@ declare class OpenAIChatLanguageModel implements LanguageModelV3 {
43
43
  constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
44
44
  get provider(): string;
45
45
  private getArgs;
46
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
47
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
46
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
47
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
48
48
  }
49
49
 
50
50
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | 'gpt-3.5-turbo-instruct-0914' | (string & {});
@@ -66,8 +66,8 @@ type OpenAICompletionConfig = {
66
66
  }) => string;
67
67
  fetch?: FetchFunction;
68
68
  };
69
- declare class OpenAICompletionLanguageModel implements LanguageModelV3 {
70
- readonly specificationVersion = "v3";
69
+ declare class OpenAICompletionLanguageModel implements LanguageModelV4 {
70
+ readonly specificationVersion = "v4";
71
71
  readonly modelId: OpenAICompletionModelId;
72
72
  private readonly config;
73
73
  private get providerOptionsName();
@@ -75,8 +75,8 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV3 {
75
75
  get provider(): string;
76
76
  readonly supportedUrls: Record<string, RegExp[]>;
77
77
  private getArgs;
78
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
79
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
78
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
79
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
80
80
  }
81
81
 
82
82
  type OpenAIConfig = {
@@ -106,15 +106,15 @@ declare const openaiEmbeddingModelOptions: _ai_sdk_provider_utils.LazySchema<{
106
106
  }>;
107
107
  type OpenAIEmbeddingModelOptions = InferSchema<typeof openaiEmbeddingModelOptions>;
108
108
 
109
- declare class OpenAIEmbeddingModel implements EmbeddingModelV3 {
110
- readonly specificationVersion = "v3";
109
+ declare class OpenAIEmbeddingModel implements EmbeddingModelV4 {
110
+ readonly specificationVersion = "v4";
111
111
  readonly modelId: OpenAIEmbeddingModelId;
112
112
  readonly maxEmbeddingsPerCall = 2048;
113
113
  readonly supportsParallelCalls = true;
114
114
  private readonly config;
115
115
  get provider(): string;
116
116
  constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
117
- doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
117
+ doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
118
118
  }
119
119
 
120
120
  type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | 'chatgpt-image-latest' | (string & {});
@@ -126,14 +126,14 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
126
126
  currentDate?: () => Date;
127
127
  };
128
128
  }
129
- declare class OpenAIImageModel implements ImageModelV3 {
129
+ declare class OpenAIImageModel implements ImageModelV4 {
130
130
  readonly modelId: OpenAIImageModelId;
131
131
  private readonly config;
132
- readonly specificationVersion = "v3";
132
+ readonly specificationVersion = "v4";
133
133
  get maxImagesPerCall(): number;
134
134
  get provider(): string;
135
135
  constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
136
- doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
136
+ doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
137
137
  }
138
138
 
139
139
  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-mini-transcribe-2025-03-20' | 'gpt-4o-mini-transcribe-2025-12-15' | 'gpt-4o-transcribe' | 'gpt-4o-transcribe-diarize' | (string & {});
@@ -146,7 +146,7 @@ declare const openAITranscriptionModelOptions: _ai_sdk_provider_utils.LazySchema
146
146
  }>;
147
147
  type OpenAITranscriptionModelOptions = InferSchema<typeof openAITranscriptionModelOptions>;
148
148
 
149
- type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV3CallOptions, 'providerOptions'> & {
149
+ type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV4CallOptions, 'providerOptions'> & {
150
150
  providerOptions?: {
151
151
  openai?: OpenAITranscriptionModelOptions;
152
152
  };
@@ -156,14 +156,14 @@ interface OpenAITranscriptionModelConfig extends OpenAIConfig {
156
156
  currentDate?: () => Date;
157
157
  };
158
158
  }
159
- declare class OpenAITranscriptionModel implements TranscriptionModelV3 {
159
+ declare class OpenAITranscriptionModel implements TranscriptionModelV4 {
160
160
  readonly modelId: OpenAITranscriptionModelId;
161
161
  private readonly config;
162
- readonly specificationVersion = "v3";
162
+ readonly specificationVersion = "v4";
163
163
  get provider(): string;
164
164
  constructor(modelId: OpenAITranscriptionModelId, config: OpenAITranscriptionModelConfig);
165
165
  private getArgs;
166
- doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV3['doGenerate']>>>;
166
+ doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV4['doGenerate']>>>;
167
167
  }
168
168
 
169
169
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-1106' | 'tts-1-hd' | 'tts-1-hd-1106' | 'gpt-4o-mini-tts' | 'gpt-4o-mini-tts-2025-03-20' | 'gpt-4o-mini-tts-2025-12-15' | (string & {});
@@ -178,28 +178,28 @@ interface OpenAISpeechModelConfig extends OpenAIConfig {
178
178
  currentDate?: () => Date;
179
179
  };
180
180
  }
181
- declare class OpenAISpeechModel implements SpeechModelV3 {
181
+ declare class OpenAISpeechModel implements SpeechModelV4 {
182
182
  readonly modelId: OpenAISpeechModelId;
183
183
  private readonly config;
184
- readonly specificationVersion = "v3";
184
+ readonly specificationVersion = "v4";
185
185
  get provider(): string;
186
186
  constructor(modelId: OpenAISpeechModelId, config: OpenAISpeechModelConfig);
187
187
  private getArgs;
188
- doGenerate(options: Parameters<SpeechModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV3['doGenerate']>>>;
188
+ doGenerate(options: Parameters<SpeechModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV4['doGenerate']>>>;
189
189
  }
190
190
 
191
- type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5.3-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
191
+ type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
192
192
 
193
- declare class OpenAIResponsesLanguageModel implements LanguageModelV3 {
194
- readonly specificationVersion = "v3";
193
+ declare class OpenAIResponsesLanguageModel implements LanguageModelV4 {
194
+ readonly specificationVersion = "v4";
195
195
  readonly modelId: OpenAIResponsesModelId;
196
196
  private readonly config;
197
197
  constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
198
198
  readonly supportedUrls: Record<string, RegExp[]>;
199
199
  get provider(): string;
200
200
  private getArgs;
201
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
202
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
201
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
202
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
203
203
  }
204
204
 
205
205
  /**
@@ -84,7 +84,7 @@ function getOpenAILanguageModelCapabilities(modelId) {
84
84
  const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
85
85
  const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
86
86
  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
87
- const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2") || modelId.startsWith("gpt-5.4");
87
+ const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2") || modelId.startsWith("gpt-5.3") || modelId.startsWith("gpt-5.4");
88
88
  const systemMessageMode = isReasoningModel ? "developer" : "system";
89
89
  return {
90
90
  supportsFlexProcessing,
@@ -675,7 +675,7 @@ function prepareChatTools({
675
675
  // src/chat/openai-chat-language-model.ts
676
676
  var OpenAIChatLanguageModel = class {
677
677
  constructor(modelId, config) {
678
- this.specificationVersion = "v3";
678
+ this.specificationVersion = "v4";
679
679
  this.supportedUrls = {
680
680
  "image/*": [/^https?:\/\/.*$/]
681
681
  };
@@ -1410,7 +1410,7 @@ var openaiLanguageModelCompletionOptions = (0, import_provider_utils7.lazySchema
1410
1410
  // src/completion/openai-completion-language-model.ts
1411
1411
  var OpenAICompletionLanguageModel = class {
1412
1412
  constructor(modelId, config) {
1413
- this.specificationVersion = "v3";
1413
+ this.specificationVersion = "v4";
1414
1414
  this.supportedUrls = {
1415
1415
  // No URLs are supported for completion models.
1416
1416
  };
@@ -1675,7 +1675,7 @@ var openaiTextEmbeddingResponseSchema = (0, import_provider_utils10.lazySchema)(
1675
1675
  // src/embedding/openai-embedding-model.ts
1676
1676
  var OpenAIEmbeddingModel = class {
1677
1677
  constructor(modelId, config) {
1678
- this.specificationVersion = "v3";
1678
+ this.specificationVersion = "v4";
1679
1679
  this.maxEmbeddingsPerCall = 2048;
1680
1680
  this.supportsParallelCalls = true;
1681
1681
  this.modelId = modelId;
@@ -1796,7 +1796,7 @@ var OpenAIImageModel = class {
1796
1796
  constructor(modelId, config) {
1797
1797
  this.modelId = modelId;
1798
1798
  this.config = config;
1799
- this.specificationVersion = "v3";
1799
+ this.specificationVersion = "v4";
1800
1800
  }
1801
1801
  get maxImagesPerCall() {
1802
1802
  var _a;
@@ -2117,7 +2117,7 @@ var OpenAITranscriptionModel = class {
2117
2117
  constructor(modelId, config) {
2118
2118
  this.modelId = modelId;
2119
2119
  this.config = config;
2120
- this.specificationVersion = "v3";
2120
+ this.specificationVersion = "v4";
2121
2121
  }
2122
2122
  get provider() {
2123
2123
  return this.config.provider;
@@ -2240,7 +2240,7 @@ var OpenAISpeechModel = class {
2240
2240
  constructor(modelId, config) {
2241
2241
  this.modelId = modelId;
2242
2242
  this.config = config;
2243
- this.specificationVersion = "v3";
2243
+ this.specificationVersion = "v4";
2244
2244
  }
2245
2245
  get provider() {
2246
2246
  return this.config.provider;
@@ -3906,11 +3906,12 @@ var openaiResponsesReasoningModelIds = [
3906
3906
  "gpt-5.2-chat-latest",
3907
3907
  "gpt-5.2-pro",
3908
3908
  "gpt-5.2-codex",
3909
+ "gpt-5.3-chat-latest",
3910
+ "gpt-5.3-codex",
3909
3911
  "gpt-5.4",
3910
3912
  "gpt-5.4-2026-03-05",
3911
3913
  "gpt-5.4-pro",
3912
- "gpt-5.4-pro-2026-03-05",
3913
- "gpt-5.3-codex"
3914
+ "gpt-5.4-pro-2026-03-05"
3914
3915
  ];
3915
3916
  var openaiResponsesModelIds = [
3916
3917
  "gpt-4.1",
@@ -4684,7 +4685,7 @@ function extractApprovalRequestIdToToolCallIdMapping(prompt) {
4684
4685
  }
4685
4686
  var OpenAIResponsesLanguageModel = class {
4686
4687
  constructor(modelId, config) {
4687
- this.specificationVersion = "v3";
4688
+ this.specificationVersion = "v4";
4688
4689
  this.supportedUrls = {
4689
4690
  "image/*": [/^https?:\/\/.*$/],
4690
4691
  "application/pdf": [/^https?:\/\/.*$/]