@ai-sdk/openai 4.0.0-beta.3 → 4.0.0-beta.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/CHANGELOG.md +320 -22
  2. package/README.md +2 -0
  3. package/dist/index.d.ts +139 -36
  4. package/dist/index.js +2343 -1490
  5. package/dist/index.js.map +1 -1
  6. package/dist/internal/index.d.ts +168 -45
  7. package/dist/internal/index.js +2112 -1511
  8. package/dist/internal/index.js.map +1 -1
  9. package/docs/03-openai.mdx +274 -9
  10. package/package.json +9 -12
  11. package/src/chat/convert-openai-chat-usage.ts +2 -2
  12. package/src/chat/convert-to-openai-chat-messages.ts +26 -15
  13. package/src/chat/map-openai-finish-reason.ts +2 -2
  14. package/src/chat/openai-chat-language-model.ts +52 -28
  15. package/src/chat/openai-chat-options.ts +5 -0
  16. package/src/chat/openai-chat-prepare-tools.ts +6 -6
  17. package/src/completion/convert-openai-completion-usage.ts +2 -2
  18. package/src/completion/convert-to-openai-completion-prompt.ts +2 -2
  19. package/src/completion/map-openai-finish-reason.ts +2 -2
  20. package/src/completion/openai-completion-language-model.ts +40 -23
  21. package/src/embedding/openai-embedding-model.ts +23 -6
  22. package/src/files/openai-files-api.ts +17 -0
  23. package/src/files/openai-files-options.ts +18 -0
  24. package/src/files/openai-files.ts +102 -0
  25. package/src/image/openai-image-model.ts +28 -11
  26. package/src/index.ts +2 -0
  27. package/src/openai-config.ts +6 -6
  28. package/src/openai-language-model-capabilities.ts +3 -2
  29. package/src/openai-provider.ts +54 -21
  30. package/src/openai-tools.ts +12 -1
  31. package/src/responses/convert-openai-responses-usage.ts +2 -2
  32. package/src/responses/convert-to-openai-responses-input.ts +211 -37
  33. package/src/responses/map-openai-responses-finish-reason.ts +2 -2
  34. package/src/responses/openai-responses-api.ts +136 -2
  35. package/src/responses/openai-responses-language-model.ts +252 -39
  36. package/src/responses/openai-responses-options.ts +24 -2
  37. package/src/responses/openai-responses-prepare-tools.ts +47 -14
  38. package/src/responses/openai-responses-provider-metadata.ts +10 -0
  39. package/src/skills/openai-skills-api.ts +31 -0
  40. package/src/skills/openai-skills.ts +87 -0
  41. package/src/speech/openai-speech-model.ts +25 -8
  42. package/src/tool/custom.ts +0 -6
  43. package/src/tool/shell.ts +7 -2
  44. package/src/tool/tool-search.ts +98 -0
  45. package/src/transcription/openai-transcription-model.ts +26 -9
  46. package/dist/index.d.mts +0 -1107
  47. package/dist/index.mjs +0 -6497
  48. package/dist/index.mjs.map +0 -1
  49. package/dist/internal/index.d.mts +0 -1137
  50. package/dist/internal/index.mjs +0 -6310
  51. package/dist/internal/index.mjs.map +0 -1
@@ -1,8 +1,9 @@
1
- import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3CallOptions, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
1
+ import * as _ai_sdk_provider from '@ai-sdk/provider';
2
+ import { LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, TranscriptionModelV4CallOptions, TranscriptionModelV4, SpeechModelV4, JSONValue } from '@ai-sdk/provider';
2
3
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
3
- import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
4
+ import { InferSchema, WORKFLOW_SERIALIZE, WORKFLOW_DESERIALIZE, FetchFunction } from '@ai-sdk/provider-utils';
4
5
 
5
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
6
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
6
7
  declare const openaiLanguageModelChatOptions: _ai_sdk_provider_utils.LazySchema<{
7
8
  logitBias?: Record<number, number> | undefined;
8
9
  logprobs?: number | boolean | undefined;
@@ -26,25 +27,33 @@ type OpenAILanguageModelChatOptions = InferSchema<typeof openaiLanguageModelChat
26
27
 
27
28
  type OpenAIChatConfig = {
28
29
  provider: string;
29
- headers: () => Record<string, string | undefined>;
30
+ headers?: () => Record<string, string | undefined>;
30
31
  url: (options: {
31
32
  modelId: string;
32
33
  path: string;
33
34
  }) => string;
34
35
  fetch?: FetchFunction;
35
36
  };
36
- declare class OpenAIChatLanguageModel implements LanguageModelV3 {
37
- readonly specificationVersion = "v3";
37
+ declare class OpenAIChatLanguageModel implements LanguageModelV4 {
38
+ readonly specificationVersion = "v4";
38
39
  readonly modelId: OpenAIChatModelId;
39
40
  readonly supportedUrls: {
40
41
  'image/*': RegExp[];
41
42
  };
42
43
  private readonly config;
44
+ static [WORKFLOW_SERIALIZE](model: OpenAIChatLanguageModel): {
45
+ modelId: string;
46
+ config: _ai_sdk_provider.JSONObject;
47
+ };
48
+ static [WORKFLOW_DESERIALIZE](options: {
49
+ modelId: OpenAIChatModelId;
50
+ config: OpenAIChatConfig;
51
+ }): OpenAIChatLanguageModel;
43
52
  constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
44
53
  get provider(): string;
45
54
  private getArgs;
46
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
47
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
55
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
56
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
48
57
  }
49
58
 
50
59
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | 'gpt-3.5-turbo-instruct-0914' | (string & {});
@@ -59,24 +68,32 @@ type OpenAILanguageModelCompletionOptions = InferSchema<typeof openaiLanguageMod
59
68
 
60
69
  type OpenAICompletionConfig = {
61
70
  provider: string;
62
- headers: () => Record<string, string | undefined>;
71
+ headers?: () => Record<string, string | undefined>;
63
72
  url: (options: {
64
73
  modelId: string;
65
74
  path: string;
66
75
  }) => string;
67
76
  fetch?: FetchFunction;
68
77
  };
69
- declare class OpenAICompletionLanguageModel implements LanguageModelV3 {
70
- readonly specificationVersion = "v3";
78
+ declare class OpenAICompletionLanguageModel implements LanguageModelV4 {
79
+ readonly specificationVersion = "v4";
71
80
  readonly modelId: OpenAICompletionModelId;
72
81
  private readonly config;
73
82
  private get providerOptionsName();
83
+ static [WORKFLOW_SERIALIZE](model: OpenAICompletionLanguageModel): {
84
+ modelId: string;
85
+ config: _ai_sdk_provider.JSONObject;
86
+ };
87
+ static [WORKFLOW_DESERIALIZE](options: {
88
+ modelId: OpenAICompletionModelId;
89
+ config: OpenAICompletionConfig;
90
+ }): OpenAICompletionLanguageModel;
74
91
  constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
75
92
  get provider(): string;
76
93
  readonly supportedUrls: Record<string, RegExp[]>;
77
94
  private getArgs;
78
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
79
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
95
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
96
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
80
97
  }
81
98
 
82
99
  type OpenAIConfig = {
@@ -85,16 +102,16 @@ type OpenAIConfig = {
85
102
  modelId: string;
86
103
  path: string;
87
104
  }) => string;
88
- headers: () => Record<string, string | undefined>;
105
+ headers?: () => Record<string, string | undefined>;
89
106
  fetch?: FetchFunction;
90
107
  generateId?: () => string;
91
108
  /**
92
- * File ID prefixes used to identify file IDs in Responses API.
93
- * When undefined, all file data is treated as base64 content.
109
+ * This is soft-deprecated. Use provider references (e.g. `{ openai: 'file-abc123' }`)
110
+ * in file part data instead. File ID prefixes used to identify file IDs
111
+ * in Responses API. When undefined, all string file data is treated as
112
+ * base64 content.
94
113
  *
95
- * Examples:
96
- * - OpenAI: ['file-'] for IDs like 'file-abc123'
97
- * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
114
+ * TODO: remove in v8
98
115
  */
99
116
  fileIdPrefixes?: readonly string[];
100
117
  };
@@ -106,15 +123,23 @@ declare const openaiEmbeddingModelOptions: _ai_sdk_provider_utils.LazySchema<{
106
123
  }>;
107
124
  type OpenAIEmbeddingModelOptions = InferSchema<typeof openaiEmbeddingModelOptions>;
108
125
 
109
- declare class OpenAIEmbeddingModel implements EmbeddingModelV3 {
110
- readonly specificationVersion = "v3";
126
+ declare class OpenAIEmbeddingModel implements EmbeddingModelV4 {
127
+ readonly specificationVersion = "v4";
111
128
  readonly modelId: OpenAIEmbeddingModelId;
112
129
  readonly maxEmbeddingsPerCall = 2048;
113
130
  readonly supportsParallelCalls = true;
114
131
  private readonly config;
132
+ static [WORKFLOW_SERIALIZE](model: OpenAIEmbeddingModel): {
133
+ modelId: string;
134
+ config: _ai_sdk_provider.JSONObject;
135
+ };
136
+ static [WORKFLOW_DESERIALIZE](options: {
137
+ modelId: OpenAIEmbeddingModelId;
138
+ config: OpenAIConfig;
139
+ }): OpenAIEmbeddingModel;
115
140
  get provider(): string;
116
141
  constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
117
- doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
142
+ doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
118
143
  }
119
144
 
120
145
  type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | 'chatgpt-image-latest' | (string & {});
@@ -126,14 +151,22 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
126
151
  currentDate?: () => Date;
127
152
  };
128
153
  }
129
- declare class OpenAIImageModel implements ImageModelV3 {
154
+ declare class OpenAIImageModel implements ImageModelV4 {
130
155
  readonly modelId: OpenAIImageModelId;
131
156
  private readonly config;
132
- readonly specificationVersion = "v3";
157
+ readonly specificationVersion = "v4";
158
+ static [WORKFLOW_SERIALIZE](model: OpenAIImageModel): {
159
+ modelId: string;
160
+ config: _ai_sdk_provider.JSONObject;
161
+ };
162
+ static [WORKFLOW_DESERIALIZE](options: {
163
+ modelId: OpenAIImageModelId;
164
+ config: OpenAIImageModelConfig;
165
+ }): OpenAIImageModel;
133
166
  get maxImagesPerCall(): number;
134
167
  get provider(): string;
135
168
  constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
136
- doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
169
+ doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
137
170
  }
138
171
 
139
172
  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-mini-transcribe-2025-03-20' | 'gpt-4o-mini-transcribe-2025-12-15' | 'gpt-4o-transcribe' | 'gpt-4o-transcribe-diarize' | (string & {});
@@ -146,7 +179,7 @@ declare const openAITranscriptionModelOptions: _ai_sdk_provider_utils.LazySchema
146
179
  }>;
147
180
  type OpenAITranscriptionModelOptions = InferSchema<typeof openAITranscriptionModelOptions>;
148
181
 
149
- type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV3CallOptions, 'providerOptions'> & {
182
+ type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV4CallOptions, 'providerOptions'> & {
150
183
  providerOptions?: {
151
184
  openai?: OpenAITranscriptionModelOptions;
152
185
  };
@@ -156,14 +189,22 @@ interface OpenAITranscriptionModelConfig extends OpenAIConfig {
156
189
  currentDate?: () => Date;
157
190
  };
158
191
  }
159
- declare class OpenAITranscriptionModel implements TranscriptionModelV3 {
192
+ declare class OpenAITranscriptionModel implements TranscriptionModelV4 {
160
193
  readonly modelId: OpenAITranscriptionModelId;
161
194
  private readonly config;
162
- readonly specificationVersion = "v3";
195
+ readonly specificationVersion = "v4";
196
+ static [WORKFLOW_SERIALIZE](model: OpenAITranscriptionModel): {
197
+ modelId: string;
198
+ config: _ai_sdk_provider.JSONObject;
199
+ };
200
+ static [WORKFLOW_DESERIALIZE](options: {
201
+ modelId: OpenAITranscriptionModelId;
202
+ config: OpenAITranscriptionModelConfig;
203
+ }): OpenAITranscriptionModel;
163
204
  get provider(): string;
164
205
  constructor(modelId: OpenAITranscriptionModelId, config: OpenAITranscriptionModelConfig);
165
206
  private getArgs;
166
- doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV3['doGenerate']>>>;
207
+ doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV4['doGenerate']>>>;
167
208
  }
168
209
 
169
210
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-1106' | 'tts-1-hd' | 'tts-1-hd-1106' | 'gpt-4o-mini-tts' | 'gpt-4o-mini-tts-2025-03-20' | 'gpt-4o-mini-tts-2025-12-15' | (string & {});
@@ -178,28 +219,44 @@ interface OpenAISpeechModelConfig extends OpenAIConfig {
178
219
  currentDate?: () => Date;
179
220
  };
180
221
  }
181
- declare class OpenAISpeechModel implements SpeechModelV3 {
222
+ declare class OpenAISpeechModel implements SpeechModelV4 {
182
223
  readonly modelId: OpenAISpeechModelId;
183
224
  private readonly config;
184
- readonly specificationVersion = "v3";
225
+ readonly specificationVersion = "v4";
226
+ static [WORKFLOW_SERIALIZE](model: OpenAISpeechModel): {
227
+ modelId: string;
228
+ config: _ai_sdk_provider.JSONObject;
229
+ };
230
+ static [WORKFLOW_DESERIALIZE](options: {
231
+ modelId: OpenAISpeechModelId;
232
+ config: OpenAISpeechModelConfig;
233
+ }): OpenAISpeechModel;
185
234
  get provider(): string;
186
235
  constructor(modelId: OpenAISpeechModelId, config: OpenAISpeechModelConfig);
187
236
  private getArgs;
188
- doGenerate(options: Parameters<SpeechModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV3['doGenerate']>>>;
237
+ doGenerate(options: Parameters<SpeechModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV4['doGenerate']>>>;
189
238
  }
190
239
 
191
- type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5.3-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
240
+ type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
192
241
 
193
- declare class OpenAIResponsesLanguageModel implements LanguageModelV3 {
194
- readonly specificationVersion = "v3";
242
+ declare class OpenAIResponsesLanguageModel implements LanguageModelV4 {
243
+ readonly specificationVersion = "v4";
195
244
  readonly modelId: OpenAIResponsesModelId;
196
245
  private readonly config;
246
+ static [WORKFLOW_SERIALIZE](model: OpenAIResponsesLanguageModel): {
247
+ modelId: string;
248
+ config: _ai_sdk_provider.JSONObject;
249
+ };
250
+ static [WORKFLOW_DESERIALIZE](options: {
251
+ modelId: OpenAIResponsesModelId;
252
+ config: OpenAIConfig;
253
+ }): OpenAIResponsesLanguageModel;
197
254
  constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
198
255
  readonly supportedUrls: Record<string, RegExp[]>;
199
256
  get provider(): string;
200
257
  private getArgs;
201
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
202
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
258
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
259
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
203
260
  }
204
261
 
205
262
  /**
@@ -265,6 +322,28 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
265
322
  } | null | undefined;
266
323
  service_tier?: string | null | undefined;
267
324
  };
325
+ } | {
326
+ type: "response.failed";
327
+ response: {
328
+ error?: {
329
+ message: string;
330
+ code?: string | null | undefined;
331
+ } | null | undefined;
332
+ incomplete_details?: {
333
+ reason: string;
334
+ } | null | undefined;
335
+ usage?: {
336
+ input_tokens: number;
337
+ output_tokens: number;
338
+ input_tokens_details?: {
339
+ cached_tokens?: number | null | undefined;
340
+ } | null | undefined;
341
+ output_tokens_details?: {
342
+ reasoning_tokens?: number | null | undefined;
343
+ } | null | undefined;
344
+ } | null | undefined;
345
+ service_tier?: string | null | undefined;
346
+ };
268
347
  } | {
269
348
  type: "response.created";
270
349
  response: {
@@ -359,6 +438,10 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
359
438
  action: {
360
439
  commands: string[];
361
440
  };
441
+ } | {
442
+ type: "compaction";
443
+ id: string;
444
+ encrypted_content?: string | null | undefined;
362
445
  } | {
363
446
  type: "shell_call_output";
364
447
  id: string;
@@ -374,6 +457,20 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
374
457
  exit_code: number;
375
458
  };
376
459
  }[];
460
+ } | {
461
+ type: "tool_search_call";
462
+ id: string;
463
+ execution: "server" | "client";
464
+ call_id: string | null;
465
+ status: "completed" | "in_progress" | "incomplete";
466
+ arguments: unknown;
467
+ } | {
468
+ type: "tool_search_output";
469
+ id: string;
470
+ execution: "server" | "client";
471
+ call_id: string | null;
472
+ status: "completed" | "in_progress" | "incomplete";
473
+ tools: Record<string, JSONValue | undefined>[];
377
474
  };
378
475
  } | {
379
476
  type: "response.output_item.done";
@@ -528,6 +625,10 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
528
625
  action: {
529
626
  commands: string[];
530
627
  };
628
+ } | {
629
+ type: "compaction";
630
+ id: string;
631
+ encrypted_content: string;
531
632
  } | {
532
633
  type: "shell_call_output";
533
634
  id: string;
@@ -543,6 +644,20 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
543
644
  exit_code: number;
544
645
  };
545
646
  }[];
647
+ } | {
648
+ type: "tool_search_call";
649
+ id: string;
650
+ execution: "server" | "client";
651
+ call_id: string | null;
652
+ status: "completed" | "in_progress" | "incomplete";
653
+ arguments: unknown;
654
+ } | {
655
+ type: "tool_search_output";
656
+ id: string;
657
+ execution: "server" | "client";
658
+ call_id: string | null;
659
+ status: "completed" | "in_progress" | "incomplete";
660
+ tools: Record<string, JSONValue | undefined>[];
546
661
  };
547
662
  } | {
548
663
  type: "response.function_call_arguments.delta";
@@ -652,6 +767,14 @@ type OpenaiResponsesReasoningProviderMetadata = {
652
767
  type OpenaiResponsesProviderMetadata = {
653
768
  openai: ResponsesProviderMetadata;
654
769
  };
770
+ type ResponsesCompactionProviderMetadata = {
771
+ type: 'compaction';
772
+ itemId: string;
773
+ encryptedContent?: string;
774
+ };
775
+ type OpenaiResponsesCompactionProviderMetadata = {
776
+ openai: ResponsesCompactionProviderMetadata;
777
+ };
655
778
  type ResponsesTextProviderMetadata = {
656
779
  itemId: string;
657
780
  phase?: 'commentary' | 'final_answer' | null;
@@ -772,7 +895,7 @@ declare const applyPatchToolFactory: _ai_sdk_provider_utils.ProviderToolFactoryW
772
895
  * (e.g., patch results or errors).
773
896
  */
774
897
  output?: string;
775
- }, {}>;
898
+ }, {}, {}>;
776
899
  /**
777
900
  * The apply_patch tool lets GPT-5.1 create, update, and delete files in your
778
901
  * codebase using structured diffs. Instead of just suggesting edits, the model
@@ -800,7 +923,7 @@ declare const applyPatch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSc
800
923
  * (e.g., patch results or errors).
801
924
  */
802
925
  output?: string;
803
- }, {}>;
926
+ }, {}, {}>;
804
927
 
805
928
  declare const codeInterpreterInputSchema: _ai_sdk_provider_utils.LazySchema<{
806
929
  containerId: string;
@@ -857,7 +980,7 @@ declare const codeInterpreterToolFactory: _ai_sdk_provider_utils.ProviderToolFac
857
980
  */
858
981
  url: string;
859
982
  }> | null;
860
- }, CodeInterpreterArgs>;
983
+ }, CodeInterpreterArgs, {}>;
861
984
  declare const codeInterpreter: (args?: CodeInterpreterArgs) => _ai_sdk_provider_utils.Tool<{
862
985
  /**
863
986
  * The code to run, or null if not available.
@@ -885,7 +1008,7 @@ declare const codeInterpreter: (args?: CodeInterpreterArgs) => _ai_sdk_provider_
885
1008
  */
886
1009
  url: string;
887
1010
  }> | null;
888
- }>;
1011
+ }, {}>;
889
1012
 
890
1013
  declare const fileSearchArgsSchema: _ai_sdk_provider_utils.LazySchema<{
891
1014
  vectorStoreIds: string[];
@@ -968,7 +1091,7 @@ declare const fileSearch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSc
968
1091
  * A filter to apply.
969
1092
  */
970
1093
  filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter;
971
- }>;
1094
+ }, {}>;
972
1095
 
973
1096
  declare const imageGenerationArgsSchema: _ai_sdk_provider_utils.LazySchema<{
974
1097
  background?: "auto" | "transparent" | "opaque" | undefined;
@@ -1049,7 +1172,7 @@ declare const imageGeneration: (args?: ImageGenerationArgs) => _ai_sdk_provider_
1049
1172
  * The generated image encoded in base64.
1050
1173
  */
1051
1174
  result: string;
1052
- }>;
1175
+ }, {}>;
1053
1176
 
1054
1177
  declare const webSearchPreviewArgsSchema: _ai_sdk_provider_utils.LazySchema<{
1055
1178
  searchContextSize?: "low" | "medium" | "high" | undefined;
@@ -1132,6 +1255,6 @@ declare const webSearchPreview: _ai_sdk_provider_utils.ProviderToolFactoryWithOu
1132
1255
  */
1133
1256
  timezone?: string;
1134
1257
  };
1135
- }>;
1258
+ }, {}>;
1136
1259
 
1137
- export { type ApplyPatchOperation, OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingModelOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAILanguageModelChatOptions, type OpenAILanguageModelCompletionOptions, OpenAIResponsesLanguageModel, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAISpeechModelOptions, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, type OpenaiResponsesProviderMetadata, type OpenaiResponsesReasoningProviderMetadata, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, type ResponsesProviderMetadata, type ResponsesReasoningProviderMetadata, type ResponsesSourceDocumentProviderMetadata, type ResponsesTextProviderMetadata, applyPatch, applyPatchArgsSchema, applyPatchInputSchema, applyPatchOutputSchema, applyPatchToolFactory, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionModelOptions, openaiEmbeddingModelOptions, openaiLanguageModelChatOptions, openaiLanguageModelCompletionOptions, openaiSpeechModelOptionsSchema, webSearchPreview, webSearchPreviewArgsSchema, webSearchPreviewInputSchema };
1260
+ export { type ApplyPatchOperation, OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingModelOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAILanguageModelChatOptions, type OpenAILanguageModelCompletionOptions, OpenAIResponsesLanguageModel, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAISpeechModelOptions, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, type OpenaiResponsesCompactionProviderMetadata, type OpenaiResponsesProviderMetadata, type OpenaiResponsesReasoningProviderMetadata, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, type ResponsesCompactionProviderMetadata, type ResponsesProviderMetadata, type ResponsesReasoningProviderMetadata, type ResponsesSourceDocumentProviderMetadata, type ResponsesTextProviderMetadata, applyPatch, applyPatchArgsSchema, applyPatchInputSchema, applyPatchOutputSchema, applyPatchToolFactory, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionModelOptions, openaiEmbeddingModelOptions, openaiLanguageModelChatOptions, openaiLanguageModelCompletionOptions, openaiSpeechModelOptionsSchema, webSearchPreview, webSearchPreviewArgsSchema, webSearchPreviewInputSchema };