@ai-sdk/openai 4.0.0-beta.2 → 4.0.0-beta.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/CHANGELOG.md +234 -22
  2. package/README.md +2 -0
  3. package/dist/index.d.mts +134 -35
  4. package/dist/index.d.ts +134 -35
  5. package/dist/index.js +1700 -1139
  6. package/dist/index.js.map +1 -1
  7. package/dist/index.mjs +1697 -1117
  8. package/dist/index.mjs.map +1 -1
  9. package/dist/internal/index.d.mts +107 -41
  10. package/dist/internal/index.d.ts +107 -41
  11. package/dist/internal/index.js +1380 -939
  12. package/dist/internal/index.js.map +1 -1
  13. package/dist/internal/index.mjs +1371 -917
  14. package/dist/internal/index.mjs.map +1 -1
  15. package/docs/03-openai.mdx +274 -9
  16. package/package.json +3 -5
  17. package/src/chat/convert-openai-chat-usage.ts +2 -2
  18. package/src/chat/convert-to-openai-chat-messages.ts +26 -15
  19. package/src/chat/map-openai-finish-reason.ts +2 -2
  20. package/src/chat/openai-chat-language-model.ts +32 -24
  21. package/src/chat/openai-chat-options.ts +5 -0
  22. package/src/chat/openai-chat-prepare-tools.ts +6 -6
  23. package/src/completion/convert-openai-completion-usage.ts +2 -2
  24. package/src/completion/convert-to-openai-completion-prompt.ts +2 -2
  25. package/src/completion/map-openai-finish-reason.ts +2 -2
  26. package/src/completion/openai-completion-language-model.ts +20 -20
  27. package/src/embedding/openai-embedding-model.ts +5 -5
  28. package/src/files/openai-files-api.ts +17 -0
  29. package/src/files/openai-files-options.ts +18 -0
  30. package/src/files/openai-files.ts +102 -0
  31. package/src/image/openai-image-model.ts +9 -9
  32. package/src/index.ts +2 -0
  33. package/src/openai-config.ts +5 -5
  34. package/src/openai-language-model-capabilities.ts +3 -2
  35. package/src/openai-provider.ts +39 -21
  36. package/src/openai-tools.ts +12 -1
  37. package/src/responses/convert-openai-responses-usage.ts +2 -2
  38. package/src/responses/convert-to-openai-responses-input.ts +188 -14
  39. package/src/responses/map-openai-responses-finish-reason.ts +2 -2
  40. package/src/responses/openai-responses-api.ts +136 -2
  41. package/src/responses/openai-responses-language-model.ts +233 -37
  42. package/src/responses/openai-responses-options.ts +24 -2
  43. package/src/responses/openai-responses-prepare-tools.ts +34 -9
  44. package/src/responses/openai-responses-provider-metadata.ts +10 -0
  45. package/src/speech/openai-speech-model.ts +7 -7
  46. package/src/tool/custom.ts +0 -6
  47. package/src/tool/tool-search.ts +98 -0
  48. package/src/transcription/openai-transcription-model.ts +8 -8
@@ -1,8 +1,8 @@
1
- import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3CallOptions, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
1
+ import { LanguageModelV4, LanguageModelV4CallOptions, LanguageModelV4GenerateResult, LanguageModelV4StreamResult, EmbeddingModelV4, ImageModelV4, TranscriptionModelV4CallOptions, TranscriptionModelV4, SpeechModelV4, JSONValue } from '@ai-sdk/provider';
2
2
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
3
3
  import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
4
4
 
5
- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
5
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-audio-preview-2025-06-03' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.3-chat-latest' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | (string & {});
6
6
  declare const openaiLanguageModelChatOptions: _ai_sdk_provider_utils.LazySchema<{
7
7
  logitBias?: Record<number, number> | undefined;
8
8
  logprobs?: number | boolean | undefined;
@@ -33,8 +33,8 @@ type OpenAIChatConfig = {
33
33
  }) => string;
34
34
  fetch?: FetchFunction;
35
35
  };
36
- declare class OpenAIChatLanguageModel implements LanguageModelV3 {
37
- readonly specificationVersion = "v3";
36
+ declare class OpenAIChatLanguageModel implements LanguageModelV4 {
37
+ readonly specificationVersion = "v4";
38
38
  readonly modelId: OpenAIChatModelId;
39
39
  readonly supportedUrls: {
40
40
  'image/*': RegExp[];
@@ -43,8 +43,8 @@ declare class OpenAIChatLanguageModel implements LanguageModelV3 {
43
43
  constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
44
44
  get provider(): string;
45
45
  private getArgs;
46
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
47
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
46
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
47
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
48
48
  }
49
49
 
50
50
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | 'gpt-3.5-turbo-instruct-0914' | (string & {});
@@ -66,8 +66,8 @@ type OpenAICompletionConfig = {
66
66
  }) => string;
67
67
  fetch?: FetchFunction;
68
68
  };
69
- declare class OpenAICompletionLanguageModel implements LanguageModelV3 {
70
- readonly specificationVersion = "v3";
69
+ declare class OpenAICompletionLanguageModel implements LanguageModelV4 {
70
+ readonly specificationVersion = "v4";
71
71
  readonly modelId: OpenAICompletionModelId;
72
72
  private readonly config;
73
73
  private get providerOptionsName();
@@ -75,8 +75,8 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV3 {
75
75
  get provider(): string;
76
76
  readonly supportedUrls: Record<string, RegExp[]>;
77
77
  private getArgs;
78
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
79
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
78
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
79
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
80
80
  }
81
81
 
82
82
  type OpenAIConfig = {
@@ -89,12 +89,12 @@ type OpenAIConfig = {
89
89
  fetch?: FetchFunction;
90
90
  generateId?: () => string;
91
91
  /**
92
- * File ID prefixes used to identify file IDs in Responses API.
93
- * When undefined, all file data is treated as base64 content.
92
+ * This is soft-deprecated. Use provider references (e.g. `{ openai: 'file-abc123' }`)
93
+ * in file part data instead. File ID prefixes used to identify file IDs
94
+ * in Responses API. When undefined, all string file data is treated as
95
+ * base64 content.
94
96
  *
95
- * Examples:
96
- * - OpenAI: ['file-'] for IDs like 'file-abc123'
97
- * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
97
+ * TODO: remove in v8
98
98
  */
99
99
  fileIdPrefixes?: readonly string[];
100
100
  };
@@ -106,15 +106,15 @@ declare const openaiEmbeddingModelOptions: _ai_sdk_provider_utils.LazySchema<{
106
106
  }>;
107
107
  type OpenAIEmbeddingModelOptions = InferSchema<typeof openaiEmbeddingModelOptions>;
108
108
 
109
- declare class OpenAIEmbeddingModel implements EmbeddingModelV3 {
110
- readonly specificationVersion = "v3";
109
+ declare class OpenAIEmbeddingModel implements EmbeddingModelV4 {
110
+ readonly specificationVersion = "v4";
111
111
  readonly modelId: OpenAIEmbeddingModelId;
112
112
  readonly maxEmbeddingsPerCall = 2048;
113
113
  readonly supportsParallelCalls = true;
114
114
  private readonly config;
115
115
  get provider(): string;
116
116
  constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
117
- doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>>;
117
+ doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>>;
118
118
  }
119
119
 
120
120
  type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | 'chatgpt-image-latest' | (string & {});
@@ -126,14 +126,14 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
126
126
  currentDate?: () => Date;
127
127
  };
128
128
  }
129
- declare class OpenAIImageModel implements ImageModelV3 {
129
+ declare class OpenAIImageModel implements ImageModelV4 {
130
130
  readonly modelId: OpenAIImageModelId;
131
131
  private readonly config;
132
- readonly specificationVersion = "v3";
132
+ readonly specificationVersion = "v4";
133
133
  get maxImagesPerCall(): number;
134
134
  get provider(): string;
135
135
  constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
136
- doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
136
+ doGenerate({ prompt, files, mask, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV4['doGenerate']>>>;
137
137
  }
138
138
 
139
139
  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-mini-transcribe-2025-03-20' | 'gpt-4o-mini-transcribe-2025-12-15' | 'gpt-4o-transcribe' | 'gpt-4o-transcribe-diarize' | (string & {});
@@ -146,7 +146,7 @@ declare const openAITranscriptionModelOptions: _ai_sdk_provider_utils.LazySchema
146
146
  }>;
147
147
  type OpenAITranscriptionModelOptions = InferSchema<typeof openAITranscriptionModelOptions>;
148
148
 
149
- type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV3CallOptions, 'providerOptions'> & {
149
+ type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV4CallOptions, 'providerOptions'> & {
150
150
  providerOptions?: {
151
151
  openai?: OpenAITranscriptionModelOptions;
152
152
  };
@@ -156,14 +156,14 @@ interface OpenAITranscriptionModelConfig extends OpenAIConfig {
156
156
  currentDate?: () => Date;
157
157
  };
158
158
  }
159
- declare class OpenAITranscriptionModel implements TranscriptionModelV3 {
159
+ declare class OpenAITranscriptionModel implements TranscriptionModelV4 {
160
160
  readonly modelId: OpenAITranscriptionModelId;
161
161
  private readonly config;
162
- readonly specificationVersion = "v3";
162
+ readonly specificationVersion = "v4";
163
163
  get provider(): string;
164
164
  constructor(modelId: OpenAITranscriptionModelId, config: OpenAITranscriptionModelConfig);
165
165
  private getArgs;
166
- doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV3['doGenerate']>>>;
166
+ doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV4['doGenerate']>>>;
167
167
  }
168
168
 
169
169
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-1106' | 'tts-1-hd' | 'tts-1-hd-1106' | 'gpt-4o-mini-tts' | 'gpt-4o-mini-tts-2025-03-20' | 'gpt-4o-mini-tts-2025-12-15' | (string & {});
@@ -178,28 +178,28 @@ interface OpenAISpeechModelConfig extends OpenAIConfig {
178
178
  currentDate?: () => Date;
179
179
  };
180
180
  }
181
- declare class OpenAISpeechModel implements SpeechModelV3 {
181
+ declare class OpenAISpeechModel implements SpeechModelV4 {
182
182
  readonly modelId: OpenAISpeechModelId;
183
183
  private readonly config;
184
- readonly specificationVersion = "v3";
184
+ readonly specificationVersion = "v4";
185
185
  get provider(): string;
186
186
  constructor(modelId: OpenAISpeechModelId, config: OpenAISpeechModelConfig);
187
187
  private getArgs;
188
- doGenerate(options: Parameters<SpeechModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV3['doGenerate']>>>;
188
+ doGenerate(options: Parameters<SpeechModelV4['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV4['doGenerate']>>>;
189
189
  }
190
190
 
191
- type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5.3-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
191
+ type OpenAIResponsesModelId = 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-2025-11-13' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-2025-12-11' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5.2-pro-2025-12-11' | 'gpt-5.2-codex' | 'gpt-5.3-chat-latest' | 'gpt-5.3-codex' | 'gpt-5.4' | 'gpt-5.4-2026-03-05' | 'gpt-5.4-mini' | 'gpt-5.4-mini-2026-03-17' | 'gpt-5.4-nano' | 'gpt-5.4-nano-2026-03-17' | 'gpt-5.4-pro' | 'gpt-5.4-pro-2026-03-05' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | 'o4-mini' | 'o4-mini-2025-04-16' | (string & {});
192
192
 
193
- declare class OpenAIResponsesLanguageModel implements LanguageModelV3 {
194
- readonly specificationVersion = "v3";
193
+ declare class OpenAIResponsesLanguageModel implements LanguageModelV4 {
194
+ readonly specificationVersion = "v4";
195
195
  readonly modelId: OpenAIResponsesModelId;
196
196
  private readonly config;
197
197
  constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
198
198
  readonly supportedUrls: Record<string, RegExp[]>;
199
199
  get provider(): string;
200
200
  private getArgs;
201
- doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
202
- doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
201
+ doGenerate(options: LanguageModelV4CallOptions): Promise<LanguageModelV4GenerateResult>;
202
+ doStream(options: LanguageModelV4CallOptions): Promise<LanguageModelV4StreamResult>;
203
203
  }
204
204
 
205
205
  /**
@@ -265,6 +265,28 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
265
265
  } | null | undefined;
266
266
  service_tier?: string | null | undefined;
267
267
  };
268
+ } | {
269
+ type: "response.failed";
270
+ response: {
271
+ error?: {
272
+ message: string;
273
+ code?: string | null | undefined;
274
+ } | null | undefined;
275
+ incomplete_details?: {
276
+ reason: string;
277
+ } | null | undefined;
278
+ usage?: {
279
+ input_tokens: number;
280
+ output_tokens: number;
281
+ input_tokens_details?: {
282
+ cached_tokens?: number | null | undefined;
283
+ } | null | undefined;
284
+ output_tokens_details?: {
285
+ reasoning_tokens?: number | null | undefined;
286
+ } | null | undefined;
287
+ } | null | undefined;
288
+ service_tier?: string | null | undefined;
289
+ };
268
290
  } | {
269
291
  type: "response.created";
270
292
  response: {
@@ -359,6 +381,10 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
359
381
  action: {
360
382
  commands: string[];
361
383
  };
384
+ } | {
385
+ type: "compaction";
386
+ id: string;
387
+ encrypted_content?: string | null | undefined;
362
388
  } | {
363
389
  type: "shell_call_output";
364
390
  id: string;
@@ -374,6 +400,20 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
374
400
  exit_code: number;
375
401
  };
376
402
  }[];
403
+ } | {
404
+ type: "tool_search_call";
405
+ id: string;
406
+ execution: "server" | "client";
407
+ call_id: string | null;
408
+ status: "completed" | "in_progress" | "incomplete";
409
+ arguments: unknown;
410
+ } | {
411
+ type: "tool_search_output";
412
+ id: string;
413
+ execution: "server" | "client";
414
+ call_id: string | null;
415
+ status: "completed" | "in_progress" | "incomplete";
416
+ tools: Record<string, JSONValue | undefined>[];
377
417
  };
378
418
  } | {
379
419
  type: "response.output_item.done";
@@ -528,6 +568,10 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
528
568
  action: {
529
569
  commands: string[];
530
570
  };
571
+ } | {
572
+ type: "compaction";
573
+ id: string;
574
+ encrypted_content: string;
531
575
  } | {
532
576
  type: "shell_call_output";
533
577
  id: string;
@@ -543,6 +587,20 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
543
587
  exit_code: number;
544
588
  };
545
589
  }[];
590
+ } | {
591
+ type: "tool_search_call";
592
+ id: string;
593
+ execution: "server" | "client";
594
+ call_id: string | null;
595
+ status: "completed" | "in_progress" | "incomplete";
596
+ arguments: unknown;
597
+ } | {
598
+ type: "tool_search_output";
599
+ id: string;
600
+ execution: "server" | "client";
601
+ call_id: string | null;
602
+ status: "completed" | "in_progress" | "incomplete";
603
+ tools: Record<string, JSONValue | undefined>[];
546
604
  };
547
605
  } | {
548
606
  type: "response.function_call_arguments.delta";
@@ -652,6 +710,14 @@ type OpenaiResponsesReasoningProviderMetadata = {
652
710
  type OpenaiResponsesProviderMetadata = {
653
711
  openai: ResponsesProviderMetadata;
654
712
  };
713
+ type ResponsesCompactionProviderMetadata = {
714
+ type: 'compaction';
715
+ itemId: string;
716
+ encryptedContent?: string;
717
+ };
718
+ type OpenaiResponsesCompactionProviderMetadata = {
719
+ openai: ResponsesCompactionProviderMetadata;
720
+ };
655
721
  type ResponsesTextProviderMetadata = {
656
722
  itemId: string;
657
723
  phase?: 'commentary' | 'final_answer' | null;
@@ -772,7 +838,7 @@ declare const applyPatchToolFactory: _ai_sdk_provider_utils.ProviderToolFactoryW
772
838
  * (e.g., patch results or errors).
773
839
  */
774
840
  output?: string;
775
- }, {}>;
841
+ }, {}, {}>;
776
842
  /**
777
843
  * The apply_patch tool lets GPT-5.1 create, update, and delete files in your
778
844
  * codebase using structured diffs. Instead of just suggesting edits, the model
@@ -800,7 +866,7 @@ declare const applyPatch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSc
800
866
  * (e.g., patch results or errors).
801
867
  */
802
868
  output?: string;
803
- }, {}>;
869
+ }, {}, {}>;
804
870
 
805
871
  declare const codeInterpreterInputSchema: _ai_sdk_provider_utils.LazySchema<{
806
872
  containerId: string;
@@ -857,7 +923,7 @@ declare const codeInterpreterToolFactory: _ai_sdk_provider_utils.ProviderToolFac
857
923
  */
858
924
  url: string;
859
925
  }> | null;
860
- }, CodeInterpreterArgs>;
926
+ }, CodeInterpreterArgs, {}>;
861
927
  declare const codeInterpreter: (args?: CodeInterpreterArgs) => _ai_sdk_provider_utils.Tool<{
862
928
  /**
863
929
  * The code to run, or null if not available.
@@ -885,7 +951,7 @@ declare const codeInterpreter: (args?: CodeInterpreterArgs) => _ai_sdk_provider_
885
951
  */
886
952
  url: string;
887
953
  }> | null;
888
- }>;
954
+ }, {}>;
889
955
 
890
956
  declare const fileSearchArgsSchema: _ai_sdk_provider_utils.LazySchema<{
891
957
  vectorStoreIds: string[];
@@ -968,7 +1034,7 @@ declare const fileSearch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSc
968
1034
  * A filter to apply.
969
1035
  */
970
1036
  filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter;
971
- }>;
1037
+ }, {}>;
972
1038
 
973
1039
  declare const imageGenerationArgsSchema: _ai_sdk_provider_utils.LazySchema<{
974
1040
  background?: "auto" | "transparent" | "opaque" | undefined;
@@ -1049,7 +1115,7 @@ declare const imageGeneration: (args?: ImageGenerationArgs) => _ai_sdk_provider_
1049
1115
  * The generated image encoded in base64.
1050
1116
  */
1051
1117
  result: string;
1052
- }>;
1118
+ }, {}>;
1053
1119
 
1054
1120
  declare const webSearchPreviewArgsSchema: _ai_sdk_provider_utils.LazySchema<{
1055
1121
  searchContextSize?: "low" | "medium" | "high" | undefined;
@@ -1132,6 +1198,6 @@ declare const webSearchPreview: _ai_sdk_provider_utils.ProviderToolFactoryWithOu
1132
1198
  */
1133
1199
  timezone?: string;
1134
1200
  };
1135
- }>;
1201
+ }, {}>;
1136
1202
 
1137
- export { type ApplyPatchOperation, OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingModelOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAILanguageModelChatOptions, type OpenAILanguageModelCompletionOptions, OpenAIResponsesLanguageModel, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAISpeechModelOptions, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, type OpenaiResponsesProviderMetadata, type OpenaiResponsesReasoningProviderMetadata, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, type ResponsesProviderMetadata, type ResponsesReasoningProviderMetadata, type ResponsesSourceDocumentProviderMetadata, type ResponsesTextProviderMetadata, applyPatch, applyPatchArgsSchema, applyPatchInputSchema, applyPatchOutputSchema, applyPatchToolFactory, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionModelOptions, openaiEmbeddingModelOptions, openaiLanguageModelChatOptions, openaiLanguageModelCompletionOptions, openaiSpeechModelOptionsSchema, webSearchPreview, webSearchPreviewArgsSchema, webSearchPreviewInputSchema };
1203
+ export { type ApplyPatchOperation, OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingModelOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAILanguageModelChatOptions, type OpenAILanguageModelCompletionOptions, OpenAIResponsesLanguageModel, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAISpeechModelOptions, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, type OpenaiResponsesCompactionProviderMetadata, type OpenaiResponsesProviderMetadata, type OpenaiResponsesReasoningProviderMetadata, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, type ResponsesCompactionProviderMetadata, type ResponsesProviderMetadata, type ResponsesReasoningProviderMetadata, type ResponsesSourceDocumentProviderMetadata, type ResponsesTextProviderMetadata, applyPatch, applyPatchArgsSchema, applyPatchInputSchema, applyPatchOutputSchema, applyPatchToolFactory, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionModelOptions, openaiEmbeddingModelOptions, openaiLanguageModelChatOptions, openaiLanguageModelCompletionOptions, openaiSpeechModelOptionsSchema, webSearchPreview, webSearchPreviewArgsSchema, webSearchPreviewInputSchema };