@ai-sdk/openai 0.0.0-85f9a635-20240518005312 → 0.0.0-fd764a60-20260114143805

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
package/dist/index.d.ts CHANGED
@@ -1,180 +1,869 @@
- import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider';
+ import * as _ai_sdk_provider from '@ai-sdk/provider';
+ import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
+ import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
+ import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';

- type OpenAIChatModelId = 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k-0613' | (string & {});
- interface OpenAIChatSettings {
- /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
- }
-
- type OpenAIChatConfig = {
- provider: string;
- baseURL: string;
- compatibility: 'strict' | 'compatible';
- headers: () => Record<string, string | undefined>;
- };
- declare class OpenAIChatLanguageModel implements LanguageModelV1 {
- readonly specificationVersion = "v1";
- readonly defaultObjectGenerationMode = "tool";
- readonly modelId: OpenAIChatModelId;
- readonly settings: OpenAIChatSettings;
- private readonly config;
- constructor(modelId: OpenAIChatModelId, settings: OpenAIChatSettings, config: OpenAIChatConfig);
- get provider(): string;
- private getArgs;
- doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
- doRawStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Omit<Awaited<ReturnType<LanguageModelV1['doStream']>>, 'stream'> & {
- stream: ReadableStream<Uint8Array>;
- }>;
- doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
- }
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.2' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | (string & {});
+ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
+ logitBias?: Record<number, number> | undefined;
+ logprobs?: number | boolean | undefined;
+ parallelToolCalls?: boolean | undefined;
+ user?: string | undefined;
+ reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh" | undefined;
+ maxCompletionTokens?: number | undefined;
+ store?: boolean | undefined;
+ metadata?: Record<string, string> | undefined;
+ prediction?: Record<string, any> | undefined;
+ serviceTier?: "default" | "auto" | "flex" | "priority" | undefined;
+ strictJsonSchema?: boolean | undefined;
+ textVerbosity?: "low" | "medium" | "high" | undefined;
+ promptCacheKey?: string | undefined;
+ promptCacheRetention?: "in_memory" | "24h" | undefined;
+ safetyIdentifier?: string | undefined;
+ systemMessageMode?: "remove" | "system" | "developer" | undefined;
+ forceReasoning?: boolean | undefined;
+ }>;
+ type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;

  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
- interface OpenAICompletionSettings {
+
+ type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
+
+ type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | 'gpt-image-1.5' | (string & {});
+
+ declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
  /**
- Echo back the prompt in addition to the completion.
+ * An object describing the specific action taken in this web search call.
+ * Includes details on how the model used the web (search, open_page, find_in_page).
  */
- echo?: boolean;
+ action: {
+ /**
+ * Action type "search" - Performs a web search query.
+ */
+ type: "search";
+ /**
+ * The search query.
+ */
+ query?: string;
+ } | {
+ /**
+ * Action type "openPage" - Opens a specific URL from search results.
+ */
+ type: "openPage";
+ /**
+ * The URL opened by the model.
+ */
+ url?: string | null;
+ } | {
+ /**
+ * Action type "findInPage": Searches for a pattern within a loaded page.
+ */
+ type: "findInPage";
+ /**
+ * The URL of the page searched for the pattern.
+ */
+ url?: string | null;
+ /**
+ * The pattern or text to search for within the page.
+ */
+ pattern?: string | null;
+ };
  /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
+ * Optional sources cited by the model for the web search call.
  */
- logitBias?: Record<number, number>;
+ sources?: Array<{
+ type: "url";
+ url: string;
+ } | {
+ type: "api";
+ name: string;
+ }>;
+ }, {
  /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
+ * Whether to use external web access for fetching live content.
+ * - true: Fetch live web content (default)
+ * - false: Use cached/indexed results
  */
- logprobs?: boolean | number;
+ externalWebAccess?: boolean;
  /**
- The suffix that comes after a completion of inserted text.
+ * Filters for the search.
  */
- suffix?: string;
+ filters?: {
+ /**
+ * Allowed domains for the search.
+ * If not provided, all domains are allowed.
+ * Subdomains of the provided domains are allowed as well.
+ */
+ allowedDomains?: string[];
+ };
  /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
+ * Search context size to use for the web search.
+ * - high: Most comprehensive context, highest cost, slower response
+ * - medium: Balanced context, cost, and latency (default)
+ * - low: Least context, lowest cost, fastest response
  */
- user?: string;
- }
+ searchContextSize?: "low" | "medium" | "high";
+ /**
+ * User location information to provide geographically relevant search results.
+ */
+ userLocation?: {
+ /**
+ * Type of location (always 'approximate')
+ */
+ type: "approximate";
+ /**
+ * Two-letter ISO country code (e.g., 'US', 'GB')
+ */
+ country?: string;
+ /**
+ * City name (free text, e.g., 'Minneapolis')
+ */
+ city?: string;
+ /**
+ * Region name (free text, e.g., 'Minnesota')
+ */
+ region?: string;
+ /**
+ * IANA timezone (e.g., 'America/Chicago')
+ */
+ timezone?: string;
+ };
+ }>;

- type OpenAICompletionConfig = {
- provider: string;
- baseURL: string;
- compatibility: 'strict' | 'compatible';
- headers: () => Record<string, string | undefined>;
+ /**
+ * A filter used to compare a specified attribute key to a given value using a defined comparison operation.
+ */
+ type OpenAIResponsesFileSearchToolComparisonFilter = {
+ /**
+ * The key to compare against the value.
+ */
+ key: string;
+ /**
+ * Specifies the comparison operator: eq, ne, gt, gte, lt, lte, in, nin.
+ */
+ type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte' | 'in' | 'nin';
+ /**
+ * The value to compare against the attribute key; supports string, number, boolean, or array of string types.
+ */
+ value: string | number | boolean | string[];
  };
- declare class OpenAICompletionLanguageModel implements LanguageModelV1 {
- readonly specificationVersion = "v1";
- readonly defaultObjectGenerationMode: undefined;
- readonly modelId: OpenAICompletionModelId;
- readonly settings: OpenAICompletionSettings;
- private readonly config;
- constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
- get provider(): string;
- private getArgs;
- doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
- doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
- }
+ /**
+ * Combine multiple filters using and or or.
+ */
+ type OpenAIResponsesFileSearchToolCompoundFilter = {
+ /**
+ * Type of operation: and or or.
+ */
+ type: 'and' | 'or';
+ /**
+ * Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
+ */
+ filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter>;
+ };
+ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
+ type: "unknown_chunk";
+ message: string;
+ } | {
+ type: "response.output_text.delta";
+ item_id: string;
+ delta: string;
+ logprobs?: {
+ token: string;
+ logprob: number;
+ top_logprobs: {
+ token: string;
+ logprob: number;
+ }[];
+ }[] | null | undefined;
+ } | {
+ type: "response.completed" | "response.incomplete";
+ response: {
+ usage: {
+ input_tokens: number;
+ output_tokens: number;
+ input_tokens_details?: {
+ cached_tokens?: number | null | undefined;
+ } | null | undefined;
+ output_tokens_details?: {
+ reasoning_tokens?: number | null | undefined;
+ } | null | undefined;
+ };
+ incomplete_details?: {
+ reason: string;
+ } | null | undefined;
+ service_tier?: string | null | undefined;
+ };
+ } | {
+ type: "response.created";
+ response: {
+ id: string;
+ created_at: number;
+ model: string;
+ service_tier?: string | null | undefined;
+ };
+ } | {
+ type: "response.output_item.added";
+ output_index: number;
+ item: {
+ type: "message";
+ id: string;
+ } | {
+ type: "reasoning";
+ id: string;
+ encrypted_content?: string | null | undefined;
+ } | {
+ type: "function_call";
+ id: string;
+ call_id: string;
+ name: string;
+ arguments: string;
+ } | {
+ type: "web_search_call";
+ id: string;
+ status: string;
+ } | {
+ type: "computer_call";
+ id: string;
+ status: string;
+ } | {
+ type: "file_search_call";
+ id: string;
+ } | {
+ type: "image_generation_call";
+ id: string;
+ } | {
+ type: "code_interpreter_call";
+ id: string;
+ container_id: string;
+ code: string | null;
+ outputs: ({
+ type: "logs";
+ logs: string;
+ } | {
+ type: "image";
+ url: string;
+ })[] | null;
+ status: string;
+ } | {
+ type: "mcp_call";
+ id: string;
+ status: string;
+ approval_request_id?: string | null | undefined;
+ } | {
+ type: "mcp_list_tools";
+ id: string;
+ } | {
+ type: "mcp_approval_request";
+ id: string;
+ } | {
+ type: "apply_patch_call";
+ id: string;
+ call_id: string;
+ status: "completed" | "in_progress";
+ operation: {
+ type: "create_file";
+ path: string;
+ diff: string;
+ } | {
+ type: "delete_file";
+ path: string;
+ } | {
+ type: "update_file";
+ path: string;
+ diff: string;
+ };
+ } | {
+ type: "shell_call";
+ id: string;
+ call_id: string;
+ status: "completed" | "in_progress" | "incomplete";
+ action: {
+ commands: string[];
+ };
+ };
+ } | {
+ type: "response.output_item.done";
+ output_index: number;
+ item: {
+ type: "message";
+ id: string;
+ } | {
+ type: "reasoning";
+ id: string;
+ encrypted_content?: string | null | undefined;
+ } | {
+ type: "function_call";
+ id: string;
+ call_id: string;
+ name: string;
+ arguments: string;
+ status: "completed";
+ } | {
+ type: "code_interpreter_call";
+ id: string;
+ code: string | null;
+ container_id: string;
+ outputs: ({
+ type: "logs";
+ logs: string;
+ } | {
+ type: "image";
+ url: string;
+ })[] | null;
+ } | {
+ type: "image_generation_call";
+ id: string;
+ result: string;
+ } | {
+ type: "web_search_call";
+ id: string;
+ status: string;
+ action: {
+ type: "search";
+ query?: string | null | undefined;
+ sources?: ({
+ type: "url";
+ url: string;
+ } | {
+ type: "api";
+ name: string;
+ })[] | null | undefined;
+ } | {
+ type: "open_page";
+ url?: string | null | undefined;
+ } | {
+ type: "find_in_page";
+ url?: string | null | undefined;
+ pattern?: string | null | undefined;
+ };
+ } | {
+ type: "file_search_call";
+ id: string;
+ queries: string[];
+ results?: {
+ attributes: Record<string, string | number | boolean>;
+ file_id: string;
+ filename: string;
+ score: number;
+ text: string;
+ }[] | null | undefined;
+ } | {
+ type: "local_shell_call";
+ id: string;
+ call_id: string;
+ action: {
+ type: "exec";
+ command: string[];
+ timeout_ms?: number | undefined;
+ user?: string | undefined;
+ working_directory?: string | undefined;
+ env?: Record<string, string> | undefined;
+ };
+ } | {
+ type: "computer_call";
+ id: string;
+ status: "completed";
+ } | {
+ type: "mcp_call";
+ id: string;
+ status: string;
+ arguments: string;
+ name: string;
+ server_label: string;
+ output?: string | null | undefined;
+ error?: string | {
+ [x: string]: unknown;
+ type?: string | undefined;
+ code?: string | number | undefined;
+ message?: string | undefined;
+ } | null | undefined;
+ approval_request_id?: string | null | undefined;
+ } | {
+ type: "mcp_list_tools";
+ id: string;
+ server_label: string;
+ tools: {
+ name: string;
+ input_schema: any;
+ description?: string | undefined;
+ annotations?: Record<string, unknown> | undefined;
+ }[];
+ error?: string | {
+ [x: string]: unknown;
+ type?: string | undefined;
+ code?: string | number | undefined;
+ message?: string | undefined;
+ } | undefined;
+ } | {
+ type: "mcp_approval_request";
+ id: string;
+ server_label: string;
+ name: string;
+ arguments: string;
+ approval_request_id?: string | undefined;
+ } | {
+ type: "apply_patch_call";
+ id: string;
+ call_id: string;
+ status: "completed" | "in_progress";
+ operation: {
+ type: "create_file";
+ path: string;
+ diff: string;
+ } | {
+ type: "delete_file";
+ path: string;
+ } | {
+ type: "update_file";
+ path: string;
+ diff: string;
+ };
+ } | {
+ type: "shell_call";
+ id: string;
+ call_id: string;
+ status: "completed" | "in_progress" | "incomplete";
+ action: {
+ commands: string[];
+ };
+ };
+ } | {
+ type: "response.function_call_arguments.delta";
+ item_id: string;
+ output_index: number;
+ delta: string;
+ } | {
+ type: "response.image_generation_call.partial_image";
+ item_id: string;
+ output_index: number;
+ partial_image_b64: string;
+ } | {
+ type: "response.code_interpreter_call_code.delta";
+ item_id: string;
+ output_index: number;
+ delta: string;
+ } | {
+ type: "response.code_interpreter_call_code.done";
+ item_id: string;
+ output_index: number;
+ code: string;
+ } | {
+ type: "response.output_text.annotation.added";
+ annotation: {
+ type: "url_citation";
+ start_index: number;
+ end_index: number;
+ url: string;
+ title: string;
+ } | {
+ type: "file_citation";
+ file_id: string;
+ filename: string;
+ index: number;
+ } | {
+ type: "container_file_citation";
+ container_id: string;
+ file_id: string;
+ filename: string;
+ start_index: number;
+ end_index: number;
+ } | {
+ type: "file_path";
+ file_id: string;
+ index: number;
+ };
+ } | {
+ type: "response.reasoning_summary_part.added";
+ item_id: string;
+ summary_index: number;
+ } | {
+ type: "response.reasoning_summary_text.delta";
+ item_id: string;
+ summary_index: number;
+ delta: string;
+ } | {
+ type: "response.reasoning_summary_part.done";
+ item_id: string;
+ summary_index: number;
+ } | {
+ type: "response.apply_patch_call_operation_diff.delta";
+ item_id: string;
+ output_index: number;
+ delta: string;
+ obfuscation?: string | null | undefined;
+ } | {
+ type: "response.apply_patch_call_operation_diff.done";
+ item_id: string;
+ output_index: number;
+ diff: string;
+ } | {
+ type: "error";
+ sequence_number: number;
+ error: {
+ type: string;
+ code: string;
+ message: string;
+ param?: string | null | undefined;
+ };
+ }>;

- type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
- interface OpenAIEmbeddingSettings {
+ /**
+ * Type definitions for the apply_patch operations.
+ */
+ type ApplyPatchOperation = {
+ type: 'create_file';
  /**
- Override the maximum number of embeddings per call.
+ * Path of the file to create relative to the workspace root.
  */
- maxEmbeddingsPerCall?: number;
+ path: string;
  /**
- Override the parallelism of embedding calls.
- */
- supportsParallelCalls?: boolean;
+ * Unified diff content to apply when creating the file.
+ */
+ diff: string;
+ } | {
+ type: 'delete_file';
  /**
- The number of dimensions the resulting output embeddings should have.
- Only supported in text-embedding-3 and later models.
+ * Path of the file to delete relative to the workspace root.
  */
- dimensions?: number;
+ path: string;
+ } | {
+ type: 'update_file';
  /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
- }
+ * Path of the file to update relative to the workspace root.
+ */
+ path: string;
+ /**
+ * Unified diff content to apply to the existing file.
+ */
+ diff: string;
+ };

- type OpenAIEmbeddingConfig = {
- provider: string;
- baseURL: string;
- headers: () => Record<string, string | undefined>;
+ declare const openaiTools: {
+ /**
+ * The apply_patch tool lets GPT-5.1 create, update, and delete files in your
+ * codebase using structured diffs. Instead of just suggesting edits, the model
+ * emits patch operations that your application applies and then reports back on,
+ * enabling iterative, multi-step code editing workflows.
+ *
+ */
+ applyPatch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{
+ callId: string;
+ operation: ApplyPatchOperation;
+ }, {
+ status: "completed" | "failed";
+ output?: string;
+ }, {}>;
+ /**
+ * The Code Interpreter tool allows models to write and run Python code in a
+ * sandboxed environment to solve complex problems in domains like data analysis,
+ * coding, and math.
+ *
+ * @param container - The container to use for the code interpreter.
+ */
+ codeInterpreter: (args?: {
+ container?: string | {
+ fileIds?: string[];
+ };
+ }) => _ai_sdk_provider_utils.Tool<{
+ code?: string | null;
+ containerId: string;
+ }, {
+ outputs?: Array<{
+ type: "logs";
+ logs: string;
+ } | {
+ type: "image";
+ url: string;
+ }> | null;
+ }>;
+ /**
+ * File search is a tool available in the Responses API. It enables models to
+ * retrieve information in a knowledge base of previously uploaded files through
+ * semantic and keyword search.
+ *
+ * @param vectorStoreIds - The vector store IDs to use for the file search.
+ * @param maxNumResults - The maximum number of results to return.
+ * @param ranking - The ranking options to use for the file search.
+ * @param filters - The filters to use for the file search.
+ */
+ fileSearch: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
+ queries: string[];
+ results: null | {
+ attributes: Record<string, unknown>;
+ fileId: string;
+ filename: string;
+ score: number;
+ text: string;
+ }[];
+ }, {
+ vectorStoreIds: string[];
+ maxNumResults?: number;
+ ranking?: {
+ ranker?: string;
+ scoreThreshold?: number;
+ };
+ filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter;
+ }>;
+ /**
+ * The image generation tool allows you to generate images using a text prompt,
+ * and optionally image inputs. It leverages the GPT Image model,
+ * and automatically optimizes text inputs for improved performance.
+ *
+ * @param background - Background type for the generated image. One of 'auto', 'opaque', or 'transparent'.
+ * @param inputFidelity - Input fidelity for the generated image. One of 'low' or 'high'.
+ * @param inputImageMask - Optional mask for inpainting. Contains fileId and/or imageUrl.
+ * @param model - The image generation model to use. Default: gpt-image-1.
+ * @param moderation - Moderation level for the generated image. Default: 'auto'.
+ * @param outputCompression - Compression level for the output image (0-100).
+ * @param outputFormat - The output format of the generated image. One of 'png', 'jpeg', or 'webp'.
+ * @param partialImages - Number of partial images to generate in streaming mode (0-3).
+ * @param quality - The quality of the generated image. One of 'auto', 'low', 'medium', or 'high'.
+ * @param size - The size of the generated image. One of 'auto', '1024x1024', '1024x1536', or '1536x1024'.
+ */
+ imageGeneration: (args?: {
+ background?: "auto" | "opaque" | "transparent";
+ inputFidelity?: "low" | "high";
+ inputImageMask?: {
+ fileId?: string;
+ imageUrl?: string;
+ };
+ model?: string;
+ moderation?: "auto";
+ outputCompression?: number;
+ outputFormat?: "png" | "jpeg" | "webp";
+ partialImages?: number;
+ quality?: "auto" | "low" | "medium" | "high";
+ size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024";
+ }) => _ai_sdk_provider_utils.Tool<{}, {
+ result: string;
+ }>;
+ /**
+ * Local shell is a tool that allows agents to run shell commands locally
+ * on a machine you or the user provides.
+ *
+ * Supported models: `gpt-5-codex` and `codex-mini-latest`
+ */
+ localShell: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{
+ action: {
+ type: "exec";
+ command: string[];
+ timeoutMs?: number;
+ user?: string;
+ workingDirectory?: string;
+ env?: Record<string, string>;
+ };
+ }, {
+ output: string;
+ }, {}>;
+ /**
+ * The shell tool allows the model to interact with your local computer through
+ * a controlled command-line interface. The model proposes shell commands; your
+ * integration executes them and returns the outputs.
+ *
+ * Available through the Responses API for use with GPT-5.1.
+ *
+ * WARNING: Running arbitrary shell commands can be dangerous. Always sandbox
+ * execution or add strict allow-/deny-lists before forwarding a command to
+ * the system shell.
+ */
+ shell: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{
+ action: {
+ commands: string[];
+ timeoutMs?: number;
+ maxOutputLength?: number;
+ };
+ }, {
+ output: Array<{
+ stdout: string;
+ stderr: string;
+ outcome: {
+ type: "timeout";
+ } | {
+ type: "exit";
+ exitCode: number;
+ };
+ }>;
+ }, {}>;
+ /**
+ * Web search allows models to access up-to-date information from the internet
+ * and provide answers with sourced citations.
+ *
+ * @param searchContextSize - The search context size to use for the web search.
+ * @param userLocation - The user location to use for the web search.
+ */
+ webSearchPreview: _ai_sdk_provider_utils.ProviderToolFactoryWithOutputSchema<{}, {
+ action: {
+ type: "search";
+ query?: string;
+ } | {
+ type: "openPage";
+ url?: string | null;
+ } | {
+ type: "findInPage";
+ url?: string | null;
+ pattern?: string | null;
+ };
+ }, {
+ searchContextSize?: "low" | "medium" | "high";
+ userLocation?: {
+ type: "approximate";
+ country?: string;
+ city?: string;
+ region?: string;
+ timezone?: string;
+ };
+ }>;
+ /**
+ * Web search allows models to access up-to-date information from the internet
+ * and provide answers with sourced citations.
+ *
+ * @param filters - The filters to use for the web search.
+ * @param searchContextSize - The search context size to use for the web search.
+ * @param userLocation - The user location to use for the web search.
+ */
+ webSearch: (args?: Parameters<typeof webSearchToolFactory>[0]) => _ai_sdk_provider_utils.Tool<{}, {
+ action: {
+ type: "search";
+ query?: string;
+ } | {
+ type: "openPage";
+ url?: string | null;
+ } | {
+ type: "findInPage";
+ url?: string | null;
+ pattern?: string | null;
+ };
+ sources?: Array<{
+ type: "url";
+ url: string;
+ } | {
+ type: "api";
+ name: string;
+ }>;
+ }>;
+ /**
+ * MCP (Model Context Protocol) allows models to call tools exposed by
+ * remote MCP servers or service connectors.
+ *
+ * @param serverLabel - Label to identify the MCP server.
+ * @param allowedTools - Allowed tool names or filter object.
+ * @param authorization - OAuth access token for the MCP server/connector.
+ * @param connectorId - Identifier for a service connector.
+ * @param headers - Optional headers to include in MCP requests.
+ * // param requireApproval - Approval policy ('always'|'never'|filter object). (Removed - always 'never')
+ * @param serverDescription - Optional description of the server.
+ * @param serverUrl - URL for the MCP server.
+ */
+ mcp: (args: {
+ serverLabel: string;
+ allowedTools?: string[] | {
+ readOnly?: boolean;
+ toolNames?: string[];
+ };
+ authorization?: string;
+ connectorId?: string;
+ headers?: Record<string, string>;
+ requireApproval?: "always" | "never" | {
+ never?: {
+ toolNames?: string[];
+ };
+ };
+ serverDescription?: string;
+ serverUrl?: string;
+ }) => _ai_sdk_provider_utils.Tool<{}, {
+ type: "call";
+ serverLabel: string;
+ name: string;
+ arguments: string;
+ output?: string | null;
+ error?: _ai_sdk_provider.JSONValue;
+ }>;
  };
- declare class OpenAIEmbeddingModel implements EmbeddingModelV1<string> {
- readonly specificationVersion = "v1";
- readonly modelId: OpenAIEmbeddingModelId;
- private readonly config;
- private readonly settings;
- get provider(): string;
- get maxEmbeddingsPerCall(): number;
- get supportsParallelCalls(): boolean;
- constructor(modelId: OpenAIEmbeddingModelId, settings: OpenAIEmbeddingSettings, config: OpenAIEmbeddingConfig);
- doEmbed({ values, abortSignal, }: Parameters<EmbeddingModelV1<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV1<string>['doEmbed']>>>;
- }

- interface OpenAIProvider {
- (modelId: 'gpt-3.5-turbo-instruct', settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
- (modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): OpenAIChatLanguageModel;
+ type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5.1-codex-max' | 'gpt-5.2' | 'gpt-5.2-chat-latest' | 'gpt-5.2-pro' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
+ conversation?: string | null | undefined;
+ include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
+ instructions?: string | null | undefined;
+ logprobs?: number | boolean | undefined;
+ maxToolCalls?: number | null | undefined;
+ metadata?: any;
+ parallelToolCalls?: boolean | null | undefined;
+ previousResponseId?: string | null | undefined;
+ promptCacheKey?: string | null | undefined;
+ promptCacheRetention?: "in_memory" | "24h" | null | undefined;
+ reasoningEffort?: string | null | undefined;
+ reasoningSummary?: string | null | undefined;
+ safetyIdentifier?: string | null | undefined;
+ serviceTier?: "default" | "auto" | "flex" | "priority" | null | undefined;
+ store?: boolean | null | undefined;
+ strictJsonSchema?: boolean | null | undefined;
+ textVerbosity?: "low" | "medium" | "high" | null | undefined;
+ truncation?: "auto" | "disabled" | null | undefined;
+ user?: string | null | undefined;
+ systemMessageMode?: "remove" | "system" | "developer" | undefined;
+ forceReasoning?: boolean | undefined;
+ }>;
+ type OpenAIResponsesProviderOptions = InferSchema<typeof openaiResponsesProviderOptionsSchema>;
+
+ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
+
+ type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
+
+ interface OpenAIProvider extends ProviderV3 {
+ (modelId: OpenAIResponsesModelId): LanguageModelV3;
+ /**
+ Creates an OpenAI model for text generation.
+ */
+ languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
  /**
  Creates an OpenAI chat model for text generation.
  */
- chat(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): OpenAIChatLanguageModel;
+ chat(modelId: OpenAIChatModelId): LanguageModelV3;
+ /**
+ Creates an OpenAI responses API model for text generation.
+ */
+ responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
  /**
  Creates an OpenAI completion model for text generation.
  */
- completion(modelId: OpenAICompletionModelId, settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
+ completion(modelId: OpenAICompletionModelId): LanguageModelV3;
  /**
  Creates a model for text embeddings.
  */
- embedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): OpenAIEmbeddingModel;
+ embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+ /**
+ Creates a model for text embeddings.
+ */
+ embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+ /**
+ * @deprecated Use `embedding` instead.
+ */
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+ /**
+ * @deprecated Use `embeddingModel` instead.
+ */
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
+ /**
+ Creates a model for image generation.
+ */
+ image(modelId: OpenAIImageModelId): ImageModelV3;
+ /**
+ Creates a model for image generation.
+ */
+ imageModel(modelId: OpenAIImageModelId): ImageModelV3;
+ /**
+ Creates a model for transcription.
+ */
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV3;
+ /**
+ Creates a model for speech generation.
+ */
+ speech(modelId: OpenAISpeechModelId): SpeechModelV3;
+ /**
+ OpenAI-specific tools.
+ */
+ tools: typeof openaiTools;
  }
  interface OpenAIProviderSettings {
  /**
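In practice, the options and tools declared above are consumed through the AI SDK's `providerOptions` and `tools` parameters. A minimal usage sketch, assuming the `ai` package's v5 `generateText` API; the model id, prompt, and option values are illustrative:

import { generateText } from 'ai';
import { openai, type OpenAIResponsesProviderOptions } from '@ai-sdk/openai';

const { text, sources } = await generateText({
  // bare model ids now resolve to Responses API language models (LanguageModelV3)
  model: openai('gpt-5.1'),
  prompt: 'What changed in the latest release?',
  tools: {
    // provider-executed web search from the new `openai.tools` map
    web_search: openai.tools.webSearch({ searchContextSize: 'low' }),
  },
  providerOptions: {
    openai: {
      reasoningEffort: 'low',
      textVerbosity: 'medium',
    } satisfies OpenAIResponsesProviderOptions,
  },
});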
@@ -182,10 +871,6 @@ interface OpenAIProviderSettings {
  */
  baseURL?: string;
  /**
- @deprecated Use `baseURL` instead.
- */
- baseUrl?: string;
- /**
  API key for authenticating requests.
  */
  apiKey?: string;
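Beyond language models, the widened `OpenAIProvider` interface in the first hunk exposes embedding, image, transcription, and speech factories directly; the per-model settings objects are gone, so each factory now takes only a model id. A minimal sketch (model ids are examples drawn from the unions above):

import { openai } from '@ai-sdk/openai';

const embedder = openai.embeddingModel('text-embedding-3-small'); // EmbeddingModelV3
const imageModel = openai.imageModel('gpt-image-1'); // ImageModelV3
const transcriber = openai.transcription('whisper-1'); // TranscriptionModelV3
const speaker = openai.speech('gpt-4o-mini-tts'); // SpeechModelV3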
@@ -202,54 +887,52 @@ interface OpenAIProviderSettings {
  */
  headers?: Record<string, string>;
  /**
- OpenAI compatibility mode. Should be set to `strict` when using the OpenAI API,
- and `compatible` when using 3rd party providers. In `compatible` mode, newer
- information such as streamOptions are not being sent. Defaults to 'compatible'.
+ Provider name. Overrides the `openai` default name for 3rd party providers.
  */
- compatibility?: 'strict' | 'compatible';
+ name?: string;
+ /**
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
+ or to provide a custom fetch implementation for e.g. testing.
+ */
+ fetch?: FetchFunction;
  }
  /**
  Create an OpenAI provider instance.
  */
  declare function createOpenAI(options?: OpenAIProviderSettings): OpenAIProvider;
  /**
- Default OpenAI provider instance. It uses 'strict' compatibility mode.
+ Default OpenAI provider instance.
  */
  declare const openai: OpenAIProvider;

- /**
- @deprecated Use `createOpenAI` instead.
- */
- declare class OpenAI {
- /**
- Use a different URL prefix for API calls, e.g. to use proxy servers.
- The default prefix is `https://api.openai.com/v1`.
- */
- readonly baseURL: string;
- /**
- API key that is being send using the `Authorization` header.
- It defaults to the `OPENAI_API_KEY` environment variable.
- */
- readonly apiKey?: string;
- /**
- OpenAI Organization.
- */
- readonly organization?: string;
- /**
- OpenAI project.
- */
- readonly project?: string;
- /**
- Custom headers to include in the requests.
- */
- readonly headers?: Record<string, string>;
- /**
- * Creates a new OpenAI provider instance.
- */
- constructor(options?: OpenAIProviderSettings);
- private get baseConfig();
- chat(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): OpenAIChatLanguageModel;
- completion(modelId: OpenAICompletionModelId, settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
- }
+ type OpenaiResponsesChunk = InferSchema<typeof openaiResponsesChunkSchema>;
+ type ResponsesOutputTextAnnotationProviderMetadata = Extract<OpenaiResponsesChunk, {
+ type: 'response.output_text.annotation.added';
+ }>['annotation'];
+ type ResponsesTextProviderMetadata = {
+ itemId: string;
+ annotations?: Array<ResponsesOutputTextAnnotationProviderMetadata>;
+ };
+ type OpenaiResponsesTextProviderMetadata = {
+ openai: ResponsesTextProviderMetadata;
+ };
+ type ResponsesSourceDocumentProviderMetadata = {
+ type: 'file_citation';
+ fileId: string;
+ index: number;
+ } | {
+ type: 'container_file_citation';
+ fileId: string;
+ containerId: string;
+ } | {
+ type: 'file_path';
+ fileId: string;
+ index: number;
+ };
+ type OpenaiResponsesSourceDocumentProviderMetadata = {
+ openai: ResponsesSourceDocumentProviderMetadata;
+ };
+
+ declare const VERSION: string;

- export { OpenAI, type OpenAIProvider, type OpenAIProviderSettings, createOpenAI, openai };
+ export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, VERSION, createOpenAI, openai };
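On the configuration side, the removed `compatibility` mode is superseded by the `name` setting for third-party endpoints, and a custom `fetch` can be injected. A minimal sketch of the new `createOpenAI` options (the gateway URL is a placeholder):

import { createOpenAI } from '@ai-sdk/openai';

const provider = createOpenAI({
  baseURL: 'https://gateway.example.com/v1', // placeholder proxy endpoint; the deprecated `baseUrl` alias is gone
  apiKey: process.env.OPENAI_API_KEY,
  name: 'my-gateway', // replaces the removed `compatibility` switch for 3rd party providers
  fetch: async (input, init) => {
    // intercept or instrument every request, e.g. for logging or tests
    console.log('openai request:', String(input));
    return fetch(input, init);
  },
});

const model = provider.responses('gpt-4o'); // LanguageModelV3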