@ai-sdk/openai 2.1.0-beta.1 → 2.1.0-beta.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,9 +1,10 @@
- import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
+ import { LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
+ import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
  import { FetchFunction } from '@ai-sdk/provider-utils';
  import { z } from 'zod/v4';
 
  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
- declare const openaiProviderOptions: z.ZodObject<{
+ declare const openaiChatLanguageModelOptions: z.ZodObject<{
      logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
      logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
      parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
@@ -33,7 +34,7 @@ declare const openaiProviderOptions: z.ZodObject<{
      promptCacheKey: z.ZodOptional<z.ZodString>;
      safetyIdentifier: z.ZodOptional<z.ZodString>;
  }, z.core.$strip>;
- type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
+ type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
 
  type OpenAIChatConfig = {
      provider: string;
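
Note: the chat provider-options schema and its inferred type are renamed here (openaiProviderOptions → openaiChatLanguageModelOptions, OpenAIProviderOptions → OpenAIChatLanguageModelOptions; see the updated export list at the end of this diff). A minimal migration sketch, assuming these symbols are consumed from the package's internal entry point:

    import {
      openaiChatLanguageModelOptions,
      type OpenAIChatLanguageModelOptions,
    } from '@ai-sdk/openai/internal';

    // All fields in the schema are optional, so a partial object type-checks.
    const options: OpenAIChatLanguageModelOptions = {
      parallelToolCalls: false,
      promptCacheKey: 'my-cache-key', // hypothetical cache key
    };

    // The exported Zod schema can validate untyped input at runtime.
    const parsed = openaiChatLanguageModelOptions.parse(options);
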
@@ -44,8 +45,8 @@ type OpenAIChatConfig = {
      }) => string;
      fetch?: FetchFunction;
  };
- declare class OpenAIChatLanguageModel implements LanguageModelV2 {
-     readonly specificationVersion = "v2";
+ declare class OpenAIChatLanguageModel implements LanguageModelV3 {
+     readonly specificationVersion = "v3";
      readonly modelId: OpenAIChatModelId;
      readonly supportedUrls: {
          'image/*': RegExp[];
@@ -54,8 +55,8 @@ declare class OpenAIChatLanguageModel implements LanguageModelV2 {
      constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
      get provider(): string;
      private getArgs;
-     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
-     doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
+     doGenerate(options: Parameters<LanguageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doGenerate']>>>;
+     doStream(options: Parameters<LanguageModelV3['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doStream']>>>;
  }
 
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
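
Note: every language, embedding, and image model in this diff moves from the V2 to the V3 model specification (specificationVersion switches from "v2" to "v3", and the implemented interfaces change accordingly). A sketch of constructing the chat model directly; the config fields beyond provider, url, and fetch are assumptions based on the V2-era config shape:

    import { OpenAIChatLanguageModel } from '@ai-sdk/openai/internal';
    import type { LanguageModelV3 } from '@ai-sdk/provider';

    // The model now satisfies LanguageModelV3 rather than LanguageModelV2.
    const model: LanguageModelV3 = new OpenAIChatLanguageModel('gpt-5', {
      provider: 'openai.chat',
      url: ({ path }) => `https://api.openai.com/v1${path}`,
      headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
    });

    console.log(model.specificationVersion); // "v3"
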
@@ -77,8 +78,8 @@ type OpenAICompletionConfig = {
      }) => string;
      fetch?: FetchFunction;
  };
- declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
-     readonly specificationVersion = "v2";
+ declare class OpenAICompletionLanguageModel implements LanguageModelV3 {
+     readonly specificationVersion = "v3";
      readonly modelId: OpenAICompletionModelId;
      private readonly config;
      private get providerOptionsName();
@@ -86,8 +87,8 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
      get provider(): string;
      readonly supportedUrls: Record<string, RegExp[]>;
      private getArgs;
-     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
-     doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
+     doGenerate(options: Parameters<LanguageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doGenerate']>>>;
+     doStream(options: Parameters<LanguageModelV3['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doStream']>>>;
  }
 
  type OpenAIConfig = {
@@ -117,15 +118,15 @@ declare const openaiEmbeddingProviderOptions: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;
 
- declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
-     readonly specificationVersion = "v2";
+ declare class OpenAIEmbeddingModel implements EmbeddingModelV3<string> {
+     readonly specificationVersion = "v3";
      readonly modelId: OpenAIEmbeddingModelId;
      readonly maxEmbeddingsPerCall = 2048;
      readonly supportsParallelCalls = true;
      private readonly config;
      get provider(): string;
      constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
-     doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
+     doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3<string>['doEmbed']>>>;
  }
 
  type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
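
Note: the embedding model follows the same V2 → V3 move. A sketch of a direct doEmbed call; the model id and config values are assumptions (the embedding id union is outside this hunk):

    import { OpenAIEmbeddingModel } from '@ai-sdk/openai/internal';

    const embeddingModel = new OpenAIEmbeddingModel('text-embedding-3-small', {
      provider: 'openai.embedding',
      url: ({ path }) => `https://api.openai.com/v1${path}`,
      headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
    });

    // Accepts up to maxEmbeddingsPerCall (2048) values per call.
    const { embeddings } = await embeddingModel.doEmbed({
      values: ['hello', 'world'],
    });
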
@@ -137,14 +138,14 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
          currentDate?: () => Date;
      };
  }
- declare class OpenAIImageModel implements ImageModelV2 {
+ declare class OpenAIImageModel implements ImageModelV3 {
      readonly modelId: OpenAIImageModelId;
      private readonly config;
-     readonly specificationVersion = "v2";
+     readonly specificationVersion = "v3";
      get maxImagesPerCall(): number;
      get provider(): string;
      constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
-     doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV2['doGenerate']>>>;
+     doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV3['doGenerate']>>>;
  }
 
  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
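
Note: the image model's call options are visible in the destructured doGenerate signature above. A sketch of a direct call; the result shape (base64-encoded images plus warnings) follows the image model spec and is an assumption here:

    import { OpenAIImageModel } from '@ai-sdk/openai/internal';

    const imageModel = new OpenAIImageModel('gpt-image-1', {
      provider: 'openai.image',
      url: ({ path }) => `https://api.openai.com/v1${path}`,
      headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
    });

    // All destructured options must be present, even if undefined.
    const { images } = await imageModel.doGenerate({
      prompt: 'A watercolor fox',
      n: 1,
      size: '1024x1024',
      aspectRatio: undefined,
      seed: undefined,
      providerOptions: {},
    });
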
@@ -202,18 +203,18 @@ declare class OpenAISpeechModel implements SpeechModelV2 {
      doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
  }
 
- type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
 
- declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
-     readonly specificationVersion = "v2";
+ declare class OpenAIResponsesLanguageModel implements LanguageModelV3 {
+     readonly specificationVersion = "v3";
      readonly modelId: OpenAIResponsesModelId;
      private readonly config;
      constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
      readonly supportedUrls: Record<string, RegExp[]>;
      get provider(): string;
      private getArgs;
-     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
-     doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
+     doGenerate(options: Parameters<LanguageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doGenerate']>>>;
+     doStream(options: Parameters<LanguageModelV3['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doStream']>>>;
  }
  declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
      include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
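
Note: besides the V3 move, the only change to OpenAIResponsesModelId is the new 'gpt-5-codex' member. A sketch (import path and config assumed as in the earlier examples):

    import { OpenAIResponsesLanguageModel } from '@ai-sdk/openai/internal';

    // 'gpt-5-codex' now type-checks without falling back to (string & {}).
    const codex = new OpenAIResponsesLanguageModel('gpt-5-codex', {
      provider: 'openai.responses',
      url: ({ path }) => `https://api.openai.com/v1${path}`,
      headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
    });
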
@@ -247,4 +248,318 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  }, z.core.$strip>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
- export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
+ declare const codeInterpreterInputSchema: z.ZodObject<{
+     code: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+     containerId: z.ZodString;
+ }, z.core.$strip>;
+ declare const codeInterpreterOutputSchema: z.ZodObject<{
+     outputs: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodDiscriminatedUnion<[z.ZodObject<{
+         type: z.ZodLiteral<"logs">;
+         logs: z.ZodString;
+     }, z.core.$strip>, z.ZodObject<{
+         type: z.ZodLiteral<"image">;
+         url: z.ZodString;
+     }, z.core.$strip>]>>>>;
+ }, z.core.$strip>;
+ declare const codeInterpreterArgsSchema: z.ZodObject<{
+     container: z.ZodOptional<z.ZodUnion<readonly [z.ZodString, z.ZodObject<{
+         fileIds: z.ZodOptional<z.ZodArray<z.ZodString>>;
+     }, z.core.$strip>]>>;
+ }, z.core.$strip>;
+ type CodeInterpreterArgs = {
+     /**
+      * The code interpreter container. Can be a container ID or an object that
+      * specifies uploaded file IDs to make available to your code.
+      */
+     container?: string | {
+         fileIds?: string[];
+     };
+ };
+ declare const codeInterpreterToolFactory: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{
+     /**
+      * The code to run, or null if not available.
+      */
+     code?: string | null;
+     /**
+      * The ID of the container used to run the code.
+      */
+     containerId: string;
+ }, {
+     /**
+      * The outputs generated by the code interpreter, such as logs or images.
+      * Can be null if no outputs are available.
+      */
+     outputs?: Array<{
+         type: "logs";
+         /**
+          * The logs output from the code interpreter.
+          */
+         logs: string;
+     } | {
+         type: "image";
+         /**
+          * The URL of the image output from the code interpreter.
+          */
+         url: string;
+     }> | null;
+ }, CodeInterpreterArgs>;
+ declare const codeInterpreter: (args?: CodeInterpreterArgs) => _ai_sdk_provider_utils.Tool<{
+     /**
+      * The code to run, or null if not available.
+      */
+     code?: string | null;
+     /**
+      * The ID of the container used to run the code.
+      */
+     containerId: string;
+ }, {
+     /**
+      * The outputs generated by the code interpreter, such as logs or images.
+      * Can be null if no outputs are available.
+      */
+     outputs?: Array<{
+         type: "logs";
+         /**
+          * The logs output from the code interpreter.
+          */
+         logs: string;
+     } | {
+         type: "image";
+         /**
+          * The URL of the image output from the code interpreter.
+          */
+         url: string;
+     }> | null;
+ }>;
+ 
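
Note: beta.11 adds provider-defined tools for the Responses API; codeInterpreter is the first. A usage sketch: the wiring through generateText and the tool-map key are assumptions, and the file ID is a placeholder:

    import { generateText } from 'ai';
    import { openai } from '@ai-sdk/openai';
    import { codeInterpreter } from '@ai-sdk/openai/internal';

    const result = await generateText({
      model: openai.responses('gpt-5'),
      tools: {
        // Make an uploaded file available inside the execution container.
        code_interpreter: codeInterpreter({
          container: { fileIds: ['file-abc123'] }, // placeholder file ID
        }),
      },
      prompt: 'Run a quick simulation and summarize the results.',
    });
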
+ /**
+  * A filter used to compare a specified attribute key to a given value using a defined comparison operation.
+  */
+ type OpenAIResponsesFileSearchToolComparisonFilter = {
+     /**
+      * The key to compare against the value.
+      */
+     key: string;
+     /**
+      * Specifies the comparison operator: eq, ne, gt, gte, lt, lte.
+      */
+     type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte';
+     /**
+      * The value to compare against the attribute key; supports string, number, or boolean types.
+      */
+     value: string | number | boolean;
+ };
+ /**
+  * Combine multiple filters using and or or.
+  */
+ type OpenAIResponsesFileSearchToolCompoundFilter = {
+     /**
+      * Type of operation: and or or.
+      */
+     type: 'and' | 'or';
+     /**
+      * Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
+      */
+     filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter>;
+ };
+ 
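
Note: the filter types compose recursively, but they do not appear in the export list below, so application code passes structurally matching objects. For example, a compound filter for region == 'us' AND year >= 2024 (attribute keys are illustrative):

    // Satisfies OpenAIResponsesFileSearchToolCompoundFilter as declared above.
    const filter = {
      type: 'and' as const,
      filters: [
        { key: 'region', type: 'eq' as const, value: 'us' },
        { key: 'year', type: 'gte' as const, value: 2024 },
      ],
    };
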
+ declare const fileSearchArgsSchema: z.ZodObject<{
+     vectorStoreIds: z.ZodArray<z.ZodString>;
+     maxNumResults: z.ZodOptional<z.ZodNumber>;
+     ranking: z.ZodOptional<z.ZodObject<{
+         ranker: z.ZodOptional<z.ZodString>;
+         scoreThreshold: z.ZodOptional<z.ZodNumber>;
+     }, z.core.$strip>>;
+     filters: z.ZodOptional<z.ZodUnion<readonly [z.ZodObject<{
+         key: z.ZodString;
+         type: z.ZodEnum<{
+             lt: "lt";
+             ne: "ne";
+             eq: "eq";
+             gt: "gt";
+             gte: "gte";
+             lte: "lte";
+         }>;
+         value: z.ZodUnion<readonly [z.ZodString, z.ZodNumber, z.ZodBoolean]>;
+     }, z.core.$strip>, z.ZodType<any, unknown, z.core.$ZodTypeInternals<any, unknown>>]>>;
+ }, z.core.$strip>;
+ declare const fileSearchOutputSchema: z.ZodObject<{
+     queries: z.ZodArray<z.ZodString>;
+     results: z.ZodNullable<z.ZodArray<z.ZodObject<{
+         attributes: z.ZodRecord<z.ZodString, z.ZodUnknown>;
+         fileId: z.ZodString;
+         filename: z.ZodString;
+         score: z.ZodNumber;
+         text: z.ZodString;
+     }, z.core.$strip>>>;
+ }, z.core.$strip>;
+ declare const fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{}, {
+     /**
+      * The search query to execute.
+      */
+     queries: string[];
+     /**
+      * The results of the file search tool call.
+      */
+     results: null | {
+         /**
+          * Set of 16 key-value pairs that can be attached to an object.
+          * This can be useful for storing additional information about the object
+          * in a structured format, and querying for objects via API or the dashboard.
+          * Keys are strings with a maximum length of 64 characters.
+          * Values are strings with a maximum length of 512 characters, booleans, or numbers.
+          */
+         attributes: Record<string, unknown>;
+         /**
+          * The unique ID of the file.
+          */
+         fileId: string;
+         /**
+          * The name of the file.
+          */
+         filename: string;
+         /**
+          * The relevance score of the file - a value between 0 and 1.
+          */
+         score: number;
+         /**
+          * The text that was retrieved from the file.
+          */
+         text: string;
+     }[];
+ }, {
+     /**
+      * List of vector store IDs to search through.
+      */
+     vectorStoreIds: string[];
+     /**
+      * Maximum number of search results to return. Defaults to 10.
+      */
+     maxNumResults?: number;
+     /**
+      * Ranking options for the search.
+      */
+     ranking?: {
+         /**
+          * The ranker to use for the file search.
+          */
+         ranker?: string;
+         /**
+          * The score threshold for the file search, a number between 0 and 1.
+          * Numbers closer to 1 will attempt to return only the most relevant results,
+          * but may return fewer results.
+          */
+         scoreThreshold?: number;
+     };
+     /**
+      * A filter to apply.
+      */
+     filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter;
+ }>;
+ 
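
Note: fileSearch takes the vector-store configuration as factory arguments; queries and results flow through the tool input/output. A sketch under the same wiring assumptions as the code interpreter example, with a placeholder store ID:

    import { generateText } from 'ai';
    import { openai } from '@ai-sdk/openai';
    import { fileSearch } from '@ai-sdk/openai/internal';

    const result = await generateText({
      model: openai.responses('gpt-5'),
      tools: {
        file_search: fileSearch({
          vectorStoreIds: ['vs_123'], // placeholder vector store ID
          maxNumResults: 5,
          ranking: { scoreThreshold: 0.5 },
        }),
      },
      prompt: 'What does the handbook say about travel expenses?',
    });
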
+ declare const imageGenerationArgsSchema: z.ZodObject<{
+     background: z.ZodOptional<z.ZodEnum<{
+         auto: "auto";
+         opaque: "opaque";
+         transparent: "transparent";
+     }>>;
+     inputFidelity: z.ZodOptional<z.ZodEnum<{
+         low: "low";
+         high: "high";
+     }>>;
+     inputImageMask: z.ZodOptional<z.ZodObject<{
+         fileId: z.ZodOptional<z.ZodString>;
+         imageUrl: z.ZodOptional<z.ZodString>;
+     }, z.core.$strip>>;
+     model: z.ZodOptional<z.ZodString>;
+     moderation: z.ZodOptional<z.ZodEnum<{
+         auto: "auto";
+     }>>;
+     outputCompression: z.ZodOptional<z.ZodNumber>;
+     outputFormat: z.ZodOptional<z.ZodEnum<{
+         png: "png";
+         jpeg: "jpeg";
+         webp: "webp";
+     }>>;
+     partialImages: z.ZodOptional<z.ZodNumber>;
+     quality: z.ZodOptional<z.ZodEnum<{
+         low: "low";
+         medium: "medium";
+         high: "high";
+         auto: "auto";
+     }>>;
+     size: z.ZodOptional<z.ZodEnum<{
+         auto: "auto";
+         "1024x1024": "1024x1024";
+         "1024x1536": "1024x1536";
+         "1536x1024": "1536x1024";
+     }>>;
+ }, z.core.$strict>;
+ declare const imageGenerationOutputSchema: z.ZodObject<{
+     result: z.ZodString;
+ }, z.core.$strip>;
+ type ImageGenerationArgs = {
+     /**
+      * Background type for the generated image. Default is 'auto'.
+      */
+     background?: 'auto' | 'opaque' | 'transparent';
+     /**
+      * Input fidelity for the generated image. Default is 'low'.
+      */
+     inputFidelity?: 'low' | 'high';
+     /**
+      * Optional mask for inpainting.
+      * Contains image_url (string, optional) and file_id (string, optional).
+      */
+     inputImageMask?: {
+         /**
+          * File ID for the mask image.
+          */
+         fileId?: string;
+         /**
+          * Base64-encoded mask image.
+          */
+         imageUrl?: string;
+     };
+     /**
+      * The image generation model to use. Default: gpt-image-1.
+      */
+     model?: string;
+     /**
+      * Moderation level for the generated image. Default: auto.
+      */
+     moderation?: 'auto';
+     /**
+      * Compression level for the output image. Default: 100.
+      */
+     outputCompression?: number;
+     /**
+      * The output format of the generated image. One of png, webp, or jpeg.
+      * Default: png.
+      */
+     outputFormat?: 'png' | 'jpeg' | 'webp';
+     /**
+      * Number of partial images to generate in streaming mode, from 0 (default value) to 3.
+      */
+     partialImages?: number;
+     /**
+      * The quality of the generated image.
+      * One of low, medium, high, or auto. Default: auto.
+      */
+     quality?: 'auto' | 'low' | 'medium' | 'high';
+     /**
+      * The size of the generated image.
+      * One of 1024x1024, 1024x1536, 1536x1024, or auto.
+      * Default: auto.
+      */
+     size?: 'auto' | '1024x1024' | '1024x1536' | '1536x1024';
+ };
+ declare const imageGeneration: (args?: ImageGenerationArgs) => _ai_sdk_provider_utils.Tool<{}, {
+     /**
+      * The generated image encoded in base64.
+      */
+     result: string;
+ }>;
+ 
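
Note: imageGeneration exposes the Responses API image tool; its factory arguments mirror imageGenerationArgsSchema, and the tool output is a base64-encoded string. A sketch under the same wiring assumptions as above:

    import { generateText } from 'ai';
    import { openai } from '@ai-sdk/openai';
    import { imageGeneration } from '@ai-sdk/openai/internal';

    const result = await generateText({
      model: openai.responses('gpt-5'),
      tools: {
        image_generation: imageGeneration({
          size: '1024x1024',
          quality: 'high',
          outputFormat: 'png',
        }),
      },
      prompt: 'Generate a logo sketch for a coffee shop.',
    });
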
+ export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions };