@ai-sdk/openai 2.0.0-canary.11 → 2.0.0-canary.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,4 @@
- import { LanguageModelV2, EmbeddingModelV2, ImageModelV1, TranscriptionModelV1CallOptions, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
+ import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV1CallOptions, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
  import { z } from 'zod';

@@ -40,6 +40,12 @@ declare const openaiProviderOptions: z.ZodObject<{
  * Parameters for prediction mode.
  */
  prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
+ /**
+ * Whether to use structured outputs.
+ *
+ * @default true
+ */
+ structuredOutputs: z.ZodOptional<z.ZodBoolean>;
  }, "strip", z.ZodTypeAny, {
  user?: string | undefined;
  logitBias?: Record<number, number> | undefined;
@@ -49,6 +55,7 @@ declare const openaiProviderOptions: z.ZodObject<{
  store?: boolean | undefined;
  metadata?: Record<string, string> | undefined;
  prediction?: Record<string, any> | undefined;
+ structuredOutputs?: boolean | undefined;
  }, {
  user?: string | undefined;
  logitBias?: Record<number, number> | undefined;
@@ -58,16 +65,9 @@ declare const openaiProviderOptions: z.ZodObject<{
  store?: boolean | undefined;
  metadata?: Record<string, string> | undefined;
  prediction?: Record<string, any> | undefined;
+ structuredOutputs?: boolean | undefined;
  }>;
  type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
- interface OpenAIChatSettings {
- /**
- Whether to use structured outputs. Defaults to false.
-
- When enabled, tool calls and object generation will be strict and follow the provided schema.
- */
- structuredOutputs?: boolean;
- }

  type OpenAIChatConfig = {
  provider: string;
@@ -82,9 +82,8 @@ type OpenAIChatConfig = {
  declare class OpenAIChatLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
  readonly modelId: OpenAIChatModelId;
- readonly settings: OpenAIChatSettings;
  private readonly config;
- constructor(modelId: OpenAIChatModelId, settings: OpenAIChatSettings, config: OpenAIChatConfig);
+ constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
  get provider(): string;
  getSupportedUrls(): Promise<Record<string, RegExp[]>>;
  private getArgs;
@@ -93,11 +92,11 @@ declare class OpenAIChatLanguageModel implements LanguageModelV2 {
  }

  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
- interface OpenAICompletionSettings {
+ declare const openaiCompletionProviderOptions: z.ZodObject<{
  /**
  Echo back the prompt in addition to the completion.
  */
- echo?: boolean;
+ echo: z.ZodOptional<z.ZodBoolean>;
  /**
  Modify the likelihood of specified tokens appearing in the completion.

@@ -111,18 +110,29 @@ interface OpenAICompletionSettings {

  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
- */
- logitBias?: Record<number, number>;
+ */
+ logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
  /**
  The suffix that comes after a completion of inserted text.
- */
- suffix?: string;
+ */
+ suffix: z.ZodOptional<z.ZodString>;
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
- */
- user?: string;
- }
+ */
+ user: z.ZodOptional<z.ZodString>;
+ }, "strip", z.ZodTypeAny, {
+ user?: string | undefined;
+ logitBias?: Record<string, number> | undefined;
+ echo?: boolean | undefined;
+ suffix?: string | undefined;
+ }, {
+ user?: string | undefined;
+ logitBias?: Record<string, number> | undefined;
+ echo?: boolean | undefined;
+ suffix?: string | undefined;
+ }>;
+ type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;

  type OpenAICompletionConfig = {
  provider: string;
@@ -137,9 +147,9 @@ type OpenAICompletionConfig = {
  declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = "v2";
  readonly modelId: OpenAICompletionModelId;
- readonly settings: OpenAICompletionSettings;
  private readonly config;
- constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
+ private get providerOptionsName();
+ constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
  get provider(): string;
  getSupportedUrls(): Promise<Record<string, RegExp[]>>;
  private getArgs;
@@ -159,16 +169,6 @@ type OpenAIConfig = {
  };

  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
- interface OpenAIEmbeddingSettings {
- /**
- Override the maximum number of embeddings per call.
- */
- maxEmbeddingsPerCall?: number;
- /**
- Override the parallelism of embedding calls.
- */
- supportsParallelCalls?: boolean;
- }
  declare const openaiEmbeddingProviderOptions: z.ZodObject<{
  /**
  The number of dimensions the resulting output embeddings should have.
@@ -192,17 +192,17 @@ type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOpti
  declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
  readonly specificationVersion = "v2";
  readonly modelId: OpenAIEmbeddingModelId;
+ readonly maxEmbeddingsPerCall = 2048;
+ readonly supportsParallelCalls = true;
  private readonly config;
- private readonly settings;
  get provider(): string;
- get maxEmbeddingsPerCall(): number;
- get supportsParallelCalls(): boolean;
- constructor(modelId: OpenAIEmbeddingModelId, settings: OpenAIEmbeddingSettings, config: OpenAIConfig);
+ constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
  doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
  }

- type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+ type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
  declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
+ declare const hasDefaultResponseFormat: Set<string>;
  interface OpenAIImageSettings {
  /**
  Override the maximum number of images per call (default is dependent on the
@@ -216,7 +216,7 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
  currentDate?: () => Date;
  };
  }
- declare class OpenAIImageModel implements ImageModelV1 {
+ declare class OpenAIImageModel implements ImageModelV2 {
  readonly modelId: OpenAIImageModelId;
  private readonly settings;
  private readonly config;
@@ -224,47 +224,39 @@ declare class OpenAIImageModel implements ImageModelV1 {
  get maxImagesPerCall(): number;
  get provider(): string;
  constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV1['doGenerate']>>>;
+ doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV2['doGenerate']>>>;
  }

  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
- type OpenAITranscriptionModelOptions = {
+ declare const openAITranscriptionProviderOptions: z.ZodObject<{
  /**
  * Additional information to include in the transcription response.
  */
- include?: string[];
+ include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodString, "many">>>;
  /**
  * The language of the input audio in ISO-639-1 format.
  */
- language?: string;
+ language: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  /**
  * An optional text to guide the model's style or continue a previous audio segment.
  */
- prompt?: string;
+ prompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  /**
  * The sampling temperature, between 0 and 1.
  * @default 0
  */
- temperature?: number;
+ temperature: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
  /**
  * The timestamp granularities to populate for this transcription.
  * @default ['segment']
  */
- timestamp_granularities?: Array<'word' | 'segment'>;
- };
-
- declare const openAIProviderOptionsSchema: z.ZodObject<{
- include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodString, "many">>>;
- language: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- prompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- temperature: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodNumber>>>;
- timestampGranularities: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>>;
+ timestampGranularities: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>>;
  }, "strip", z.ZodTypeAny, {
- temperature: number | null;
- timestampGranularities: ("word" | "segment")[] | null;
  prompt?: string | null | undefined;
+ temperature?: number | null | undefined;
  include?: string[] | null | undefined;
  language?: string | null | undefined;
+ timestampGranularities?: ("word" | "segment")[] | null | undefined;
  }, {
  prompt?: string | null | undefined;
  temperature?: number | null | undefined;
@@ -272,9 +264,11 @@ declare const openAIProviderOptionsSchema: z.ZodObject<{
  language?: string | null | undefined;
  timestampGranularities?: ("word" | "segment")[] | null | undefined;
  }>;
+ type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;
+
  type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV1CallOptions, 'providerOptions'> & {
  providerOptions?: {
- openai?: z.infer<typeof openAIProviderOptionsSchema>;
+ openai?: OpenAITranscriptionProviderOptions;
  };
  };
  interface OpenAITranscriptionModelConfig extends OpenAIConfig {
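For transcription, the snake_case `timestamp_granularities` option disappears; everything now flows through the single exported `openAITranscriptionProviderOptions` schema, where `temperature` and `timestampGranularities` are plain optionals in the parsed output rather than defaulted required fields. A sketch assuming the AI SDK's `experimental_transcribe` helper (an assumption; only the model types appear in this diff):

```ts
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const result = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('meeting.mp3'),
  providerOptions: {
    openai: {
      language: 'en', // ISO-639-1
      timestampGranularities: ['word'], // camelCase replaces timestamp_granularities
    },
  },
});

console.log(result.text);
```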
@@ -366,4 +360,4 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  }>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

- export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, modelMaxImagesPerCall, openaiEmbeddingProviderOptions, openaiProviderOptions };
+ export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };