@ai-sdk/openai 2.0.0-alpha.9 → 2.0.0-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,84 +1,28 @@
- import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV1CallOptions, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
+ import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z } from 'zod';
+ import { z } from 'zod/v4';

  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
  declare const openaiProviderOptions: z.ZodObject<{
- /**
- * Modify the likelihood of specified tokens appearing in the completion.
- *
- * Accepts a JSON object that maps tokens (specified by their token ID in
- * the GPT tokenizer) to an associated bias value from -100 to 100.
- */
- logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
- /**
- * Return the log probabilities of the tokens.
- *
- * Setting to true will return the log probabilities of the tokens that
- * were generated.
- *
- * Setting to a number will return the log probabilities of the top n
- * tokens that were generated.
- */
- logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
- /**
- * Whether to enable parallel function calling during tool use. Default to true.
- */
+ logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
+ logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
  parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to
- * monitor and detect abuse.
- */
  user: z.ZodOptional<z.ZodString>;
- /**
- * Reasoning effort for reasoning models. Defaults to `medium`.
- */
- reasoningEffort: z.ZodOptional<z.ZodEnum<["low", "medium", "high"]>>;
- /**
- * Maximum number of completion tokens to generate. Useful for reasoning models.
- */
+ reasoningEffort: z.ZodOptional<z.ZodEnum<{
+ low: "low";
+ medium: "medium";
+ high: "high";
+ }>>;
  maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
- /**
- * Whether to enable persistence in responses API.
- */
  store: z.ZodOptional<z.ZodBoolean>;
- /**
- * Metadata to associate with the request.
- */
  metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
- /**
- * Parameters for prediction mode.
- */
  prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
- /**
- * Whether to use structured outputs.
- *
- * @default true
- */
  structuredOutputs: z.ZodOptional<z.ZodBoolean>;
- }, "strip", z.ZodTypeAny, {
- user?: string | undefined;
- logitBias?: Record<number, number> | undefined;
- logprobs?: number | boolean | undefined;
- parallelToolCalls?: boolean | undefined;
- reasoningEffort?: "low" | "medium" | "high" | undefined;
- maxCompletionTokens?: number | undefined;
- store?: boolean | undefined;
- metadata?: Record<string, string> | undefined;
- prediction?: Record<string, any> | undefined;
- structuredOutputs?: boolean | undefined;
- }, {
- user?: string | undefined;
- logitBias?: Record<number, number> | undefined;
- logprobs?: number | boolean | undefined;
- parallelToolCalls?: boolean | undefined;
- reasoningEffort?: "low" | "medium" | "high" | undefined;
- maxCompletionTokens?: number | undefined;
- store?: boolean | undefined;
- metadata?: Record<string, string> | undefined;
- prediction?: Record<string, any> | undefined;
- structuredOutputs?: boolean | undefined;
- }>;
+ serviceTier: z.ZodOptional<z.ZodEnum<{
+ auto: "auto";
+ flex: "flex";
+ }>>;
+ }, z.core.$strip>;
  type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;

  type OpenAIChatConfig = {
@@ -106,57 +50,12 @@ declare class OpenAIChatLanguageModel implements LanguageModelV2 {
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
  declare const openaiCompletionProviderOptions: z.ZodObject<{
- /**
- Echo back the prompt in addition to the completion.
- */
  echo: z.ZodOptional<z.ZodBoolean>;
- /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
  logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
- /**
- The suffix that comes after a completion of inserted text.
- */
  suffix: z.ZodOptional<z.ZodString>;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
  user: z.ZodOptional<z.ZodString>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
- Setting to true will return the log probabilities of the tokens that
- were generated.
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
- }, "strip", z.ZodTypeAny, {
- user?: string | undefined;
- logitBias?: Record<string, number> | undefined;
- logprobs?: number | boolean | undefined;
- echo?: boolean | undefined;
- suffix?: string | undefined;
- }, {
- user?: string | undefined;
- logitBias?: Record<string, number> | undefined;
- logprobs?: number | boolean | undefined;
- echo?: boolean | undefined;
- suffix?: string | undefined;
- }>;
+ logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
+ }, z.core.$strip>;
  type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;

  type OpenAICompletionConfig = {
@@ -194,23 +93,9 @@ type OpenAIConfig = {
  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
  declare const openaiEmbeddingProviderOptions: z.ZodObject<{
- /**
- The number of dimensions the resulting output embeddings should have.
- Only supported in text-embedding-3 and later models.
- */
  dimensions: z.ZodOptional<z.ZodNumber>;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
  user: z.ZodOptional<z.ZodString>;
- }, "strip", z.ZodTypeAny, {
- user?: string | undefined;
- dimensions?: number | undefined;
- }, {
- user?: string | undefined;
- dimensions?: number | undefined;
- }>;
+ }, z.core.$strip>;
  type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;

  declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
@@ -245,44 +130,18 @@ declare class OpenAIImageModel implements ImageModelV2 {
  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
  declare const openAITranscriptionProviderOptions: z.ZodObject<{
- /**
- * Additional information to include in the transcription response.
- */
- include: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
- /**
- * The language of the input audio in ISO-639-1 format.
- */
+ include: z.ZodOptional<z.ZodArray<z.ZodString>>;
  language: z.ZodOptional<z.ZodString>;
- /**
- * An optional text to guide the model's style or continue a previous audio segment.
- */
  prompt: z.ZodOptional<z.ZodString>;
- /**
- * The sampling temperature, between 0 and 1.
- * @default 0
- */
  temperature: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
- /**
- * The timestamp granularities to populate for this transcription.
- * @default ['segment']
- */
- timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>;
- }, "strip", z.ZodTypeAny, {
- prompt?: string | undefined;
- temperature?: number | undefined;
- include?: string[] | undefined;
- language?: string | undefined;
- timestampGranularities?: ("word" | "segment")[] | undefined;
- }, {
- prompt?: string | undefined;
- temperature?: number | undefined;
- include?: string[] | undefined;
- language?: string | undefined;
- timestampGranularities?: ("word" | "segment")[] | undefined;
- }>;
+ timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<{
+ word: "word";
+ segment: "segment";
+ }>>>>;
+ }, z.core.$strip>;
  type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;

- type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV1CallOptions, 'providerOptions'> & {
+ type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV2CallOptions, 'providerOptions'> & {
  providerOptions?: {
  openai?: OpenAITranscriptionProviderOptions;
  };
@@ -292,14 +151,14 @@ interface OpenAITranscriptionModelConfig extends OpenAIConfig {
  currentDate?: () => Date;
  };
  }
- declare class OpenAITranscriptionModel implements TranscriptionModelV1 {
+ declare class OpenAITranscriptionModel implements TranscriptionModelV2 {
  readonly modelId: OpenAITranscriptionModelId;
  private readonly config;
- readonly specificationVersion = "v1";
+ readonly specificationVersion = "v2";
  get provider(): string;
  constructor(modelId: OpenAITranscriptionModelId, config: OpenAITranscriptionModelConfig);
  private getArgs;
- doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV1['doGenerate']>>>;
+ doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV2['doGenerate']>>>;
  }

  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
@@ -307,27 +166,21 @@ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string &
  declare const OpenAIProviderOptionsSchema: z.ZodObject<{
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  speed: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
- }, "strip", z.ZodTypeAny, {
- instructions?: string | null | undefined;
- speed?: number | null | undefined;
- }, {
- instructions?: string | null | undefined;
- speed?: number | null | undefined;
- }>;
+ }, z.core.$strip>;
  type OpenAISpeechCallOptions = z.infer<typeof OpenAIProviderOptionsSchema>;
  interface OpenAISpeechModelConfig extends OpenAIConfig {
  _internal?: {
  currentDate?: () => Date;
  };
  }
- declare class OpenAISpeechModel implements SpeechModelV1 {
+ declare class OpenAISpeechModel implements SpeechModelV2 {
  readonly modelId: OpenAISpeechModelId;
  private readonly config;
- readonly specificationVersion = "v1";
+ readonly specificationVersion = "v2";
  get provider(): string;
  constructor(modelId: OpenAISpeechModelId, config: OpenAISpeechModelConfig);
  private getArgs;
- doGenerate(options: Parameters<SpeechModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV1['doGenerate']>>>;
+ doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
  }

  type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
@@ -353,27 +206,11 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- }, "strip", z.ZodTypeAny, {
- user?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
- reasoningEffort?: string | null | undefined;
- store?: boolean | null | undefined;
- metadata?: any;
- instructions?: string | null | undefined;
- previousResponseId?: string | null | undefined;
- strictSchemas?: boolean | null | undefined;
- reasoningSummary?: string | null | undefined;
- }, {
- user?: string | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
- reasoningEffort?: string | null | undefined;
- store?: boolean | null | undefined;
- metadata?: any;
- instructions?: string | null | undefined;
- previousResponseId?: string | null | undefined;
- strictSchemas?: boolean | null | undefined;
- reasoningSummary?: string | null | undefined;
- }>;
+ serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+ auto: "auto";
+ flex: "flex";
+ }>>>;
+ }, z.core.$strip>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

  export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
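
For orientation, here is a minimal consumer-side sketch of the reshaped chat provider options (the new `serviceTier` field and the zod v4 enum/record forms). It assumes the `openaiProviderOptions` schema and `OpenAIProviderOptions` type shown above are importable from the package's internal entry point; the `@ai-sdk/openai/internal` path is an assumption, not something this diff states.

```ts
// Sketch only: exercising the reshaped provider-options schema from this diff.
// The import path below is assumed; the diff only shows the declarations.
import {
  openaiProviderOptions,
  type OpenAIProviderOptions,
} from '@ai-sdk/openai/internal';

// The object-form zod v4 enums still accept plain string literals, and the
// new `serviceTier` option takes 'auto' or 'flex'.
const options = {
  serviceTier: 'flex',
  reasoningEffort: 'high',
  maxCompletionTokens: 1024,
  user: 'user-1234',
} satisfies OpenAIProviderOptions;

// `openaiProviderOptions` is now a zod/v4 object schema, so standard parsing applies.
const parsed = openaiProviderOptions.parse(options);
console.log(parsed.serviceTier); // 'flex'
```

The `z.core.$strip` type parameter and the dropped JSDoc blocks appear to come from the zod v3 → v4 declaration emit rather than from behavioral changes; the substantive additions are `serviceTier` (chat and responses options) and the V1 → V2 bump of the transcription and speech model specifications.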