@ai-sdk/openai 1.3.22 → 2.0.0-alpha.1

This diff compares the contents of two publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the versions exactly as they appear in the public registry.
@@ -1,89 +1,88 @@
1
- import { LanguageModelV1, EmbeddingModelV1, ImageModelV1, TranscriptionModelV1CallOptions, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
1
+ import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV1CallOptions, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
2
2
  import { FetchFunction } from '@ai-sdk/provider-utils';
3
3
  import { z } from 'zod';
4
4
 
5
5
  type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
6
- interface OpenAIChatSettings {
6
+ declare const openaiProviderOptions: z.ZodObject<{
7
7
  /**
8
- Modify the likelihood of specified tokens appearing in the completion.
9
-
10
- Accepts a JSON object that maps tokens (specified by their token ID in
11
- the GPT tokenizer) to an associated bias value from -100 to 100. You
12
- can use this tokenizer tool to convert text to token IDs. Mathematically,
13
- the bias is added to the logits generated by the model prior to sampling.
14
- The exact effect will vary per model, but values between -1 and 1 should
15
- decrease or increase likelihood of selection; values like -100 or 100
16
- should result in a ban or exclusive selection of the relevant token.
17
-
18
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
19
- token from being generated.
20
- */
21
- logitBias?: Record<number, number>;
8
+ * Modify the likelihood of specified tokens appearing in the completion.
9
+ *
10
+ * Accepts a JSON object that maps tokens (specified by their token ID in
11
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
12
+ */
13
+ logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
22
14
  /**
23
- Return the log probabilities of the tokens. Including logprobs will increase
24
- the response size and can slow down response times. However, it can
25
- be useful to better understand how the model is behaving.
26
-
27
- Setting to true will return the log probabilities of the tokens that
28
- were generated.
29
-
30
- Setting to a number will return the log probabilities of the top n
31
- tokens that were generated.
32
- */
33
- logprobs?: boolean | number;
15
+ * Return the log probabilities of the tokens.
16
+ *
17
+ * Setting to true will return the log probabilities of the tokens that
18
+ * were generated.
19
+ *
20
+ * Setting to a number will return the log probabilities of the top n
21
+ * tokens that were generated.
22
+ */
23
+ logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
34
24
  /**
35
- Whether to enable parallel function calling during tool use. Default to true.
25
+ * Whether to enable parallel function calling during tool use. Default to true.
36
26
  */
37
- parallelToolCalls?: boolean;
27
+ parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
38
28
  /**
39
- Whether to use structured outputs. Defaults to false.
40
-
41
- When enabled, tool calls and object generation will be strict and follow the provided schema.
42
- */
43
- structuredOutputs?: boolean;
29
+ * A unique identifier representing your end-user, which can help OpenAI to
30
+ * monitor and detect abuse.
31
+ */
32
+ user: z.ZodOptional<z.ZodString>;
44
33
  /**
45
- Whether to use legacy function calling. Defaults to false.
46
-
47
- Required by some open source inference engines which do not support the `tools` API. May also
48
- provide a workaround for `parallelToolCalls` resulting in the provider buffering tool calls,
49
- which causes `streamObject` to be non-streaming.
50
-
51
- Prefer setting `parallelToolCalls: false` over this option.
52
-
53
- @deprecated this API is supported but deprecated by OpenAI.
34
+ * Reasoning effort for reasoning models. Defaults to `medium`.
54
35
  */
55
- useLegacyFunctionCalling?: boolean;
36
+ reasoningEffort: z.ZodOptional<z.ZodEnum<["low", "medium", "high"]>>;
56
37
  /**
57
- A unique identifier representing your end-user, which can help OpenAI to
58
- monitor and detect abuse. Learn more.
59
- */
60
- user?: string;
38
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
39
+ */
40
+ maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
61
41
  /**
62
- Automatically download images and pass the image as data to the model.
63
- OpenAI supports image URLs for public models, so this is only needed for
64
- private models or when the images are not publicly accessible.
65
-
66
- Defaults to `false`.
42
+ * Whether to enable persistence in responses API.
67
43
  */
68
- downloadImages?: boolean;
44
+ store: z.ZodOptional<z.ZodBoolean>;
69
45
  /**
70
- Simulates streaming by using a normal generate call and returning it as a stream.
71
- Enable this if the model that you are using does not support streaming.
72
-
73
- Defaults to `false`.
74
-
75
- @deprecated Use `simulateStreamingMiddleware` instead.
46
+ * Metadata to associate with the request.
76
47
  */
77
- simulateStreaming?: boolean;
48
+ metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
78
49
  /**
79
- Reasoning effort for reasoning models. Defaults to `medium`.
50
+ * Parameters for prediction mode.
80
51
  */
81
- reasoningEffort?: 'low' | 'medium' | 'high';
82
- }
52
+ prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
53
+ /**
54
+ * Whether to use structured outputs.
55
+ *
56
+ * @default true
57
+ */
58
+ structuredOutputs: z.ZodOptional<z.ZodBoolean>;
59
+ }, "strip", z.ZodTypeAny, {
60
+ user?: string | undefined;
61
+ logitBias?: Record<number, number> | undefined;
62
+ logprobs?: number | boolean | undefined;
63
+ parallelToolCalls?: boolean | undefined;
64
+ reasoningEffort?: "low" | "medium" | "high" | undefined;
65
+ maxCompletionTokens?: number | undefined;
66
+ store?: boolean | undefined;
67
+ metadata?: Record<string, string> | undefined;
68
+ prediction?: Record<string, any> | undefined;
69
+ structuredOutputs?: boolean | undefined;
70
+ }, {
71
+ user?: string | undefined;
72
+ logitBias?: Record<number, number> | undefined;
73
+ logprobs?: number | boolean | undefined;
74
+ parallelToolCalls?: boolean | undefined;
75
+ reasoningEffort?: "low" | "medium" | "high" | undefined;
76
+ maxCompletionTokens?: number | undefined;
77
+ store?: boolean | undefined;
78
+ metadata?: Record<string, string> | undefined;
79
+ prediction?: Record<string, any> | undefined;
80
+ structuredOutputs?: boolean | undefined;
81
+ }>;
82
+ type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
83
83
 
84
84
  type OpenAIChatConfig = {
85
85
  provider: string;
86
- compatibility: 'strict' | 'compatible';
87
86
  headers: () => Record<string, string | undefined>;
88
87
  url: (options: {
89
88
  modelId: string;
@@ -91,27 +90,26 @@ type OpenAIChatConfig = {
91
90
  }) => string;
92
91
  fetch?: FetchFunction;
93
92
  };
94
- declare class OpenAIChatLanguageModel implements LanguageModelV1 {
95
- readonly specificationVersion = "v1";
93
+ declare class OpenAIChatLanguageModel implements LanguageModelV2 {
94
+ readonly specificationVersion = "v2";
96
95
  readonly modelId: OpenAIChatModelId;
97
- readonly settings: OpenAIChatSettings;
96
+ readonly supportedUrls: {
97
+ 'image/*': RegExp[];
98
+ };
98
99
  private readonly config;
99
- constructor(modelId: OpenAIChatModelId, settings: OpenAIChatSettings, config: OpenAIChatConfig);
100
- get supportsStructuredOutputs(): boolean;
101
- get defaultObjectGenerationMode(): "tool" | "json";
100
+ constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
102
101
  get provider(): string;
103
- get supportsImageUrls(): boolean;
104
102
  private getArgs;
105
- doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
106
- doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
103
+ doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
104
+ doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
107
105
  }
108
106
 
109
107
  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
110
- interface OpenAICompletionSettings {
108
+ declare const openaiCompletionProviderOptions: z.ZodObject<{
111
109
  /**
112
110
  Echo back the prompt in addition to the completion.
113
111
  */
114
- echo?: boolean;
112
+ echo: z.ZodOptional<z.ZodBoolean>;
115
113
  /**
116
114
  Modify the likelihood of specified tokens appearing in the completion.
117
115
 
@@ -125,34 +123,44 @@ interface OpenAICompletionSettings {
125
123
 
126
124
  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
127
125
  token from being generated.
128
- */
129
- logitBias?: Record<number, number>;
126
+ */
127
+ logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
128
+ /**
129
+ The suffix that comes after a completion of inserted text.
130
+ */
131
+ suffix: z.ZodOptional<z.ZodString>;
132
+ /**
133
+ A unique identifier representing your end-user, which can help OpenAI to
134
+ monitor and detect abuse. Learn more.
135
+ */
136
+ user: z.ZodOptional<z.ZodString>;
130
137
  /**
131
138
  Return the log probabilities of the tokens. Including logprobs will increase
132
139
  the response size and can slow down response times. However, it can
133
140
  be useful to better understand how the model is behaving.
134
-
135
141
  Setting to true will return the log probabilities of the tokens that
136
142
  were generated.
137
-
138
143
  Setting to a number will return the log probabilities of the top n
139
144
  tokens that were generated.
140
145
  */
141
- logprobs?: boolean | number;
142
- /**
143
- The suffix that comes after a completion of inserted text.
144
- */
145
- suffix?: string;
146
- /**
147
- A unique identifier representing your end-user, which can help OpenAI to
148
- monitor and detect abuse. Learn more.
149
- */
150
- user?: string;
151
- }
146
+ logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
147
+ }, "strip", z.ZodTypeAny, {
148
+ user?: string | undefined;
149
+ logitBias?: Record<string, number> | undefined;
150
+ logprobs?: number | boolean | undefined;
151
+ echo?: boolean | undefined;
152
+ suffix?: string | undefined;
153
+ }, {
154
+ user?: string | undefined;
155
+ logitBias?: Record<string, number> | undefined;
156
+ logprobs?: number | boolean | undefined;
157
+ echo?: boolean | undefined;
158
+ suffix?: string | undefined;
159
+ }>;
160
+ type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;
152
161
 
153
162
  type OpenAICompletionConfig = {
154
163
  provider: string;
155
- compatibility: 'strict' | 'compatible';
156
164
  headers: () => Record<string, string | undefined>;
157
165
  url: (options: {
158
166
  modelId: string;
@@ -160,17 +168,17 @@ type OpenAICompletionConfig = {
160
168
  }) => string;
161
169
  fetch?: FetchFunction;
162
170
  };
163
- declare class OpenAICompletionLanguageModel implements LanguageModelV1 {
164
- readonly specificationVersion = "v1";
165
- readonly defaultObjectGenerationMode: undefined;
171
+ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
172
+ readonly specificationVersion = "v2";
166
173
  readonly modelId: OpenAICompletionModelId;
167
- readonly settings: OpenAICompletionSettings;
168
174
  private readonly config;
169
- constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
175
+ private get providerOptionsName();
176
+ constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
170
177
  get provider(): string;
178
+ readonly supportedUrls: Record<string, RegExp[]>;
171
179
  private getArgs;
172
- doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
173
- doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
180
+ doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
181
+ doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
174
182
  }
175
183
 
176
184
  type OpenAIConfig = {
@@ -185,114 +193,98 @@ type OpenAIConfig = {
185
193
  };
186
194
 
187
195
  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
188
- interface OpenAIEmbeddingSettings {
189
- /**
190
- Override the maximum number of embeddings per call.
191
- */
192
- maxEmbeddingsPerCall?: number;
193
- /**
194
- Override the parallelism of embedding calls.
195
- */
196
- supportsParallelCalls?: boolean;
196
+ declare const openaiEmbeddingProviderOptions: z.ZodObject<{
197
197
  /**
198
198
  The number of dimensions the resulting output embeddings should have.
199
199
  Only supported in text-embedding-3 and later models.
200
200
  */
201
- dimensions?: number;
201
+ dimensions: z.ZodOptional<z.ZodNumber>;
202
202
  /**
203
203
  A unique identifier representing your end-user, which can help OpenAI to
204
204
  monitor and detect abuse. Learn more.
205
205
  */
206
- user?: string;
207
- }
206
+ user: z.ZodOptional<z.ZodString>;
207
+ }, "strip", z.ZodTypeAny, {
208
+ user?: string | undefined;
209
+ dimensions?: number | undefined;
210
+ }, {
211
+ user?: string | undefined;
212
+ dimensions?: number | undefined;
213
+ }>;
214
+ type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;
208
215
 
209
- declare class OpenAIEmbeddingModel implements EmbeddingModelV1<string> {
210
- readonly specificationVersion = "v1";
216
+ declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
217
+ readonly specificationVersion = "v2";
211
218
  readonly modelId: OpenAIEmbeddingModelId;
219
+ readonly maxEmbeddingsPerCall = 2048;
220
+ readonly supportsParallelCalls = true;
212
221
  private readonly config;
213
- private readonly settings;
214
222
  get provider(): string;
215
- get maxEmbeddingsPerCall(): number;
216
- get supportsParallelCalls(): boolean;
217
- constructor(modelId: OpenAIEmbeddingModelId, settings: OpenAIEmbeddingSettings, config: OpenAIConfig);
218
- doEmbed({ values, headers, abortSignal, }: Parameters<EmbeddingModelV1<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV1<string>['doEmbed']>>>;
223
+ constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
224
+ doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
219
225
  }
220
226
 
221
227
  type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
222
228
  declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
223
229
  declare const hasDefaultResponseFormat: Set<string>;
224
- interface OpenAIImageSettings {
225
- /**
226
- Override the maximum number of images per call (default is dependent on the
227
- model, or 1 for an unknown model).
228
- */
229
- maxImagesPerCall?: number;
230
- }
231
230
 
232
231
  interface OpenAIImageModelConfig extends OpenAIConfig {
233
232
  _internal?: {
234
233
  currentDate?: () => Date;
235
234
  };
236
235
  }
237
- declare class OpenAIImageModel implements ImageModelV1 {
236
+ declare class OpenAIImageModel implements ImageModelV2 {
238
237
  readonly modelId: OpenAIImageModelId;
239
- private readonly settings;
240
238
  private readonly config;
241
- readonly specificationVersion = "v1";
239
+ readonly specificationVersion = "v2";
242
240
  get maxImagesPerCall(): number;
243
241
  get provider(): string;
244
- constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
245
- doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV1['doGenerate']>>>;
242
+ constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
243
+ doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV2['doGenerate']>>>;
246
244
  }
247
245
 
248
246
  type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
249
- type OpenAITranscriptionModelOptions = {
247
+ declare const openAITranscriptionProviderOptions: z.ZodObject<{
250
248
  /**
251
249
  * Additional information to include in the transcription response.
252
250
  */
253
- include?: string[];
251
+ include: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
254
252
  /**
255
253
  * The language of the input audio in ISO-639-1 format.
256
254
  */
257
- language?: string;
255
+ language: z.ZodOptional<z.ZodString>;
258
256
  /**
259
257
  * An optional text to guide the model's style or continue a previous audio segment.
260
258
  */
261
- prompt?: string;
259
+ prompt: z.ZodOptional<z.ZodString>;
262
260
  /**
263
261
  * The sampling temperature, between 0 and 1.
264
262
  * @default 0
265
263
  */
266
- temperature?: number;
264
+ temperature: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
267
265
  /**
268
266
  * The timestamp granularities to populate for this transcription.
269
267
  * @default ['segment']
270
268
  */
271
- timestamp_granularities?: Array<'word' | 'segment'>;
272
- };
273
-
274
- declare const openAIProviderOptionsSchema: z.ZodObject<{
275
- include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodString, "many">>>;
276
- language: z.ZodOptional<z.ZodNullable<z.ZodString>>;
277
- prompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
278
- temperature: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodNumber>>>;
279
- timestampGranularities: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>>;
269
+ timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>;
280
270
  }, "strip", z.ZodTypeAny, {
281
- temperature: number | null;
282
- timestampGranularities: ("word" | "segment")[] | null;
283
- prompt?: string | null | undefined;
284
- include?: string[] | null | undefined;
285
- language?: string | null | undefined;
271
+ prompt?: string | undefined;
272
+ temperature?: number | undefined;
273
+ include?: string[] | undefined;
274
+ language?: string | undefined;
275
+ timestampGranularities?: ("word" | "segment")[] | undefined;
286
276
  }, {
287
- prompt?: string | null | undefined;
288
- temperature?: number | null | undefined;
289
- include?: string[] | null | undefined;
290
- language?: string | null | undefined;
291
- timestampGranularities?: ("word" | "segment")[] | null | undefined;
277
+ prompt?: string | undefined;
278
+ temperature?: number | undefined;
279
+ include?: string[] | undefined;
280
+ language?: string | undefined;
281
+ timestampGranularities?: ("word" | "segment")[] | undefined;
292
282
  }>;
283
+ type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;
284
+
293
285
  type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV1CallOptions, 'providerOptions'> & {
294
286
  providerOptions?: {
295
- openai?: z.infer<typeof openAIProviderOptionsSchema>;
287
+ openai?: OpenAITranscriptionProviderOptions;
296
288
  };
297
289
  };
298
290
  interface OpenAITranscriptionModelConfig extends OpenAIConfig {
@@ -340,17 +332,16 @@ declare class OpenAISpeechModel implements SpeechModelV1 {
340
332
 
341
333
  type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
342
334
 
343
- declare class OpenAIResponsesLanguageModel implements LanguageModelV1 {
344
- readonly specificationVersion = "v1";
345
- readonly defaultObjectGenerationMode = "json";
346
- readonly supportsStructuredOutputs = true;
335
+ declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
336
+ readonly specificationVersion = "v2";
347
337
  readonly modelId: OpenAIResponsesModelId;
348
338
  private readonly config;
349
339
  constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
340
+ readonly supportedUrls: Record<string, RegExp[]>;
350
341
  get provider(): string;
351
342
  private getArgs;
352
- doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
353
- doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
343
+ doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
344
+ doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
354
345
  }
355
346
  declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
356
347
  metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
@@ -364,25 +355,25 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
364
355
  reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
365
356
  }, "strip", z.ZodTypeAny, {
366
357
  user?: string | null | undefined;
358
+ parallelToolCalls?: boolean | null | undefined;
359
+ reasoningEffort?: string | null | undefined;
367
360
  store?: boolean | null | undefined;
368
361
  metadata?: any;
369
- reasoningEffort?: string | null | undefined;
370
362
  instructions?: string | null | undefined;
371
- parallelToolCalls?: boolean | null | undefined;
372
363
  previousResponseId?: string | null | undefined;
373
364
  strictSchemas?: boolean | null | undefined;
374
365
  reasoningSummary?: string | null | undefined;
375
366
  }, {
376
367
  user?: string | null | undefined;
368
+ parallelToolCalls?: boolean | null | undefined;
369
+ reasoningEffort?: string | null | undefined;
377
370
  store?: boolean | null | undefined;
378
371
  metadata?: any;
379
- reasoningEffort?: string | null | undefined;
380
372
  instructions?: string | null | undefined;
381
- parallelToolCalls?: boolean | null | undefined;
382
373
  previousResponseId?: string | null | undefined;
383
374
  strictSchemas?: boolean | null | undefined;
384
375
  reasoningSummary?: string | null | undefined;
385
376
  }>;
386
377
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
387
378
 
388
- export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionSettings, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, hasDefaultResponseFormat, modelMaxImagesPerCall };
379
+ export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };