@zenning/openai 1.6.0 → 2.1.0

package/dist/index.d.ts CHANGED
@@ -1,292 +1,91 @@
- import { LanguageModelV1, ProviderV1, EmbeddingModelV1, ImageModelV1, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
+ import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@zenning/provider';
+ import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z } from 'zod';
+ import { z } from 'zod/v4';

- type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
- interface OpenAIChatSettings {
- /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- Whether to enable parallel function calling during tool use. Default to true.
- */
- parallelToolCalls?: boolean;
- /**
- Whether to use structured outputs. Defaults to false.
-
- When enabled, tool calls and object generation will be strict and follow the provided schema.
- */
- structuredOutputs?: boolean;
- /**
- Whether to use legacy function calling. Defaults to false.
-
- Required by some open source inference engines which do not support the `tools` API. May also
- provide a workaround for `parallelToolCalls` resulting in the provider buffering tool calls,
- which causes `streamObject` to be non-streaming.
-
- Prefer setting `parallelToolCalls: false` over this option.
-
- @deprecated this API is supported but deprecated by OpenAI.
- */
- useLegacyFunctionCalling?: boolean;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
- /**
- Automatically download images and pass the image as data to the model.
- OpenAI supports image URLs for public models, so this is only needed for
- private models or when the images are not publicly accessible.
-
- Defaults to `false`.
- */
- downloadImages?: boolean;
- /**
- Simulates streaming by using a normal generate call and returning it as a stream.
- Enable this if the model that you are using does not support streaming.
-
- Defaults to `false`.
-
- @deprecated Use `simulateStreamingMiddleware` instead.
- */
- simulateStreaming?: boolean;
- /**
- Reasoning effort for reasoning models. Defaults to `medium`.
- */
- reasoningEffort?: 'low' | 'medium' | 'high';
- }
+ type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});

  type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
- interface OpenAICompletionSettings {
- /**
- Echo back the prompt in addition to the completion.
- */
- echo?: boolean;
- /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- The suffix that comes after a completion of inserted text.
- */
- suffix?: string;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
- }
-
- type OpenAICompletionConfig = {
- provider: string;
- compatibility: 'strict' | 'compatible';
- headers: () => Record<string, string | undefined>;
- url: (options: {
- modelId: string;
- path: string;
- }) => string;
- fetch?: FetchFunction;
- };
- declare class OpenAICompletionLanguageModel implements LanguageModelV1 {
- readonly specificationVersion = "v1";
- readonly defaultObjectGenerationMode: undefined;
- readonly modelId: OpenAICompletionModelId;
- readonly settings: OpenAICompletionSettings;
- private readonly config;
- constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
- get provider(): string;
- private getArgs;
- doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
- doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
- }

  type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
- interface OpenAIEmbeddingSettings {
- /**
- Override the maximum number of embeddings per call.
- */
- maxEmbeddingsPerCall?: number;
- /**
- Override the parallelism of embedding calls.
- */
- supportsParallelCalls?: boolean;
- /**
- The number of dimensions the resulting output embeddings should have.
- Only supported in text-embedding-3 and later models.
- */
- dimensions?: number;
- /**
- A unique identifier representing your end-user, which can help OpenAI to
- monitor and detect abuse. Learn more.
- */
- user?: string;
- }

  type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
- interface OpenAIImageSettings {
+
+ declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
  /**
- Override the maximum number of images per call (default is dependent on the
- model, or 1 for an unknown model).
+ * Filters for the search.
  */
- maxImagesPerCall?: number;
- }
-
- type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
-
- type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
-
- declare function codeInterpreter(args?: {
- container?: string | {
- fileIds?: string[];
- };
- }): {
- type: "provider-defined";
- id: "openai.code_interpreter";
- name: "code_interpreter";
- args: {
- container?: string | {
- fileIds?: string[];
- };
- };
- };
-
- declare function fileSearch(args?: {
- vectorStoreIds?: string[];
- maxNumResults?: number;
- ranking?: {
- ranker?: 'auto' | 'default-2024-08-21';
- };
- filters?: {
- key: string;
- type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte';
- value: string | number | boolean;
- } | {
- type: 'and' | 'or';
- filters: any[];
- };
- }): {
- type: "provider-defined";
- id: "openai.file_search";
- name: "file_search";
- args: {
- vectorStoreIds?: string[];
- maxNumResults?: number;
- ranking?: {
- ranker?: "auto" | "default-2024-08-21";
- };
- filters?: {
- key: string;
- type: "eq" | "ne" | "gt" | "gte" | "lt" | "lte";
- value: string | number | boolean;
- } | {
- type: "and" | "or";
- filters: any[];
- };
- };
- };
-
- declare function webSearch(args?: {
  filters?: {
+ /**
+ * Allowed domains for the search.
+ * If not provided, all domains are allowed.
+ * Subdomains of the provided domains are allowed as well.
+ */
  allowedDomains?: string[];
  };
- searchContextSize?: 'low' | 'medium' | 'high';
+ /**
+ * Search context size to use for the web search.
+ * - high: Most comprehensive context, highest cost, slower response
+ * - medium: Balanced context, cost, and latency (default)
+ * - low: Least context, lowest cost, fastest response
+ */
+ searchContextSize?: "low" | "medium" | "high";
+ /**
+ * User location information to provide geographically relevant search results.
+ */
  userLocation?: {
- type: 'approximate';
+ /**
+ * Type of location (always 'approximate')
+ */
+ type: "approximate";
+ /**
+ * Two-letter ISO country code (e.g., 'US', 'GB')
+ */
  country?: string;
+ /**
+ * City name (free text, e.g., 'Minneapolis')
+ */
  city?: string;
+ /**
+ * Region name (free text, e.g., 'Minnesota')
+ */
  region?: string;
+ /**
+ * IANA timezone (e.g., 'America/Chicago')
+ */
  timezone?: string;
  };
- }): {
- type: "provider-defined";
- id: "openai.web_search";
- name: "web_search";
- args: {
- filters?: {
- allowedDomains?: string[];
- };
- searchContextSize?: "low" | "medium" | "high";
- userLocation?: {
- type: "approximate";
- country?: string;
- city?: string;
- region?: string;
- timezone?: string;
- };
- };
- };
+ }>;

- declare function webSearchPreview(args?: {
- searchContextSize?: 'low' | 'medium' | 'high';
- userLocation?: {
- type: 'approximate';
- country?: string;
- city?: string;
- region?: string;
- timezone?: string;
- };
- }): {
- type: "provider-defined";
- id: "openai.web_search_preview";
- name: "web_search_preview";
- args: {
- searchContextSize?: "low" | "medium" | "high";
- userLocation?: {
- type: "approximate";
- country?: string;
- city?: string;
- region?: string;
- timezone?: string;
- };
- };
+ /**
+ * A filter used to compare a specified attribute key to a given value using a defined comparison operation.
+ */
+ type OpenAIResponsesFileSearchToolComparisonFilter = {
+ /**
+ * The key to compare against the value.
+ */
+ key: string;
+ /**
+ * Specifies the comparison operator: eq, ne, gt, gte, lt, lte.
+ */
+ type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte';
+ /**
+ * The value to compare against the attribute key; supports string, number, or boolean types.
+ */
+ value: string | number | boolean;
+ };
+ /**
+ * Combine multiple filters using and or or.
+ */
+ type OpenAIResponsesFileSearchToolCompoundFilter = {
+ /**
+ * Type of operation: and or or.
+ */
+ type: 'and' | 'or';
+ /**
+ * Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
+ */
+ filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter>;
  };

  declare const openaiTools: {
@@ -299,7 +98,22 @@ declare const openaiTools: {
  *
  * Must have name `code_interpreter`.
  */
- codeInterpreter: typeof codeInterpreter;
+ codeInterpreter: (args?: {
+ container?: string | {
+ fileIds?: string[];
+ };
+ }) => _ai_sdk_provider_utils.Tool<{
+ code?: string | null;
+ containerId: string;
+ }, {
+ outputs?: Array<{
+ type: "logs";
+ logs: string;
+ } | {
+ type: "image";
+ url: string;
+ }> | null;
+ }>;
  /**
  * File search is a tool available in the Responses API. It enables models to
  * retrieve information in a knowledge base of previously uploaded files through
@@ -312,7 +126,53 @@ declare const openaiTools: {
  * @param ranking - The ranking options to use for the file search.
  * @param filters - The filters to use for the file search.
  */
- fileSearch: typeof fileSearch;
+ fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{}, {
+ queries: string[];
+ results: null | {
+ attributes: Record<string, unknown>;
+ fileId: string;
+ filename: string;
+ score: number;
+ text: string;
+ }[];
+ }, {
+ vectorStoreIds: string[];
+ maxNumResults?: number;
+ ranking?: {
+ ranker?: string;
+ scoreThreshold?: number;
+ };
+ filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter;
+ }>;
+ /**
+ * The image generation tool allows you to generate images using a text prompt,
+ * and optionally image inputs. It leverages the GPT Image model,
+ * and automatically optimizes text inputs for improved performance.
+ *
+ * Must have name `image_generation`.
+ *
+ * @param size - Image dimensions (e.g., 1024x1024, 1024x1536)
+ * @param quality - Rendering quality (e.g. low, medium, high)
+ * @param format - File output format
+ * @param compression - Compression level (0-100%) for JPEG and WebP formats
+ * @param background - Transparent or opaque
+ */
+ imageGeneration: (args?: {
+ background?: "auto" | "opaque" | "transparent";
+ inputFidelity?: "low" | "high";
+ inputImageMask?: {
+ fileId?: string;
+ imageUrl?: string;
+ };
+ model?: string;
+ moderation?: "auto";
+ outputCompression?: number;
+ outputFormat?: "png" | "jpeg" | "webp";
+ quality?: "auto" | "low" | "medium" | "high";
+ size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024";
+ }) => _ai_sdk_provider_utils.Tool<{}, {
+ result: string;
+ }>;
  /**
  * Web search allows models to access up-to-date information from the internet
  * and provide answers with sourced citations.
@@ -324,7 +184,16 @@ declare const openaiTools: {
  *
  * @deprecated Use `webSearch` instead.
  */
- webSearchPreview: typeof webSearchPreview;
+ webSearchPreview: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
+ searchContextSize?: "low" | "medium" | "high";
+ userLocation?: {
+ type: "approximate";
+ country?: string;
+ city?: string;
+ region?: string;
+ timezone?: string;
+ };
+ }>;
  /**
  * Web search allows models to access up-to-date information from the internet
  * and provide answers with sourced citations.
@@ -335,61 +204,61 @@ declare const openaiTools: {
  * @param searchContextSize - The search context size to use for the web search.
  * @param userLocation - The user location to use for the web search.
  */
- webSearch: typeof webSearch;
+ webSearch: (args?: Parameters<typeof webSearchToolFactory>[0]) => _ai_sdk_provider_utils.Tool<{}, unknown>;
  };

+ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+
  type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});

- interface OpenAIProvider extends ProviderV1 {
- (modelId: 'gpt-3.5-turbo-instruct', settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
- (modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): LanguageModelV1;
+ type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
+
+ interface OpenAIProvider extends ProviderV2 {
+ (modelId: OpenAIResponsesModelId): LanguageModelV2;
  /**
  Creates an OpenAI model for text generation.
  */
- languageModel(modelId: 'gpt-3.5-turbo-instruct', settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
- languageModel(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): LanguageModelV1;
+ languageModel(modelId: OpenAIResponsesModelId): LanguageModelV2;
  /**
  Creates an OpenAI chat model for text generation.
  */
- chat(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): LanguageModelV1;
+ chat(modelId: OpenAIChatModelId): LanguageModelV2;
  /**
  Creates an OpenAI responses API model for text generation.
  */
- responses(modelId: OpenAIResponsesModelId): LanguageModelV1;
+ responses(modelId: OpenAIResponsesModelId): LanguageModelV2;
  /**
  Creates an OpenAI completion model for text generation.
  */
- completion(modelId: OpenAICompletionModelId, settings?: OpenAICompletionSettings): LanguageModelV1;
+ completion(modelId: OpenAICompletionModelId): LanguageModelV2;
  /**
  Creates a model for text embeddings.
  */
- embedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.
-
- @deprecated Use `textEmbeddingModel` instead.
  */
- textEmbedding(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
  Creates a model for text embeddings.
  */
- textEmbeddingModel(modelId: OpenAIEmbeddingModelId, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
  Creates a model for image generation.
  */
- image(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV1;
+ image(modelId: OpenAIImageModelId): ImageModelV2;
  /**
  Creates a model for image generation.
  */
- imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV1;
+ imageModel(modelId: OpenAIImageModelId): ImageModelV2;
  /**
  Creates a model for transcription.
  */
- transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV1;
+ transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV2;
  /**
  Creates a model for speech generation.
  */
- speech(modelId: OpenAISpeechModelId): SpeechModelV1;
+ speech(modelId: OpenAISpeechModelId): SpeechModelV2;
  /**
  OpenAI-specific tools.
  */
@@ -417,12 +286,6 @@ interface OpenAIProviderSettings {
  */
  headers?: Record<string, string>;
  /**
- OpenAI compatibility mode. Should be set to `strict` when using the OpenAI API,
- and `compatible` when using 3rd party providers. In `compatible` mode, newer
- information such as streamOptions are not being sent. Defaults to 'compatible'.
- */
- compatibility?: 'strict' | 'compatible';
- /**
  Provider name. Overrides the `openai` default name for 3rd party providers.
  */
  name?: string;
@@ -437,47 +300,40 @@ Create an OpenAI provider instance.
  */
  declare function createOpenAI(options?: OpenAIProviderSettings): OpenAIProvider;
  /**
- Default OpenAI provider instance. It uses 'strict' compatibility mode.
+ Default OpenAI provider instance.
  */
  declare const openai: OpenAIProvider;

  declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
+ include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
+ "file_search_call.results": "file_search_call.results";
+ "message.output_text.logprobs": "message.output_text.logprobs";
+ "reasoning.encrypted_content": "reasoning.encrypted_content";
+ }>>>>;
+ instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
+ maxToolCalls: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
  metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
  parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
- include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<["file_search_call.results", "code_interpreter_call.output", "web_search_call.results", "message.input_image.image_url", "computer_call_output.output.image_url", "reasoning.encrypted_content", "message.output_text.logprobs"]>, "many">>>;
  previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- forceNoTemperature: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
- store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
- user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
- instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- }, "strip", z.ZodTypeAny, {
- user?: string | null | undefined;
- forceNoTemperature?: boolean | null | undefined;
- store?: boolean | null | undefined;
- metadata?: any;
- reasoningEffort?: string | null | undefined;
- include?: ("file_search_call.results" | "code_interpreter_call.output" | "web_search_call.results" | "message.input_image.image_url" | "computer_call_output.output.image_url" | "reasoning.encrypted_content" | "message.output_text.logprobs")[] | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
- previousResponseId?: string | null | undefined;
- strictSchemas?: boolean | null | undefined;
- instructions?: string | null | undefined;
- reasoningSummary?: string | null | undefined;
- }, {
- user?: string | null | undefined;
- forceNoTemperature?: boolean | null | undefined;
- store?: boolean | null | undefined;
- metadata?: any;
- reasoningEffort?: string | null | undefined;
- include?: ("file_search_call.results" | "code_interpreter_call.output" | "web_search_call.results" | "message.input_image.image_url" | "computer_call_output.output.image_url" | "reasoning.encrypted_content" | "message.output_text.logprobs")[] | null | undefined;
- parallelToolCalls?: boolean | null | undefined;
- previousResponseId?: string | null | undefined;
- strictSchemas?: boolean | null | undefined;
- instructions?: string | null | undefined;
- reasoningSummary?: string | null | undefined;
- }>;
+ safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+ auto: "auto";
+ flex: "flex";
+ priority: "priority";
+ }>>>;
+ store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+ strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+ textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+ low: "low";
+ medium: "medium";
+ high: "high";
+ }>>>;
+ user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ }, z.core.$strip>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

  export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };
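
Reading the diff as a migration: the provider now implements ProviderV2 and hands back LanguageModelV2/EmbeddingModelV2/ImageModelV2/TranscriptionModelV2/SpeechModelV2 specifications, every per-model settings object (OpenAIChatSettings, OpenAICompletionSettings, OpenAIEmbeddingSettings, OpenAIImageSettings) is gone, and the provider-level `compatibility` option has been removed. Below is a minimal sketch of the 2.x call shape built only from declarations visible in this diff; anything not in the file is flagged as an assumption in the comments.

import { createOpenAI, type OpenAIResponsesProviderOptions } from '@zenning/openai';

// 1.x accepted `compatibility: 'strict' | 'compatible'` here; 2.x removed it.
const provider = createOpenAI({
  headers: { 'OpenAI-Organization': 'org_123' }, // hypothetical organization id
});

// 1.x: provider.chat('gpt-4o', { user: 'user-123', parallelToolCalls: false })
// 2.x: model factories take only the model id.
const chatModel = provider.chat('gpt-5');
const embeddingModel = provider.textEmbeddingModel('text-embedding-3-small');

// Per-call tuning that used to live in the settings objects is now typed by the
// zod/v4 schema `openaiResponsesProviderOptionsSchema`.
const responsesOptions: OpenAIResponsesProviderOptions = {
  reasoningEffort: 'medium',
  serviceTier: 'flex',
  textVerbosity: 'low',
  strictJsonSchema: true,
  user: 'user-123',
};

Of the old OpenAIChatSettings fields, `user`, `logprobs`, `parallelToolCalls`, and `reasoningEffort` reappear in the responses options schema; `logitBias`, `structuredOutputs`, `useLegacyFunctionCalling`, `downloadImages`, and `simulateStreaming` have no counterpart in this file. Within the options schema itself, `forceNoTemperature` and `strictSchemas` are dropped (`strictJsonSchema` is the closest 2.x analogue of the latter), while `promptCacheKey`, `safetyIdentifier`, `serviceTier`, `textVerbosity`, and `maxToolCalls` are new, and the `include` enum narrows to three values. The tool helpers likewise change from standalone functions into provider-defined tool factories. The provider interface ends with an "OpenAI-specific tools." member whose name is cut off at the hunk boundary above, so the `provider.tools` access path below is an assumption; the argument shapes come directly from the factory signatures in this diff. Continuing the snippet:

// `provider.tools` is an assumed property name; ids and domains are hypothetical.
const tools = {
  web_search: provider.tools.webSearch({
    filters: { allowedDomains: ['example.com'] },
    searchContextSize: 'medium',
    userLocation: { type: 'approximate', country: 'US', timezone: 'America/Chicago' },
  }),
  file_search: provider.tools.fileSearch({
    vectorStoreIds: ['vs_123'],
    maxNumResults: 5,
    filters: { key: 'category', type: 'eq', value: 'docs' },
  }),
  image_generation: provider.tools.imageGeneration({
    size: '1024x1024',
    quality: 'high',
    outputFormat: 'png',
  }),
};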