@ai-sdk/openai 2.0.0-beta.1 → 2.0.0-beta.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +75 -0
- package/dist/index.d.mts +12 -27
- package/dist/index.d.ts +12 -27
- package/dist/index.js +592 -397
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +426 -229
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +35 -205
- package/dist/internal/index.d.ts +35 -205
- package/dist/internal/index.js +584 -389
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +421 -224
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
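Most of the churn in the declaration diffs below comes from regenerating the option schemas against zod/v4 instead of zod: the runtime schemas keep their shape, but the emitted types now carry a z.core.$strip tail instead of zod v3's "strip", z.ZodTypeAny plus explicit output/input object types, enums are declared as keyed objects rather than tuples, and the inline JSDoc no longer appears in the generated declarations. A minimal TypeScript sketch of the new emit shape; the exampleOptions schema is illustrative and not part of the package:

import { z } from 'zod/v4';

// Illustrative schema mirroring the shapes in the diffs below (not an export of @ai-sdk/openai).
const exampleOptions = z.object({
  serviceTier: z.enum(['auto', 'flex']).optional(),
  strictJsonSchema: z.boolean().optional(),
});

// zod v4 declares this roughly as:
//   z.ZodObject<{
//       serviceTier: z.ZodOptional<z.ZodEnum<{ auto: "auto"; flex: "flex" }>>;
//       strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
//   }, z.core.$strip>
// where zod v3 emitted z.ZodObject<Shape, "strip", z.ZodTypeAny, Output, Input>.
type ExampleOptions = z.infer<typeof exampleOptions>;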
package/dist/internal/index.d.mts
CHANGED
@@ -1,93 +1,29 @@
 import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
-import { z } from 'zod';
+import { z } from 'zod/v4';

 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
 declare const openaiProviderOptions: z.ZodObject<{
-
-
-     *
-     * Accepts a JSON object that maps tokens (specified by their token ID in
-     * the GPT tokenizer) to an associated bias value from -100 to 100.
-     */
-    logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
-    /**
-     * Return the log probabilities of the tokens.
-     *
-     * Setting to true will return the log probabilities of the tokens that
-     * were generated.
-     *
-     * Setting to a number will return the log probabilities of the top n
-     * tokens that were generated.
-     */
-    logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
-    /**
-     * Whether to enable parallel function calling during tool use. Default to true.
-     */
+    logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
     parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
-    /**
-     * A unique identifier representing your end-user, which can help OpenAI to
-     * monitor and detect abuse.
-     */
     user: z.ZodOptional<z.ZodString>;
-
-
-
-
-
-     * Maximum number of completion tokens to generate. Useful for reasoning models.
-     */
+    reasoningEffort: z.ZodOptional<z.ZodEnum<{
+        low: "low";
+        medium: "medium";
+        high: "high";
+    }>>;
     maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
-    /**
-     * Whether to enable persistence in responses API.
-     */
     store: z.ZodOptional<z.ZodBoolean>;
-    /**
-     * Metadata to associate with the request.
-     */
     metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
-    /**
-     * Parameters for prediction mode.
-     */
     prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
-    /**
-     * Whether to use structured outputs.
-     *
-     * @default true
-     */
     structuredOutputs: z.ZodOptional<z.ZodBoolean>;
-
-
-
-
-
-
-    serviceTier: z.ZodOptional<z.ZodEnum<["auto", "flex"]>>;
-}, "strip", z.ZodTypeAny, {
-    user?: string | undefined;
-    logitBias?: Record<number, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    parallelToolCalls?: boolean | undefined;
-    reasoningEffort?: "low" | "medium" | "high" | undefined;
-    maxCompletionTokens?: number | undefined;
-    store?: boolean | undefined;
-    metadata?: Record<string, string> | undefined;
-    prediction?: Record<string, any> | undefined;
-    structuredOutputs?: boolean | undefined;
-    serviceTier?: "auto" | "flex" | undefined;
-}, {
-    user?: string | undefined;
-    logitBias?: Record<number, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    parallelToolCalls?: boolean | undefined;
-    reasoningEffort?: "low" | "medium" | "high" | undefined;
-    maxCompletionTokens?: number | undefined;
-    store?: boolean | undefined;
-    metadata?: Record<string, string> | undefined;
-    prediction?: Record<string, any> | undefined;
-    structuredOutputs?: boolean | undefined;
-    serviceTier?: "auto" | "flex" | undefined;
-}>;
+    serviceTier: z.ZodOptional<z.ZodEnum<{
+        auto: "auto";
+        flex: "flex";
+    }>>;
+    strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
+}, z.core.$strip>;
 type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;

 type OpenAIChatConfig = {
@@ -115,57 +51,12 @@ declare class OpenAIChatLanguageModel implements LanguageModelV2 {

 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 declare const openaiCompletionProviderOptions: z.ZodObject<{
-    /**
-    Echo back the prompt in addition to the completion.
-     */
     echo: z.ZodOptional<z.ZodBoolean>;
-    /**
-    Modify the likelihood of specified tokens appearing in the completion.
-
-    Accepts a JSON object that maps tokens (specified by their token ID in
-    the GPT tokenizer) to an associated bias value from -100 to 100. You
-    can use this tokenizer tool to convert text to token IDs. Mathematically,
-    the bias is added to the logits generated by the model prior to sampling.
-    The exact effect will vary per model, but values between -1 and 1 should
-    decrease or increase likelihood of selection; values like -100 or 100
-    should result in a ban or exclusive selection of the relevant token.
-
-    As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
-    token from being generated.
-     */
     logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
-    /**
-    The suffix that comes after a completion of inserted text.
-     */
     suffix: z.ZodOptional<z.ZodString>;
-    /**
-    A unique identifier representing your end-user, which can help OpenAI to
-    monitor and detect abuse. Learn more.
-     */
     user: z.ZodOptional<z.ZodString>;
-
-
-    the response size and can slow down response times. However, it can
-    be useful to better understand how the model is behaving.
-    Setting to true will return the log probabilities of the tokens that
-    were generated.
-    Setting to a number will return the log probabilities of the top n
-    tokens that were generated.
-     */
-    logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
-}, "strip", z.ZodTypeAny, {
-    user?: string | undefined;
-    logitBias?: Record<string, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    echo?: boolean | undefined;
-    suffix?: string | undefined;
-}, {
-    user?: string | undefined;
-    logitBias?: Record<string, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    echo?: boolean | undefined;
-    suffix?: string | undefined;
-}>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
+}, z.core.$strip>;
 type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;

 type OpenAICompletionConfig = {
@@ -203,23 +94,9 @@ type OpenAIConfig = {

 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
 declare const openaiEmbeddingProviderOptions: z.ZodObject<{
-    /**
-    The number of dimensions the resulting output embeddings should have.
-    Only supported in text-embedding-3 and later models.
-     */
     dimensions: z.ZodOptional<z.ZodNumber>;
-    /**
-    A unique identifier representing your end-user, which can help OpenAI to
-    monitor and detect abuse. Learn more.
-     */
     user: z.ZodOptional<z.ZodString>;
-},
-    user?: string | undefined;
-    dimensions?: number | undefined;
-}, {
-    user?: string | undefined;
-    dimensions?: number | undefined;
-}>;
+}, z.core.$strip>;
 type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;

 declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
@@ -254,41 +131,15 @@ declare class OpenAIImageModel implements ImageModelV2 {

 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
 declare const openAITranscriptionProviderOptions: z.ZodObject<{
-
-     * Additional information to include in the transcription response.
-     */
-    include: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
-    /**
-     * The language of the input audio in ISO-639-1 format.
-     */
+    include: z.ZodOptional<z.ZodArray<z.ZodString>>;
     language: z.ZodOptional<z.ZodString>;
-    /**
-     * An optional text to guide the model's style or continue a previous audio segment.
-     */
     prompt: z.ZodOptional<z.ZodString>;
-    /**
-     * The sampling temperature, between 0 and 1.
-     * @default 0
-     */
     temperature: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
-
-
-
-
-
-}, "strip", z.ZodTypeAny, {
-    prompt?: string | undefined;
-    temperature?: number | undefined;
-    include?: string[] | undefined;
-    language?: string | undefined;
-    timestampGranularities?: ("word" | "segment")[] | undefined;
-}, {
-    prompt?: string | undefined;
-    temperature?: number | undefined;
-    include?: string[] | undefined;
-    language?: string | undefined;
-    timestampGranularities?: ("word" | "segment")[] | undefined;
-}>;
+    timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<{
+        word: "word";
+        segment: "segment";
+    }>>>>;
+}, z.core.$strip>;
 type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;

 type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV2CallOptions, 'providerOptions'> & {
@@ -316,13 +167,7 @@ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string &
 declare const OpenAIProviderOptionsSchema: z.ZodObject<{
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     speed: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
-},
-    instructions?: string | null | undefined;
-    speed?: number | null | undefined;
-}, {
-    instructions?: string | null | undefined;
-    speed?: number | null | undefined;
-}>;
+}, z.core.$strip>;
 type OpenAISpeechCallOptions = z.infer<typeof OpenAIProviderOptionsSchema>;
 interface OpenAISpeechModelConfig extends OpenAIConfig {
     _internal?: {
@@ -339,7 +184,8 @@ declare class OpenAISpeechModel implements SpeechModelV2 {
     doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
 }

-
+declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o1-mini", "o1-mini-2024-09-12", "o1-preview", "o1-preview-2024-09-12", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview"];
+type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});

 declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     readonly specificationVersion = "v2";
@@ -359,33 +205,17 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-
+    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<
-
-
-
-
-
-
-
-    instructions?: string | null | undefined;
-    previousResponseId?: string | null | undefined;
-    strictSchemas?: boolean | null | undefined;
-    reasoningSummary?: string | null | undefined;
-}, {
-    user?: string | null | undefined;
-    parallelToolCalls?: boolean | null | undefined;
-    reasoningEffort?: string | null | undefined;
-    store?: boolean | null | undefined;
-    metadata?: any;
-    serviceTier?: "auto" | "flex" | null | undefined;
-    instructions?: string | null | undefined;
-    previousResponseId?: string | null | undefined;
-    strictSchemas?: boolean | null | undefined;
-    reasoningSummary?: string | null | undefined;
-}>;
+    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+        auto: "auto";
+        flex: "flex";
+    }>>>;
+    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
+        "reasoning.encrypted_content": "reasoning.encrypted_content";
+    }>>>>;
+}, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

 export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
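The chat options schema above gains strictJsonSchema plus enum-typed reasoningEffort and serviceTier fields. A minimal usage sketch, assuming the AI SDK's generateText call and its per-provider providerOptions shape; only the option names are taken from openaiProviderOptions, everything else is illustrative:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch only: option names come from openaiProviderOptions; the call shape is assumed.
async function main() {
  const { text } = await generateText({
    model: openai.chat('o3-mini'),
    prompt: 'Summarize the latest release notes in two sentences.',
    providerOptions: {
      openai: {
        reasoningEffort: 'low', // "low" | "medium" | "high"
        serviceTier: 'flex', // "auto" | "flex"
        strictJsonSchema: true, // new in these betas
      },
    },
  });
  console.log(text);
}

main().catch(console.error);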
package/dist/internal/index.d.ts
CHANGED
@@ -1,93 +1,29 @@
 import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
-import { z } from 'zod';
+import { z } from 'zod/v4';

 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
 declare const openaiProviderOptions: z.ZodObject<{
-
-
-     *
-     * Accepts a JSON object that maps tokens (specified by their token ID in
-     * the GPT tokenizer) to an associated bias value from -100 to 100.
-     */
-    logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
-    /**
-     * Return the log probabilities of the tokens.
-     *
-     * Setting to true will return the log probabilities of the tokens that
-     * were generated.
-     *
-     * Setting to a number will return the log probabilities of the top n
-     * tokens that were generated.
-     */
-    logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
-    /**
-     * Whether to enable parallel function calling during tool use. Default to true.
-     */
+    logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
     parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
-    /**
-     * A unique identifier representing your end-user, which can help OpenAI to
-     * monitor and detect abuse.
-     */
     user: z.ZodOptional<z.ZodString>;
-
-
-
-
-
-     * Maximum number of completion tokens to generate. Useful for reasoning models.
-     */
+    reasoningEffort: z.ZodOptional<z.ZodEnum<{
+        low: "low";
+        medium: "medium";
+        high: "high";
+    }>>;
     maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
-    /**
-     * Whether to enable persistence in responses API.
-     */
     store: z.ZodOptional<z.ZodBoolean>;
-    /**
-     * Metadata to associate with the request.
-     */
     metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
-    /**
-     * Parameters for prediction mode.
-     */
     prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
-    /**
-     * Whether to use structured outputs.
-     *
-     * @default true
-     */
     structuredOutputs: z.ZodOptional<z.ZodBoolean>;
-
-
-
-
-
-
-    serviceTier: z.ZodOptional<z.ZodEnum<["auto", "flex"]>>;
-}, "strip", z.ZodTypeAny, {
-    user?: string | undefined;
-    logitBias?: Record<number, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    parallelToolCalls?: boolean | undefined;
-    reasoningEffort?: "low" | "medium" | "high" | undefined;
-    maxCompletionTokens?: number | undefined;
-    store?: boolean | undefined;
-    metadata?: Record<string, string> | undefined;
-    prediction?: Record<string, any> | undefined;
-    structuredOutputs?: boolean | undefined;
-    serviceTier?: "auto" | "flex" | undefined;
-}, {
-    user?: string | undefined;
-    logitBias?: Record<number, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    parallelToolCalls?: boolean | undefined;
-    reasoningEffort?: "low" | "medium" | "high" | undefined;
-    maxCompletionTokens?: number | undefined;
-    store?: boolean | undefined;
-    metadata?: Record<string, string> | undefined;
-    prediction?: Record<string, any> | undefined;
-    structuredOutputs?: boolean | undefined;
-    serviceTier?: "auto" | "flex" | undefined;
-}>;
+    serviceTier: z.ZodOptional<z.ZodEnum<{
+        auto: "auto";
+        flex: "flex";
+    }>>;
+    strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
+}, z.core.$strip>;
 type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;

 type OpenAIChatConfig = {
@@ -115,57 +51,12 @@ declare class OpenAIChatLanguageModel implements LanguageModelV2 {

 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 declare const openaiCompletionProviderOptions: z.ZodObject<{
-    /**
-    Echo back the prompt in addition to the completion.
-     */
     echo: z.ZodOptional<z.ZodBoolean>;
-    /**
-    Modify the likelihood of specified tokens appearing in the completion.
-
-    Accepts a JSON object that maps tokens (specified by their token ID in
-    the GPT tokenizer) to an associated bias value from -100 to 100. You
-    can use this tokenizer tool to convert text to token IDs. Mathematically,
-    the bias is added to the logits generated by the model prior to sampling.
-    The exact effect will vary per model, but values between -1 and 1 should
-    decrease or increase likelihood of selection; values like -100 or 100
-    should result in a ban or exclusive selection of the relevant token.
-
-    As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
-    token from being generated.
-     */
     logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
-    /**
-    The suffix that comes after a completion of inserted text.
-     */
     suffix: z.ZodOptional<z.ZodString>;
-    /**
-    A unique identifier representing your end-user, which can help OpenAI to
-    monitor and detect abuse. Learn more.
-     */
     user: z.ZodOptional<z.ZodString>;
-
-
-    the response size and can slow down response times. However, it can
-    be useful to better understand how the model is behaving.
-    Setting to true will return the log probabilities of the tokens that
-    were generated.
-    Setting to a number will return the log probabilities of the top n
-    tokens that were generated.
-     */
-    logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
-}, "strip", z.ZodTypeAny, {
-    user?: string | undefined;
-    logitBias?: Record<string, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    echo?: boolean | undefined;
-    suffix?: string | undefined;
-}, {
-    user?: string | undefined;
-    logitBias?: Record<string, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    echo?: boolean | undefined;
-    suffix?: string | undefined;
-}>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
+}, z.core.$strip>;
 type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;

 type OpenAICompletionConfig = {
@@ -203,23 +94,9 @@ type OpenAIConfig = {

 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
 declare const openaiEmbeddingProviderOptions: z.ZodObject<{
-    /**
-    The number of dimensions the resulting output embeddings should have.
-    Only supported in text-embedding-3 and later models.
-     */
     dimensions: z.ZodOptional<z.ZodNumber>;
-    /**
-    A unique identifier representing your end-user, which can help OpenAI to
-    monitor and detect abuse. Learn more.
-     */
     user: z.ZodOptional<z.ZodString>;
-},
-    user?: string | undefined;
-    dimensions?: number | undefined;
-}, {
-    user?: string | undefined;
-    dimensions?: number | undefined;
-}>;
+}, z.core.$strip>;
 type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;

 declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
@@ -254,41 +131,15 @@ declare class OpenAIImageModel implements ImageModelV2 {

 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
 declare const openAITranscriptionProviderOptions: z.ZodObject<{
-
-     * Additional information to include in the transcription response.
-     */
-    include: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
-    /**
-     * The language of the input audio in ISO-639-1 format.
-     */
+    include: z.ZodOptional<z.ZodArray<z.ZodString>>;
     language: z.ZodOptional<z.ZodString>;
-    /**
-     * An optional text to guide the model's style or continue a previous audio segment.
-     */
     prompt: z.ZodOptional<z.ZodString>;
-    /**
-     * The sampling temperature, between 0 and 1.
-     * @default 0
-     */
     temperature: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
-
-
-
-
-
-}, "strip", z.ZodTypeAny, {
-    prompt?: string | undefined;
-    temperature?: number | undefined;
-    include?: string[] | undefined;
-    language?: string | undefined;
-    timestampGranularities?: ("word" | "segment")[] | undefined;
-}, {
-    prompt?: string | undefined;
-    temperature?: number | undefined;
-    include?: string[] | undefined;
-    language?: string | undefined;
-    timestampGranularities?: ("word" | "segment")[] | undefined;
-}>;
+    timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<{
+        word: "word";
+        segment: "segment";
+    }>>>>;
+}, z.core.$strip>;
 type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;

 type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV2CallOptions, 'providerOptions'> & {
@@ -316,13 +167,7 @@ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string &
 declare const OpenAIProviderOptionsSchema: z.ZodObject<{
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     speed: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
-},
-    instructions?: string | null | undefined;
-    speed?: number | null | undefined;
-}, {
-    instructions?: string | null | undefined;
-    speed?: number | null | undefined;
-}>;
+}, z.core.$strip>;
 type OpenAISpeechCallOptions = z.infer<typeof OpenAIProviderOptionsSchema>;
 interface OpenAISpeechModelConfig extends OpenAIConfig {
     _internal?: {
@@ -339,7 +184,8 @@ declare class OpenAISpeechModel implements SpeechModelV2 {
     doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
 }

-
+declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o1-mini", "o1-mini-2024-09-12", "o1-preview", "o1-preview-2024-09-12", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview"];
+type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});

 declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     readonly specificationVersion = "v2";
@@ -359,33 +205,17 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-
+    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<
-
-
-
-
-
-
-
-    instructions?: string | null | undefined;
-    previousResponseId?: string | null | undefined;
-    strictSchemas?: boolean | null | undefined;
-    reasoningSummary?: string | null | undefined;
-}, {
-    user?: string | null | undefined;
-    parallelToolCalls?: boolean | null | undefined;
-    reasoningEffort?: string | null | undefined;
-    store?: boolean | null | undefined;
-    metadata?: any;
-    serviceTier?: "auto" | "flex" | null | undefined;
-    instructions?: string | null | undefined;
-    previousResponseId?: string | null | undefined;
-    strictSchemas?: boolean | null | undefined;
-    reasoningSummary?: string | null | undefined;
-}>;
+    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+        auto: "auto";
+        flex: "flex";
+    }>>>;
+    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
+        "reasoning.encrypted_content": "reasoning.encrypted_content";
+    }>>>>;
+}, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

 export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
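The Responses API options pick up the same serviceTier enum, a strictJsonSchema flag, and an include entry for reasoning.encrypted_content. Another hedged sketch, assuming the AI SDK's generateText and the provider's responses model factory; only the field names are taken from openaiResponsesProviderOptionsSchema:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch only: field names come from openaiResponsesProviderOptionsSchema; the call shape is assumed.
async function main() {
  const result = await generateText({
    model: openai.responses('o4-mini'),
    prompt: 'Explain the move from zod to zod/v4 in one sentence.',
    providerOptions: {
      openai: {
        serviceTier: 'auto',
        strictJsonSchema: true,
        // request encrypted reasoning content alongside the response
        include: ['reasoning.encrypted_content'],
      },
    },
  });
  console.log(result.text);
}

main().catch(console.error);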