@ai-sdk/openai 2.0.0-beta.1 → 2.0.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +10 -0
- package/dist/index.d.mts +6 -25
- package/dist/index.d.ts +6 -25
- package/dist/index.js +320 -320
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +16 -16
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +28 -203
- package/dist/internal/index.d.ts +28 -203
- package/dist/internal/index.js +320 -320
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +16 -16
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
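The declaration changes below stem from one underlying change: the provider's option schemas are now built against zod 4, imported through the 'zod/v4' subpath, instead of zod 3. That changes the shape of the emitted declarations: object schemas are parameterized as z.ZodObject<Shape, z.core.$strip> rather than zod 3's five-argument form, and enums are keyed by their values. A minimal sketch of the difference, assuming a zod release (3.25 or later) that ships the v4 implementation under the 'zod/v4' subpath:

import { z } from 'zod/v4';

// The same runtime calls the provider makes; only the inferred declaration types differ.
const serviceTier = z.enum(['auto', 'flex']);
// zod 3 declaration: z.ZodEnum<["auto", "flex"]>
// zod 4 declaration: z.ZodEnum<{ auto: "auto"; flex: "flex" }>

const options = z.object({ serviceTier: serviceTier.optional() });
// zod 3 declaration: z.ZodObject<{...}, "strip", z.ZodTypeAny, { ...output }, { ...input }>
// zod 4 declaration: z.ZodObject<{...}, z.core.$strip>

type ServiceTier = z.infer<typeof serviceTier>; // "auto" | "flex" in both versions

The inferred option types (OpenAIProviderOptions and friends) are therefore unchanged for consumers; only the schema declarations become more compact.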
package/dist/internal/index.d.mts CHANGED

@@ -1,93 +1,28 @@
 import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
-import { z } from 'zod';
+import { z } from 'zod/v4';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
 declare const openaiProviderOptions: z.ZodObject<{
-     *
-     * Accepts a JSON object that maps tokens (specified by their token ID in
-     * the GPT tokenizer) to an associated bias value from -100 to 100.
-     */
-    logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
-    /**
-     * Return the log probabilities of the tokens.
-     *
-     * Setting to true will return the log probabilities of the tokens that
-     * were generated.
-     *
-     * Setting to a number will return the log probabilities of the top n
-     * tokens that were generated.
-     */
-    logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
-    /**
-     * Whether to enable parallel function calling during tool use. Default to true.
-     */
+    logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
     parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
-    /**
-     * A unique identifier representing your end-user, which can help OpenAI to
-     * monitor and detect abuse.
-     */
     user: z.ZodOptional<z.ZodString>;
-     * Maximum number of completion tokens to generate. Useful for reasoning models.
-     */
+    reasoningEffort: z.ZodOptional<z.ZodEnum<{
+        low: "low";
+        medium: "medium";
+        high: "high";
+    }>>;
     maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
-    /**
-     * Whether to enable persistence in responses API.
-     */
     store: z.ZodOptional<z.ZodBoolean>;
-    /**
-     * Metadata to associate with the request.
-     */
     metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
-    /**
-     * Parameters for prediction mode.
-     */
     prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
-    /**
-     * Whether to use structured outputs.
-     *
-     * @default true
-     */
     structuredOutputs: z.ZodOptional<z.ZodBoolean>;
-     */
-    serviceTier: z.ZodOptional<z.ZodEnum<["auto", "flex"]>>;
-}, "strip", z.ZodTypeAny, {
-    user?: string | undefined;
-    logitBias?: Record<number, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    parallelToolCalls?: boolean | undefined;
-    reasoningEffort?: "low" | "medium" | "high" | undefined;
-    maxCompletionTokens?: number | undefined;
-    store?: boolean | undefined;
-    metadata?: Record<string, string> | undefined;
-    prediction?: Record<string, any> | undefined;
-    structuredOutputs?: boolean | undefined;
-    serviceTier?: "auto" | "flex" | undefined;
-}, {
-    user?: string | undefined;
-    logitBias?: Record<number, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    parallelToolCalls?: boolean | undefined;
-    reasoningEffort?: "low" | "medium" | "high" | undefined;
-    maxCompletionTokens?: number | undefined;
-    store?: boolean | undefined;
-    metadata?: Record<string, string> | undefined;
-    prediction?: Record<string, any> | undefined;
-    structuredOutputs?: boolean | undefined;
-    serviceTier?: "auto" | "flex" | undefined;
-}>;
+    serviceTier: z.ZodOptional<z.ZodEnum<{
+        auto: "auto";
+        flex: "flex";
+    }>>;
+}, z.core.$strip>;
 type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
 
 type OpenAIChatConfig = {
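At the call site the chat options are unchanged apart from the tighter typing above. A hedged sketch of passing them through the AI SDK's providerOptions field (this assumes the AI SDK v5 beta generateText API from the 'ai' package; the model and prompt are illustrative):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai('o3-mini'),
  prompt: 'Summarize the release notes.',
  providerOptions: {
    openai: {
      reasoningEffort: 'high',     // "low" | "medium" | "high"
      serviceTier: 'flex',         // "auto" | "flex"
      logitBias: { 50256: -100 },  // keys are coerced to numbers by the new schema
    },
  },
});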
@@ -115,57 +50,12 @@ declare class OpenAIChatLanguageModel implements LanguageModelV2 {
 
 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 declare const openaiCompletionProviderOptions: z.ZodObject<{
-    /**
-    Echo back the prompt in addition to the completion.
-     */
     echo: z.ZodOptional<z.ZodBoolean>;
-    /**
-    Modify the likelihood of specified tokens appearing in the completion.
-
-    Accepts a JSON object that maps tokens (specified by their token ID in
-    the GPT tokenizer) to an associated bias value from -100 to 100. You
-    can use this tokenizer tool to convert text to token IDs. Mathematically,
-    the bias is added to the logits generated by the model prior to sampling.
-    The exact effect will vary per model, but values between -1 and 1 should
-    decrease or increase likelihood of selection; values like -100 or 100
-    should result in a ban or exclusive selection of the relevant token.
-
-    As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
-    token from being generated.
-     */
     logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
-    /**
-    The suffix that comes after a completion of inserted text.
-     */
     suffix: z.ZodOptional<z.ZodString>;
-    /**
-    A unique identifier representing your end-user, which can help OpenAI to
-    monitor and detect abuse. Learn more.
-     */
     user: z.ZodOptional<z.ZodString>;
-    the response size and can slow down response times. However, it can
-    be useful to better understand how the model is behaving.
-    Setting to true will return the log probabilities of the tokens that
-    were generated.
-    Setting to a number will return the log probabilities of the top n
-    tokens that were generated.
-     */
-    logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
-}, "strip", z.ZodTypeAny, {
-    user?: string | undefined;
-    logitBias?: Record<string, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    echo?: boolean | undefined;
-    suffix?: string | undefined;
-}, {
-    user?: string | undefined;
-    logitBias?: Record<string, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    echo?: boolean | undefined;
-    suffix?: string | undefined;
-}>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
+}, z.core.$strip>;
 type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;
 
 type OpenAICompletionConfig = {
@@ -203,23 +93,9 @@ type OpenAIConfig = {
 
 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
 declare const openaiEmbeddingProviderOptions: z.ZodObject<{
-    /**
-    The number of dimensions the resulting output embeddings should have.
-    Only supported in text-embedding-3 and later models.
-     */
     dimensions: z.ZodOptional<z.ZodNumber>;
-    /**
-    A unique identifier representing your end-user, which can help OpenAI to
-    monitor and detect abuse. Learn more.
-     */
     user: z.ZodOptional<z.ZodString>;
-},
-    user?: string | undefined;
-    dimensions?: number | undefined;
-}, {
-    user?: string | undefined;
-    dimensions?: number | undefined;
-}>;
+}, z.core.$strip>;
 type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;
 
 declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
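The embedding options schema is part of the internal export surface (see the export list at the end of this file), so it can also be used directly to validate options ahead of a call. A small sketch; the '@ai-sdk/openai/internal' import path is assumed from the dist/internal build shown in this diff:

import { openaiEmbeddingProviderOptions } from '@ai-sdk/openai/internal';

// Throws on invalid input; the result is typed as OpenAIEmbeddingProviderOptions.
const embeddingOptions = openaiEmbeddingProviderOptions.parse({
  dimensions: 512,   // honored by text-embedding-3 and later models
  user: 'user-1234',
});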
@@ -254,41 +130,15 @@ declare class OpenAIImageModel implements ImageModelV2 {
 
 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
 declare const openAITranscriptionProviderOptions: z.ZodObject<{
-     * Additional information to include in the transcription response.
-     */
-    include: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
-    /**
-     * The language of the input audio in ISO-639-1 format.
-     */
+    include: z.ZodOptional<z.ZodArray<z.ZodString>>;
     language: z.ZodOptional<z.ZodString>;
-    /**
-     * An optional text to guide the model's style or continue a previous audio segment.
-     */
     prompt: z.ZodOptional<z.ZodString>;
-    /**
-     * The sampling temperature, between 0 and 1.
-     * @default 0
-     */
     temperature: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
-}, "strip", z.ZodTypeAny, {
-    prompt?: string | undefined;
-    temperature?: number | undefined;
-    include?: string[] | undefined;
-    language?: string | undefined;
-    timestampGranularities?: ("word" | "segment")[] | undefined;
-}, {
-    prompt?: string | undefined;
-    temperature?: number | undefined;
-    include?: string[] | undefined;
-    language?: string | undefined;
-    timestampGranularities?: ("word" | "segment")[] | undefined;
-}>;
+    timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<{
+        word: "word";
+        segment: "segment";
+    }>>>>;
+}, z.core.$strip>;
 type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;
 
 type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV2CallOptions, 'providerOptions'> & {
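The transcription schema follows the same pattern: timestampGranularities is now declared through zod 4's keyed enum type but still infers to ("word" | "segment")[]. A validation sketch under the same assumed internal entry point:

import { openAITranscriptionProviderOptions } from '@ai-sdk/openai/internal';

const transcriptionOptions = openAITranscriptionProviderOptions.parse({
  language: 'en',                               // ISO-639-1
  temperature: 0,
  timestampGranularities: ['word', 'segment'],  // inferred as ("word" | "segment")[]
});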
@@ -316,13 +166,7 @@ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string &
 declare const OpenAIProviderOptionsSchema: z.ZodObject<{
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     speed: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
-},
-    instructions?: string | null | undefined;
-    speed?: number | null | undefined;
-}, {
-    instructions?: string | null | undefined;
-    speed?: number | null | undefined;
-}>;
+}, z.core.$strip>;
 type OpenAISpeechCallOptions = z.infer<typeof OpenAIProviderOptionsSchema>;
 interface OpenAISpeechModelConfig extends OpenAIConfig {
     _internal?: {
@@ -362,30 +206,11 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<
-    store?: boolean | null | undefined;
-    metadata?: any;
-    serviceTier?: "auto" | "flex" | null | undefined;
-    instructions?: string | null | undefined;
-    previousResponseId?: string | null | undefined;
-    strictSchemas?: boolean | null | undefined;
-    reasoningSummary?: string | null | undefined;
-}, {
-    user?: string | null | undefined;
-    parallelToolCalls?: boolean | null | undefined;
-    reasoningEffort?: string | null | undefined;
-    store?: boolean | null | undefined;
-    metadata?: any;
-    serviceTier?: "auto" | "flex" | null | undefined;
-    instructions?: string | null | undefined;
-    previousResponseId?: string | null | undefined;
-    strictSchemas?: boolean | null | undefined;
-    reasoningSummary?: string | null | undefined;
-}>;
+    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+        auto: "auto";
+        flex: "flex";
+    }>>>;
+}, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
 export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
package/dist/internal/index.d.ts CHANGED
The declaration changes in this file are identical to those shown above for package/dist/internal/index.d.mts.