@ai-sdk/openai 2.0.0-canary.9 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +654 -0
- package/README.md +2 -2
- package/dist/index.d.mts +83 -175
- package/dist/index.d.ts +83 -175
- package/dist/index.js +1320 -625
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1272 -573
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +84 -252
- package/dist/internal/index.d.ts +84 -252
- package/dist/internal/index.js +1312 -582
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1263 -533
- package/dist/internal/index.mjs.map +1 -1
- package/internal.d.ts +1 -0
- package/package.json +8 -7
@@ -1,97 +1,34 @@
-import { LanguageModelV2, EmbeddingModelV2,
+import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
-import { z } from 'zod';
+import { z } from 'zod/v4';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
 declare const openaiProviderOptions: z.ZodObject<{
-    /**
-     * Modify the likelihood of specified tokens appearing in the completion.
-     *
-     * Accepts a JSON object that maps tokens (specified by their token ID in
-     * the GPT tokenizer) to an associated bias value from -100 to 100.
-     */
-    logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
-    /**
-     * Return the log probabilities of the tokens.
-     *
-     * Setting to true will return the log probabilities of the tokens that
-     * were generated.
-     *
-     * Setting to a number will return the log probabilities of the top n
-     * tokens that were generated.
-     */
-    logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
-    /**
-     * Whether to enable parallel function calling during tool use. Default to true.
-     */
+    logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
     parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
-    /**
-     * A unique identifier representing your end-user, which can help OpenAI to
-     * monitor and detect abuse.
-     */
     user: z.ZodOptional<z.ZodString>;
-    /**
-     * Reasoning effort for reasoning models.
-     */
-    reasoningEffort: z.ZodOptional<z.ZodEnum<["low", "medium", "high"]>>;
-    /**
-     * Maximum number of completion tokens to generate. Useful for reasoning models.
-     */
+    reasoningEffort: z.ZodOptional<z.ZodEnum<{
+        low: "low";
+        medium: "medium";
+        high: "high";
+    }>>;
     maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
-    /**
-     * Whether to enable persistence in responses API.
-     */
     store: z.ZodOptional<z.ZodBoolean>;
-    /**
-     * Metadata to associate with the request.
-     */
     metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
-    /**
-     * Parameters for prediction mode.
-     */
     prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
-}, "strip", z.ZodTypeAny, {
-    user?: string | undefined;
-    logitBias?: Record<number, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    parallelToolCalls?: boolean | undefined;
-    reasoningEffort?: "low" | "medium" | "high" | undefined;
-    maxCompletionTokens?: number | undefined;
-    store?: boolean | undefined;
-    metadata?: Record<string, string> | undefined;
-    prediction?: Record<string, any> | undefined;
-}, {
-    user?: string | undefined;
-    logitBias?: Record<number, number> | undefined;
-    logprobs?: number | boolean | undefined;
-    parallelToolCalls?: boolean | undefined;
-    reasoningEffort?: "low" | "medium" | "high" | undefined;
-    maxCompletionTokens?: number | undefined;
-    store?: boolean | undefined;
-    metadata?: Record<string, string> | undefined;
-    prediction?: Record<string, any> | undefined;
-}>;
+    structuredOutputs: z.ZodOptional<z.ZodBoolean>;
+    serviceTier: z.ZodOptional<z.ZodEnum<{
+        auto: "auto";
+        flex: "flex";
+        priority: "priority";
+    }>>;
+    strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
+}, z.core.$strip>;
 type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
-interface OpenAIChatSettings {
-    /**
-    Whether to use structured outputs. Defaults to false.
-
-    When enabled, tool calls and object generation will be strict and follow the provided schema.
-     */
-    structuredOutputs?: boolean;
-    /**
-    Automatically download images and pass the image as data to the model.
-    OpenAI supports image URLs for public models, so this is only needed for
-    private models or when the images are not publicly accessible.
-
-    Defaults to `false`.
-     */
-    downloadImages?: boolean;
-}
 
 type OpenAIChatConfig = {
     provider: string;
-    compatibility: 'strict' | 'compatible';
     headers: () => Record<string, string | undefined>;
     url: (options: {
         modelId: string;
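The hunk above moves everything that used to be fixed at construction time (including `OpenAIChatSettings.structuredOutputs`) into the `openaiProviderOptions` zod schema, now emitted in zod v4's `z.core.$strip` form, so the options travel with each call. A minimal sketch of the new call shape, assuming the `ai` v5 package alongside this provider; the model id and option values are illustrative, not part of this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.chat('o3-mini'),
  prompt: 'Summarize this release in one sentence.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low', // now a typed enum: low | medium | high
      serviceTier: 'auto', // new in 2.0.0: auto | flex | priority
      strictJsonSchema: true, // new strictness switch for JSON schema outputs
      structuredOutputs: true, // moved here from the removed OpenAIChatSettings
    },
  },
});
```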
@@ -102,65 +39,29 @@ type OpenAIChatConfig = {
 declare class OpenAIChatLanguageModel implements LanguageModelV2 {
     readonly specificationVersion = "v2";
     readonly modelId: OpenAIChatModelId;
-    readonly settings: OpenAIChatSettings;
+    readonly supportedUrls: {
+        'image/*': RegExp[];
+    };
     private readonly config;
-    constructor(modelId: OpenAIChatModelId, settings: OpenAIChatSettings, config: OpenAIChatConfig);
-    get supportsStructuredOutputs(): boolean;
-    get defaultObjectGenerationMode(): "tool" | "json";
+    constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
     get provider(): string;
-    get supportsImageUrls(): boolean;
     private getArgs;
     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
     doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
 }
 
 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
-interface OpenAICompletionSettings {
-    /**
-    Echo back the prompt in addition to the completion.
-     */
-    echo?: boolean;
-    /**
-    Modify the likelihood of specified tokens appearing in the completion.
-
-    Accepts a JSON object that maps tokens (specified by their token ID in
-    the GPT tokenizer) to an associated bias value from -100 to 100. You
-    can use this tokenizer tool to convert text to token IDs. Mathematically,
-    the bias is added to the logits generated by the model prior to sampling.
-    The exact effect will vary per model, but values between -1 and 1 should
-    decrease or increase likelihood of selection; values like -100 or 100
-    should result in a ban or exclusive selection of the relevant token.
-
-    As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
-    token from being generated.
-     */
-    logitBias?: Record<number, number>;
-    /**
-    Return the log probabilities of the tokens. Including logprobs will increase
-    the response size and can slow down response times. However, it can
-    be useful to better understand how the model is behaving.
-
-    Setting to true will return the log probabilities of the tokens that
-    were generated.
-
-    Setting to a number will return the log probabilities of the top n
-    tokens that were generated.
-     */
-    logprobs?: boolean | number;
-    /**
-    The suffix that comes after a completion of inserted text.
-     */
-    suffix?: string;
-    /**
-    A unique identifier representing your end-user, which can help OpenAI to
-    monitor and detect abuse. Learn more.
-     */
-    user?: string;
-}
+declare const openaiCompletionProviderOptions: z.ZodObject<{
+    echo: z.ZodOptional<z.ZodBoolean>;
+    logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
+    suffix: z.ZodOptional<z.ZodString>;
+    user: z.ZodOptional<z.ZodString>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
+}, z.core.$strip>;
+type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;
 
 type OpenAICompletionConfig = {
     provider: string;
-    compatibility: 'strict' | 'compatible';
     headers: () => Record<string, string | undefined>;
     url: (options: {
         modelId: string;
@@ -170,12 +71,12 @@ type OpenAICompletionConfig = {
 };
 declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
     readonly specificationVersion = "v2";
-    readonly defaultObjectGenerationMode: undefined;
     readonly modelId: OpenAICompletionModelId;
-    readonly settings: OpenAICompletionSettings;
     private readonly config;
-    constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
+    private get providerOptionsName();
+    constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
     get provider(): string;
+    readonly supportedUrls: Record<string, RegExp[]>;
     private getArgs;
     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
     doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
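The completion model follows the same pattern: `OpenAICompletionSettings` is gone, and `echo`, `logitBias`, `suffix`, `user`, and `logprobs` are now validated per call by `openaiCompletionProviderOptions`. A sketch under the same assumptions as above:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.completion('gpt-3.5-turbo-instruct'),
  prompt: 'Roses are red,',
  providerOptions: {
    openai: {
      echo: false, // previously OpenAICompletionSettings.echo
      suffix: '\n', // suffix that follows the inserted completion
      logitBias: { '50256': -100 }, // record keys are strings in the new schema
    },
  },
});
```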
@@ -193,113 +94,58 @@ type OpenAIConfig = {
 };
 
 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
-interface OpenAIEmbeddingSettings {
-    /**
-    Override the maximum number of embeddings per call.
-     */
-    maxEmbeddingsPerCall?: number;
-    /**
-    Override the parallelism of embedding calls.
-     */
-    supportsParallelCalls?: boolean;
-    /**
-    The number of dimensions the resulting output embeddings should have.
-    Only supported in text-embedding-3 and later models.
-     */
-    dimensions?: number;
-    /**
-    A unique identifier representing your end-user, which can help OpenAI to
-    monitor and detect abuse. Learn more.
-     */
-    user?: string;
-}
+declare const openaiEmbeddingProviderOptions: z.ZodObject<{
+    dimensions: z.ZodOptional<z.ZodNumber>;
+    user: z.ZodOptional<z.ZodString>;
+}, z.core.$strip>;
+type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;
 
 declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
     readonly specificationVersion = "v2";
     readonly modelId: OpenAIEmbeddingModelId;
+    readonly maxEmbeddingsPerCall = 2048;
+    readonly supportsParallelCalls = true;
     private readonly config;
-    private readonly settings;
     get provider(): string;
-    get maxEmbeddingsPerCall(): number;
-    get supportsParallelCalls(): boolean;
-    constructor(modelId: OpenAIEmbeddingModelId, settings: OpenAIEmbeddingSettings, config: OpenAIConfig);
-    doEmbed({ values, headers, abortSignal, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
+    constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
+    doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
 }
 
-type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
-interface OpenAIImageSettings {
-    /**
-    Override the maximum number of images per call (default is dependent on the
-    model, or 1 for an unknown model).
-     */
-    maxImagesPerCall?: number;
-}
+declare const hasDefaultResponseFormat: Set<string>;
 
 interface OpenAIImageModelConfig extends OpenAIConfig {
     _internal?: {
         currentDate?: () => Date;
     };
 }
-declare class OpenAIImageModel implements ImageModelV1 {
+declare class OpenAIImageModel implements ImageModelV2 {
     readonly modelId: OpenAIImageModelId;
-    private readonly settings;
     private readonly config;
-    readonly specificationVersion = "v1";
+    readonly specificationVersion = "v2";
     get maxImagesPerCall(): number;
     get provider(): string;
-    constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
-    doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV1['doGenerate']>>>;
+    constructor(modelId: OpenAIImageModelId, config: OpenAIImageModelConfig);
+    doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV2['doGenerate']>>>;
 }
 
 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
-type OpenAITranscriptionModelOptions = {
-    /**
-     * Additional information to include in the transcription response.
-     */
-    include?: string[];
-    /**
-     * The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
-     */
-    language?: string;
-    /**
-     * An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
-     */
-    prompt?: string;
-    /**
-     * The sampling temperature, between 0 and 1.
-     * @default 0
-     */
-    temperature?: number;
-    /**
-     * The timestamp granularities to populate for this transcription.
-     * @default ['segment']
-     */
-    timestamp_granularities?: Array<'word' | 'segment'>;
-};
-
-declare const openAIProviderOptionsSchema: z.ZodObject<{
-    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodString, "many">>>;
-    language: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    prompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    temperature: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodNumber>>>;
-    timestampGranularities: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>>;
-}, "strip", z.ZodTypeAny, {
-    temperature: number | null;
-    timestampGranularities: ("word" | "segment")[] | null;
-    prompt?: string | null | undefined;
-    include?: string[] | null | undefined;
-    language?: string | null | undefined;
-}, {
-    prompt?: string | null | undefined;
-    temperature?: number | null | undefined;
-    include?: string[] | null | undefined;
-    language?: string | null | undefined;
-    timestampGranularities?: ("word" | "segment")[] | null | undefined;
-}>;
-type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV1CallOptions, 'providerOptions'> & {
+declare const openAITranscriptionProviderOptions: z.ZodObject<{
+    include: z.ZodOptional<z.ZodArray<z.ZodString>>;
+    language: z.ZodOptional<z.ZodString>;
+    prompt: z.ZodOptional<z.ZodString>;
+    temperature: z.ZodOptional<z.ZodDefault<z.ZodNumber>>;
+    timestampGranularities: z.ZodOptional<z.ZodDefault<z.ZodArray<z.ZodEnum<{
+        word: "word";
+        segment: "segment";
+    }>>>>;
+}, z.core.$strip>;
+type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;
+
+type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV2CallOptions, 'providerOptions'> & {
     providerOptions?: {
-        openai?:
+        openai?: OpenAITranscriptionProviderOptions;
     };
 };
 interface OpenAITranscriptionModelConfig extends OpenAIConfig {
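Embeddings and images get the same treatment: the per-model settings objects disappear (`maxEmbeddingsPerCall` is now fixed at 2048 and `supportsParallelCalls` at `true`), `dimensions` and `user` move into `providerOptions`, and `gpt-image-1` joins `OpenAIImageModelId` (tracked by the new `hasDefaultResponseFormat` set). A sketch with illustrative values:

```ts
import { openai } from '@ai-sdk/openai';
import { embed, experimental_generateImage as generateImage } from 'ai';

// `dimensions` was OpenAIEmbeddingSettings.dimensions; it is now a call option.
const { embedding } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'sunny day at the beach',
  providerOptions: { openai: { dimensions: 512 } },
});

// 'gpt-image-1' is newly accepted as an OpenAIImageModelId.
const { image } = await generateImage({
  model: openai.image('gpt-image-1'),
  prompt: 'A minimal line drawing of a seagull.',
});
```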
@@ -307,14 +153,14 @@ interface OpenAITranscriptionModelConfig extends OpenAIConfig {
         currentDate?: () => Date;
     };
 }
-declare class OpenAITranscriptionModel implements TranscriptionModelV1 {
+declare class OpenAITranscriptionModel implements TranscriptionModelV2 {
     readonly modelId: OpenAITranscriptionModelId;
     private readonly config;
-    readonly specificationVersion = "v1";
+    readonly specificationVersion = "v2";
     get provider(): string;
     constructor(modelId: OpenAITranscriptionModelId, config: OpenAITranscriptionModelConfig);
     private getArgs;
-    doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV1['doGenerate']>>>;
+    doGenerate(options: OpenAITranscriptionCallOptions): Promise<Awaited<ReturnType<TranscriptionModelV2['doGenerate']>>>;
 }
 
 type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
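Transcription is now schema-driven as well: the loose `timestamp_granularities` field becomes the camel-cased, validated `timestampGranularities`, and the model implements `TranscriptionModelV2`. A sketch, assuming a local audio file:

```ts
import { readFile } from 'node:fs/promises';
import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai';

const { text, segments } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('meeting.wav'),
  providerOptions: {
    openai: {
      language: 'en', // ISO-639-1 hint, as before
      timestampGranularities: ['word'], // was timestamp_granularities
    },
  },
});
```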
@@ -322,38 +168,32 @@ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 declare const OpenAIProviderOptionsSchema: z.ZodObject<{
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     speed: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
-}, "strip", z.ZodTypeAny, {
-    instructions?: string | null | undefined;
-    speed?: number | null | undefined;
-}, {
-    instructions?: string | null | undefined;
-    speed?: number | null | undefined;
-}>;
+}, z.core.$strip>;
 type OpenAISpeechCallOptions = z.infer<typeof OpenAIProviderOptionsSchema>;
 interface OpenAISpeechModelConfig extends OpenAIConfig {
     _internal?: {
         currentDate?: () => Date;
     };
 }
-declare class OpenAISpeechModel implements SpeechModelV1 {
+declare class OpenAISpeechModel implements SpeechModelV2 {
     readonly modelId: OpenAISpeechModelId;
     private readonly config;
-    readonly specificationVersion = "v1";
+    readonly specificationVersion = "v2";
     get provider(): string;
     constructor(modelId: OpenAISpeechModelId, config: OpenAISpeechModelConfig);
     private getArgs;
-    doGenerate(options: Parameters<SpeechModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV1['doGenerate']>>>;
+    doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
 }
 
-
+declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o1-mini", "o1-mini-2024-09-12", "o1-preview", "o1-preview-2024-09-12", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview"];
+type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});
 
 declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     readonly specificationVersion = "v2";
-    readonly defaultObjectGenerationMode = "json";
-    readonly supportsStructuredOutputs = true;
     readonly modelId: OpenAIResponsesModelId;
     private readonly config;
     constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
+    readonly supportedUrls: Record<string, RegExp[]>;
     get provider(): string;
     private getArgs;
     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
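The hunk above moves speech to `SpeechModelV2` (its options keep `instructions` and `speed`) and introduces the typed `openaiResponsesModelIds` tuple. A minimal speech sketch, with illustrative voice and text:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateSpeech as generateSpeech } from 'ai';

const { audio } = await generateSpeech({
  model: openai.speech('gpt-4o-mini-tts'),
  text: 'The 2.0.0 release is out.',
  voice: 'alloy',
  providerOptions: {
    openai: { instructions: 'Speak slowly and calmly.', speed: 1.0 },
  },
});
```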
@@ -366,27 +206,19 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-}, "strip", z.ZodTypeAny, {
-    user?: string | null | undefined;
-    parallelToolCalls?: boolean | null | undefined;
-    reasoningEffort?: string | null | undefined;
-    store?: boolean | null | undefined;
-    metadata?: any;
-    instructions?: string | null | undefined;
-    previousResponseId?: string | null | undefined;
-    strictSchemas?: boolean | null | undefined;
-}, {
-    user?: string | null | undefined;
-    parallelToolCalls?: boolean | null | undefined;
-    reasoningEffort?: string | null | undefined;
-    store?: boolean | null | undefined;
-    metadata?: any;
-    instructions?: string | null | undefined;
-    previousResponseId?: string | null | undefined;
-    strictSchemas?: boolean | null | undefined;
-}>;
+    reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+        auto: "auto";
+        flex: "flex";
+        priority: "priority";
+    }>>>;
+    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
+        "reasoning.encrypted_content": "reasoning.encrypted_content";
+        "file_search_call.results": "file_search_call.results";
+    }>>>>;
+}, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
-export { OpenAIChatLanguageModel, type OpenAIChatModelId,
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };