@ai-sdk/openai 3.0.0-beta.17 → 3.0.0-beta.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +10 -0
- package/dist/index.d.mts +38 -65
- package/dist/index.d.ts +38 -65
- package/dist/index.js +1339 -1033
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1293 -942
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +101 -183
- package/dist/internal/index.d.ts +101 -183
- package/dist/internal/index.js +1336 -1028
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1305 -953
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/internal/index.d.ts
CHANGED
@@ -1,40 +1,26 @@
 import { LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV3CallOptions, TranscriptionModelV3, SpeechModelV3 } from '@ai-sdk/provider';
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
-import { FetchFunction } from '@ai-sdk/provider-utils';
-import { z } from 'zod/v4';
+import { InferValidator, FetchFunction } from '@ai-sdk/provider-utils';

 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
-declare const openaiChatLanguageModelOptions:
-    logitBias
-    logprobs
-    parallelToolCalls
-    user
-    reasoningEffort
-
-
-
-
-
-
-
-
-
-
-
-
-        flex: "flex";
-        priority: "priority";
-    }>>;
-    strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
-    textVerbosity: z.ZodOptional<z.ZodEnum<{
-        low: "low";
-        medium: "medium";
-        high: "high";
-    }>>;
-    promptCacheKey: z.ZodOptional<z.ZodString>;
-    safetyIdentifier: z.ZodOptional<z.ZodString>;
-}, z.core.$strip>;
-type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
+declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidator<{
+    logitBias?: Record<number, number> | undefined;
+    logprobs?: number | boolean | undefined;
+    parallelToolCalls?: boolean | undefined;
+    user?: string | undefined;
+    reasoningEffort?: "minimal" | "low" | "medium" | "high" | undefined;
+    maxCompletionTokens?: number | undefined;
+    store?: boolean | undefined;
+    metadata?: Record<string, string> | undefined;
+    prediction?: Record<string, any> | undefined;
+    structuredOutputs?: boolean | undefined;
+    serviceTier?: "auto" | "flex" | "priority" | undefined;
+    strictJsonSchema?: boolean | undefined;
+    textVerbosity?: "low" | "medium" | "high" | undefined;
+    promptCacheKey?: string | undefined;
+    safetyIdentifier?: string | undefined;
+}>;
+type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;

 type OpenAIChatConfig = {
     provider: string;
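
The chat provider options are now typed through `_ai_sdk_provider_utils.LazyValidator` with a plain object type parameter, and `InferValidator` replaces `z.infer` when deriving `OpenAIChatLanguageModelOptions`, so the `zod/v4` import disappears from the declaration file. A minimal sketch of how such options are typically supplied, assuming the AI SDK's `generateText` with its `providerOptions.openai` channel and that the internal entry point is published as `@ai-sdk/openai/internal` (neither is confirmed by this diff):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
import type { OpenAIChatLanguageModelOptions } from '@ai-sdk/openai/internal';

// Assumed usage: option names and values mirror the inferred object type above.
const { text } = await generateText({
  model: openai.chat('gpt-5-mini'),
  prompt: 'Summarize the latest release notes.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low',
      serviceTier: 'flex',
      textVerbosity: 'medium',
      promptCacheKey: 'docs-demo',
    } satisfies OpenAIChatLanguageModelOptions,
  },
});
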
@@ -60,14 +46,14 @@ declare class OpenAIChatLanguageModel implements LanguageModelV3 {
 }

 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
-declare const openaiCompletionProviderOptions:
-    echo
-    logitBias
-    suffix
-    user
-    logprobs
-}
-type OpenAICompletionProviderOptions =
+declare const openaiCompletionProviderOptions: _ai_sdk_provider_utils.LazyValidator<{
+    echo?: boolean | undefined;
+    logitBias?: Record<string, number> | undefined;
+    suffix?: string | undefined;
+    user?: string | undefined;
+    logprobs?: number | boolean | undefined;
+}>;
+type OpenAICompletionProviderOptions = InferValidator<typeof openaiCompletionProviderOptions>;

 type OpenAICompletionConfig = {
     provider: string;
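
`OpenAICompletionProviderOptions` follows the same `LazyValidator`/`InferValidator` pattern. A type-only sketch built from the declaration above (the `@ai-sdk/openai/internal` import path is an assumption):

import type { OpenAICompletionProviderOptions } from '@ai-sdk/openai/internal';

// InferValidator yields an ordinary optional-property object type,
// the same shape z.infer produced before.
const completionOptions: OpenAICompletionProviderOptions = {
  echo: false,
  suffix: '\n---\n',
  logprobs: 3,
};
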
@@ -112,11 +98,11 @@ type OpenAIConfig = {
 };

 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
-declare const openaiEmbeddingProviderOptions:
-    dimensions
-    user
-}
-type OpenAIEmbeddingProviderOptions =
+declare const openaiEmbeddingProviderOptions: _ai_sdk_provider_utils.LazyValidator<{
+    dimensions?: number | undefined;
+    user?: string | undefined;
+}>;
+type OpenAIEmbeddingProviderOptions = InferValidator<typeof openaiEmbeddingProviderOptions>;

 declare class OpenAIEmbeddingModel implements EmbeddingModelV3<string> {
     readonly specificationVersion = "v3";
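
The embedding options expose only `dimensions` and `user`. A rough sketch, assuming the AI SDK's `embed` helper forwards `providerOptions.openai` to this model:

import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';

// Assumed usage: `dimensions` and `user` map onto OpenAIEmbeddingProviderOptions.
const { embedding } = await embed({
  model: openai.textEmbeddingModel('text-embedding-3-small'),
  value: 'sunny day at the beach',
  providerOptions: { openai: { dimensions: 512, user: 'docs-demo' } },
});
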
@@ -149,17 +135,14 @@ declare class OpenAIImageModel implements ImageModelV3 {
 }

 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
-declare const openAITranscriptionProviderOptions:
-    include
-    language
-    prompt
-    temperature
-    timestampGranularities
-
-
-    }>>>>;
-}, z.core.$strip>;
-type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;
+declare const openAITranscriptionProviderOptions: _ai_sdk_provider_utils.LazyValidator<{
+    include?: string[] | undefined;
+    language?: string | undefined;
+    prompt?: string | undefined;
+    temperature?: number | undefined;
+    timestampGranularities?: ("word" | "segment")[] | undefined;
+}>;
+type OpenAITranscriptionProviderOptions = InferValidator<typeof openAITranscriptionProviderOptions>;

 type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV3CallOptions, 'providerOptions'> & {
     providerOptions?: {
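
A rough sketch of supplying the transcription options, assuming the AI SDK's `experimental_transcribe` helper and the `providerOptions.openai` channel (both assumptions; this diff only shows the provider-side types):

import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

// Assumed usage: the options mirror OpenAITranscriptionProviderOptions above.
const result = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('meeting.mp3'),
  providerOptions: {
    openai: { language: 'en', timestampGranularities: ['word'] },
  },
});
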
@@ -182,12 +165,12 @@ declare class OpenAITranscriptionModel implements TranscriptionModelV3 {
 }

 type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
+declare const openaiSpeechProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+    instructions?: string | null | undefined;
+    speed?: number | null | undefined;
+}>;
+type OpenAISpeechCallOptions = InferValidator<typeof openaiSpeechProviderOptionsSchema>;

-declare const OpenAIProviderOptionsSchema: z.ZodObject<{
-    instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    speed: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
-}, z.core.$strip>;
-type OpenAISpeechCallOptions = z.infer<typeof OpenAIProviderOptionsSchema>;
 interface OpenAISpeechModelConfig extends OpenAIConfig {
     _internal?: {
         currentDate?: () => Date;
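
The speech call options (`instructions`, `speed`) are now derived from the added `openaiSpeechProviderOptionsSchema` rather than the removed Zod `OpenAIProviderOptionsSchema`. A rough sketch, assuming the AI SDK's `experimental_generateSpeech` helper and the `providerOptions.openai` channel:

import { experimental_generateSpeech as generateSpeech } from 'ai';
import { openai } from '@ai-sdk/openai';

// Assumed usage: `instructions` and `speed` correspond to OpenAISpeechCallOptions.
const { audio } = await generateSpeech({
  model: openai.speech('gpt-4o-mini-tts'),
  text: 'The 3.0.0-beta.18 build is ready.',
  providerOptions: { openai: { instructions: 'Speak calmly.', speed: 1.1 } },
});
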
@@ -216,56 +199,25 @@ declare class OpenAIResponsesLanguageModel implements LanguageModelV3 {
     doGenerate(options: Parameters<LanguageModelV3['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doGenerate']>>>;
     doStream(options: Parameters<LanguageModelV3['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV3['doStream']>>>;
 }
-declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
-    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
-        "file_search_call.results": "file_search_call.results";
-        "message.output_text.logprobs": "message.output_text.logprobs";
-        "reasoning.encrypted_content": "reasoning.encrypted_content";
-    }>>>>;
-    instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
-    maxToolCalls: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
-    metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-    parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-        auto: "auto";
-        flex: "flex";
-        priority: "priority";
-    }>>>;
-    store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-        low: "low";
-        medium: "medium";
-        high: "high";
-    }>>>;
-    user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-}, z.core.$strip>;
-type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

-declare const codeInterpreterInputSchema:
-
-
-}
-declare const codeInterpreterOutputSchema:
-    outputs
-        type:
-        logs:
-    }
-        type:
-        url:
-    }
-}
-declare const codeInterpreterArgsSchema:
-    container
-    fileIds
-    }
-}
+declare const codeInterpreterInputSchema: _ai_sdk_provider_utils.LazySchema<{
+    containerId: string;
+    code?: string | null | undefined;
+}>;
+declare const codeInterpreterOutputSchema: _ai_sdk_provider_utils.LazySchema<{
+    outputs?: ({
+        type: "logs";
+        logs: string;
+    } | {
+        type: "image";
+        url: string;
+    })[] | null | undefined;
+}>;
+declare const codeInterpreterArgsSchema: _ai_sdk_provider_utils.LazySchema<{
+    container?: string | {
+        fileIds?: string[] | undefined;
+    } | undefined;
+}>;
 type CodeInterpreterArgs = {
     /**
      * The code interpreter container.
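
The code interpreter input, output, and args schemas now surface plain object shapes via `LazySchema`. A rough sketch of tool arguments that satisfy `codeInterpreterArgsSchema`, assuming the tool factory is exposed as `openai.tools.codeInterpreter` (an assumption; the diff only shows the internal factory exports):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Assumed usage: `container` is either a container id string or `{ fileIds }`.
const { text } = await generateText({
  model: openai.responses('gpt-5-mini'),
  prompt: 'Plot y = x^2 for x from 0 to 10.',
  tools: {
    code_interpreter: openai.tools.codeInterpreter({
      container: { fileIds: [] },
    }),
  },
});
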
@@ -364,36 +316,25 @@ type OpenAIResponsesFileSearchToolCompoundFilter = {
     filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter>;
 };

-declare const fileSearchArgsSchema:
-    vectorStoreIds:
-    maxNumResults
-    ranking
-        ranker
-        scoreThreshold
-    }
-    filters
-
-
-
-
-
-
-
-
-
-
-
-}, z.core.$strip>;
-declare const fileSearchOutputSchema: z.ZodObject<{
-    queries: z.ZodArray<z.ZodString>;
-    results: z.ZodNullable<z.ZodArray<z.ZodObject<{
-        attributes: z.ZodRecord<z.ZodString, z.ZodUnknown>;
-        fileId: z.ZodString;
-        filename: z.ZodString;
-        score: z.ZodNumber;
-        text: z.ZodString;
-    }, z.core.$strip>>>;
-}, z.core.$strip>;
+declare const fileSearchArgsSchema: _ai_sdk_provider_utils.LazySchema<{
+    vectorStoreIds: string[];
+    maxNumResults?: number | undefined;
+    ranking?: {
+        ranker?: string | undefined;
+        scoreThreshold?: number | undefined;
+    } | undefined;
+    filters?: any;
+}>;
+declare const fileSearchOutputSchema: _ai_sdk_provider_utils.LazySchema<{
+    queries: string[];
+    results: {
+        attributes: Record<string, unknown>;
+        fileId: string;
+        filename: string;
+        score: number;
+        text: string;
+    }[] | null;
+}>;
 declare const fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{}, {
     /**
      * The search query to execute.
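
`fileSearchArgsSchema` and `fileSearchOutputSchema` likewise expose plain object shapes; only `vectorStoreIds` is required. A rough sketch, assuming the tool factory is exposed as `openai.tools.fileSearch` (the vector store id below is a placeholder):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Assumed usage: the args match fileSearchArgsSchema above.
const { text } = await generateText({
  model: openai.responses('gpt-5-mini'),
  prompt: 'What does the onboarding guide say about expense reports?',
  tools: {
    file_search: openai.tools.fileSearch({
      vectorStoreIds: ['vs_example'],
      maxNumResults: 5,
      ranking: { scoreThreshold: 0.5 },
    }),
  },
});
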
@@ -458,47 +399,24 @@ declare const fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithO
     filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter;
 }>;

-declare const imageGenerationArgsSchema:
-    background
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    outputCompression: z.ZodOptional<z.ZodNumber>;
-    outputFormat: z.ZodOptional<z.ZodEnum<{
-        png: "png";
-        jpeg: "jpeg";
-        webp: "webp";
-    }>>;
-    partialImages: z.ZodOptional<z.ZodNumber>;
-    quality: z.ZodOptional<z.ZodEnum<{
-        low: "low";
-        medium: "medium";
-        high: "high";
-        auto: "auto";
-    }>>;
-    size: z.ZodOptional<z.ZodEnum<{
-        auto: "auto";
-        "1024x1024": "1024x1024";
-        "1024x1536": "1024x1536";
-        "1536x1024": "1536x1024";
-    }>>;
-}, z.core.$strict>;
-declare const imageGenerationOutputSchema: z.ZodObject<{
-    result: z.ZodString;
-}, z.core.$strip>;
+declare const imageGenerationArgsSchema: _ai_sdk_provider_utils.LazySchema<{
+    background?: "auto" | "opaque" | "transparent" | undefined;
+    inputFidelity?: "low" | "high" | undefined;
+    inputImageMask?: {
+        fileId?: string | undefined;
+        imageUrl?: string | undefined;
+    } | undefined;
+    model?: string | undefined;
+    moderation?: "auto" | undefined;
+    outputCompression?: number | undefined;
+    outputFormat?: "png" | "jpeg" | "webp" | undefined;
+    partialImages?: number | undefined;
+    quality?: "auto" | "low" | "medium" | "high" | undefined;
+    size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024" | undefined;
+}>;
+declare const imageGenerationOutputSchema: _ai_sdk_provider_utils.LazySchema<{
+    result: string;
+}>;
 type ImageGenerationArgs = {
     /**
      * Background type for the generated image. Default is 'auto'.
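
`imageGenerationArgsSchema` now spells out its options as a plain object type (`background`, `inputFidelity`, `inputImageMask`, `model`, `moderation`, `outputCompression`, `outputFormat`, `partialImages`, `quality`, `size`). A rough sketch, assuming the tool factory is exposed as `openai.tools.imageGeneration`:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Assumed usage: every value below comes from the unions in imageGenerationArgsSchema.
const result = await generateText({
  model: openai.responses('gpt-5-mini'),
  prompt: 'Generate a simple line-art logo for a weather app.',
  tools: {
    image_generation: openai.tools.imageGeneration({
      size: '1024x1024',
      quality: 'high',
      outputFormat: 'png',
      background: 'transparent',
    }),
  },
});
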
@@ -562,4 +480,4 @@ declare const imageGeneration: (args?: ImageGenerationArgs) => _ai_sdk_provider_
     result: string;
 }>;

-export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type
+export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, imageGeneration, imageGenerationArgsSchema, imageGenerationOutputSchema, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiSpeechProviderOptionsSchema };