@ai-sdk/openai 2.0.0-canary.12 → 2.0.0-canary.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.d.mts +7 -25
- package/dist/index.d.ts +7 -25
- package/dist/index.js +178 -164
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +178 -164
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +25 -43
- package/dist/internal/index.d.ts +25 -43
- package/dist/internal/index.js +174 -158
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +173 -158
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.d.mts
CHANGED
@@ -40,6 +40,12 @@ declare const openaiProviderOptions: z.ZodObject<{
      * Parameters for prediction mode.
      */
     prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
+    /**
+     * Whether to use structured outputs.
+     *
+     * @default true
+     */
+    structuredOutputs: z.ZodOptional<z.ZodBoolean>;
 }, "strip", z.ZodTypeAny, {
     user?: string | undefined;
     logitBias?: Record<number, number> | undefined;
@@ -49,6 +55,7 @@ declare const openaiProviderOptions: z.ZodObject<{
     store?: boolean | undefined;
     metadata?: Record<string, string> | undefined;
     prediction?: Record<string, any> | undefined;
+    structuredOutputs?: boolean | undefined;
 }, {
     user?: string | undefined;
     logitBias?: Record<number, number> | undefined;
@@ -58,16 +65,9 @@ declare const openaiProviderOptions: z.ZodObject<{
     store?: boolean | undefined;
     metadata?: Record<string, string> | undefined;
     prediction?: Record<string, any> | undefined;
+    structuredOutputs?: boolean | undefined;
 }>;
 type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
-interface OpenAIChatSettings {
-    /**
-    Whether to use structured outputs. Defaults to false.
-
-    When enabled, tool calls and object generation will be strict and follow the provided schema.
-     */
-    structuredOutputs?: boolean;
-}
 
 type OpenAIChatConfig = {
     provider: string;
@@ -82,9 +82,8 @@ type OpenAIChatConfig = {
 declare class OpenAIChatLanguageModel implements LanguageModelV2 {
     readonly specificationVersion = "v2";
     readonly modelId: OpenAIChatModelId;
-    readonly settings: OpenAIChatSettings;
     private readonly config;
-    constructor(modelId: OpenAIChatModelId, settings: OpenAIChatSettings, config: OpenAIChatConfig);
+    constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
     get provider(): string;
     getSupportedUrls(): Promise<Record<string, RegExp[]>>;
     private getArgs;
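Taken together, these hunks remove the per-instance `OpenAIChatSettings` object and move `structuredOutputs` into the Zod-validated provider options, with the documented default changing from false to true. A minimal sketch of opting out under the new shape, assuming the usual AI SDK call sites (`generateObject` from `ai` and the `openai` provider factory, which are not part of this diff):

```ts
import { openai } from '@ai-sdk/openai';
import { generateObject } from 'ai';
import { z } from 'zod';

const { object } = await generateObject({
  model: openai('gpt-4o-2024-08-06'),
  schema: z.object({ name: z.string(), age: z.number() }),
  prompt: 'Generate an example user.',
  providerOptions: {
    // structuredOutputs now defaults to true; pass false to opt out of
    // strict schema mode instead of passing a settings argument.
    openai: { structuredOutputs: false },
  },
});
```

Previously the same flag lived on the removed settings argument that the chat model constructor no longer accepts.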
@@ -170,16 +169,6 @@ type OpenAIConfig = {
 };
 
 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
-interface OpenAIEmbeddingSettings {
-    /**
-    Override the maximum number of embeddings per call.
-     */
-    maxEmbeddingsPerCall?: number;
-    /**
-    Override the parallelism of embedding calls.
-     */
-    supportsParallelCalls?: boolean;
-}
 declare const openaiEmbeddingProviderOptions: z.ZodObject<{
     /**
     The number of dimensions the resulting output embeddings should have.
@@ -203,16 +192,15 @@ type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOpti
 declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
     readonly specificationVersion = "v2";
     readonly modelId: OpenAIEmbeddingModelId;
+    readonly maxEmbeddingsPerCall = 2048;
+    readonly supportsParallelCalls = true;
     private readonly config;
-    private readonly settings;
     get provider(): string;
-    get maxEmbeddingsPerCall(): number;
-    get supportsParallelCalls(): boolean;
-    constructor(modelId: OpenAIEmbeddingModelId, settings: OpenAIEmbeddingSettings, config: OpenAIConfig);
+    constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
     doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
 }
 
-type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
 declare const hasDefaultResponseFormat: Set<string>;
 interface OpenAIImageSettings {
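`OpenAIEmbeddingModel` loses its settings parameter: `maxEmbeddingsPerCall` (2048) and `supportsParallelCalls` (true) become fixed readonly fields instead of overridable settings, and the constructor takes only the model id and config. The image model id union also gains `'gpt-image-1'`. A hedged sketch of the call sites this implies, using the public `embedMany` and `experimental_generateImage` helpers from `ai` (assumed here, not shown in this diff):

```ts
import { openai } from '@ai-sdk/openai';
import { embedMany, experimental_generateImage as generateImage } from 'ai';

// No second settings argument anymore; batching limits are fixed on the
// model class (maxEmbeddingsPerCall = 2048, parallel calls enabled).
const { embeddings } = await embedMany({
  model: openai.embedding('text-embedding-3-small'),
  values: ['sunny day at the beach', 'rainy night in the city'],
});

// 'gpt-image-1' is now part of OpenAIImageModelId.
const { image } = await generateImage({
  model: openai.image('gpt-image-1'),
  prompt: 'A watercolor painting of a lighthouse at dawn',
});
```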
@@ -240,43 +228,35 @@ declare class OpenAIImageModel implements ImageModelV2 {
 }
 
 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
-
+declare const openAITranscriptionProviderOptions: z.ZodObject<{
     /**
      * Additional information to include in the transcription response.
      */
-    include
+    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodString, "many">>>;
     /**
      * The language of the input audio in ISO-639-1 format.
      */
-    language
+    language: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     /**
      * An optional text to guide the model's style or continue a previous audio segment.
      */
-    prompt
+    prompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     /**
      * The sampling temperature, between 0 and 1.
      * @default 0
      */
-    temperature
+    temperature: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
     /**
      * The timestamp granularities to populate for this transcription.
      * @default ['segment']
      */
-
-};
-
-declare const openAIProviderOptionsSchema: z.ZodObject<{
-    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodString, "many">>>;
-    language: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    prompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    temperature: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodNumber>>>;
-    timestampGranularities: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>>;
+    timestampGranularities: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>>;
 }, "strip", z.ZodTypeAny, {
-    temperature: number | null;
-    timestampGranularities: ("word" | "segment")[] | null;
     prompt?: string | null | undefined;
+    temperature?: number | null | undefined;
     include?: string[] | null | undefined;
     language?: string | null | undefined;
+    timestampGranularities?: ("word" | "segment")[] | null | undefined;
 }, {
     prompt?: string | null | undefined;
     temperature?: number | null | undefined;
@@ -284,9 +264,11 @@ declare const openAIProviderOptionsSchema: z.ZodObject<{
     language?: string | null | undefined;
     timestampGranularities?: ("word" | "segment")[] | null | undefined;
 }>;
+type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;
+
 type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV1CallOptions, 'providerOptions'> & {
     providerOptions?: {
-        openai?:
+        openai?: OpenAITranscriptionProviderOptions;
     };
 };
 interface OpenAITranscriptionModelConfig extends OpenAIConfig {
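The transcription options are consolidated into a single exported `openAITranscriptionProviderOptions` schema, `temperature` and `timestampGranularities` become optional in the parsed type, and the call options are now typed as `OpenAITranscriptionProviderOptions`. A sketch of passing these options through the public `experimental_transcribe` helper from `ai` (assumed here, not part of this diff):

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai';
import { readFile } from 'node:fs/promises';

const { text, segments } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('audio.mp3'),
  providerOptions: {
    openai: {
      language: 'en',                   // ISO-639-1 input language
      temperature: 0,                   // sampling temperature (0–1)
      timestampGranularities: ['word'], // default is ['segment']
    },
  },
});
```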
@@ -378,4 +360,4 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
 }>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
-export { OpenAIChatLanguageModel, type OpenAIChatModelId,
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
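The internal export list now includes the provider option schemas alongside the model classes. A small sketch of consuming one of them, assuming the package's `./internal` subpath export (the names below are taken from the new export line; the subpath itself is an assumption):

```ts
// Validate raw provider options against the exported Zod schema.
import {
  openaiProviderOptions,
  type OpenAIProviderOptions,
} from '@ai-sdk/openai/internal';

const parsed: OpenAIProviderOptions = openaiProviderOptions.parse({
  structuredOutputs: true, // new optional flag added in this release
});
```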
package/dist/internal/index.d.ts
CHANGED
The hunks for this file are identical to those shown above for package/dist/internal/index.d.mts.