@ai-sdk/openai 2.0.0-canary.12 → 2.0.0-canary.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +24 -0
- package/dist/index.d.mts +8 -26
- package/dist/index.d.ts +8 -26
- package/dist/index.js +200 -180
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +200 -180
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +31 -47
- package/dist/internal/index.d.ts +31 -47
- package/dist/internal/index.js +196 -174
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +195 -174
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.d.mts
CHANGED

@@ -40,6 +40,12 @@ declare const openaiProviderOptions: z.ZodObject<{
      * Parameters for prediction mode.
      */
     prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
+    /**
+     * Whether to use structured outputs.
+     *
+     * @default true
+     */
+    structuredOutputs: z.ZodOptional<z.ZodBoolean>;
 }, "strip", z.ZodTypeAny, {
     user?: string | undefined;
     logitBias?: Record<number, number> | undefined;
@@ -49,6 +55,7 @@ declare const openaiProviderOptions: z.ZodObject<{
     store?: boolean | undefined;
     metadata?: Record<string, string> | undefined;
     prediction?: Record<string, any> | undefined;
+    structuredOutputs?: boolean | undefined;
 }, {
     user?: string | undefined;
     logitBias?: Record<number, number> | undefined;
@@ -58,16 +65,9 @@ declare const openaiProviderOptions: z.ZodObject<{
     store?: boolean | undefined;
     metadata?: Record<string, string> | undefined;
     prediction?: Record<string, any> | undefined;
+    structuredOutputs?: boolean | undefined;
 }>;
 type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
-interface OpenAIChatSettings {
-    /**
-    Whether to use structured outputs. Defaults to false.
-
-    When enabled, tool calls and object generation will be strict and follow the provided schema.
-    */
-    structuredOutputs?: boolean;
-}

 type OpenAIChatConfig = {
     provider: string;
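The structuredOutputs flag moves from the removed OpenAIChatSettings interface into openaiProviderOptions, and its default flips from false to true. A minimal sketch of disabling it for a single call, assuming the AI SDK v5 canary generateObject API:

```ts
import { openai } from '@ai-sdk/openai';
import { generateObject } from 'ai';
import { z } from 'zod';

// Structured outputs are now on by default (@default true); pass the
// provider option to opt out per call instead of constructing the model
// with a settings object.
const { object } = await generateObject({
  model: openai('gpt-4o'),
  schema: z.object({ city: z.string(), population: z.number() }),
  prompt: 'Describe the largest city in France.',
  providerOptions: {
    openai: { structuredOutputs: false },
  },
});
```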
@@ -82,11 +82,12 @@ type OpenAIChatConfig = {
 declare class OpenAIChatLanguageModel implements LanguageModelV2 {
     readonly specificationVersion = "v2";
     readonly modelId: OpenAIChatModelId;
-    readonly
+    readonly supportedUrls: {
+        'image/*': RegExp[];
+    };
     private readonly config;
-    constructor(modelId: OpenAIChatModelId,
+    constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
     get provider(): string;
-    getSupportedUrls(): Promise<Record<string, RegExp[]>>;
     private getArgs;
     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
     doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
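URL support on the chat model is now a readonly field rather than the async getSupportedUrls() method, and the constructor drops its settings parameter. A sketch of the call-site change, assuming a model instance obtained from the provider:

```ts
import { openai } from '@ai-sdk/openai';

const model = openai('gpt-4o');

// before (canary.12): const supported = await model.getSupportedUrls();
// after (canary.14): a synchronous readonly property.
const supported = model.supportedUrls;

// The concrete patterns are illustrative; the declaration only guarantees
// an 'image/*' key mapping to RegExp[].
const canSendImageUrl = supported['image/*'].some(pattern =>
  pattern.test('https://example.com/photo.png'),
);
```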
@@ -152,7 +153,7 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
     private get providerOptionsName();
     constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
     get provider(): string;
-
+    readonly supportedUrls: Record<string, RegExp[]>;
     private getArgs;
     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
     doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
@@ -170,16 +171,6 @@ type OpenAIConfig = {
 };

 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
-interface OpenAIEmbeddingSettings {
-    /**
-    Override the maximum number of embeddings per call.
-    */
-    maxEmbeddingsPerCall?: number;
-    /**
-    Override the parallelism of embedding calls.
-    */
-    supportsParallelCalls?: boolean;
-}
 declare const openaiEmbeddingProviderOptions: z.ZodObject<{
     /**
     The number of dimensions the resulting output embeddings should have.
@@ -203,16 +194,15 @@ type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOpti
 declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
     readonly specificationVersion = "v2";
     readonly modelId: OpenAIEmbeddingModelId;
+    readonly maxEmbeddingsPerCall = 2048;
+    readonly supportsParallelCalls = true;
     private readonly config;
-    private readonly settings;
     get provider(): string;
-
-    get supportsParallelCalls(): boolean;
-    constructor(modelId: OpenAIEmbeddingModelId, settings: OpenAIEmbeddingSettings, config: OpenAIConfig);
+    constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
     doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
 }

-type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
 declare const hasDefaultResponseFormat: Set<string>;
 interface OpenAIImageSettings {
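OpenAIEmbeddingSettings is gone: the per-call limit and parallelism are no longer overridable and are fixed at maxEmbeddingsPerCall = 2048 and supportsParallelCalls = true. A sketch of batching against the fixed limit, using the AI SDK embedMany helper:

```ts
import { openai } from '@ai-sdk/openai';
import { embedMany } from 'ai';

const values = Array.from({ length: 5000 }, (_, i) => `document chunk ${i}`);

// embedMany reads maxEmbeddingsPerCall from the model, so 5000 inputs are
// split into ceil(5000 / 2048) = 3 requests; note that no settings argument
// is passed to the embedding model anymore.
const { embeddings } = await embedMany({
  model: openai.embedding('text-embedding-3-small'),
  values,
});
```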
@@ -232,7 +222,7 @@ declare class OpenAIImageModel implements ImageModelV2 {
     readonly modelId: OpenAIImageModelId;
     private readonly settings;
     private readonly config;
-    readonly specificationVersion = "
+    readonly specificationVersion = "v2";
     get maxImagesPerCall(): number;
     get provider(): string;
     constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
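'gpt-image-1' joins the accepted image model ids. A sketch using the AI SDK's experimental image helper, assuming it is wired to ImageModelV2 in the matching canary:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

const { image } = await generateImage({
  model: openai.image('gpt-image-1'),
  prompt: 'A watercolor lighthouse at dusk',
});
```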
@@ -240,43 +230,35 @@ declare class OpenAIImageModel implements ImageModelV2 {
 }

 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
-
+declare const openAITranscriptionProviderOptions: z.ZodObject<{
     /**
      * Additional information to include in the transcription response.
      */
-    include
+    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodString, "many">>>;
     /**
      * The language of the input audio in ISO-639-1 format.
      */
-    language
+    language: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     /**
      * An optional text to guide the model's style or continue a previous audio segment.
      */
-    prompt
+    prompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     /**
      * The sampling temperature, between 0 and 1.
      * @default 0
      */
-    temperature
+    temperature: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodNumber>>>;
     /**
      * The timestamp granularities to populate for this transcription.
      * @default ['segment']
      */
-
-};
-
-declare const openAIProviderOptionsSchema: z.ZodObject<{
-    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodString, "many">>>;
-    language: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    prompt: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    temperature: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodNumber>>>;
-    timestampGranularities: z.ZodDefault<z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>>;
+    timestampGranularities: z.ZodOptional<z.ZodNullable<z.ZodDefault<z.ZodArray<z.ZodEnum<["word", "segment"]>, "many">>>>;
 }, "strip", z.ZodTypeAny, {
-    temperature: number | null;
-    timestampGranularities: ("word" | "segment")[] | null;
     prompt?: string | null | undefined;
+    temperature?: number | null | undefined;
     include?: string[] | null | undefined;
     language?: string | null | undefined;
+    timestampGranularities?: ("word" | "segment")[] | null | undefined;
 }, {
     prompt?: string | null | undefined;
     temperature?: number | null | undefined;
@@ -284,9 +266,11 @@ declare const openAIProviderOptionsSchema: z.ZodObject<{
     language?: string | null | undefined;
     timestampGranularities?: ("word" | "segment")[] | null | undefined;
 }>;
+type OpenAITranscriptionProviderOptions = z.infer<typeof openAITranscriptionProviderOptions>;
+
 type OpenAITranscriptionCallOptions = Omit<TranscriptionModelV1CallOptions, 'providerOptions'> & {
     providerOptions?: {
-        openai?:
+        openai?: OpenAITranscriptionProviderOptions;
     };
 };
 interface OpenAITranscriptionModelConfig extends OpenAIConfig {
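Transcription provider options are now validated by the exported openAITranscriptionProviderOptions schema (renamed from openAIProviderOptionsSchema), with temperature and timestampGranularities becoming optional in the parsed output. A sketch, assuming the AI SDK's experimental transcription helper from the matching canary:

```ts
import { readFile } from 'node:fs/promises';
import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai';

const { text } = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('meeting.wav'),
  providerOptions: {
    openai: {
      language: 'en',                   // ISO-639-1
      temperature: 0,                   // @default 0
      timestampGranularities: ['word'], // @default ['segment']
    },
  },
});
```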
@@ -339,7 +323,7 @@ declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     readonly modelId: OpenAIResponsesModelId;
     private readonly config;
     constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
-
+    readonly supportedUrls: Record<string, RegExp[]>;
     get provider(): string;
     private getArgs;
     doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
@@ -378,4 +362,4 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
 }>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

-export { OpenAIChatLanguageModel, type OpenAIChatModelId,
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
package/dist/internal/index.d.ts
CHANGED

(same changes as package/dist/internal/index.d.mts above)