@ai-sdk/openai 2.0.0-canary.10 → 2.0.0-canary.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +23 -0
- package/dist/index.d.mts +11 -50
- package/dist/index.d.ts +11 -50
- package/dist/index.js +273 -301
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +278 -305
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +30 -39
- package/dist/internal/index.d.ts +30 -39
- package/dist/internal/index.js +273 -294
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +276 -298
- package/dist/internal/index.mjs.map +1 -1
- package/internal.d.ts +1 -0
- package/package.json +5 -4
package/dist/internal/index.d.mts
CHANGED
@@ -1,4 +1,4 @@
-import { LanguageModelV2, EmbeddingModelV2,
+import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV1CallOptions, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
import { FetchFunction } from '@ai-sdk/provider-utils';
import { z } from 'zod';

@@ -11,16 +11,6 @@ declare const openaiProviderOptions: z.ZodObject<{
* the GPT tokenizer) to an associated bias value from -100 to 100.
*/
logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
-/**
-* Return the log probabilities of the tokens.
-*
-* Setting to true will return the log probabilities of the tokens that
-* were generated.
-*
-* Setting to a number will return the log probabilities of the top n
-* tokens that were generated.
-*/
-logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
/**
* Whether to enable parallel function calling during tool use. Default to true.
*/
@@ -53,7 +43,6 @@ declare const openaiProviderOptions: z.ZodObject<{
}, "strip", z.ZodTypeAny, {
user?: string | undefined;
logitBias?: Record<number, number> | undefined;
-logprobs?: number | boolean | undefined;
parallelToolCalls?: boolean | undefined;
reasoningEffort?: "low" | "medium" | "high" | undefined;
maxCompletionTokens?: number | undefined;
@@ -63,7 +52,6 @@ declare const openaiProviderOptions: z.ZodObject<{
}, {
user?: string | undefined;
logitBias?: Record<number, number> | undefined;
-logprobs?: number | boolean | undefined;
parallelToolCalls?: boolean | undefined;
reasoningEffort?: "low" | "medium" | "high" | undefined;
maxCompletionTokens?: number | undefined;
@@ -105,11 +93,11 @@ declare class OpenAIChatLanguageModel implements LanguageModelV2 {
}

type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
-
+declare const openaiCompletionProviderOptions: z.ZodObject<{
/**
Echo back the prompt in addition to the completion.
*/
-echo
+echo: z.ZodOptional<z.ZodBoolean>;
/**
Modify the likelihood of specified tokens appearing in the completion.

@@ -123,30 +111,29 @@ interface OpenAICompletionSettings {

As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
token from being generated.
-
-logitBias
-/**
-Return the log probabilities of the tokens. Including logprobs will increase
-the response size and can slow down response times. However, it can
-be useful to better understand how the model is behaving.
-
-Setting to true will return the log probabilities of the tokens that
-were generated.
-
-Setting to a number will return the log probabilities of the top n
-tokens that were generated.
-*/
-logprobs?: boolean | number;
+*/
+logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
/**
The suffix that comes after a completion of inserted text.
-
-suffix
+*/
+suffix: z.ZodOptional<z.ZodString>;
/**
A unique identifier representing your end-user, which can help OpenAI to
monitor and detect abuse. Learn more.
-
-user
-}
+*/
+user: z.ZodOptional<z.ZodString>;
+}, "strip", z.ZodTypeAny, {
+user?: string | undefined;
+logitBias?: Record<string, number> | undefined;
+echo?: boolean | undefined;
+suffix?: string | undefined;
+}, {
+user?: string | undefined;
+logitBias?: Record<string, number> | undefined;
+echo?: boolean | undefined;
+suffix?: string | undefined;
+}>;
+type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;

type OpenAICompletionConfig = {
provider: string;
@@ -161,9 +148,9 @@ type OpenAICompletionConfig = {
declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
readonly specificationVersion = "v2";
readonly modelId: OpenAICompletionModelId;
-readonly settings: OpenAICompletionSettings;
private readonly config;
-
+private get providerOptionsName();
+constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
get provider(): string;
getSupportedUrls(): Promise<Record<string, RegExp[]>>;
private getArgs;
@@ -227,6 +214,7 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {

type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
+declare const hasDefaultResponseFormat: Set<string>;
interface OpenAIImageSettings {
/**
Override the maximum number of images per call (default is dependent on the
@@ -240,7 +228,7 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
currentDate?: () => Date;
};
}
-declare class OpenAIImageModel implements
+declare class OpenAIImageModel implements ImageModelV2 {
readonly modelId: OpenAIImageModelId;
private readonly settings;
private readonly config;
@@ -248,7 +236,7 @@ declare class OpenAIImageModel implements ImageModelV1 {
get maxImagesPerCall(): number;
get provider(): string;
constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
-doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<
+doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV2['doGenerate']>>>;
}

type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
@@ -366,6 +354,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
}, "strip", z.ZodTypeAny, {
user?: string | null | undefined;
parallelToolCalls?: boolean | null | undefined;
@@ -375,6 +364,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
instructions?: string | null | undefined;
previousResponseId?: string | null | undefined;
strictSchemas?: boolean | null | undefined;
+reasoningSummary?: string | null | undefined;
}, {
user?: string | null | undefined;
parallelToolCalls?: boolean | null | undefined;
@@ -384,7 +374,8 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
instructions?: string | null | undefined;
previousResponseId?: string | null | undefined;
strictSchemas?: boolean | null | undefined;
+reasoningSummary?: string | null | undefined;
}>;
type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

-export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
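In this range the completion model drops its settings constructor argument and the standalone OpenAICompletionSettings interface; echo, logitBias, suffix, and user are now validated through the exported openaiCompletionProviderOptions Zod schema (with string token-ID keys for logitBias), and logprobs disappears from both the chat and completion option shapes. A minimal sketch of the new option shape, mirrored locally from the declarations above rather than imported from the package; the .strict() parse is only to illustrate which keys remain, since the real schema is declared with "strip" and would silently drop unknown keys:

import { z } from 'zod';

// Local mirror of openaiCompletionProviderOptions as declared in this diff
// (a hypothetical stand-in; the real schema ships in the package's internal build).
const completionProviderOptions = z.object({
  echo: z.boolean().optional(),
  logitBias: z.record(z.string(), z.number()).optional(), // token IDs as string keys
  suffix: z.string().optional(),
  user: z.string().optional(),
});

type OpenAICompletionProviderOptions = z.infer<typeof completionProviderOptions>;

// logprobs is no longer part of the option shape, so a strict parse rejects it.
const result = completionProviderOptions.strict().safeParse({
  echo: true,
  logitBias: { '50256': -100 }, // suppress <|endoftext|>
  logprobs: true,
});

console.log(result.success); // false: 'logprobs' is an unrecognized key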
package/dist/internal/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { LanguageModelV2, EmbeddingModelV2,
+import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV1CallOptions, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
import { FetchFunction } from '@ai-sdk/provider-utils';
import { z } from 'zod';

@@ -11,16 +11,6 @@ declare const openaiProviderOptions: z.ZodObject<{
* the GPT tokenizer) to an associated bias value from -100 to 100.
*/
logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
-/**
-* Return the log probabilities of the tokens.
-*
-* Setting to true will return the log probabilities of the tokens that
-* were generated.
-*
-* Setting to a number will return the log probabilities of the top n
-* tokens that were generated.
-*/
-logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
/**
* Whether to enable parallel function calling during tool use. Default to true.
*/
@@ -53,7 +43,6 @@ declare const openaiProviderOptions: z.ZodObject<{
}, "strip", z.ZodTypeAny, {
user?: string | undefined;
logitBias?: Record<number, number> | undefined;
-logprobs?: number | boolean | undefined;
parallelToolCalls?: boolean | undefined;
reasoningEffort?: "low" | "medium" | "high" | undefined;
maxCompletionTokens?: number | undefined;
@@ -63,7 +52,6 @@ declare const openaiProviderOptions: z.ZodObject<{
}, {
user?: string | undefined;
logitBias?: Record<number, number> | undefined;
-logprobs?: number | boolean | undefined;
parallelToolCalls?: boolean | undefined;
reasoningEffort?: "low" | "medium" | "high" | undefined;
maxCompletionTokens?: number | undefined;
@@ -105,11 +93,11 @@ declare class OpenAIChatLanguageModel implements LanguageModelV2 {
}

type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
-
+declare const openaiCompletionProviderOptions: z.ZodObject<{
/**
Echo back the prompt in addition to the completion.
*/
-echo
+echo: z.ZodOptional<z.ZodBoolean>;
/**
Modify the likelihood of specified tokens appearing in the completion.

@@ -123,30 +111,29 @@ interface OpenAICompletionSettings {

As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
token from being generated.
-
-logitBias
-/**
-Return the log probabilities of the tokens. Including logprobs will increase
-the response size and can slow down response times. However, it can
-be useful to better understand how the model is behaving.
-
-Setting to true will return the log probabilities of the tokens that
-were generated.
-
-Setting to a number will return the log probabilities of the top n
-tokens that were generated.
-*/
-logprobs?: boolean | number;
+*/
+logitBias: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodNumber>>;
/**
The suffix that comes after a completion of inserted text.
-
-suffix
+*/
+suffix: z.ZodOptional<z.ZodString>;
/**
A unique identifier representing your end-user, which can help OpenAI to
monitor and detect abuse. Learn more.
-
-user
-}
+*/
+user: z.ZodOptional<z.ZodString>;
+}, "strip", z.ZodTypeAny, {
+user?: string | undefined;
+logitBias?: Record<string, number> | undefined;
+echo?: boolean | undefined;
+suffix?: string | undefined;
+}, {
+user?: string | undefined;
+logitBias?: Record<string, number> | undefined;
+echo?: boolean | undefined;
+suffix?: string | undefined;
+}>;
+type OpenAICompletionProviderOptions = z.infer<typeof openaiCompletionProviderOptions>;

type OpenAICompletionConfig = {
provider: string;
@@ -161,9 +148,9 @@ type OpenAICompletionConfig = {
declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
readonly specificationVersion = "v2";
readonly modelId: OpenAICompletionModelId;
-readonly settings: OpenAICompletionSettings;
private readonly config;
-
+private get providerOptionsName();
+constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
get provider(): string;
getSupportedUrls(): Promise<Record<string, RegExp[]>>;
private getArgs;
@@ -227,6 +214,7 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {

type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
+declare const hasDefaultResponseFormat: Set<string>;
interface OpenAIImageSettings {
/**
Override the maximum number of images per call (default is dependent on the
@@ -240,7 +228,7 @@ interface OpenAIImageModelConfig extends OpenAIConfig {
currentDate?: () => Date;
};
}
-declare class OpenAIImageModel implements
+declare class OpenAIImageModel implements ImageModelV2 {
readonly modelId: OpenAIImageModelId;
private readonly settings;
private readonly config;
@@ -248,7 +236,7 @@ declare class OpenAIImageModel implements ImageModelV1 {
get maxImagesPerCall(): number;
get provider(): string;
constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
-doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<
+doGenerate({ prompt, n, size, aspectRatio, seed, providerOptions, headers, abortSignal, }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<ImageModelV2['doGenerate']>>>;
}

type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
@@ -366,6 +354,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
}, "strip", z.ZodTypeAny, {
user?: string | null | undefined;
parallelToolCalls?: boolean | null | undefined;
@@ -375,6 +364,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
instructions?: string | null | undefined;
previousResponseId?: string | null | undefined;
strictSchemas?: boolean | null | undefined;
+reasoningSummary?: string | null | undefined;
}, {
user?: string | null | undefined;
parallelToolCalls?: boolean | null | undefined;
@@ -384,7 +374,8 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
instructions?: string | null | undefined;
previousResponseId?: string | null | undefined;
strictSchemas?: boolean | null | undefined;
+reasoningSummary?: string | null | undefined;
}>;
type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

-export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, type OpenAIChatSettings, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, type OpenAIEmbeddingSettings, OpenAIImageModel, type OpenAIImageModelId, type OpenAIImageSettings, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionModelOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
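The Responses provider options also gain an optional, nullable reasoningSummary string on both sides of the schema. A usage sketch under stated assumptions: it presumes the AI SDK's call-level providerOptions routing under the openai key and the provider's openai.responses() factory, and the model id and the 'auto' value are illustrative rather than taken from this diff:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Ask a Responses API model for a reasoning summary.
// Per the schema above, reasoningSummary is typed as string | null | undefined;
// 'auto' and the model id below are assumptions for illustration only.
const { text } = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'Explain the Monty Hall problem in two sentences.',
  providerOptions: {
    openai: { reasoningSummary: 'auto' },
  },
});

console.log(text);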