@ai-sdk/openai 4.0.0-beta.3 → 4.0.0-beta.31
This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- package/CHANGELOG.md +320 -22
- package/README.md +2 -0
- package/dist/index.d.ts +139 -36
- package/dist/index.js +2343 -1490
- package/dist/index.js.map +1 -1
- package/dist/internal/index.d.ts +168 -45
- package/dist/internal/index.js +2112 -1511
- package/dist/internal/index.js.map +1 -1
- package/docs/03-openai.mdx +274 -9
- package/package.json +9 -12
- package/src/chat/convert-openai-chat-usage.ts +2 -2
- package/src/chat/convert-to-openai-chat-messages.ts +26 -15
- package/src/chat/map-openai-finish-reason.ts +2 -2
- package/src/chat/openai-chat-language-model.ts +52 -28
- package/src/chat/openai-chat-options.ts +5 -0
- package/src/chat/openai-chat-prepare-tools.ts +6 -6
- package/src/completion/convert-openai-completion-usage.ts +2 -2
- package/src/completion/convert-to-openai-completion-prompt.ts +2 -2
- package/src/completion/map-openai-finish-reason.ts +2 -2
- package/src/completion/openai-completion-language-model.ts +40 -23
- package/src/embedding/openai-embedding-model.ts +23 -6
- package/src/files/openai-files-api.ts +17 -0
- package/src/files/openai-files-options.ts +18 -0
- package/src/files/openai-files.ts +102 -0
- package/src/image/openai-image-model.ts +28 -11
- package/src/index.ts +2 -0
- package/src/openai-config.ts +6 -6
- package/src/openai-language-model-capabilities.ts +3 -2
- package/src/openai-provider.ts +54 -21
- package/src/openai-tools.ts +12 -1
- package/src/responses/convert-openai-responses-usage.ts +2 -2
- package/src/responses/convert-to-openai-responses-input.ts +211 -37
- package/src/responses/map-openai-responses-finish-reason.ts +2 -2
- package/src/responses/openai-responses-api.ts +136 -2
- package/src/responses/openai-responses-language-model.ts +252 -39
- package/src/responses/openai-responses-options.ts +24 -2
- package/src/responses/openai-responses-prepare-tools.ts +47 -14
- package/src/responses/openai-responses-provider-metadata.ts +10 -0
- package/src/skills/openai-skills-api.ts +31 -0
- package/src/skills/openai-skills.ts +87 -0
- package/src/speech/openai-speech-model.ts +25 -8
- package/src/tool/custom.ts +0 -6
- package/src/tool/shell.ts +7 -2
- package/src/tool/tool-search.ts +98 -0
- package/src/transcription/openai-transcription-model.ts +26 -9
- package/dist/index.d.mts +0 -1107
- package/dist/index.mjs +0 -6497
- package/dist/index.mjs.map +0 -1
- package/dist/internal/index.d.mts +0 -1137
- package/dist/internal/index.mjs +0 -6310
- package/dist/internal/index.mjs.map +0 -1
package/src/chat/openai-chat-language-model.ts

```diff
@@ -1,14 +1,14 @@
 import {
   InvalidResponseDataError,
-  LanguageModelV3,
-  LanguageModelV3CallOptions,
-  LanguageModelV3Content,
-  LanguageModelV3FinishReason,
-  LanguageModelV3GenerateResult,
-  LanguageModelV3StreamPart,
-  LanguageModelV3StreamResult,
-  SharedV3ProviderMetadata,
-  SharedV3Warning,
+  LanguageModelV4,
+  LanguageModelV4CallOptions,
+  LanguageModelV4Content,
+  LanguageModelV4FinishReason,
+  LanguageModelV4GenerateResult,
+  LanguageModelV4StreamPart,
+  LanguageModelV4StreamResult,
+  SharedV4ProviderMetadata,
+  SharedV4Warning,
 } from '@ai-sdk/provider';
 import {
   FetchFunction,
@@ -17,9 +17,13 @@ import {
   createEventSourceResponseHandler,
   createJsonResponseHandler,
   generateId,
+  isCustomReasoning,
   isParsableJson,
   parseProviderOptions,
   postJsonToApi,
+  serializeModelOptions,
+  WORKFLOW_DESERIALIZE,
+  WORKFLOW_SERIALIZE,
 } from '@ai-sdk/provider-utils';
 import { openaiFailedResponseHandler } from '../openai-error';
 import { getOpenAILanguageModelCapabilities } from '../openai-language-model-capabilities';
@@ -43,13 +47,13 @@ import { prepareChatTools } from './openai-chat-prepare-tools';
 
 type OpenAIChatConfig = {
   provider: string;
-  headers: () => Record<string, string | undefined>;
+  headers?: () => Record<string, string | undefined>;
   url: (options: { modelId: string; path: string }) => string;
   fetch?: FetchFunction;
 };
 
-export class OpenAIChatLanguageModel implements LanguageModelV3 {
-  readonly specificationVersion = 'v3';
+export class OpenAIChatLanguageModel implements LanguageModelV4 {
+  readonly specificationVersion = 'v4';
 
   readonly modelId: OpenAIChatModelId;
 
@@ -59,6 +63,20 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
 
   private readonly config: OpenAIChatConfig;
 
+  static [WORKFLOW_SERIALIZE](model: OpenAIChatLanguageModel) {
+    return serializeModelOptions({
+      modelId: model.modelId,
+      config: model.config,
+    });
+  }
+
+  static [WORKFLOW_DESERIALIZE](options: {
+    modelId: OpenAIChatModelId;
+    config: OpenAIChatConfig;
+  }) {
+    return new OpenAIChatLanguageModel(options.modelId, options.config);
+  }
+
   constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig) {
     this.modelId = modelId;
     this.config = config;
@@ -81,9 +99,10 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
     seed,
     tools,
     toolChoice,
+    reasoning,
     providerOptions,
-  }: LanguageModelV3CallOptions) {
-    const warnings: SharedV3Warning[] = [];
+  }: LanguageModelV4CallOptions) {
+    const warnings: SharedV4Warning[] = [];
 
     // Parse provider options
     const openaiOptions =
@@ -94,6 +113,12 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
       })) ?? {};
 
     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+
+    // AI SDK reasoning values map directly to the OpenAI reasoning values.
+    const resolvedReasoningEffort =
+      openaiOptions.reasoningEffort ??
+      (isCustomReasoning(reasoning) ? reasoning : undefined);
+
     const isReasoningModel =
       openaiOptions.forceReasoning ?? modelCapabilities.isReasoningModel;
 
@@ -168,7 +193,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
       store: openaiOptions.store,
       metadata: openaiOptions.metadata,
       prediction: openaiOptions.prediction,
-      reasoning_effort: openaiOptions.reasoningEffort,
+      reasoning_effort: resolvedReasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
       prompt_cache_retention: openaiOptions.promptCacheRetention,
@@ -184,7 +209,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
     // when reasoning effort is none, gpt-5.1 models allow temperature, topP, logprobs
     // https://platform.openai.com/docs/guides/latest-model#gpt-5-1-parameter-compatibility
     if (
-      openaiOptions.reasoningEffort !== 'none' ||
+      resolvedReasoningEffort !== 'none' ||
       !modelCapabilities.supportsNonReasoningParameters
     ) {
       if (baseArgs.temperature != null) {
@@ -314,8 +339,8 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
   }
 
   async doGenerate(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3GenerateResult> {
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4GenerateResult> {
     const { args: body, warnings } = await this.getArgs(options);
 
     const {
@@ -327,7 +352,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
         path: '/chat/completions',
         modelId: this.modelId,
       }),
-      headers: combineHeaders(this.config.headers(), options.headers),
+      headers: combineHeaders(this.config.headers?.(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler(
@@ -338,7 +363,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
     });
 
     const choice = response.choices[0];
-    const content: Array<LanguageModelV3Content> = [];
+    const content: Array<LanguageModelV4Content> = [];
 
     // text content:
     const text = choice.message.content;
@@ -369,8 +394,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
 
     // provider metadata:
    const completionTokenDetails = response.usage?.completion_tokens_details;
-    const
-    const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
+    const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
     if (completionTokenDetails?.accepted_prediction_tokens != null) {
       providerMetadata.openai.acceptedPredictionTokens =
         completionTokenDetails?.accepted_prediction_tokens;
@@ -402,8 +426,8 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
   }
 
   async doStream(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3StreamResult> {
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4StreamResult> {
     const { args, warnings } = await this.getArgs(options);
 
     const body = {
@@ -419,7 +443,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
         path: '/chat/completions',
         modelId: this.modelId,
       }),
-      headers: combineHeaders(this.config.headers(), options.headers),
+      headers: combineHeaders(this.config.headers?.(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createEventSourceResponseHandler(
@@ -439,7 +463,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
       hasFinished: boolean;
     }> = [];
 
-    let finishReason: LanguageModelV3FinishReason = {
+    let finishReason: LanguageModelV4FinishReason = {
       unified: 'other',
       raw: undefined,
    };
@@ -447,13 +471,13 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
     let metadataExtracted = false;
     let isActiveText = false;
 
-    const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
+    const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
 
     return {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult<OpenAIChatChunk>,
-          LanguageModelV3StreamPart
+          LanguageModelV4StreamPart
         >({
           start(controller) {
             controller.enqueue({ type: 'stream-start', warnings });
```
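The substantive change in this file, beyond the V3-to-V4 interface rename, is how reasoning effort is resolved in `getArgs`: an explicit `openai.reasoningEffort` provider option wins, and otherwise a custom `reasoning` call option is forwarded to `reasoning_effort`. A minimal sketch of that precedence, restated outside the class (`resolveReasoningEffort` and its parameter types are hypothetical, not part of the package):

```ts
// Hypothetical restatement of the precedence implemented above.
function resolveReasoningEffort(
  providerOption: string | undefined, // openaiOptions.reasoningEffort
  callOption: unknown, // the new top-level `reasoning` call option
  isCustomReasoning: (value: unknown) => value is string,
): string | undefined {
  // 1. An explicit provider option always wins.
  // 2. Otherwise a custom reasoning value is forwarded verbatim.
  // 3. Otherwise the field stays unset and the API default applies.
  return providerOption ?? (isCustomReasoning(callOption) ? callOption : undefined);
}
```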
package/src/chat/openai-chat-options.ts

```diff
@@ -51,8 +51,13 @@ export type OpenAIChatModelId =
   | 'gpt-5.2-chat-latest'
   | 'gpt-5.2-pro'
   | 'gpt-5.2-pro-2025-12-11'
+  | 'gpt-5.3-chat-latest'
   | 'gpt-5.4'
   | 'gpt-5.4-2026-03-05'
+  | 'gpt-5.4-mini'
+  | 'gpt-5.4-mini-2026-03-17'
+  | 'gpt-5.4-nano'
+  | 'gpt-5.4-nano-2026-03-17'
   | 'gpt-5.4-pro'
   | 'gpt-5.4-pro-2026-03-05'
   | (string & {});
```
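The id union still ends in `(string & {})`, the standard trick for an open string-literal union: the listed ids keep editor autocomplete while arbitrary ids continue to typecheck. A minimal illustration (`MyModelId` is a stand-in type, not from the package):

```ts
type MyModelId = 'gpt-5.4' | 'gpt-5.4-mini' | (string & {});

const known: MyModelId = 'gpt-5.4-mini'; // offered by autocomplete
const custom: MyModelId = 'my-gateway-model'; // still accepted
```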
package/src/chat/openai-chat-prepare-tools.ts

```diff
@@ -1,6 +1,6 @@
 import {
-  LanguageModelV3CallOptions,
-  SharedV3Warning,
+  LanguageModelV4CallOptions,
+  SharedV4Warning,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import {
@@ -12,17 +12,17 @@ export function prepareChatTools({
   tools,
   toolChoice,
 }: {
-  tools: LanguageModelV3CallOptions['tools'];
-  toolChoice?: LanguageModelV3CallOptions['toolChoice'];
+  tools: LanguageModelV4CallOptions['tools'];
+  toolChoice?: LanguageModelV4CallOptions['toolChoice'];
 }): {
   tools?: OpenAIChatFunctionTool[];
   toolChoice?: OpenAIChatToolChoice;
-  toolWarnings: Array<SharedV3Warning>;
+  toolWarnings: Array<SharedV4Warning>;
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
   tools = tools?.length ? tools : undefined;
 
-  const toolWarnings: SharedV3Warning[] = [];
+  const toolWarnings: SharedV4Warning[] = [];
 
   if (tools == null) {
     return { tools: undefined, toolChoice: undefined, toolWarnings };
```
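Note the `tools?.length ? tools : undefined` normalization: per the inline comment it exists to prevent errors, presumably because omitting the field is safe where an empty `tools` array is not. The same guard in isolation (`normalizeTools` is a hypothetical helper):

```ts
function normalizeTools<T>(tools: T[] | undefined): T[] | undefined {
  // Omit the field entirely rather than sending an empty array.
  return tools?.length ? tools : undefined;
}
```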
package/src/completion/convert-openai-completion-usage.ts

```diff
@@ -1,4 +1,4 @@
-import { LanguageModelV3Usage } from '@ai-sdk/provider';
+import { LanguageModelV4Usage } from '@ai-sdk/provider';
 
 export type OpenAICompletionUsage = {
   prompt_tokens?: number | null;
@@ -8,7 +8,7 @@ export type OpenAICompletionUsage = {
 
 export function convertOpenAICompletionUsage(
   usage: OpenAICompletionUsage | undefined | null,
-): LanguageModelV3Usage {
+): LanguageModelV4Usage {
   if (usage == null) {
     return {
       inputTokens: {
```
package/src/completion/convert-to-openai-completion-prompt.ts

```diff
@@ -1,6 +1,6 @@
 import {
   InvalidPromptError,
-  LanguageModelV3Prompt,
+  LanguageModelV4Prompt,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 
@@ -9,7 +9,7 @@ export function convertToOpenAICompletionPrompt({
   user = 'user',
   assistant = 'assistant',
 }: {
-  prompt: LanguageModelV3Prompt;
+  prompt: LanguageModelV4Prompt;
   user?: string;
   assistant?: string;
 }): {
```
package/src/completion/map-openai-finish-reason.ts

```diff
@@ -1,8 +1,8 @@
-import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
+import { LanguageModelV4FinishReason } from '@ai-sdk/provider';
 
 export function mapOpenAIFinishReason(
   finishReason: string | null | undefined,
-): LanguageModelV3FinishReason['unified'] {
+): LanguageModelV4FinishReason['unified'] {
   switch (finishReason) {
     case 'stop':
       return 'stop';
```
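Throughout these hunks the V4 finish reason is a `{ unified, raw }` pair rather than a bare string: `mapOpenAIFinishReason` now produces only the `unified` half, and the provider's raw value travels alongside it. A sketch of how the pair is assembled, covering only the cases visible in this diff (the real mapper handles more):

```ts
function toFinishReason(raw: string | null | undefined): {
  unified: string;
  raw: string | undefined;
} {
  // 'stop' maps through unchanged; anything unrecognized becomes 'other',
  // but the original provider value is preserved in `raw`.
  return { unified: raw === 'stop' ? 'stop' : 'other', raw: raw ?? undefined };
}
```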
package/src/completion/openai-completion-language-model.ts

```diff
@@ -1,12 +1,12 @@
 import {
-  LanguageModelV3,
-  LanguageModelV3CallOptions,
-  LanguageModelV3FinishReason,
-  LanguageModelV3GenerateResult,
-  LanguageModelV3StreamPart,
-  LanguageModelV3StreamResult,
-  SharedV3ProviderMetadata,
-  SharedV3Warning,
+  LanguageModelV4,
+  LanguageModelV4CallOptions,
+  LanguageModelV4FinishReason,
+  LanguageModelV4GenerateResult,
+  LanguageModelV4StreamPart,
+  LanguageModelV4StreamResult,
+  SharedV4ProviderMetadata,
+  SharedV4Warning,
 } from '@ai-sdk/provider';
 import {
   combineHeaders,
@@ -16,6 +16,9 @@ import {
   parseProviderOptions,
   ParseResult,
   postJsonToApi,
+  serializeModelOptions,
+  WORKFLOW_DESERIALIZE,
+  WORKFLOW_SERIALIZE,
 } from '@ai-sdk/provider-utils';
 import { openaiFailedResponseHandler } from '../openai-error';
 import {
@@ -37,13 +40,13 @@ import {
 
 type OpenAICompletionConfig = {
   provider: string;
-  headers: () => Record<string, string | undefined>;
+  headers?: () => Record<string, string | undefined>;
   url: (options: { modelId: string; path: string }) => string;
   fetch?: FetchFunction;
 };
 
-export class OpenAICompletionLanguageModel implements LanguageModelV3 {
-  readonly specificationVersion = 'v3';
+export class OpenAICompletionLanguageModel implements LanguageModelV4 {
+  readonly specificationVersion = 'v4';
 
   readonly modelId: OpenAICompletionModelId;
 
@@ -53,6 +56,20 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
     return this.config.provider.split('.')[0].trim();
   }
 
+  static [WORKFLOW_SERIALIZE](model: OpenAICompletionLanguageModel) {
+    return serializeModelOptions({
+      modelId: model.modelId,
+      config: model.config,
+    });
+  }
+
+  static [WORKFLOW_DESERIALIZE](options: {
+    modelId: OpenAICompletionModelId;
+    config: OpenAICompletionConfig;
+  }) {
+    return new OpenAICompletionLanguageModel(options.modelId, options.config);
+  }
+
   constructor(
     modelId: OpenAICompletionModelId,
     config: OpenAICompletionConfig,
@@ -83,8 +100,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
     toolChoice,
     seed,
     providerOptions,
-  }: LanguageModelV3CallOptions) {
-    const warnings: SharedV3Warning[] = [];
+  }: LanguageModelV4CallOptions) {
+    const warnings: SharedV4Warning[] = [];
 
     // Parse provider options
     const openaiOptions = {
@@ -161,8 +178,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
   }
 
   async doGenerate(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3GenerateResult> {
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4GenerateResult> {
     const { args, warnings } = await this.getArgs(options);
 
     const {
@@ -174,7 +191,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
         path: '/completions',
         modelId: this.modelId,
       }),
-      headers: combineHeaders(this.config.headers(), options.headers),
+      headers: combineHeaders(this.config.headers?.(), options.headers),
       body: args,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler(
@@ -186,7 +203,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
 
     const choice = response.choices[0];
 
-    const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
+    const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
 
     if (choice.logprobs != null) {
       providerMetadata.openai.logprobs = choice.logprobs;
@@ -211,8 +228,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
   }
 
   async doStream(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3StreamResult> {
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4StreamResult> {
     const { args, warnings } = await this.getArgs(options);
 
     const body = {
@@ -229,7 +246,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
         path: '/completions',
         modelId: this.modelId,
       }),
-      headers: combineHeaders(this.config.headers(), options.headers),
+      headers: combineHeaders(this.config.headers?.(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createEventSourceResponseHandler(
@@ -239,11 +256,11 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
       fetch: this.config.fetch,
     });
 
-    let finishReason: LanguageModelV3FinishReason = {
+    let finishReason: LanguageModelV4FinishReason = {
       unified: 'other',
       raw: undefined,
     };
-    const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
+    const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
     let usage: OpenAICompletionUsage | undefined = undefined;
     let isFirstChunk = true;
 
@@ -251,7 +268,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult<OpenAICompletionChunk>,
-          LanguageModelV3StreamPart
+          LanguageModelV4StreamPart
         >({
           start(controller) {
             controller.enqueue({ type: 'stream-start', warnings });
```
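The `WORKFLOW_SERIALIZE` / `WORKFLOW_DESERIALIZE` statics added here (and to the chat and embedding models) let a model instance be flattened to `{ modelId, config }` and rebuilt later, e.g. on the far side of a workflow boundary. A sketch of the round trip, assuming the class is importable from the package's internal entry point and that the symbols can be invoked directly this way (the diff shows the hooks but not their caller):

```ts
import {
  WORKFLOW_DESERIALIZE,
  WORKFLOW_SERIALIZE,
} from '@ai-sdk/provider-utils';
import { OpenAICompletionLanguageModel } from '@ai-sdk/openai/internal';

declare const model: OpenAICompletionLanguageModel;
declare const modelId: OpenAICompletionLanguageModel['modelId'];
declare const config: ConstructorParameters<
  typeof OpenAICompletionLanguageModel
>[1];

// Flatten to plain, serializable options:
const snapshot = OpenAICompletionLanguageModel[WORKFLOW_SERIALIZE](model);

// Rebuild an equivalent instance elsewhere; the deserializer takes
// { modelId, config }, matching what the serializer captured.
const revived = OpenAICompletionLanguageModel[WORKFLOW_DESERIALIZE]({
  modelId,
  config,
});
```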
package/src/embedding/openai-embedding-model.ts

```diff
@@ -1,5 +1,5 @@
 import {
-  EmbeddingModelV3,
+  EmbeddingModelV4,
   TooManyEmbeddingValuesForCallError,
 } from '@ai-sdk/provider';
 import {
@@ -7,6 +7,9 @@ import {
   createJsonResponseHandler,
   parseProviderOptions,
   postJsonToApi,
+  serializeModelOptions,
+  WORKFLOW_DESERIALIZE,
+  WORKFLOW_SERIALIZE,
 } from '@ai-sdk/provider-utils';
 import { OpenAIConfig } from '../openai-config';
 import { openaiFailedResponseHandler } from '../openai-error';
@@ -16,14 +19,28 @@ import {
 } from './openai-embedding-options';
 import { openaiTextEmbeddingResponseSchema } from './openai-embedding-api';
 
-export class OpenAIEmbeddingModel implements EmbeddingModelV3 {
-  readonly specificationVersion = 'v3';
+export class OpenAIEmbeddingModel implements EmbeddingModelV4 {
+  readonly specificationVersion = 'v4';
   readonly modelId: OpenAIEmbeddingModelId;
   readonly maxEmbeddingsPerCall = 2048;
   readonly supportsParallelCalls = true;
 
   private readonly config: OpenAIConfig;
 
+  static [WORKFLOW_SERIALIZE](model: OpenAIEmbeddingModel) {
+    return serializeModelOptions({
+      modelId: model.modelId,
+      config: model.config,
+    });
+  }
+
+  static [WORKFLOW_DESERIALIZE](options: {
+    modelId: OpenAIEmbeddingModelId;
+    config: OpenAIConfig;
+  }) {
+    return new OpenAIEmbeddingModel(options.modelId, options.config);
+  }
+
   get provider(): string {
     return this.config.provider;
   }
@@ -38,8 +55,8 @@ export class OpenAIEmbeddingModel implements EmbeddingModelV3 {
     headers,
     abortSignal,
     providerOptions,
-  }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<
-    Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>
+  }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<
+    Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>
   > {
     if (values.length > this.maxEmbeddingsPerCall) {
       throw new TooManyEmbeddingValuesForCallError({
@@ -67,7 +84,7 @@ export class OpenAIEmbeddingModel implements EmbeddingModelV3 {
         path: '/embeddings',
         modelId: this.modelId,
       }),
-      headers: combineHeaders(this.config.headers(), headers),
+      headers: combineHeaders(this.config.headers?.(), headers),
       body: {
         model: this.modelId,
         input: values,
```
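`maxEmbeddingsPerCall` remains 2048, and larger batches throw `TooManyEmbeddingValuesForCallError` rather than being split. A minimal client-side chunking sketch (`chunkEmbedAll` is hypothetical; higher-level helpers such as `embedMany` in the `ai` package can split batches for you):

```ts
async function chunkEmbedAll(
  values: string[],
  embed: (batch: string[]) => Promise<number[][]>,
  maxPerCall = 2048, // matches maxEmbeddingsPerCall above
): Promise<number[][]> {
  const embeddings: number[][] = [];
  for (let i = 0; i < values.length; i += maxPerCall) {
    // Each slice stays within the per-call limit enforced by the model.
    embeddings.push(...(await embed(values.slice(i, i + maxPerCall))));
  }
  return embeddings;
}
```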
package/src/files/openai-files-api.ts (new file)

```diff
@@ -0,0 +1,17 @@
+import { lazySchema, zodSchema } from '@ai-sdk/provider-utils';
+import { z } from 'zod/v4';
+
+export const openaiFilesResponseSchema = lazySchema(() =>
+  zodSchema(
+    z.object({
+      id: z.string(),
+      object: z.string().nullish(),
+      bytes: z.number().nullish(),
+      created_at: z.number().nullish(),
+      filename: z.string().nullish(),
+      purpose: z.string().nullish(),
+      status: z.string().nullish(),
+      expires_at: z.number().nullish(),
+    }),
+  ),
+);
```
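Everything except `id` is `.nullish()`, so response parsing tolerates fields that are omitted or explicitly `null`. The same behavior shown with plain zod (the `lazySchema`/`zodSchema` wrappers above defer construction and adapt the schema to the SDK's validation interface):

```ts
import { z } from 'zod/v4';

const shape = z.object({
  id: z.string(),
  filename: z.string().nullish(),
});

shape.parse({ id: 'file_123' }); // ok: `filename` omitted
shape.parse({ id: 'file_123', filename: null }); // ok: explicit null
// shape.parse({}) would throw, because `id` is required.
```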
package/src/files/openai-files-options.ts (new file)

```diff
@@ -0,0 +1,18 @@
+import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
+import { z } from 'zod/v4';
+
+export const openaiFilesOptionsSchema = lazySchema(() =>
+  zodSchema(
+    z.object({
+      /*
+       * Required by the OpenAI API, but optional here because
+       * the SDK defaults to "assistants" — by far the most common
+       * purpose when uploading files in this context.
+       */
+      purpose: z.string().optional(),
+      expiresAfter: z.number().optional(),
+    }),
+  ),
+);
+
+export type OpenAIFilesOptions = InferSchema<typeof openaiFilesOptionsSchema>;
```
package/src/files/openai-files.ts (new file)

```diff
@@ -0,0 +1,102 @@
+import {
+  FilesV4,
+  FilesV4UploadFileCallOptions,
+  FilesV4UploadFileResult,
+} from '@ai-sdk/provider';
+import {
+  combineHeaders,
+  convertBase64ToUint8Array,
+  createJsonResponseHandler,
+  FetchFunction,
+  parseProviderOptions,
+  postFormDataToApi,
+} from '@ai-sdk/provider-utils';
+import { openaiFailedResponseHandler } from '../openai-error';
+import { openaiFilesResponseSchema } from './openai-files-api';
+import {
+  openaiFilesOptionsSchema,
+  OpenAIFilesOptions,
+} from './openai-files-options';
+
+interface OpenAIFilesConfig {
+  provider: string;
+  baseURL: string;
+  headers: () => Record<string, string | undefined>;
+  fetch?: FetchFunction;
+}
+
+export class OpenAIFiles implements FilesV4 {
+  readonly specificationVersion = 'v4';
+
+  get provider(): string {
+    return this.config.provider;
+  }
+
+  constructor(private readonly config: OpenAIFilesConfig) {}
+
+  async uploadFile({
+    data,
+    mediaType,
+    filename,
+    providerOptions,
+  }: FilesV4UploadFileCallOptions): Promise<FilesV4UploadFileResult> {
+    const openaiOptions = (await parseProviderOptions({
+      provider: 'openai',
+      providerOptions,
+      schema: openaiFilesOptionsSchema,
+    })) as OpenAIFilesOptions | undefined;
+
+    const fileBytes =
+      data instanceof Uint8Array ? data : convertBase64ToUint8Array(data);
+
+    const blob = new Blob([fileBytes], {
+      type: mediaType,
+    });
+
+    const formData = new FormData();
+    if (filename != null) {
+      formData.append('file', blob, filename);
+    } else {
+      formData.append('file', blob);
+    }
+    formData.append('purpose', openaiOptions?.purpose ?? 'assistants');
+
+    if (openaiOptions?.expiresAfter != null) {
+      formData.append('expires_after', String(openaiOptions.expiresAfter));
+    }
+
+    const { value: response } = await postFormDataToApi({
+      url: `${this.config.baseURL}/files`,
+      headers: combineHeaders(this.config.headers()),
+      formData,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: createJsonResponseHandler(
+        openaiFilesResponseSchema,
+      ),
+      fetch: this.config.fetch,
+    });
+
+    return {
+      warnings: [],
+      providerReference: { openai: response.id },
+      ...((response.filename ?? filename)
+        ? { filename: response.filename ?? filename }
+        : {}),
+      ...(mediaType != null ? { mediaType } : {}),
+      providerMetadata: {
+        openai: {
+          ...(response.filename != null ? { filename: response.filename } : {}),
+          ...(response.purpose != null ? { purpose: response.purpose } : {}),
+          ...(response.bytes != null ? { bytes: response.bytes } : {}),
+          ...(response.created_at != null
+            ? { createdAt: response.created_at }
+            : {}),
+          ...(response.status != null ? { status: response.status } : {}),
+          ...(response.expires_at != null
+            ? { expiresAt: response.expires_at }
+            : {}),
+        },
+      },
+    };
+  }
+}
```
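A usage sketch for the new files surface. The call-options shape (`data`, `mediaType`, `filename`, `providerOptions`) comes directly from `uploadFile` above; the `openai.files()` accessor is an assumption based on the `openai-provider.ts` changes in the file summary, and its exact name may differ:

```ts
import { openai } from '@ai-sdk/openai';

// Hypothetical accessor; the provider diff is not shown in this section.
const files = openai.files();

const result = await files.uploadFile({
  data: new Uint8Array([0x25, 0x50, 0x44, 0x46]), // raw bytes (a base64 string also works)
  mediaType: 'application/pdf',
  filename: 'report.pdf',
  providerOptions: {
    // `purpose` defaults to 'assistants'; `expiresAfter` is sent as `expires_after`.
    openai: { expiresAfter: 3600 },
  },
});

console.log(result.providerReference); // { openai: 'file_...' }
```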