@ai-sdk/openai 4.0.0-beta.4 → 4.0.0-beta.40
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +387 -22
- package/README.md +2 -0
- package/dist/index.d.ts +162 -45
- package/dist/index.js +2341 -1572
- package/dist/index.js.map +1 -1
- package/dist/internal/index.d.ts +174 -51
- package/dist/internal/index.js +2110 -1593
- package/dist/internal/index.js.map +1 -1
- package/docs/03-openai.mdx +274 -9
- package/package.json +13 -14
- package/src/chat/convert-openai-chat-usage.ts +2 -2
- package/src/chat/convert-to-openai-chat-messages.ts +33 -18
- package/src/chat/map-openai-finish-reason.ts +2 -2
- package/src/chat/openai-chat-language-model.ts +62 -158
- package/src/chat/openai-chat-options.ts +5 -0
- package/src/chat/openai-chat-prepare-tools.ts +6 -6
- package/src/completion/convert-openai-completion-usage.ts +2 -2
- package/src/completion/convert-to-openai-completion-prompt.ts +2 -2
- package/src/completion/map-openai-finish-reason.ts +2 -2
- package/src/completion/openai-completion-language-model.ts +40 -23
- package/src/embedding/openai-embedding-model.ts +23 -6
- package/src/files/openai-files-api.ts +17 -0
- package/src/files/openai-files-options.ts +18 -0
- package/src/files/openai-files.ts +102 -0
- package/src/image/openai-image-model.ts +28 -11
- package/src/image/openai-image-options.ts +3 -0
- package/src/index.ts +2 -0
- package/src/openai-config.ts +6 -6
- package/src/openai-language-model-capabilities.ts +3 -2
- package/src/openai-provider.ts +54 -21
- package/src/openai-tools.ts +12 -1
- package/src/responses/convert-openai-responses-usage.ts +2 -2
- package/src/responses/convert-to-openai-responses-input.ts +194 -39
- package/src/responses/map-openai-responses-finish-reason.ts +2 -2
- package/src/responses/openai-responses-api.ts +136 -2
- package/src/responses/openai-responses-language-model.ts +252 -39
- package/src/responses/openai-responses-options.ts +24 -2
- package/src/responses/openai-responses-prepare-tools.ts +47 -14
- package/src/responses/openai-responses-provider-metadata.ts +10 -0
- package/src/skills/openai-skills-api.ts +31 -0
- package/src/skills/openai-skills.ts +87 -0
- package/src/speech/openai-speech-model.ts +25 -8
- package/src/tool/apply-patch.ts +33 -32
- package/src/tool/code-interpreter.ts +40 -41
- package/src/tool/custom.ts +2 -8
- package/src/tool/file-search.ts +2 -2
- package/src/tool/image-generation.ts +2 -2
- package/src/tool/local-shell.ts +2 -2
- package/src/tool/mcp.ts +2 -2
- package/src/tool/shell.ts +9 -4
- package/src/tool/tool-search.ts +98 -0
- package/src/tool/web-search-preview.ts +2 -2
- package/src/tool/web-search.ts +2 -2
- package/src/transcription/openai-transcription-model.ts +26 -9
- package/dist/index.d.mts +0 -1107
- package/dist/index.mjs +0 -6508
- package/dist/index.mjs.map +0 -1
- package/dist/internal/index.d.mts +0 -1137
- package/dist/internal/index.mjs +0 -6321
- package/dist/internal/index.mjs.map +0 -1
@@ -1,25 +1,28 @@
 import {
-
-
-
-
-
-
-
-
-
-  SharedV3Warning,
+  LanguageModelV4,
+  LanguageModelV4CallOptions,
+  LanguageModelV4Content,
+  LanguageModelV4FinishReason,
+  LanguageModelV4GenerateResult,
+  LanguageModelV4StreamPart,
+  LanguageModelV4StreamResult,
+  SharedV4ProviderMetadata,
+  SharedV4Warning,
 } from '@ai-sdk/provider';
 import {
   FetchFunction,
   ParseResult,
+  StreamingToolCallTracker,
   combineHeaders,
   createEventSourceResponseHandler,
   createJsonResponseHandler,
   generateId,
-
+  isCustomReasoning,
   parseProviderOptions,
   postJsonToApi,
+  serializeModelOptions,
+  WORKFLOW_DESERIALIZE,
+  WORKFLOW_SERIALIZE,
 } from '@ai-sdk/provider-utils';
 import { openaiFailedResponseHandler } from '../openai-error';
 import { getOpenAILanguageModelCapabilities } from '../openai-language-model-capabilities';

@@ -43,13 +46,13 @@ import { prepareChatTools } from './openai-chat-prepare-tools';

 type OpenAIChatConfig = {
   provider: string;
-  headers
+  headers?: () => Record<string, string | undefined>;
   url: (options: { modelId: string; path: string }) => string;
   fetch?: FetchFunction;
 };

-export class OpenAIChatLanguageModel implements
-  readonly specificationVersion = '
+export class OpenAIChatLanguageModel implements LanguageModelV4 {
+  readonly specificationVersion = 'v4';

   readonly modelId: OpenAIChatModelId;

@@ -59,6 +62,20 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {

   private readonly config: OpenAIChatConfig;

+  static [WORKFLOW_SERIALIZE](model: OpenAIChatLanguageModel) {
+    return serializeModelOptions({
+      modelId: model.modelId,
+      config: model.config,
+    });
+  }
+
+  static [WORKFLOW_DESERIALIZE](options: {
+    modelId: OpenAIChatModelId;
+    config: OpenAIChatConfig;
+  }) {
+    return new OpenAIChatLanguageModel(options.modelId, options.config);
+  }
+
   constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig) {
     this.modelId = modelId;
     this.config = config;
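The new `WORKFLOW_SERIALIZE` / `WORKFLOW_DESERIALIZE` statics (the same pair is added to the completion and embedding models further down) reduce a model instance to its `modelId` and `config` and rebuild it later. Below is a minimal sketch of a round trip, not taken from the package: it assumes the snapshot produced by `serializeModelOptions` carries the same `{ modelId, config }` pair the deserialize hook accepts, which this diff does not actually show.

```ts
import {
  WORKFLOW_DESERIALIZE,
  WORKFLOW_SERIALIZE,
} from '@ai-sdk/provider-utils';
import { OpenAIChatLanguageModel } from './openai-chat-language-model';

declare const model: OpenAIChatLanguageModel;

// Assumption: the snapshot round-trips into the { modelId, config } shape.
const snapshot: any = OpenAIChatLanguageModel[WORKFLOW_SERIALIZE](model);
// ...persist or transfer the snapshot across a workflow boundary here...
const restored = OpenAIChatLanguageModel[WORKFLOW_DESERIALIZE](snapshot);
```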
@@ -81,9 +98,10 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
     seed,
     tools,
     toolChoice,
+    reasoning,
     providerOptions,
-  }:
-  const warnings:
+  }: LanguageModelV4CallOptions) {
+    const warnings: SharedV4Warning[] = [];

     // Parse provider options
     const openaiOptions =

@@ -94,6 +112,12 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
     })) ?? {};

     const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+
+    // AI SDK reasoning values map directly to the OpenAI reasoning values.
+    const resolvedReasoningEffort =
+      openaiOptions.reasoningEffort ??
+      (isCustomReasoning(reasoning) ? reasoning : undefined);
+
     const isReasoningModel =
       openaiOptions.forceReasoning ?? modelCapabilities.isReasoningModel;

@@ -168,7 +192,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
       store: openaiOptions.store,
       metadata: openaiOptions.metadata,
       prediction: openaiOptions.prediction,
-      reasoning_effort:
+      reasoning_effort: resolvedReasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
       prompt_cache_retention: openaiOptions.promptCacheRetention,

@@ -184,7 +208,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
     // when reasoning effort is none, gpt-5.1 models allow temperature, topP, logprobs
     // https://platform.openai.com/docs/guides/latest-model#gpt-5-1-parameter-compatibility
     if (
-
+      resolvedReasoningEffort !== 'none' ||
       !modelCapabilities.supportsNonReasoningParameters
     ) {
       if (baseArgs.temperature != null) {
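The effort resolution above is small but easy to miss: an explicit `reasoningEffort` provider option wins, and only a custom value passed through the standard `reasoning` call option falls through to `reasoning_effort` (which in turn gates the temperature/topP/logprobs check). A standalone sketch of that precedence with simplified, assumed types (string-only values and a stand-in predicate are illustration only):

```ts
// Stand-in for the SDK's isCustomReasoning check (assumed behavior).
function isCustomReasoning(value: string | undefined): value is string {
  return value != null;
}

function resolveReasoningEffort(
  providerReasoningEffort: string | undefined, // openaiOptions.reasoningEffort
  reasoning: string | undefined, // standard call option
): string | undefined {
  return (
    providerReasoningEffort ??
    (isCustomReasoning(reasoning) ? reasoning : undefined)
  );
}

resolveReasoningEffort('low', 'high'); // -> 'low'   (provider option wins)
resolveReasoningEffort(undefined, 'none'); // -> 'none' (call option forwarded)
resolveReasoningEffort(undefined, undefined); // -> undefined
```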
@@ -314,8 +338,8 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
   }

   async doGenerate(
-    options:
-  ): Promise<
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4GenerateResult> {
     const { args: body, warnings } = await this.getArgs(options);

     const {

@@ -327,7 +351,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
       path: '/chat/completions',
       modelId: this.modelId,
     }),
-    headers: combineHeaders(this.config.headers(), options.headers),
+    headers: combineHeaders(this.config.headers?.(), options.headers),
     body,
     failedResponseHandler: openaiFailedResponseHandler,
     successfulResponseHandler: createJsonResponseHandler(
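The `headers` field on the config types changed from required to optional (see the `OpenAIChatConfig` hunk above), so every request-building site now calls it with optional chaining. A small sketch of the difference, using the same config shape:

```ts
type Config = {
  headers?: () => Record<string, string | undefined>;
};

const withHeaders: Config = {
  headers: () => ({ Authorization: 'Bearer <token>' }),
};
const bareConfig: Config = {};

withHeaders.headers?.(); // -> { Authorization: 'Bearer <token>' }
bareConfig.headers?.();  // -> undefined; a plain headers() call would throw here
```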
@@ -338,7 +362,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
     });

     const choice = response.choices[0];
-    const content: Array<
+    const content: Array<LanguageModelV4Content> = [];

     // text content:
     const text = choice.message.content;

@@ -369,8 +393,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {

     // provider metadata:
     const completionTokenDetails = response.usage?.completion_tokens_details;
-    const
-    const providerMetadata: SharedV3ProviderMetadata = { openai: {} };
+    const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
     if (completionTokenDetails?.accepted_prediction_tokens != null) {
       providerMetadata.openai.acceptedPredictionTokens =
         completionTokenDetails?.accepted_prediction_tokens;

@@ -402,8 +425,8 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
   }

   async doStream(
-    options:
-  ): Promise<
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4StreamResult> {
     const { args, warnings } = await this.getArgs(options);

     const body = {

@@ -419,7 +442,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
       path: '/chat/completions',
       modelId: this.modelId,
     }),
-    headers: combineHeaders(this.config.headers(), options.headers),
+    headers: combineHeaders(this.config.headers?.(), options.headers),
     body,
     failedResponseHandler: openaiFailedResponseHandler,
     successfulResponseHandler: createEventSourceResponseHandler(

@@ -429,17 +452,9 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
     fetch: this.config.fetch,
   });

-
-
-
-      function: {
-        name: string;
-        arguments: string;
-      };
-      hasFinished: boolean;
-    }> = [];
-
-    let finishReason: LanguageModelV3FinishReason = {
+    let toolCallTracker: StreamingToolCallTracker;
+
+    let finishReason: LanguageModelV4FinishReason = {
       unified: 'other',
       raw: undefined,
     };

@@ -447,15 +462,19 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
     let metadataExtracted = false;
     let isActiveText = false;

-    const providerMetadata:
+    const providerMetadata: SharedV4ProviderMetadata = { openai: {} };

     return {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult<OpenAIChatChunk>,
-
+          LanguageModelV4StreamPart
         >({
           start(controller) {
+            toolCallTracker = new StreamingToolCallTracker(controller, {
+              generateId,
+              typeValidation: 'if-present',
+            });
             controller.enqueue({ type: 'stream-start', warnings });
           },

@@ -547,124 +566,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {

           if (delta.tool_calls != null) {
             for (const toolCallDelta of delta.tool_calls) {
-
-
-              // Tool call start. OpenAI returns all information except the arguments in the first chunk.
-              if (toolCalls[index] == null) {
-                if (
-                  toolCallDelta.type != null &&
-                  toolCallDelta.type !== 'function'
-                ) {
-                  throw new InvalidResponseDataError({
-                    data: toolCallDelta,
-                    message: `Expected 'function' type.`,
-                  });
-                }
-
-                if (toolCallDelta.id == null) {
-                  throw new InvalidResponseDataError({
-                    data: toolCallDelta,
-                    message: `Expected 'id' to be a string.`,
-                  });
-                }
-
-                if (toolCallDelta.function?.name == null) {
-                  throw new InvalidResponseDataError({
-                    data: toolCallDelta,
-                    message: `Expected 'function.name' to be a string.`,
-                  });
-                }
-
-                controller.enqueue({
-                  type: 'tool-input-start',
-                  id: toolCallDelta.id,
-                  toolName: toolCallDelta.function.name,
-                });
-
-                toolCalls[index] = {
-                  id: toolCallDelta.id,
-                  type: 'function',
-                  function: {
-                    name: toolCallDelta.function.name,
-                    arguments: toolCallDelta.function.arguments ?? '',
-                  },
-                  hasFinished: false,
-                };
-
-                const toolCall = toolCalls[index];
-
-                if (
-                  toolCall.function?.name != null &&
-                  toolCall.function?.arguments != null
-                ) {
-                  // send delta if the argument text has already started:
-                  if (toolCall.function.arguments.length > 0) {
-                    controller.enqueue({
-                      type: 'tool-input-delta',
-                      id: toolCall.id,
-                      delta: toolCall.function.arguments,
-                    });
-                  }
-
-                  // check if tool call is complete
-                  // (some providers send the full tool call in one chunk):
-                  if (isParsableJson(toolCall.function.arguments)) {
-                    controller.enqueue({
-                      type: 'tool-input-end',
-                      id: toolCall.id,
-                    });
-
-                    controller.enqueue({
-                      type: 'tool-call',
-                      toolCallId: toolCall.id ?? generateId(),
-                      toolName: toolCall.function.name,
-                      input: toolCall.function.arguments,
-                    });
-                    toolCall.hasFinished = true;
-                  }
-                }
-
-                continue;
-              }
-
-              // existing tool call, merge if not finished
-              const toolCall = toolCalls[index];
-
-              if (toolCall.hasFinished) {
-                continue;
-              }
-
-              if (toolCallDelta.function?.arguments != null) {
-                toolCall.function!.arguments +=
-                  toolCallDelta.function?.arguments ?? '';
-              }
-
-              // send delta
-              controller.enqueue({
-                type: 'tool-input-delta',
-                id: toolCall.id,
-                delta: toolCallDelta.function.arguments ?? '',
-              });
-
-              // check if tool call is complete
-              if (
-                toolCall.function?.name != null &&
-                toolCall.function?.arguments != null &&
-                isParsableJson(toolCall.function.arguments)
-              ) {
-                controller.enqueue({
-                  type: 'tool-input-end',
-                  id: toolCall.id,
-                });
-
-                controller.enqueue({
-                  type: 'tool-call',
-                  toolCallId: toolCall.id ?? generateId(),
-                  toolName: toolCall.function.name,
-                  input: toolCall.function.arguments,
-                });
-                toolCall.hasFinished = true;
-              }
+              toolCallTracker.processDelta(toolCallDelta);
             }
           }

@@ -687,6 +589,8 @@ export class OpenAIChatLanguageModel implements LanguageModelV3 {
             controller.enqueue({ type: 'text-end', id: '0' });
           }

+          toolCallTracker.flush();
+
           controller.enqueue({
             type: 'finish',
             finishReason,
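The large removal above replaces the hand-rolled per-index tool-call bookkeeping with `StreamingToolCallTracker.processDelta()` plus a final `flush()`. The tracker's internals are not shown in this diff; the sketch below reconstructs the behavior the removed code implemented (emit `tool-input-start` on the first chunk, forward argument deltas, then emit `tool-input-end` and `tool-call` once the accumulated arguments parse as JSON). Treat it as an illustration of that lifecycle, not as the tracker's actual implementation.

```ts
type ToolCallDelta = {
  index: number;
  id?: string;
  function?: { name?: string; arguments?: string };
};

type StreamPart =
  | { type: 'tool-input-start'; id: string; toolName: string }
  | { type: 'tool-input-delta'; id: string; delta: string }
  | { type: 'tool-input-end'; id: string }
  | { type: 'tool-call'; toolCallId: string; toolName: string; input: string };

function createToolCallAccumulator(emit: (part: StreamPart) => void) {
  const calls = new Map<
    number,
    { id: string; name: string; args: string; done: boolean }
  >();

  return (delta: ToolCallDelta): void => {
    let call = calls.get(delta.index);

    // First chunk for this index: OpenAI sends id and name up front.
    if (call == null) {
      call = {
        id: delta.id ?? '',
        name: delta.function?.name ?? '',
        args: '',
        done: false,
      };
      calls.set(delta.index, call);
      emit({ type: 'tool-input-start', id: call.id, toolName: call.name });
    }

    if (call.done) return;

    // Accumulate argument text and forward it as a delta.
    const chunk = delta.function?.arguments ?? '';
    if (chunk.length > 0) {
      call.args += chunk;
      emit({ type: 'tool-input-delta', id: call.id, delta: chunk });
    }

    // Once the buffered arguments are valid JSON, the call is complete.
    try {
      JSON.parse(call.args);
    } catch {
      return; // arguments still streaming
    }
    emit({ type: 'tool-input-end', id: call.id });
    emit({
      type: 'tool-call',
      toolCallId: call.id,
      toolName: call.name,
      input: call.args,
    });
    call.done = true;
  };
}
```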
@@ -51,8 +51,13 @@ export type OpenAIChatModelId =
   | 'gpt-5.2-chat-latest'
   | 'gpt-5.2-pro'
   | 'gpt-5.2-pro-2025-12-11'
+  | 'gpt-5.3-chat-latest'
   | 'gpt-5.4'
   | 'gpt-5.4-2026-03-05'
+  | 'gpt-5.4-mini'
+  | 'gpt-5.4-mini-2026-03-17'
+  | 'gpt-5.4-nano'
+  | 'gpt-5.4-nano-2026-03-17'
   | 'gpt-5.4-pro'
   | 'gpt-5.4-pro-2026-03-05'
   | (string & {});
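Aside from the new IDs, the `(string & {})` tail is what keeps `OpenAIChatModelId` open-ended: any string is accepted (for fine-tunes or not-yet-listed models) while editors still autocomplete the listed literals. A two-line illustration of the pattern:

```ts
type ModelId = 'gpt-5.4' | 'gpt-5.4-pro' | (string & {});

const known: ModelId = 'gpt-5.4'; // suggested by the editor
const custom: ModelId = 'ft:gpt-5.4:my-org:custom-suffix'; // still type-checks
```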
@@ -1,6 +1,6 @@
 import {
-
-
+  LanguageModelV4CallOptions,
+  SharedV4Warning,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import {

@@ -12,17 +12,17 @@ export function prepareChatTools({
   tools,
   toolChoice,
 }: {
-  tools:
-  toolChoice?:
+  tools: LanguageModelV4CallOptions['tools'];
+  toolChoice?: LanguageModelV4CallOptions['toolChoice'];
 }): {
   tools?: OpenAIChatFunctionTool[];
   toolChoice?: OpenAIChatToolChoice;
-  toolWarnings: Array<
+  toolWarnings: Array<SharedV4Warning>;
 } {
   // when the tools array is empty, change it to undefined to prevent errors:
   tools = tools?.length ? tools : undefined;

-  const toolWarnings:
+  const toolWarnings: SharedV4Warning[] = [];

   if (tools == null) {
     return { tools: undefined, toolChoice: undefined, toolWarnings };

@@ -1,4 +1,4 @@
-import {
+import { LanguageModelV4Usage } from '@ai-sdk/provider';

 export type OpenAICompletionUsage = {
   prompt_tokens?: number | null;

@@ -8,7 +8,7 @@ export type OpenAICompletionUsage = {

 export function convertOpenAICompletionUsage(
   usage: OpenAICompletionUsage | undefined | null,
-):
+): LanguageModelV4Usage {
   if (usage == null) {
     return {
       inputTokens: {

@@ -1,6 +1,6 @@
 import {
   InvalidPromptError,
-
+  LanguageModelV4Prompt,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';

@@ -9,7 +9,7 @@ export function convertToOpenAICompletionPrompt({
   user = 'user',
   assistant = 'assistant',
 }: {
-  prompt:
+  prompt: LanguageModelV4Prompt;
   user?: string;
   assistant?: string;
 }): {

@@ -1,8 +1,8 @@
-import {
+import { LanguageModelV4FinishReason } from '@ai-sdk/provider';

 export function mapOpenAIFinishReason(
   finishReason: string | null | undefined,
-):
+): LanguageModelV4FinishReason['unified'] {
   switch (finishReason) {
     case 'stop':
       return 'stop';
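These hunks show the same finish-reason split used in the chat model above: the mapper returns only the `unified` half of `LanguageModelV4FinishReason`, and the raw provider string travels next to it (`{ unified: 'other', raw: undefined }` is the initial value in the streaming code). A sketch of how the two halves are assumed to combine; the union members beyond 'stop' and 'other' are not visible in this diff and are guessed for illustration:

```ts
// Assumed, simplified shape of LanguageModelV4FinishReason.
type FinishReason = {
  unified: 'stop' | 'length' | 'tool-calls' | 'error' | 'other';
  raw: string | undefined;
};

declare function mapOpenAIFinishReason(
  finishReason: string | null | undefined,
): FinishReason['unified'];

const raw = 'stop'; // finish_reason from the OpenAI response
const finishReason: FinishReason = {
  unified: mapOpenAIFinishReason(raw),
  raw,
};
```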
@@ -1,12 +1,12 @@
 import {
-
-
-
-
-
-
-
-
+  LanguageModelV4,
+  LanguageModelV4CallOptions,
+  LanguageModelV4FinishReason,
+  LanguageModelV4GenerateResult,
+  LanguageModelV4StreamPart,
+  LanguageModelV4StreamResult,
+  SharedV4ProviderMetadata,
+  SharedV4Warning,
 } from '@ai-sdk/provider';
 import {
   combineHeaders,

@@ -16,6 +16,9 @@ import {
   parseProviderOptions,
   ParseResult,
   postJsonToApi,
+  serializeModelOptions,
+  WORKFLOW_DESERIALIZE,
+  WORKFLOW_SERIALIZE,
 } from '@ai-sdk/provider-utils';
 import { openaiFailedResponseHandler } from '../openai-error';
 import {

@@ -37,13 +40,13 @@ import {

 type OpenAICompletionConfig = {
   provider: string;
-  headers
+  headers?: () => Record<string, string | undefined>;
   url: (options: { modelId: string; path: string }) => string;
   fetch?: FetchFunction;
 };

-export class OpenAICompletionLanguageModel implements
-  readonly specificationVersion = '
+export class OpenAICompletionLanguageModel implements LanguageModelV4 {
+  readonly specificationVersion = 'v4';

   readonly modelId: OpenAICompletionModelId;

@@ -53,6 +56,20 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
     return this.config.provider.split('.')[0].trim();
   }

+  static [WORKFLOW_SERIALIZE](model: OpenAICompletionLanguageModel) {
+    return serializeModelOptions({
+      modelId: model.modelId,
+      config: model.config,
+    });
+  }
+
+  static [WORKFLOW_DESERIALIZE](options: {
+    modelId: OpenAICompletionModelId;
+    config: OpenAICompletionConfig;
+  }) {
+    return new OpenAICompletionLanguageModel(options.modelId, options.config);
+  }
+
   constructor(
     modelId: OpenAICompletionModelId,
     config: OpenAICompletionConfig,

@@ -83,8 +100,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
     toolChoice,
     seed,
     providerOptions,
-  }:
-  const warnings:
+  }: LanguageModelV4CallOptions) {
+    const warnings: SharedV4Warning[] = [];

     // Parse provider options
     const openaiOptions = {

@@ -161,8 +178,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
   }

   async doGenerate(
-    options:
-  ): Promise<
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4GenerateResult> {
     const { args, warnings } = await this.getArgs(options);

     const {

@@ -174,7 +191,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
       path: '/completions',
       modelId: this.modelId,
     }),
-    headers: combineHeaders(this.config.headers(), options.headers),
+    headers: combineHeaders(this.config.headers?.(), options.headers),
     body: args,
     failedResponseHandler: openaiFailedResponseHandler,
     successfulResponseHandler: createJsonResponseHandler(

@@ -186,7 +203,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {

     const choice = response.choices[0];

-    const providerMetadata:
+    const providerMetadata: SharedV4ProviderMetadata = { openai: {} };

     if (choice.logprobs != null) {
       providerMetadata.openai.logprobs = choice.logprobs;

@@ -211,8 +228,8 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
   }

   async doStream(
-    options:
-  ): Promise<
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4StreamResult> {
     const { args, warnings } = await this.getArgs(options);

     const body = {

@@ -229,7 +246,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
       path: '/completions',
      modelId: this.modelId,
     }),
-    headers: combineHeaders(this.config.headers(), options.headers),
+    headers: combineHeaders(this.config.headers?.(), options.headers),
     body,
     failedResponseHandler: openaiFailedResponseHandler,
     successfulResponseHandler: createEventSourceResponseHandler(

@@ -239,11 +256,11 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
     fetch: this.config.fetch,
   });

-    let finishReason:
+    let finishReason: LanguageModelV4FinishReason = {
       unified: 'other',
       raw: undefined,
     };
-    const providerMetadata:
+    const providerMetadata: SharedV4ProviderMetadata = { openai: {} };
     let usage: OpenAICompletionUsage | undefined = undefined;
     let isFirstChunk = true;

@@ -251,7 +268,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV3 {
     stream: response.pipeThrough(
       new TransformStream<
         ParseResult<OpenAICompletionChunk>,
-
+        LanguageModelV4StreamPart
       >({
         start(controller) {
           controller.enqueue({ type: 'stream-start', warnings });

@@ -1,5 +1,5 @@
 import {
-
+  EmbeddingModelV4,
   TooManyEmbeddingValuesForCallError,
 } from '@ai-sdk/provider';
 import {

@@ -7,6 +7,9 @@ import {
   createJsonResponseHandler,
   parseProviderOptions,
   postJsonToApi,
+  serializeModelOptions,
+  WORKFLOW_DESERIALIZE,
+  WORKFLOW_SERIALIZE,
 } from '@ai-sdk/provider-utils';
 import { OpenAIConfig } from '../openai-config';
 import { openaiFailedResponseHandler } from '../openai-error';

@@ -16,14 +19,28 @@ import {
 } from './openai-embedding-options';
 import { openaiTextEmbeddingResponseSchema } from './openai-embedding-api';

-export class OpenAIEmbeddingModel implements
-  readonly specificationVersion = '
+export class OpenAIEmbeddingModel implements EmbeddingModelV4 {
+  readonly specificationVersion = 'v4';
   readonly modelId: OpenAIEmbeddingModelId;
   readonly maxEmbeddingsPerCall = 2048;
   readonly supportsParallelCalls = true;

   private readonly config: OpenAIConfig;

+  static [WORKFLOW_SERIALIZE](model: OpenAIEmbeddingModel) {
+    return serializeModelOptions({
+      modelId: model.modelId,
+      config: model.config,
+    });
+  }
+
+  static [WORKFLOW_DESERIALIZE](options: {
+    modelId: OpenAIEmbeddingModelId;
+    config: OpenAIConfig;
+  }) {
+    return new OpenAIEmbeddingModel(options.modelId, options.config);
+  }
+
   get provider(): string {
     return this.config.provider;
   }
@@ -38,8 +55,8 @@ export class OpenAIEmbeddingModel implements EmbeddingModelV3 {
     headers,
     abortSignal,
     providerOptions,
-  }: Parameters<
-    Awaited<ReturnType<
+  }: Parameters<EmbeddingModelV4['doEmbed']>[0]): Promise<
+    Awaited<ReturnType<EmbeddingModelV4['doEmbed']>>
   > {
     if (values.length > this.maxEmbeddingsPerCall) {
       throw new TooManyEmbeddingValuesForCallError({
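The embedding model now derives its `doEmbed` signature from the interface instead of restating it: `Parameters<EmbeddingModelV4['doEmbed']>[0]` is the options type and `Awaited<ReturnType<...>>` the resolved result type, so the method cannot drift from the spec version it implements. The same pattern in isolation, with a stand-in method type (the real `EmbeddingModelV4` shape is not spelled out in this diff):

```ts
// Stand-in for an interface method like EmbeddingModelV4['doEmbed'].
type DoEmbed = (options: {
  values: string[];
  headers?: Record<string, string>;
}) => Promise<{ embeddings: number[][] }>;

type DoEmbedOptions = Parameters<DoEmbed>[0]; // { values: string[]; headers?: ... }
type DoEmbedResult = Awaited<ReturnType<DoEmbed>>; // { embeddings: number[][] }

const opts: DoEmbedOptions = { values: ['hello world'] };
```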
@@ -67,7 +84,7 @@ export class OpenAIEmbeddingModel implements EmbeddingModelV3 {
       path: '/embeddings',
       modelId: this.modelId,
     }),
-    headers: combineHeaders(this.config.headers(), headers),
+    headers: combineHeaders(this.config.headers?.(), headers),
     body: {
       model: this.modelId,
       input: values,

@@ -0,0 +1,17 @@
+import { lazySchema, zodSchema } from '@ai-sdk/provider-utils';
+import { z } from 'zod/v4';
+
+export const openaiFilesResponseSchema = lazySchema(() =>
+  zodSchema(
+    z.object({
+      id: z.string(),
+      object: z.string().nullish(),
+      bytes: z.number().nullish(),
+      created_at: z.number().nullish(),
+      filename: z.string().nullish(),
+      purpose: z.string().nullish(),
+      status: z.string().nullish(),
+      expires_at: z.number().nullish(),
+    }),
+  ),
+);
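For reference, a payload that would satisfy the new `openaiFilesResponseSchema`: only `id` is required, every other field is nullish. The values below are illustrative, not taken from the API:

```ts
const exampleFileResponse = {
  id: 'file-abc123',
  object: 'file',
  bytes: 120_000,
  created_at: 1_700_000_000,
  filename: 'training.jsonl',
  purpose: 'fine-tune',
  status: 'processed',
  expires_at: null,
};
```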