@ai-sdk/openai 1.0.12 → 1.0.14
- package/CHANGELOG.md +18 -0
- package/dist/index.js +75 -41
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +75 -41
- package/dist/index.mjs.map +1 -1
- package/internal/dist/index.js +52 -40
- package/internal/dist/index.js.map +1 -1
- package/internal/dist/index.mjs +52 -40
- package/internal/dist/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -514,7 +514,7 @@ var OpenAIChatLanguageModel = class {
     }
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h
+    var _a, _b, _c, _d, _e, _f, _g, _h;
     const { args: body, warnings } = this.getArgs(options);
     const { responseHeaders, value: response } = await postJsonToApi({
       url: this.config.url({
@@ -532,18 +532,23 @@ var OpenAIChatLanguageModel = class {
     });
     const { messages: rawPrompt, ...rawSettings } = body;
     const choice = response.choices[0];
-
-
-
-
-
-
-
-
-
+    const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
+    const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+    const providerMetadata = { openai: {} };
+    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
+      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
+    }
+    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
+      providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
+    }
+    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
+      providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
+    }
+    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
+      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
     }
     return {
-      text: (
+      text: (_c = choice.message.content) != null ? _c : void 0,
       toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
         {
           toolCallType: "function",
@@ -551,7 +556,7 @@ var OpenAIChatLanguageModel = class {
           toolName: choice.message.function_call.name,
           args: choice.message.function_call.arguments
         }
-      ] : (
+      ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
         var _a2;
         return {
           toolCallType: "function",
@@ -562,8 +567,8 @@ var OpenAIChatLanguageModel = class {
       }),
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: (
-        completionTokens: (
+        promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
+        completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
       },
       rawCall: { rawPrompt, rawSettings },
       rawResponse: { headers: responseHeaders },
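Net effect of the `doGenerate` changes above: prediction-token and cached-token counts from the OpenAI response are now exposed under `providerMetadata.openai`. A minimal sketch of reading them, assuming the `ai` package's `generateText` (in ai 4.0 the property is `experimental_providerMetadata`):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch, not part of the diff: key names follow the providerMetadata fields set above.
const result = await generateText({
  model: openai("gpt-4o"),
  prompt: "Summarize Hamlet in two sentences.",
});

const meta = result.experimental_providerMetadata?.openai;
console.log(meta?.reasoningTokens);          // from completion_tokens_details.reasoning_tokens
console.log(meta?.acceptedPredictionTokens); // from completion_tokens_details.accepted_prediction_tokens
console.log(meta?.rejectedPredictionTokens); // from completion_tokens_details.rejected_prediction_tokens
console.log(meta?.cachedPromptTokens);       // from prompt_tokens_details.cached_tokens
```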
@@ -642,12 +647,12 @@ var OpenAIChatLanguageModel = class {
     let logprobs;
     let isFirstChunk = true;
     const { useLegacyFunctionCalling } = this.settings;
-
+    const providerMetadata = { openai: {} };
     return {
       stream: response.pipeThrough(
         new TransformStream({
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -667,22 +672,27 @@ var OpenAIChatLanguageModel = class {
              });
            }
            if (value.usage != null) {
-              usage = {
-                promptTokens: (_a = value.usage.prompt_tokens) != null ? _a : void 0,
-                completionTokens: (_b = value.usage.completion_tokens) != null ? _b : void 0
-              };
              const {
-
-
+                prompt_tokens,
+                completion_tokens,
+                prompt_tokens_details,
+                completion_tokens_details
              } = value.usage;
-
-
-
-
-
-
-
-
+              usage = {
+                promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
+                completionTokens: completion_tokens != null ? completion_tokens : void 0
+              };
+              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
+                providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+              }
+              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
+                providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+              }
+              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
+                providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+              }
+              if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
+                providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
              }
            }
            const choice = value.choices[0];
@@ -730,7 +740,7 @@ var OpenAIChatLanguageModel = class {
                  message: `Expected 'id' to be a string.`
                });
              }
-              if (((
+              if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
                throw new InvalidResponseDataError({
                  data: toolCallDelta,
                  message: `Expected 'function.name' to be a string.`
@@ -741,12 +751,12 @@ var OpenAIChatLanguageModel = class {
                type: "function",
                function: {
                  name: toolCallDelta.function.name,
-                  arguments: (
+                  arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
                },
                hasFinished: false
              };
              const toolCall2 = toolCalls[index];
-              if (((
+              if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
                if (toolCall2.function.arguments.length > 0) {
                  controller.enqueue({
                    type: "tool-call-delta",
@@ -760,7 +770,7 @@ var OpenAIChatLanguageModel = class {
                  controller.enqueue({
                    type: "tool-call",
                    toolCallType: "function",
-                    toolCallId: (
+                    toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
                    toolName: toolCall2.function.name,
                    args: toolCall2.function.arguments
                  });
@@ -773,21 +783,21 @@ var OpenAIChatLanguageModel = class {
              if (toolCall.hasFinished) {
                continue;
              }
-              if (((
-                toolCall.function.arguments += (
+              if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
+                toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
              }
              controller.enqueue({
                type: "tool-call-delta",
                toolCallType: "function",
                toolCallId: toolCall.id,
                toolName: toolCall.function.name,
-                argsTextDelta: (
+                argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
              });
-              if (((
+              if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                controller.enqueue({
                  type: "tool-call",
                  toolCallType: "function",
-                  toolCallId: (
+                  toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
                  toolName: toolCall.function.name,
                  args: toolCall.function.arguments
                });
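The streaming hunks mirror `doGenerate`: usage details are accumulated into the same `providerMetadata` object and emitted when the stream finishes. A sketch, assuming `streamText` from the `ai` package (which returns its result synchronously in ai 4.0):

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch, not part of the diff.
const { fullStream } = streamText({
  model: openai("gpt-4o"),
  prompt: "Write a haiku about diffs.",
});

for await (const part of fullStream) {
  if (part.type === "finish") {
    // Populated from the final usage chunk's *_details fields, when present.
    console.log(part.experimental_providerMetadata?.openai);
  }
}
```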
@@ -825,7 +835,9 @@ var openaiTokenUsageSchema = z2.object({
     cached_tokens: z2.number().nullish()
   }).nullish(),
   completion_tokens_details: z2.object({
-    reasoning_tokens: z2.number().nullish()
+    reasoning_tokens: z2.number().nullish(),
+    accepted_prediction_tokens: z2.number().nullish(),
+    rejected_prediction_tokens: z2.number().nullish()
   }).nullish()
 }).nullish();
 var openaiChatResponseSchema = z2.object({
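For reference, the widened `openaiTokenUsageSchema` accepts a usage payload shaped like the following (values illustrative; every `*_details` field stays optional):

```ts
// Illustrative only: a usage object the widened schema now parses.
const usage = {
  prompt_tokens: 1151,
  completion_tokens: 173,
  prompt_tokens_details: { cached_tokens: 1024 },
  completion_tokens_details: {
    reasoning_tokens: 0,
    accepted_prediction_tokens: 18,
    rejected_prediction_tokens: 2
  }
};
```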
@@ -1386,12 +1398,20 @@ import {
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
 import { z as z5 } from "zod";
+var modelMaxImagesPerCall = {
+  "dall-e-3": 1,
+  "dall-e-2": 10
+};
 var OpenAIImageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v1";
     this.modelId = modelId;
     this.config = config;
   }
+  get maxImagesPerCall() {
+    var _a;
+    return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
+  }
   get provider() {
     return this.config.provider;
   }
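`maxImagesPerCall` is the hint that the `ai` package's `experimental_generateImage` uses to split a request for `n` images into batches. A sketch of the resulting behavior, assuming that helper:

```ts
import { experimental_generateImage as generateImage } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch, not part of the diff: n is split into ceil(n / maxImagesPerCall) API calls.
const { images } = await generateImage({
  model: openai.image("dall-e-2"), // maxImagesPerCall = 10
  prompt: "A watercolor fox",
  n: 25, // three API calls: 10 + 10 + 5
});
// For "dall-e-3" (maxImagesPerCall = 1), n: 4 would issue four single-image calls.
```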
@@ -1399,11 +1419,24 @@ var OpenAIImageModel = class {
     prompt,
     n,
     size,
+    aspectRatio,
+    seed,
     providerOptions,
     headers,
     abortSignal
   }) {
     var _a;
+    const warnings = [];
+    if (aspectRatio != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "aspectRatio",
+        details: "This model does not support aspect ratio. Use `size` instead."
+      });
+    }
+    if (seed != null) {
+      warnings.push({ type: "unsupported-setting", setting: "seed" });
+    }
     const { value: response } = await postJsonToApi4({
       url: this.config.url({
         path: "/images/generations",
@@ -1426,7 +1459,8 @@ var OpenAIImageModel = class {
       fetch: this.config.fetch
     });
     return {
-      images: response.data.map((item) => item.b64_json)
+      images: response.data.map((item) => item.b64_json),
+      warnings
     };
   }
 };
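With `warnings` wired into the return value, unsupported image settings now surface on the result instead of being silently dropped. A sketch, same assumptions as above:

```ts
import { experimental_generateImage as generateImage } from "ai";
import { openai } from "@ai-sdk/openai";

// Sketch, not part of the diff.
const { warnings } = await generateImage({
  model: openai.image("dall-e-3"),
  prompt: "A watercolor fox",
  aspectRatio: "16:9", // unsupported by the OpenAI image API; use `size`
  seed: 42,            // unsupported as well
});
console.log(warnings);
// -> [{ type: "unsupported-setting", setting: "aspectRatio", details: "..." },
//     { type: "unsupported-setting", setting: "seed" }]
```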