ai 3.3.34 → 3.3.36
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +27 -0
- package/dist/index.d.mts +106 -66
- package/dist/index.d.ts +106 -66
- package/dist/index.js +283 -224
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +286 -227
- package/dist/index.mjs.map +1 -1
- package/package.json +6 -6
- package/rsc/dist/index.d.ts +0 -4
- package/rsc/dist/rsc-server.d.mts +0 -4
- package/rsc/dist/rsc-server.mjs +11 -0
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/svelte/dist/index.d.mts +13 -16
- package/svelte/dist/index.d.ts +13 -16
- package/svelte/dist/index.js +5 -4
- package/svelte/dist/index.js.map +1 -1
- package/svelte/dist/index.mjs +5 -4
- package/svelte/dist/index.mjs.map +1 -1
package/dist/index.mjs
CHANGED
@@ -1067,6 +1067,7 @@ function prepareCallSettings({
    maxTokens,
    temperature,
    topP,
+   topK,
    presencePenalty,
    frequencyPenalty,
    stopSequences,
@@ -1107,6 +1108,15 @@ function prepareCallSettings({
        });
      }
    }
+   if (topK != null) {
+     if (typeof topK !== "number") {
+       throw new InvalidArgumentError({
+         parameter: "topK",
+         value: topP,
+         message: "topP must be a number"
+       });
+     }
+   }
    if (presencePenalty != null) {
      if (typeof presencePenalty !== "number") {
        throw new InvalidArgumentError({
@@ -1154,6 +1164,7 @@ function prepareCallSettings({
    maxTokens,
    temperature: temperature != null ? temperature : 0,
    topP,
+   topK,
    presencePenalty,
    frequencyPenalty,
    stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
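These three hunks thread a new topK call setting through prepareCallSettings: it is accepted, type-checked, and forwarded to the model. (Note that the validation branch reports value: topP and the message "topP must be a number" for an invalid topK; that copy-paste slip ships in the bundle exactly as printed above.) A minimal sketch of the setting from the caller's side, assuming the public generateText API simply forwards call settings; the provider and model id are illustrative placeholders:

  import { generateText } from "ai";
  import { openai } from "@ai-sdk/openai"; // illustrative provider

  const { text } = await generateText({
    model: openai("gpt-4o-mini"), // placeholder model id
    prompt: "Name three rivers.",
    topK: 40, // new setting: sample only from the 40 most likely tokens (where the provider supports it)
  });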
@@ -2752,64 +2763,20 @@ var experimental_streamObject = streamObject;
  // core/generate-text/generate-text.ts
  import { createIdGenerator as createIdGenerator3 } from "@ai-sdk/provider-utils";
 
- //
- import {
-
-
-
-
-
-
-
-
-
-
-
-
-    return {
-      tools: void 0,
-      toolChoice: void 0
-    };
-  }
-  return {
-    tools: Object.entries(tools).map(([name11, tool2]) => ({
-      type: "function",
-      name: name11,
-      description: tool2.description,
-      parameters: asSchema2(tool2.parameters).jsonSchema
-    })),
-    toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
-  };
-}
-
-// core/generate-text/to-response-messages.ts
-function toResponseMessages({
-  text = "",
-  toolCalls,
-  toolResults
-}) {
-  const responseMessages = [];
-  responseMessages.push({
-    role: "assistant",
-    content: [{ type: "text", text }, ...toolCalls]
-  });
-  if (toolResults.length > 0) {
-    responseMessages.push({
-      role: "tool",
-      content: toolResults.map((result) => ({
-        type: "tool-result",
-        toolCallId: result.toolCallId,
-        toolName: result.toolName,
-        result: result.result
-      }))
-    });
-  }
-  return responseMessages;
-}
-
-// core/generate-text/tool-call.ts
-import { safeParseJSON as safeParseJSON2 } from "@ai-sdk/provider-utils";
-import { asSchema as asSchema3 } from "@ai-sdk/ui-utils";
+// errors/index.ts
+import {
+  AISDKError as AISDKError10,
+  APICallError as APICallError2,
+  EmptyResponseBodyError,
+  InvalidPromptError as InvalidPromptError2,
+  InvalidResponseDataError,
+  JSONParseError,
+  LoadAPIKeyError,
+  NoContentGeneratedError,
+  NoSuchModelError,
+  TypeValidationError as TypeValidationError2,
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
+} from "@ai-sdk/provider";
 
 // errors/invalid-tool-arguments-error.ts
 import { AISDKError as AISDKError7, getErrorMessage as getErrorMessage3 } from "@ai-sdk/provider";
@@ -2897,7 +2864,85 @@ var NoSuchToolError = class extends AISDKError8 {
 };
 _a8 = symbol8;
 
+// core/prompt/message-conversion-error.ts
+import { AISDKError as AISDKError9 } from "@ai-sdk/provider";
+var name9 = "AI_MessageConversionError";
+var marker9 = `vercel.ai.error.${name9}`;
+var symbol9 = Symbol.for(marker9);
+var _a9;
+var MessageConversionError = class extends AISDKError9 {
+  constructor({
+    originalMessage,
+    message
+  }) {
+    super({ name: name9, message });
+    this[_a9] = true;
+    this.originalMessage = originalMessage;
+  }
+  static isInstance(error) {
+    return AISDKError9.hasMarker(error, marker9);
+  }
+};
+_a9 = symbol9;
+
+// core/prompt/prepare-tools-and-tool-choice.ts
+import { asSchema as asSchema2 } from "@ai-sdk/ui-utils";
+
+// core/util/is-non-empty-object.ts
+function isNonEmptyObject(object) {
+  return object != null && Object.keys(object).length > 0;
+}
+
+// core/prompt/prepare-tools-and-tool-choice.ts
+function prepareToolsAndToolChoice({
+  tools,
+  toolChoice
+}) {
+  if (!isNonEmptyObject(tools)) {
+    return {
+      tools: void 0,
+      toolChoice: void 0
+    };
+  }
+  return {
+    tools: Object.entries(tools).map(([name11, tool2]) => ({
+      type: "function",
+      name: name11,
+      description: tool2.description,
+      parameters: asSchema2(tool2.parameters).jsonSchema
+    })),
+    toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
+  };
+}
+
+// core/generate-text/to-response-messages.ts
+function toResponseMessages({
+  text = "",
+  toolCalls,
+  toolResults
+}) {
+  const responseMessages = [];
+  responseMessages.push({
+    role: "assistant",
+    content: [{ type: "text", text }, ...toolCalls]
+  });
+  if (toolResults.length > 0) {
+    responseMessages.push({
+      role: "tool",
+      content: toolResults.map((result) => ({
+        type: "tool-result",
+        toolCallId: result.toolCallId,
+        toolName: result.toolName,
+        result: result.result
+      }))
+    });
+  }
+  return responseMessages;
+}
+
 // core/generate-text/tool-call.ts
+import { safeParseJSON as safeParseJSON2 } from "@ai-sdk/provider-utils";
+import { asSchema as asSchema3 } from "@ai-sdk/ui-utils";
 function parseToolCall({
   toolCall,
   tools
@@ -2946,6 +2991,7 @@ async function generateText({
   headers,
   maxAutomaticRoundtrips = 0,
   maxToolRoundtrips = maxAutomaticRoundtrips,
+  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
   experimental_telemetry: telemetry,
   experimental_providerMetadata: providerMetadata,
   _internal: {
@@ -2955,6 +3001,13 @@ async function generateText({
   ...settings
 }) {
   var _a11;
+  if (maxSteps < 1) {
+    throw new InvalidArgumentError({
+      parameter: "maxSteps",
+      value: maxSteps,
+      message: "maxSteps must be at least 1"
+    });
+  }
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
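generateText now accepts a maxSteps setting that supersedes maxToolRoundtrips; it defaults to maxToolRoundtrips + 1 (one more than the old roundtrip count, since the initial generation now counts as a step), and values below 1 are rejected with the InvalidArgumentError added here. A hedged sketch of a multi-step tool call, with a stub tool and placeholder model:

  import { generateText, tool } from "ai";
  import { z } from "zod";
  import { openai } from "@ai-sdk/openai"; // illustrative provider

  const result = await generateText({
    model: openai("gpt-4o-mini"), // placeholder model id
    tools: {
      weather: tool({
        description: "Get the weather for a city",
        parameters: z.object({ city: z.string() }),
        execute: async ({ city }) => ({ city, tempC: 21 }), // stub result
      }),
    },
    maxSteps: 3, // up to 3 model generations in total (replaces maxToolRoundtrips: 2)
    prompt: "What is the weather in Paris?",
  });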
@@ -2976,7 +3029,7 @@ async function generateText({
       "ai.prompt": {
         input: () => JSON.stringify({ system, prompt, messages })
       },
-      "ai.settings.maxToolRoundtrips": maxToolRoundtrips
+      "ai.settings.maxSteps": maxSteps
     }
   }),
   tracer,
@@ -3000,16 +3053,16 @@ async function generateText({
   let currentModelResponse;
   let currentToolCalls = [];
   let currentToolResults = [];
-  let roundtripCount = 0;
+  let stepCount = 0;
   const responseMessages = [];
-  const roundtrips = [];
+  const steps = [];
   const usage = {
     completionTokens: 0,
     promptTokens: 0,
     totalTokens: 0
   };
   do {
-    const currentInputFormat = roundtripCount === 0 ? validatedPrompt.type : "messages";
+    const currentInputFormat = stepCount === 0 ? validatedPrompt.type : "messages";
     currentModelResponse = await retry(
       () => recordSpan({
         name: "ai.generateText.doGenerate",
@@ -3106,7 +3159,7 @@ async function generateText({
     usage.completionTokens += currentUsage.completionTokens;
     usage.promptTokens += currentUsage.promptTokens;
     usage.totalTokens += currentUsage.totalTokens;
-    roundtrips.push({
+    steps.push({
       text: (_b = currentModelResponse.text) != null ? _b : "",
       toolCalls: currentToolCalls,
       toolResults: currentToolResults,
@@ -3133,8 +3186,8 @@ async function generateText({
   } while (
     // there are tool calls:
     currentToolCalls.length > 0 && // all current tool calls have results:
-    currentToolResults.length === currentToolCalls.length && // the number of roundtrips is less than the maximum:
-    roundtripCount++ < maxToolRoundtrips
+    currentToolResults.length === currentToolCalls.length && // the number of steps is less than the maximum:
+    ++stepCount < maxSteps
   );
   span.setAttributes(
     selectTelemetryAttributes({
@@ -3176,7 +3229,7 @@ async function generateText({
     },
     logprobs: currentModelResponse.logprobs,
     responseMessages,
-    roundtrips,
+    steps,
     providerMetadata: currentModelResponse.providerMetadata
   });
 }
@@ -3251,7 +3304,8 @@ var DefaultGenerateTextResult = class {
     this.warnings = options.warnings;
     this.response = options.response;
     this.responseMessages = options.responseMessages;
-    this.roundtrips = options.roundtrips;
+    this.roundtrips = options.steps;
+    this.steps = options.steps;
     this.experimental_providerMetadata = options.providerMetadata;
     this.rawResponse = {
       headers: options.response.headers
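The generateText result gains a steps array (one entry per model generation, carrying at least the step's text, tool calls, and tool results per the loop above), while the old roundtrips property is kept as an alias for it. Reading it might look like this, a sketch against the same placeholder setup as the previous example:

  for (const step of result.steps) {
    console.log(step.text, step.toolCalls.length, step.toolResults.length);
  }
  // result.roundtrips points at the same array, for backwards compatibility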
@@ -3616,6 +3670,7 @@ async function streamText({
   abortSignal,
   headers,
   maxToolRoundtrips = 0,
+  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
   experimental_telemetry: telemetry,
   experimental_providerMetadata: providerMetadata,
   experimental_toolCallStreaming: toolCallStreaming = false,
@@ -3629,6 +3684,13 @@ async function streamText({
   ...settings
 }) {
   var _a11;
+  if (maxSteps < 1) {
+    throw new InvalidArgumentError({
+      parameter: "maxSteps",
+      value: maxSteps,
+      message: "maxSteps must be at least 1"
+    });
+  }
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
@@ -3646,14 +3708,15 @@ async function streamText({
       // specific settings that only make sense on the outer level:
       "ai.prompt": {
         input: () => JSON.stringify({ system, prompt, messages })
-      }
+      },
+      "ai.settings.maxSteps": maxSteps
     }
   }),
   tracer,
   endWhenDone: false,
   fn: async (rootSpan) => {
     const retry = retryWithExponentialBackoff({ maxRetries });
-    const startRoundtrip = async ({
+    const startStep = async ({
       promptMessages: promptMessages2,
       promptType
     }) => {
@@ -3735,7 +3798,7 @@ async function streamText({
       result: { stream, warnings, rawResponse },
       doStreamSpan,
       startTimestampMs
-    } = await startRoundtrip({
+    } = await startStep({
       promptType: validatePrompt({ system, prompt, messages }).type,
       promptMessages
     });
@@ -3749,8 +3812,8 @@ async function streamText({
       doStreamSpan,
       telemetry,
       startTimestampMs,
-      maxToolRoundtrips,
-      startRoundtrip,
+      maxSteps,
+      startStep,
       promptMessages,
       modelId: model.modelId,
       now: now2,
@@ -3771,8 +3834,8 @@ var DefaultStreamTextResult = class {
     doStreamSpan,
     telemetry,
     startTimestampMs,
-    maxToolRoundtrips,
-    startRoundtrip,
+    maxSteps,
+    startStep,
     promptMessages,
     modelId,
     now: now2,
@@ -3791,6 +3854,8 @@ var DefaultStreamTextResult = class {
     this.toolCalls = toolCallsPromise;
     const { resolve: resolveToolResults, promise: toolResultsPromise } = createResolvablePromise();
     this.toolResults = toolResultsPromise;
+    const { resolve: resolveSteps, promise: stepsPromise } = createResolvablePromise();
+    this.steps = stepsPromise;
     const {
       resolve: resolveProviderMetadata,
       promise: providerMetadataPromise
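streamText results get the same treatment: a resolvable promise backs a new steps property that settles once the stream finishes. A sketch, reusing the placeholder model from the earlier examples:

  const result = await streamText({
    model: openai("gpt-4o-mini"), // placeholder model id
    maxSteps: 2,
    prompt: "What is the weather in Paris?",
  });
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }
  const steps = await result.steps; // resolves after the stream closes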
@@ -3804,12 +3869,13 @@ var DefaultStreamTextResult = class {
       close: closeStitchableStream
     } = createStitchableStream();
     this.originalStream = stitchableStream;
+    const stepResults = [];
     const self = this;
-    function addRoundtripStream({
+    function addStepStream({
       stream: stream2,
       startTimestamp,
       doStreamSpan: doStreamSpan2,
-
+      currentStep,
       promptMessages: promptMessages2,
       usage = {
         promptTokens: 0,
@@ -3817,19 +3883,19 @@ var DefaultStreamTextResult = class {
         totalTokens: 0
       }
     }) {
-      const roundtripToolCalls = [];
-      const roundtripToolResults = [];
-      let roundtripFinishReason = "unknown";
-      let roundtripUsage = {
+      const stepToolCalls = [];
+      const stepToolResults = [];
+      let stepFinishReason = "unknown";
+      let stepUsage = {
         promptTokens: 0,
         completionTokens: 0,
         totalTokens: 0
       };
-      let roundtripProviderMetadata;
-      let roundtripFirstChunk = true;
-      let roundtripText = "";
-      let roundtripLogProbs;
-      let roundtripResponse = {
+      let stepProviderMetadata;
+      let stepFirstChunk = true;
+      let stepText = "";
+      let stepLogProbs;
+      let stepResponse = {
         id: generateId3(),
         timestamp: currentDate(),
         modelId
@@ -3839,9 +3905,9 @@ var DefaultStreamTextResult = class {
       new TransformStream({
         async transform(chunk, controller) {
           var _a11, _b, _c;
-          if (roundtripFirstChunk) {
+          if (stepFirstChunk) {
             const msToFirstChunk = now2() - startTimestamp;
-            roundtripFirstChunk = false;
+            stepFirstChunk = false;
             doStreamSpan2.addEvent("ai.stream.firstChunk", {
               "ai.response.msToFirstChunk": msToFirstChunk,
               // deprecated:
@@ -3860,40 +3926,40 @@ var DefaultStreamTextResult = class {
           switch (chunkType) {
             case "text-delta": {
               controller.enqueue(chunk);
-              roundtripText += chunk.textDelta;
+              stepText += chunk.textDelta;
               await (onChunk == null ? void 0 : onChunk({ chunk }));
               break;
             }
             case "tool-call": {
               controller.enqueue(chunk);
-              roundtripToolCalls.push(chunk);
+              stepToolCalls.push(chunk);
               await (onChunk == null ? void 0 : onChunk({ chunk }));
               break;
             }
             case "tool-result": {
               controller.enqueue(chunk);
-              roundtripToolResults.push(chunk);
+              stepToolResults.push(chunk);
               await (onChunk == null ? void 0 : onChunk({ chunk }));
               break;
             }
             case "response-metadata": {
-              roundtripResponse = {
-                id: (_a11 = chunk.id) != null ? _a11 : roundtripResponse.id,
-                timestamp: (_b = chunk.timestamp) != null ? _b : roundtripResponse.timestamp,
-                modelId: (_c = chunk.modelId) != null ? _c : roundtripResponse.modelId
+              stepResponse = {
+                id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
+                timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
+                modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
               };
               break;
             }
             case "finish": {
-              roundtripUsage = chunk.usage;
-              roundtripFinishReason = chunk.finishReason;
-              roundtripProviderMetadata = chunk.experimental_providerMetadata;
-              roundtripLogProbs = chunk.logprobs;
+              stepUsage = chunk.usage;
+              stepFinishReason = chunk.finishReason;
+              stepProviderMetadata = chunk.experimental_providerMetadata;
+              stepLogProbs = chunk.logprobs;
               const msToFinish = now2() - startTimestamp;
               doStreamSpan2.addEvent("ai.stream.finish");
               doStreamSpan2.setAttributes({
                 "ai.response.msToFinish": msToFinish,
-                "ai.response.avgCompletionTokensPerSecond": 1e3 * roundtripUsage.completionTokens / msToFinish
+                "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
               });
               break;
             }
@@ -3905,7 +3971,7 @@ var DefaultStreamTextResult = class {
             }
             case "error": {
               controller.enqueue(chunk);
-              roundtripFinishReason = "error";
+              stepFinishReason = "error";
               break;
             }
             default: {
@@ -3917,41 +3983,52 @@ var DefaultStreamTextResult = class {
         // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
         async flush(controller) {
           controller.enqueue({
-            type: "roundtrip-finish",
-            finishReason: roundtripFinishReason,
-            usage: roundtripUsage,
-            experimental_providerMetadata: roundtripProviderMetadata,
-            logprobs: roundtripLogProbs,
-            response: roundtripResponse
+            type: "step-finish",
+            finishReason: stepFinishReason,
+            usage: stepUsage,
+            experimental_providerMetadata: stepProviderMetadata,
+            logprobs: stepLogProbs,
+            response: stepResponse
           });
-          const telemetryToolCalls = roundtripToolCalls.length > 0 ? JSON.stringify(roundtripToolCalls) : void 0;
+          stepResults.push({
+            text: stepText,
+            toolCalls: stepToolCalls,
+            toolResults: stepToolResults,
+            finishReason: stepFinishReason,
+            usage: stepUsage,
+            warnings: self.warnings,
+            logprobs: stepLogProbs,
+            response: stepResponse,
+            rawResponse: self.rawResponse
+          });
+          const telemetryToolCalls = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
           try {
             doStreamSpan2.setAttributes(
               selectTelemetryAttributes({
                 telemetry,
                 attributes: {
-                  "ai.response.finishReason": roundtripFinishReason,
-                  "ai.response.text": { output: () => roundtripText },
+                  "ai.response.finishReason": stepFinishReason,
+                  "ai.response.text": { output: () => stepText },
                   "ai.response.toolCalls": {
                     output: () => telemetryToolCalls
                   },
-                  "ai.response.id": roundtripResponse.id,
-                  "ai.response.model": roundtripResponse.modelId,
-                  "ai.response.timestamp": roundtripResponse.timestamp.toISOString(),
-                  "ai.usage.promptTokens": roundtripUsage.promptTokens,
-                  "ai.usage.completionTokens": roundtripUsage.completionTokens,
+                  "ai.response.id": stepResponse.id,
+                  "ai.response.model": stepResponse.modelId,
+                  "ai.response.timestamp": stepResponse.timestamp.toISOString(),
+                  "ai.usage.promptTokens": stepUsage.promptTokens,
+                  "ai.usage.completionTokens": stepUsage.completionTokens,
                   // deprecated
-                  "ai.finishReason": roundtripFinishReason,
-                  "ai.result.text": { output: () => roundtripText },
+                  "ai.finishReason": stepFinishReason,
+                  "ai.result.text": { output: () => stepText },
                   "ai.result.toolCalls": {
                     output: () => telemetryToolCalls
                   },
                   // standardized gen-ai llm span attributes:
-                  "gen_ai.response.finish_reasons": [roundtripFinishReason],
-                  "gen_ai.response.id": roundtripResponse.id,
-                  "gen_ai.response.model": roundtripResponse.modelId,
-                  "gen_ai.usage.input_tokens": roundtripUsage.promptTokens,
-                  "gen_ai.usage.output_tokens": roundtripUsage.completionTokens
+                  "gen_ai.response.finish_reasons": [stepFinishReason],
+                  "gen_ai.response.id": stepResponse.id,
+                  "gen_ai.response.model": stepResponse.modelId,
+                  "gen_ai.usage.input_tokens": stepUsage.promptTokens,
+                  "gen_ai.usage.output_tokens": stepUsage.completionTokens
                 }
               })
             );
@@ -3960,21 +4037,21 @@ var DefaultStreamTextResult = class {
             doStreamSpan2.end();
           }
           const combinedUsage = {
-            promptTokens: usage.promptTokens + roundtripUsage.promptTokens,
-            completionTokens: usage.completionTokens + roundtripUsage.completionTokens,
-            totalTokens: usage.totalTokens + roundtripUsage.totalTokens
+            promptTokens: usage.promptTokens + stepUsage.promptTokens,
+            completionTokens: usage.completionTokens + stepUsage.completionTokens,
+            totalTokens: usage.totalTokens + stepUsage.totalTokens
           };
           if (
             // there are tool calls:
-
-
-
+            stepToolCalls.length > 0 && // all current tool calls have results:
+            stepToolResults.length === stepToolCalls.length && // the number of steps is less than the maximum:
+            currentStep + 1 < maxSteps
           ) {
             promptMessages2.push(
               ...toResponseMessages({
-                text: roundtripText,
-                toolCalls: roundtripToolCalls,
-                toolResults: roundtripToolResults
+                text: stepText,
+                toolCalls: stepToolCalls,
+                toolResults: stepToolResults
               }).map(
                 (message) => convertToLanguageModelMessage(message, null)
               )
@@ -3983,17 +4060,17 @@ var DefaultStreamTextResult = class {
               result,
               doStreamSpan: doStreamSpan3,
               startTimestampMs: startTimestamp2
-            } = await startRoundtrip({
+            } = await startStep({
               promptType: "messages",
               promptMessages: promptMessages2
             });
             self.warnings = result.warnings;
             self.rawResponse = result.rawResponse;
-            addRoundtripStream({
+            addStepStream({
               stream: result.stream,
               startTimestamp: startTimestamp2,
               doStreamSpan: doStreamSpan3,
-
+              currentStep: currentStep + 1,
               promptMessages: promptMessages2,
               usage: combinedUsage
             });
@@ -4002,27 +4079,27 @@ var DefaultStreamTextResult = class {
           try {
             controller.enqueue({
               type: "finish",
-              finishReason: roundtripFinishReason,
+              finishReason: stepFinishReason,
               usage: combinedUsage,
-              experimental_providerMetadata: roundtripProviderMetadata,
-              logprobs: roundtripLogProbs,
-              response: roundtripResponse
+              experimental_providerMetadata: stepProviderMetadata,
+              logprobs: stepLogProbs,
+              response: stepResponse
             });
             closeStitchableStream();
             rootSpan.setAttributes(
               selectTelemetryAttributes({
                 telemetry,
                 attributes: {
-                  "ai.response.finishReason": roundtripFinishReason,
-                  "ai.response.text": { output: () => roundtripText },
+                  "ai.response.finishReason": stepFinishReason,
+                  "ai.response.text": { output: () => stepText },
                   "ai.response.toolCalls": {
                     output: () => telemetryToolCalls
                   },
                   "ai.usage.promptTokens": combinedUsage.promptTokens,
                   "ai.usage.completionTokens": combinedUsage.completionTokens,
                   // deprecated
-                  "ai.finishReason": roundtripFinishReason,
-                  "ai.result.text": { output: () => roundtripText },
+                  "ai.finishReason": stepFinishReason,
+                  "ai.result.text": { output: () => stepText },
                   "ai.result.toolCalls": {
                     output: () => telemetryToolCalls
                   }
@@ -4030,32 +4107,35 @@ var DefaultStreamTextResult = class {
               })
             );
             resolveUsage(combinedUsage);
-            resolveFinishReason(roundtripFinishReason);
-            resolveText(roundtripText);
-            resolveToolCalls(roundtripToolCalls);
-            resolveProviderMetadata(roundtripProviderMetadata);
-            resolveToolResults(roundtripToolResults);
+            resolveFinishReason(stepFinishReason);
+            resolveText(stepText);
+            resolveToolCalls(stepToolCalls);
+            resolveProviderMetadata(stepProviderMetadata);
+            resolveToolResults(stepToolResults);
             resolveResponse({
-              ...roundtripResponse,
+              ...stepResponse,
               headers: rawResponse == null ? void 0 : rawResponse.headers
             });
+            resolveSteps(stepResults);
             await (onFinish == null ? void 0 : onFinish({
-              finishReason: roundtripFinishReason,
+              finishReason: stepFinishReason,
               usage: combinedUsage,
-              text: roundtripText,
-              toolCalls: roundtripToolCalls,
+              text: stepText,
+              toolCalls: stepToolCalls,
               // The tool results are inferred as a never[] type, because they are
               // optional and the execute method with an inferred result type is
               // optional as well. Therefore we need to cast the toolResults to any.
               // The type exposed to the users will be correctly inferred.
-              toolResults: roundtripToolResults,
+              toolResults: stepToolResults,
               rawResponse,
               response: {
-                ...roundtripResponse,
+                ...stepResponse,
                 headers: rawResponse == null ? void 0 : rawResponse.headers
               },
               warnings,
-              experimental_providerMetadata: roundtripProviderMetadata
+              experimental_providerMetadata: stepProviderMetadata,
+              steps: stepResults
+              // see tool results comment above
             }));
           } catch (error) {
             controller.error(error);
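As the flush handler shows, each finished step is pushed into stepResults, which is handed both to the new steps promise and to the onFinish callback. A sketch of consuming it in the callback, with field names taken from the stepResults.push above and the model again a placeholder:

  const result = await streamText({
    model: openai("gpt-4o-mini"), // placeholder model id
    maxSteps: 2,
    prompt: "What is the weather in Paris?",
    onFinish({ steps, usage }) {
      console.log(`${steps.length} step(s), ${usage.totalTokens} tokens total`);
      for (const step of steps) {
        console.log(step.finishReason, step.usage.completionTokens);
      }
    },
  });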
@@ -4067,11 +4147,11 @@ var DefaultStreamTextResult = class {
           )
         );
       }
-      addRoundtripStream({
+      addStepStream({
         stream,
         startTimestamp: startTimestampMs,
         doStreamSpan,
-
+        currentStep: 0,
         promptMessages,
         usage: void 0
       });
@@ -4112,8 +4192,9 @@ var DefaultStreamTextResult = class {
   }
   toDataStreamInternal({
     callbacks = {},
-    getErrorMessage: getErrorMessage4 = () => ""
+    getErrorMessage: getErrorMessage4 = () => "",
     // mask error messages for safety by default
+    sendUsage = true
   } = {}) {
     let aggregatedResponse = "";
     const callbackTransformer = new TransformStream({
@@ -4143,10 +4224,11 @@ var DefaultStreamTextResult = class {
       transform: async (chunk, controller) => {
         const chunkType = chunk.type;
         switch (chunkType) {
-          case "text-delta":
+          case "text-delta": {
             controller.enqueue(formatStreamPart("text", chunk.textDelta));
             break;
-          case "tool-call-streaming-start":
+          }
+          case "tool-call-streaming-start": {
             controller.enqueue(
               formatStreamPart("tool_call_streaming_start", {
                 toolCallId: chunk.toolCallId,
@@ -4154,7 +4236,8 @@ var DefaultStreamTextResult = class {
               })
             );
             break;
-          case "tool-call-delta":
+          }
+          case "tool-call-delta": {
             controller.enqueue(
               formatStreamPart("tool_call_delta", {
                 toolCallId: chunk.toolCallId,
@@ -4162,7 +4245,8 @@ var DefaultStreamTextResult = class {
               })
             );
             break;
-          case "tool-call":
+          }
+          case "tool-call": {
             controller.enqueue(
               formatStreamPart("tool_call", {
                 toolCallId: chunk.toolCallId,
@@ -4171,7 +4255,8 @@ var DefaultStreamTextResult = class {
               })
             );
             break;
-          case "tool-result":
+          }
+          case "tool-result": {
             controller.enqueue(
               formatStreamPart("tool_result", {
                 toolCallId: chunk.toolCallId,
@@ -4179,33 +4264,37 @@ var DefaultStreamTextResult = class {
               })
             );
             break;
-          case "error":
+          }
+          case "error": {
             controller.enqueue(
               formatStreamPart("error", getErrorMessage4(chunk.error))
             );
             break;
-          case "roundtrip-finish":
+          }
+          case "step-finish": {
             controller.enqueue(
-              formatStreamPart("finish_roundtrip", {
+              formatStreamPart("finish_step", {
                 finishReason: chunk.finishReason,
-                usage: {
+                usage: sendUsage ? {
                   promptTokens: chunk.usage.promptTokens,
                   completionTokens: chunk.usage.completionTokens
-                }
+                } : void 0
               })
             );
             break;
-          case "finish":
+          }
+          case "finish": {
             controller.enqueue(
               formatStreamPart("finish_message", {
                 finishReason: chunk.finishReason,
-                usage: {
+                usage: sendUsage ? {
                   promptTokens: chunk.usage.promptTokens,
                   completionTokens: chunk.usage.completionTokens
-                }
+                } : void 0
               })
             );
             break;
+          }
           default: {
             const exhaustiveCheck = chunkType;
             throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
@@ -4226,6 +4315,7 @@ var DefaultStreamTextResult = class {
     };
     const data = options == null ? void 0 : "data" in options ? options.data : void 0;
     const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
+    const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
     writeToServerResponse({
       response,
       status: init == null ? void 0 : init.status,
@@ -4234,7 +4324,7 @@ var DefaultStreamTextResult = class {
         contentType: "text/plain; charset=utf-8",
         dataStreamVersion: "v1"
       }),
-      stream: this.toDataStream({ data, getErrorMessage: getErrorMessage4 })
+      stream: this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage })
     });
   }
   pipeTextStreamToResponse(response, init) {
@@ -4253,7 +4343,8 @@ var DefaultStreamTextResult = class {
   }
   toDataStream(options) {
     const stream = this.toDataStreamInternal({
-      getErrorMessage: options == null ? void 0 : options.getErrorMessage
+      getErrorMessage: options == null ? void 0 : options.getErrorMessage,
+      sendUsage: options == null ? void 0 : options.sendUsage
     });
     return (options == null ? void 0 : options.data) ? mergeStreams(options == null ? void 0 : options.data.stream, stream) : stream;
   }
@@ -4266,14 +4357,18 @@ var DefaultStreamTextResult = class {
     };
     const data = options == null ? void 0 : "data" in options ? options.data : void 0;
     const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
-    return new Response(this.toDataStream({ data, getErrorMessage: getErrorMessage4 }), {
-      status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
-      statusText: init == null ? void 0 : init.statusText,
-      headers: prepareResponseHeaders(init, {
-        contentType: "text/plain; charset=utf-8",
-        dataStreamVersion: "v1"
-      })
-    });
+    const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
+    return new Response(
+      this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage }),
+      {
+        status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
+        statusText: init == null ? void 0 : init.statusText,
+        headers: prepareResponseHeaders(init, {
+          contentType: "text/plain; charset=utf-8",
+          dataStreamVersion: "v1"
+        })
+      }
+    );
   }
   toTextStreamResponse(init) {
     var _a11;
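The sendUsage flag (default true) now threads from toDataStreamResponse and toDataStream down to toDataStreamInternal, where it decides whether finish_step and finish_message parts carry token counts. A sketch of turning it off in a route handler; the model is a placeholder:

  import { streamText } from "ai";
  import { openai } from "@ai-sdk/openai"; // illustrative provider

  export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = await streamText({ model: openai("gpt-4o-mini"), messages });
    // omit promptTokens/completionTokens from the data stream's finish parts
    return result.toDataStreamResponse({ sendUsage: false });
  }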
@@ -4375,27 +4470,6 @@ function attachmentsToParts(attachments) {
   return parts;
 }
 
-// core/prompt/message-conversion-error.ts
-import { AISDKError as AISDKError9 } from "@ai-sdk/provider";
-var name9 = "AI_MessageConversionError";
-var marker9 = `vercel.ai.error.${name9}`;
-var symbol9 = Symbol.for(marker9);
-var _a9;
-var MessageConversionError = class extends AISDKError9 {
-  constructor({
-    originalMessage,
-    message
-  }) {
-    super({ name: name9, message });
-    this[_a9] = true;
-    this.originalMessage = originalMessage;
-  }
-  static isInstance(error) {
-    return AISDKError9.hasMarker(error, marker9);
-  }
-};
-_a9 = symbol9;
-
 // core/prompt/convert-to-core-messages.ts
 function convertToCoreMessages(messages) {
   const coreMessages = [];
@@ -4475,7 +4549,7 @@ function convertToCoreMessages(messages) {
 }
 
 // core/registry/custom-provider.ts
-import { NoSuchModelError } from "@ai-sdk/provider";
+import { NoSuchModelError as NoSuchModelError2 } from "@ai-sdk/provider";
 function experimental_customProvider({
   languageModels,
   textEmbeddingModels,
@@ -4489,7 +4563,7 @@ function experimental_customProvider({
       if (fallbackProvider) {
         return fallbackProvider.languageModel(modelId);
       }
-      throw new NoSuchModelError({ modelId, modelType: "languageModel" });
+      throw new NoSuchModelError2({ modelId, modelType: "languageModel" });
     },
     textEmbeddingModel(modelId) {
       if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -4498,18 +4572,18 @@ function experimental_customProvider({
       if (fallbackProvider) {
         return fallbackProvider.textEmbeddingModel(modelId);
       }
-      throw new NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+      throw new NoSuchModelError2({ modelId, modelType: "textEmbeddingModel" });
     }
   };
 }
 
 // core/registry/no-such-provider-error.ts
-import { AISDKError as AISDKError10, NoSuchModelError as NoSuchModelError2 } from "@ai-sdk/provider";
+import { AISDKError as AISDKError11, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
 var name10 = "AI_NoSuchProviderError";
 var marker10 = `vercel.ai.error.${name10}`;
 var symbol10 = Symbol.for(marker10);
 var _a10;
-var NoSuchProviderError = class extends NoSuchModelError2 {
+var NoSuchProviderError = class extends NoSuchModelError3 {
   constructor({
     modelId,
     modelType,
@@ -4523,7 +4597,7 @@ var NoSuchProviderError = class extends NoSuchModelError2 {
     this.availableProviders = availableProviders;
   }
   static isInstance(error) {
-    return AISDKError10.hasMarker(error, marker10);
+    return AISDKError11.hasMarker(error, marker10);
   }
   /**
   * @deprecated use `isInstance` instead
@@ -4549,7 +4623,7 @@ var NoSuchProviderError = class extends NoSuchModelError2 {
 _a10 = symbol10;
 
 // core/registry/provider-registry.ts
-import { NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
+import { NoSuchModelError as NoSuchModelError4 } from "@ai-sdk/provider";
 function experimental_createProviderRegistry(providers) {
   const registry = new DefaultProviderRegistry();
   for (const [id, provider] of Object.entries(providers)) {
@@ -4583,7 +4657,7 @@ var DefaultProviderRegistry = class {
   splitId(id, modelType) {
     const index = id.indexOf(":");
     if (index === -1) {
-      throw new NoSuchModelError3({
+      throw new NoSuchModelError4({
         modelId: id,
         modelType,
         message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId:modelId")`
@@ -4596,7 +4670,7 @@ var DefaultProviderRegistry = class {
     const [providerId, modelId] = this.splitId(id, "languageModel");
     const model = (_b = (_a11 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a11, modelId);
     if (model == null) {
-      throw new NoSuchModelError3({ modelId: id, modelType: "languageModel" });
+      throw new NoSuchModelError4({ modelId: id, modelType: "languageModel" });
     }
     return model;
   }
@@ -4606,7 +4680,7 @@ var DefaultProviderRegistry = class {
     const provider = this.getProvider(providerId);
     const model = (_c = (_a11 = provider.textEmbeddingModel) == null ? void 0 : _a11.call(provider, modelId)) != null ? _c : "textEmbedding" in provider ? (_b = provider.textEmbedding) == null ? void 0 : _b.call(provider, modelId) : void 0;
     if (model == null) {
-      throw new NoSuchModelError3({
+      throw new NoSuchModelError4({
         modelId: id,
         modelType: "textEmbeddingModel"
       });
@@ -4645,21 +4719,6 @@ function magnitude(vector) {
   return Math.sqrt(dotProduct(vector, vector));
 }
 
-// errors/index.ts
-import {
-  AISDKError as AISDKError11,
-  APICallError as APICallError2,
-  EmptyResponseBodyError,
-  InvalidPromptError as InvalidPromptError2,
-  InvalidResponseDataError,
-  JSONParseError,
-  LoadAPIKeyError,
-  NoContentGeneratedError,
-  NoSuchModelError as NoSuchModelError4,
-  TypeValidationError as TypeValidationError2,
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
-} from "@ai-sdk/provider";
-
 // streams/ai-stream.ts
 import {
   createParser
@@ -5713,7 +5772,7 @@ var StreamingTextResponse = class extends Response {
 var generateId2 = generateIdImpl;
 var nanoid = generateIdImpl;
 export {
-  AISDKError11 as AISDKError,
+  AISDKError10 as AISDKError,
   AIStream,
   APICallError2 as APICallError,
   AWSBedrockAnthropicMessagesStream,
@@ -5743,7 +5802,7 @@ export {
   MistralStream,
   NoContentGeneratedError,
   NoObjectGeneratedError,
-  NoSuchModelError4 as NoSuchModelError,
+  NoSuchModelError,
   NoSuchProviderError,
   NoSuchToolError,
   OpenAIStream,