ai 3.3.35 → 3.3.36
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/index.d.mts +102 -66
- package/dist/index.d.ts +102 -66
- package/dist/index.js +261 -209
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +264 -212
- package/dist/index.mjs.map +1 -1
- package/package.json +6 -6
- package/rsc/dist/index.d.ts +0 -4
- package/rsc/dist/rsc-server.d.mts +0 -4
- package/rsc/dist/rsc-server.mjs +11 -0
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/svelte/dist/index.d.mts +13 -16
- package/svelte/dist/index.d.ts +13 -16
- package/svelte/dist/index.js +5 -4
- package/svelte/dist/index.js.map +1 -1
- package/svelte/dist/index.mjs +5 -4
- package/svelte/dist/index.mjs.map +1 -1
package/dist/index.mjs
CHANGED
@@ -1067,6 +1067,7 @@ function prepareCallSettings({
   maxTokens,
   temperature,
   topP,
+  topK,
   presencePenalty,
   frequencyPenalty,
   stopSequences,
@@ -1107,6 +1108,15 @@ function prepareCallSettings({
       });
     }
   }
+  if (topK != null) {
+    if (typeof topK !== "number") {
+      throw new InvalidArgumentError({
+        parameter: "topK",
+        value: topK,
+        message: "topK must be a number"
+      });
+    }
+  }
   if (presencePenalty != null) {
     if (typeof presencePenalty !== "number") {
       throw new InvalidArgumentError({
@@ -1154,6 +1164,7 @@ function prepareCallSettings({
     maxTokens,
     temperature: temperature != null ? temperature : 0,
     topP,
+    topK,
     presencePenalty,
     frequencyPenalty,
     stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
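Taken together, the three `prepareCallSettings` hunks above thread the new `topK` call setting through destructuring, validation, and the returned settings object. A minimal usage sketch (the provider and model id are illustrative; `topK` support varies by provider):

```js
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// topK now travels through prepareCallSettings alongside temperature and topP.
const { text } = await generateText({
  model: openai("gpt-4o-mini"), // illustrative model id
  prompt: "Name three whale species.",
  topK: 40, // sample only from the 40 most likely next tokens
});
```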
@@ -2752,64 +2763,20 @@ var experimental_streamObject = streamObject;
 // core/generate-text/generate-text.ts
 import { createIdGenerator as createIdGenerator3 } from "@ai-sdk/provider-utils";
 
-// core/prompt/prepare-tools-and-tool-choice.ts
-import { asSchema as asSchema2 } from "@ai-sdk/ui-utils";
-
-// core/util/is-non-empty-object.ts
-function isNonEmptyObject(object) {
-  return object != null && Object.keys(object).length > 0;
-}
-
-// core/prompt/prepare-tools-and-tool-choice.ts
-function prepareToolsAndToolChoice({
-  tools,
-  toolChoice
-}) {
-  if (!isNonEmptyObject(tools)) {
-    return {
-      tools: void 0,
-      toolChoice: void 0
-    };
-  }
-  return {
-    tools: Object.entries(tools).map(([name11, tool2]) => ({
-      type: "function",
-      name: name11,
-      description: tool2.description,
-      parameters: asSchema2(tool2.parameters).jsonSchema
-    })),
-    toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
-  };
-}
-
-// core/generate-text/to-response-messages.ts
-function toResponseMessages({
-  text = "",
-  toolCalls,
-  toolResults
-}) {
-  const responseMessages = [];
-  responseMessages.push({
-    role: "assistant",
-    content: [{ type: "text", text }, ...toolCalls]
-  });
-  if (toolResults.length > 0) {
-    responseMessages.push({
-      role: "tool",
-      content: toolResults.map((result) => ({
-        type: "tool-result",
-        toolCallId: result.toolCallId,
-        toolName: result.toolName,
-        result: result.result
-      }))
-    });
-  }
-  return responseMessages;
-}
-
-// core/generate-text/tool-call.ts
-import { safeParseJSON as safeParseJSON2 } from "@ai-sdk/provider-utils";
-import { asSchema as asSchema3 } from "@ai-sdk/ui-utils";
+// errors/index.ts
+import {
+  AISDKError as AISDKError10,
+  APICallError as APICallError2,
+  EmptyResponseBodyError,
+  InvalidPromptError as InvalidPromptError2,
+  InvalidResponseDataError,
+  JSONParseError,
+  LoadAPIKeyError,
+  NoContentGeneratedError,
+  NoSuchModelError,
+  TypeValidationError as TypeValidationError2,
+  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
+} from "@ai-sdk/provider";
 
 // errors/invalid-tool-arguments-error.ts
 import { AISDKError as AISDKError7, getErrorMessage as getErrorMessage3 } from "@ai-sdk/provider";
@@ -2897,7 +2864,85 @@ var NoSuchToolError = class extends AISDKError8 {
 };
 _a8 = symbol8;
 
+// core/prompt/message-conversion-error.ts
+import { AISDKError as AISDKError9 } from "@ai-sdk/provider";
+var name9 = "AI_MessageConversionError";
+var marker9 = `vercel.ai.error.${name9}`;
+var symbol9 = Symbol.for(marker9);
+var _a9;
+var MessageConversionError = class extends AISDKError9 {
+  constructor({
+    originalMessage,
+    message
+  }) {
+    super({ name: name9, message });
+    this[_a9] = true;
+    this.originalMessage = originalMessage;
+  }
+  static isInstance(error) {
+    return AISDKError9.hasMarker(error, marker9);
+  }
+};
+_a9 = symbol9;
+
+// core/prompt/prepare-tools-and-tool-choice.ts
+import { asSchema as asSchema2 } from "@ai-sdk/ui-utils";
+
+// core/util/is-non-empty-object.ts
+function isNonEmptyObject(object) {
+  return object != null && Object.keys(object).length > 0;
+}
+
+// core/prompt/prepare-tools-and-tool-choice.ts
+function prepareToolsAndToolChoice({
+  tools,
+  toolChoice
+}) {
+  if (!isNonEmptyObject(tools)) {
+    return {
+      tools: void 0,
+      toolChoice: void 0
+    };
+  }
+  return {
+    tools: Object.entries(tools).map(([name11, tool2]) => ({
+      type: "function",
+      name: name11,
+      description: tool2.description,
+      parameters: asSchema2(tool2.parameters).jsonSchema
+    })),
+    toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
+  };
+}
+
+// core/generate-text/to-response-messages.ts
+function toResponseMessages({
+  text = "",
+  toolCalls,
+  toolResults
+}) {
+  const responseMessages = [];
+  responseMessages.push({
+    role: "assistant",
+    content: [{ type: "text", text }, ...toolCalls]
+  });
+  if (toolResults.length > 0) {
+    responseMessages.push({
+      role: "tool",
+      content: toolResults.map((result) => ({
+        type: "tool-result",
+        toolCallId: result.toolCallId,
+        toolName: result.toolName,
+        result: result.result
+      }))
+    });
+  }
+  return responseMessages;
+}
+
 // core/generate-text/tool-call.ts
+import { safeParseJSON as safeParseJSON2 } from "@ai-sdk/provider-utils";
+import { asSchema as asSchema3 } from "@ai-sdk/ui-utils";
 function parseToolCall({
   toolCall,
   tools
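The relocated `prepareToolsAndToolChoice` helper is unchanged in behavior: it flattens the user-facing `tools` record into function declarations with JSON schemas. A sketch of the mapping, assuming a zod-based tool as in the SDK docs:

```js
import { tool } from "ai";
import { z } from "zod";

const tools = {
  weather: tool({
    description: "Get the weather for a city",
    parameters: z.object({ city: z.string() }),
    execute: async ({ city }) => ({ city, temperatureC: 18 }), // illustrative stub
  }),
};

// prepareToolsAndToolChoice({ tools, toolChoice: undefined }) yields roughly:
// {
//   tools: [{
//     type: "function",
//     name: "weather",
//     description: "Get the weather for a city",
//     parameters: { /* JSON schema derived from the zod object */ },
//   }],
//   toolChoice: { type: "auto" },
// }
```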
@@ -2946,6 +2991,7 @@ async function generateText({
   headers,
   maxAutomaticRoundtrips = 0,
   maxToolRoundtrips = maxAutomaticRoundtrips,
+  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
   experimental_telemetry: telemetry,
   experimental_providerMetadata: providerMetadata,
   _internal: {
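This is the headline change of the release: `generateText` gains a `maxSteps` option whose default, `maxToolRoundtrips + 1`, keeps existing callers working (one initial generation plus N tool roundtrips is N + 1 steps). A sketch of the new spelling (provider and tool are illustrative):

```js
import { generateText, tool } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

const result = await generateText({
  model: openai("gpt-4o-mini"), // illustrative
  tools: {
    weather: tool({
      description: "Get the weather for a city",
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, temperatureC: 18 }),
    }),
  },
  maxSteps: 3, // equivalent to the deprecated maxToolRoundtrips: 2
  prompt: "What is the weather in Berlin?",
});
```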
@@ -2955,6 +3001,13 @@ async function generateText({
   ...settings
 }) {
   var _a11;
+  if (maxSteps < 1) {
+    throw new InvalidArgumentError({
+      parameter: "maxSteps",
+      value: maxSteps,
+      message: "maxSteps must be at least 1"
+    });
+  }
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
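`maxSteps` is validated eagerly: values below 1 throw an `InvalidArgumentError` before any model call is made (the same guard is added to `streamText` further down). A sketch, assuming `model` is any configured language model:

```js
import { generateText, InvalidArgumentError } from "ai";

try {
  await generateText({ model, prompt: "Hi", maxSteps: 0 });
} catch (error) {
  if (InvalidArgumentError.isInstance(error)) {
    console.log(error.parameter, error.value); // "maxSteps" 0
  }
}
```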
@@ -2976,7 +3029,7 @@ async function generateText({
       "ai.prompt": {
         input: () => JSON.stringify({ system, prompt, messages })
       },
-      "ai.settings.maxToolRoundtrips": maxToolRoundtrips
+      "ai.settings.maxSteps": maxSteps
     }
   }),
   tracer,
@@ -3000,16 +3053,16 @@ async function generateText({
       let currentModelResponse;
       let currentToolCalls = [];
       let currentToolResults = [];
-      let roundtripCount = 0;
+      let stepCount = 0;
       const responseMessages = [];
-      const roundtrips = [];
+      const steps = [];
       const usage = {
         completionTokens: 0,
         promptTokens: 0,
         totalTokens: 0
       };
       do {
-        const currentInputFormat = roundtripCount === 0 ? validatedPrompt.type : "messages";
+        const currentInputFormat = stepCount === 0 ? validatedPrompt.type : "messages";
         currentModelResponse = await retry(
           () => recordSpan({
             name: "ai.generateText.doGenerate",
@@ -3106,7 +3159,7 @@ async function generateText({
         usage.completionTokens += currentUsage.completionTokens;
         usage.promptTokens += currentUsage.promptTokens;
         usage.totalTokens += currentUsage.totalTokens;
-        roundtrips.push({
+        steps.push({
           text: (_b = currentModelResponse.text) != null ? _b : "",
           toolCalls: currentToolCalls,
           toolResults: currentToolResults,
@@ -3133,8 +3186,8 @@ async function generateText({
       } while (
         // there are tool calls:
         currentToolCalls.length > 0 && // all current tool calls have results:
-        currentToolResults.length === currentToolCalls.length && // the number of roundtrips is below the limit:
-        roundtripCount++ < maxToolRoundtrips
+        currentToolResults.length === currentToolCalls.length && // the number of steps is less than the maximum:
+        ++stepCount < maxSteps
       );
       span.setAttributes(
         selectTelemetryAttributes({
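Note the off-by-one relationship in the rewritten loop condition: `maxToolRoundtrips: 2` allowed up to three model calls, which is exactly `maxSteps: 3`. Under the defaulting rule above, these two calls behave the same (`model`, `tools`, and `prompt` stand in for your own values):

```js
// legacy, deprecated: initial call plus up to 2 tool roundtrips
await generateText({ model, tools, prompt, maxToolRoundtrips: 2 });

// new: at most 3 generation steps in total
await generateText({ model, tools, prompt, maxSteps: 3 });
```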
@@ -3176,7 +3229,7 @@ async function generateText({
         },
         logprobs: currentModelResponse.logprobs,
         responseMessages,
-        roundtrips,
+        steps,
         providerMetadata: currentModelResponse.providerMetadata
       });
     }
@@ -3251,7 +3304,8 @@ var DefaultGenerateTextResult = class {
     this.warnings = options.warnings;
     this.response = options.response;
     this.responseMessages = options.responseMessages;
-    this.roundtrips = options.roundtrips;
+    this.roundtrips = options.steps;
+    this.steps = options.steps;
     this.experimental_providerMetadata = options.providerMetadata;
     this.rawResponse = {
       headers: options.response.headers
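`DefaultGenerateTextResult` keeps the deprecated `roundtrips` property as an alias of the new `steps` array; both point at the same per-step records (text, tool calls, tool results, usage, finish reason). For example:

```js
const result = await generateText({ model, tools, prompt, maxSteps: 5 });

for (const step of result.steps) {
  console.log(step.finishReason, step.usage.totalTokens);
  console.log(step.toolCalls.map((call) => call.toolName));
}
// result.roundtrips still works, but is the same array as result.steps.
```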
@@ -3616,6 +3670,7 @@ async function streamText({
   abortSignal,
   headers,
   maxToolRoundtrips = 0,
+  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
   experimental_telemetry: telemetry,
   experimental_providerMetadata: providerMetadata,
   experimental_toolCallStreaming: toolCallStreaming = false,
@@ -3629,6 +3684,13 @@ async function streamText({
   ...settings
 }) {
   var _a11;
+  if (maxSteps < 1) {
+    throw new InvalidArgumentError({
+      parameter: "maxSteps",
+      value: maxSteps,
+      message: "maxSteps must be at least 1"
+    });
+  }
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
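`streamText` receives the same `maxSteps` option and the same up-front validation. When a step ends in tool calls that all have results, a follow-up generation is started and stitched into the one stream the caller consumes:

```js
import { streamText } from "ai";

const result = await streamText({
  model, // any configured language model
  tools,
  maxSteps: 4,
  prompt: "Look up the weather in Berlin, then summarize it.",
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```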
@@ -3646,14 +3708,15 @@ async function streamText({
       // specific settings that only make sense on the outer level:
       "ai.prompt": {
         input: () => JSON.stringify({ system, prompt, messages })
-      }
+      },
+      "ai.settings.maxSteps": maxSteps
     }
   }),
   tracer,
   endWhenDone: false,
   fn: async (rootSpan) => {
     const retry = retryWithExponentialBackoff({ maxRetries });
-    const startRoundtrip = async ({
+    const startStep = async ({
       promptMessages: promptMessages2,
       promptType
     }) => {
@@ -3735,7 +3798,7 @@ async function streamText({
       result: { stream, warnings, rawResponse },
       doStreamSpan,
       startTimestampMs
-    } = await startRoundtrip({
+    } = await startStep({
       promptType: validatePrompt({ system, prompt, messages }).type,
       promptMessages
     });
@@ -3749,8 +3812,8 @@ async function streamText({
       doStreamSpan,
       telemetry,
       startTimestampMs,
-      maxToolRoundtrips,
-      startRoundtrip,
+      maxSteps,
+      startStep,
       promptMessages,
       modelId: model.modelId,
       now: now2,
@@ -3771,8 +3834,8 @@ var DefaultStreamTextResult = class {
     doStreamSpan,
     telemetry,
     startTimestampMs,
-    maxToolRoundtrips,
-    startRoundtrip,
+    maxSteps,
+    startStep,
     promptMessages,
     modelId,
     now: now2,
@@ -3791,6 +3854,8 @@ var DefaultStreamTextResult = class {
     this.toolCalls = toolCallsPromise;
     const { resolve: resolveToolResults, promise: toolResultsPromise } = createResolvablePromise();
     this.toolResults = toolResultsPromise;
+    const { resolve: resolveSteps, promise: stepsPromise } = createResolvablePromise();
+    this.steps = stepsPromise;
     const {
       resolve: resolveProviderMetadata,
       promise: providerMetadataPromise
@@ -3804,12 +3869,13 @@ var DefaultStreamTextResult = class {
       close: closeStitchableStream
     } = createStitchableStream();
     this.originalStream = stitchableStream;
+    const stepResults = [];
     const self = this;
-    function addRoundtripStream({
+    function addStepStream({
       stream: stream2,
       startTimestamp,
       doStreamSpan: doStreamSpan2,
-      currentToolRoundtrip,
+      currentStep,
       promptMessages: promptMessages2,
       usage = {
         promptTokens: 0,
@@ -3817,19 +3883,19 @@ var DefaultStreamTextResult = class {
         totalTokens: 0
       }
     }) {
-      const roundtripToolCalls = [];
-      const roundtripToolResults = [];
-      let roundtripFinishReason = "unknown";
-      let roundtripUsage = {
+      const stepToolCalls = [];
+      const stepToolResults = [];
+      let stepFinishReason = "unknown";
+      let stepUsage = {
         promptTokens: 0,
         completionTokens: 0,
         totalTokens: 0
       };
-      let roundtripProviderMetadata;
-      let roundtripFirstChunk = true;
-      let roundtripText = "";
-      let roundtripLogProbs;
-      let roundtripResponse = {
+      let stepProviderMetadata;
+      let stepFirstChunk = true;
+      let stepText = "";
+      let stepLogProbs;
+      let stepResponse = {
         id: generateId3(),
         timestamp: currentDate(),
         modelId
@@ -3839,9 +3905,9 @@ var DefaultStreamTextResult = class {
       new TransformStream({
         async transform(chunk, controller) {
           var _a11, _b, _c;
-          if (roundtripFirstChunk) {
+          if (stepFirstChunk) {
             const msToFirstChunk = now2() - startTimestamp;
-            roundtripFirstChunk = false;
+            stepFirstChunk = false;
             doStreamSpan2.addEvent("ai.stream.firstChunk", {
               "ai.response.msToFirstChunk": msToFirstChunk,
               // deprecated:
@@ -3860,40 +3926,40 @@ var DefaultStreamTextResult = class {
           switch (chunkType) {
             case "text-delta": {
               controller.enqueue(chunk);
-              roundtripText += chunk.textDelta;
+              stepText += chunk.textDelta;
               await (onChunk == null ? void 0 : onChunk({ chunk }));
               break;
             }
             case "tool-call": {
               controller.enqueue(chunk);
-              roundtripToolCalls.push(chunk);
+              stepToolCalls.push(chunk);
               await (onChunk == null ? void 0 : onChunk({ chunk }));
               break;
             }
             case "tool-result": {
               controller.enqueue(chunk);
-              roundtripToolResults.push(chunk);
+              stepToolResults.push(chunk);
               await (onChunk == null ? void 0 : onChunk({ chunk }));
               break;
             }
             case "response-metadata": {
-              roundtripResponse = {
-                id: (_a11 = chunk.id) != null ? _a11 : roundtripResponse.id,
-                timestamp: (_b = chunk.timestamp) != null ? _b : roundtripResponse.timestamp,
-                modelId: (_c = chunk.modelId) != null ? _c : roundtripResponse.modelId
+              stepResponse = {
+                id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
+                timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
+                modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
               };
               break;
             }
             case "finish": {
-              roundtripUsage = chunk.usage;
-              roundtripFinishReason = chunk.finishReason;
-              roundtripProviderMetadata = chunk.experimental_providerMetadata;
-              roundtripLogProbs = chunk.logprobs;
+              stepUsage = chunk.usage;
+              stepFinishReason = chunk.finishReason;
+              stepProviderMetadata = chunk.experimental_providerMetadata;
+              stepLogProbs = chunk.logprobs;
               const msToFinish = now2() - startTimestamp;
               doStreamSpan2.addEvent("ai.stream.finish");
               doStreamSpan2.setAttributes({
                 "ai.response.msToFinish": msToFinish,
-                "ai.response.avgCompletionTokensPerSecond": 1e3 * roundtripUsage.completionTokens / msToFinish
+                "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
               });
               break;
             }
@@ -3905,7 +3971,7 @@ var DefaultStreamTextResult = class {
             }
             case "error": {
               controller.enqueue(chunk);
-              roundtripFinishReason = "error";
+              stepFinishReason = "error";
               break;
             }
             default: {
@@ -3917,41 +3983,52 @@ var DefaultStreamTextResult = class {
         // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
         async flush(controller) {
           controller.enqueue({
-            type: "roundtrip-finish",
-            finishReason: roundtripFinishReason,
-            usage: roundtripUsage,
-            experimental_providerMetadata: roundtripProviderMetadata,
-            logprobs: roundtripLogProbs,
-            response: roundtripResponse
+            type: "step-finish",
+            finishReason: stepFinishReason,
+            usage: stepUsage,
+            experimental_providerMetadata: stepProviderMetadata,
+            logprobs: stepLogProbs,
+            response: stepResponse
+          });
+          stepResults.push({
+            text: stepText,
+            toolCalls: stepToolCalls,
+            toolResults: stepToolResults,
+            finishReason: stepFinishReason,
+            usage: stepUsage,
+            warnings: self.warnings,
+            logprobs: stepLogProbs,
+            response: stepResponse,
+            rawResponse: self.rawResponse
           });
-          const telemetryToolCalls = roundtripToolCalls.length > 0 ? JSON.stringify(roundtripToolCalls) : void 0;
+          const telemetryToolCalls = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
           try {
             doStreamSpan2.setAttributes(
               selectTelemetryAttributes({
                 telemetry,
                 attributes: {
-                  "ai.response.finishReason": roundtripFinishReason,
-                  "ai.response.text": { output: () => roundtripText },
+                  "ai.response.finishReason": stepFinishReason,
+                  "ai.response.text": { output: () => stepText },
                   "ai.response.toolCalls": {
                     output: () => telemetryToolCalls
                   },
-                  "ai.response.id": roundtripResponse.id,
-                  "ai.response.model": roundtripResponse.modelId,
-                  "ai.response.timestamp": roundtripResponse.timestamp.toISOString(),
-                  "ai.usage.promptTokens": roundtripUsage.promptTokens,
-                  "ai.usage.completionTokens": roundtripUsage.completionTokens,
+                  "ai.response.id": stepResponse.id,
+                  "ai.response.model": stepResponse.modelId,
+                  "ai.response.timestamp": stepResponse.timestamp.toISOString(),
+                  "ai.usage.promptTokens": stepUsage.promptTokens,
+                  "ai.usage.completionTokens": stepUsage.completionTokens,
                   // deprecated
-                  "ai.finishReason": roundtripFinishReason,
-                  "ai.result.text": { output: () => roundtripText },
+                  "ai.finishReason": stepFinishReason,
+                  "ai.result.text": { output: () => stepText },
                   "ai.result.toolCalls": {
                     output: () => telemetryToolCalls
                   },
                   // standardized gen-ai llm span attributes:
-                  "gen_ai.response.finish_reasons": [roundtripFinishReason],
-                  "gen_ai.response.id": roundtripResponse.id,
-                  "gen_ai.response.model": roundtripResponse.modelId,
-                  "gen_ai.usage.input_tokens": roundtripUsage.promptTokens,
-                  "gen_ai.usage.output_tokens": roundtripUsage.completionTokens
+                  "gen_ai.response.finish_reasons": [stepFinishReason],
+                  "gen_ai.response.id": stepResponse.id,
+                  "gen_ai.response.model": stepResponse.modelId,
+                  "gen_ai.usage.input_tokens": stepUsage.promptTokens,
+                  "gen_ai.usage.output_tokens": stepUsage.completionTokens
                 }
               })
             );
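Each per-step stream now ends with a `step-finish` part (the renamed `roundtrip-finish`) and pushes a record onto `stepResults`. Consumers of `fullStream` can observe step boundaries directly:

```js
for await (const part of result.fullStream) {
  if (part.type === "step-finish") {
    console.log("step done:", part.finishReason, part.usage);
  }
}
```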
@@ -3960,21 +4037,21 @@ var DefaultStreamTextResult = class {
            doStreamSpan2.end();
          }
          const combinedUsage = {
-            promptTokens: usage.promptTokens + roundtripUsage.promptTokens,
-            completionTokens: usage.completionTokens + roundtripUsage.completionTokens,
-            totalTokens: usage.totalTokens + roundtripUsage.totalTokens
+            promptTokens: usage.promptTokens + stepUsage.promptTokens,
+            completionTokens: usage.completionTokens + stepUsage.completionTokens,
+            totalTokens: usage.totalTokens + stepUsage.totalTokens
          };
          if (
            // there are tool calls:
-            roundtripToolCalls.length > 0 && // all current tool calls have results:
-            roundtripToolResults.length === roundtripToolCalls.length && // the number of roundtrips is below the limit:
-            currentToolRoundtrip < maxToolRoundtrips
+            stepToolCalls.length > 0 && // all current tool calls have results:
+            stepToolResults.length === stepToolCalls.length && // the number of steps is less than the maximum:
+            currentStep + 1 < maxSteps
          ) {
            promptMessages2.push(
              ...toResponseMessages({
-                text: roundtripText,
-                toolCalls: roundtripToolCalls,
-                toolResults: roundtripToolResults
+                text: stepText,
+                toolCalls: stepToolCalls,
+                toolResults: stepToolResults
              }).map(
                (message) => convertToLanguageModelMessage(message, null)
              )
@@ -3983,17 +4060,17 @@ var DefaultStreamTextResult = class {
              result,
              doStreamSpan: doStreamSpan3,
              startTimestampMs: startTimestamp2
-            } = await startRoundtrip({
+            } = await startStep({
              promptType: "messages",
              promptMessages: promptMessages2
            });
            self.warnings = result.warnings;
            self.rawResponse = result.rawResponse;
-            addRoundtripStream({
+            addStepStream({
              stream: result.stream,
              startTimestamp: startTimestamp2,
              doStreamSpan: doStreamSpan3,
-              currentToolRoundtrip: currentToolRoundtrip + 1,
+              currentStep: currentStep + 1,
              promptMessages: promptMessages2,
              usage: combinedUsage
            });
@@ -4002,27 +4079,27 @@ var DefaultStreamTextResult = class {
          try {
            controller.enqueue({
              type: "finish",
-              finishReason: roundtripFinishReason,
+              finishReason: stepFinishReason,
              usage: combinedUsage,
-              experimental_providerMetadata: roundtripProviderMetadata,
-              logprobs: roundtripLogProbs,
-              response: roundtripResponse
+              experimental_providerMetadata: stepProviderMetadata,
+              logprobs: stepLogProbs,
+              response: stepResponse
            });
            closeStitchableStream();
            rootSpan.setAttributes(
              selectTelemetryAttributes({
                telemetry,
                attributes: {
-                  "ai.response.finishReason": roundtripFinishReason,
-                  "ai.response.text": { output: () => roundtripText },
+                  "ai.response.finishReason": stepFinishReason,
+                  "ai.response.text": { output: () => stepText },
                  "ai.response.toolCalls": {
                    output: () => telemetryToolCalls
                  },
                  "ai.usage.promptTokens": combinedUsage.promptTokens,
                  "ai.usage.completionTokens": combinedUsage.completionTokens,
                  // deprecated
-                  "ai.finishReason": roundtripFinishReason,
-                  "ai.result.text": { output: () => roundtripText },
+                  "ai.finishReason": stepFinishReason,
+                  "ai.result.text": { output: () => stepText },
                  "ai.result.toolCalls": {
                    output: () => telemetryToolCalls
                  }
@@ -4030,32 +4107,35 @@ var DefaultStreamTextResult = class {
              })
            );
            resolveUsage(combinedUsage);
-            resolveFinishReason(roundtripFinishReason);
-            resolveText(roundtripText);
-            resolveToolCalls(roundtripToolCalls);
-            resolveProviderMetadata(roundtripProviderMetadata);
-            resolveToolResults(roundtripToolResults);
+            resolveFinishReason(stepFinishReason);
+            resolveText(stepText);
+            resolveToolCalls(stepToolCalls);
+            resolveProviderMetadata(stepProviderMetadata);
+            resolveToolResults(stepToolResults);
            resolveResponse({
-              ...roundtripResponse,
+              ...stepResponse,
              headers: rawResponse == null ? void 0 : rawResponse.headers
            });
+            resolveSteps(stepResults);
            await (onFinish == null ? void 0 : onFinish({
-              finishReason: roundtripFinishReason,
+              finishReason: stepFinishReason,
              usage: combinedUsage,
-              text: roundtripText,
-              toolCalls: roundtripToolCalls,
+              text: stepText,
+              toolCalls: stepToolCalls,
              // The tool results are inferred as a never[] type, because they are
              // optional and the execute method with an inferred result type is
              // optional as well. Therefore we need to cast the toolResults to any.
              // The type exposed to the users will be correctly inferred.
-              toolResults: roundtripToolResults,
+              toolResults: stepToolResults,
              rawResponse,
              response: {
-                ...roundtripResponse,
+                ...stepResponse,
                headers: rawResponse == null ? void 0 : rawResponse.headers
              },
              warnings,
-              experimental_providerMetadata: roundtripProviderMetadata
+              experimental_providerMetadata: stepProviderMetadata,
+              steps: stepResults
+              // see tool results comment above
            }));
          } catch (error) {
            controller.error(error);
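When the last step flushes, the accumulated `stepResults` resolve the new `steps` promise and are handed to `onFinish` together with the usage combined across all steps:

```js
const result = await streamText({
  model,
  tools,
  maxSteps: 3,
  prompt,
  onFinish({ steps, usage, text }) {
    console.log(`finished after ${steps.length} step(s)`);
    console.log("total tokens:", usage.totalTokens);
  },
});
// Alternatively, await the promise directly:
const steps = await result.steps;
```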
@@ -4067,11 +4147,11 @@ var DefaultStreamTextResult = class {
        )
      );
    }
-    addRoundtripStream({
+    addStepStream({
      stream,
      startTimestamp: startTimestampMs,
      doStreamSpan,
-      currentToolRoundtrip: 0,
+      currentStep: 0,
      promptMessages,
      usage: void 0
    });
@@ -4144,10 +4224,11 @@ var DefaultStreamTextResult = class {
      transform: async (chunk, controller) => {
        const chunkType = chunk.type;
        switch (chunkType) {
-          case "text-delta":
+          case "text-delta": {
            controller.enqueue(formatStreamPart("text", chunk.textDelta));
            break;
-          case "tool-call-streaming-start":
+          }
+          case "tool-call-streaming-start": {
            controller.enqueue(
              formatStreamPart("tool_call_streaming_start", {
                toolCallId: chunk.toolCallId,
|
|
4155
4236
|
})
|
4156
4237
|
);
|
4157
4238
|
break;
|
4158
|
-
|
4239
|
+
}
|
4240
|
+
case "tool-call-delta": {
|
4159
4241
|
controller.enqueue(
|
4160
4242
|
formatStreamPart("tool_call_delta", {
|
4161
4243
|
toolCallId: chunk.toolCallId,
|
@@ -4163,7 +4245,8 @@ var DefaultStreamTextResult = class {
              })
            );
            break;
-          case "tool-call":
+          }
+          case "tool-call": {
            controller.enqueue(
              formatStreamPart("tool_call", {
                toolCallId: chunk.toolCallId,
@@ -4172,7 +4255,8 @@ var DefaultStreamTextResult = class {
              })
            );
            break;
-          case "tool-result":
+          }
+          case "tool-result": {
            controller.enqueue(
              formatStreamPart("tool_result", {
                toolCallId: chunk.toolCallId,
@@ -4180,14 +4264,16 @@ var DefaultStreamTextResult = class {
              })
            );
            break;
-          case "error":
+          }
+          case "error": {
            controller.enqueue(
              formatStreamPart("error", getErrorMessage4(chunk.error))
            );
            break;
-          case "roundtrip-finish":
+          }
+          case "step-finish": {
            controller.enqueue(
-              formatStreamPart("finish_roundtrip", {
+              formatStreamPart("finish_step", {
                finishReason: chunk.finishReason,
                usage: sendUsage ? {
                  promptTokens: chunk.usage.promptTokens,
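On the wire, the renamed part is serialized by `formatStreamPart("finish_step", ...)` into the data stream protocol that `useChat` parses. A sketch of the serialized form (values illustrative; the `e:` prefix is assumed here to be the code assigned to `finish_step` in matching `@ai-sdk/ui-utils` versions):

```js
import { formatStreamPart } from "@ai-sdk/ui-utils";

formatStreamPart("finish_step", {
  finishReason: "tool-calls",
  usage: { promptTokens: 12, completionTokens: 34 },
});
// => 'e:{"finishReason":"tool-calls","usage":{"promptTokens":12,"completionTokens":34}}\n'
```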
@@ -4196,7 +4282,8 @@ var DefaultStreamTextResult = class {
              })
            );
            break;
-          case "finish":
+          }
+          case "finish": {
            controller.enqueue(
              formatStreamPart("finish_message", {
                finishReason: chunk.finishReason,
@@ -4207,6 +4294,7 @@ var DefaultStreamTextResult = class {
              })
            );
            break;
+          }
          default: {
            const exhaustiveCheck = chunkType;
            throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
@@ -4382,27 +4470,6 @@ function attachmentsToParts(attachments) {
   return parts;
 }
 
-// core/prompt/message-conversion-error.ts
-import { AISDKError as AISDKError9 } from "@ai-sdk/provider";
-var name9 = "AI_MessageConversionError";
-var marker9 = `vercel.ai.error.${name9}`;
-var symbol9 = Symbol.for(marker9);
-var _a9;
-var MessageConversionError = class extends AISDKError9 {
-  constructor({
-    originalMessage,
-    message
-  }) {
-    super({ name: name9, message });
-    this[_a9] = true;
-    this.originalMessage = originalMessage;
-  }
-  static isInstance(error) {
-    return AISDKError9.hasMarker(error, marker9);
-  }
-};
-_a9 = symbol9;
-
 // core/prompt/convert-to-core-messages.ts
 function convertToCoreMessages(messages) {
   const coreMessages = [];
@@ -4482,7 +4549,7 @@ function convertToCoreMessages(messages) {
 }
 
 // core/registry/custom-provider.ts
-import { NoSuchModelError } from "@ai-sdk/provider";
+import { NoSuchModelError as NoSuchModelError2 } from "@ai-sdk/provider";
 function experimental_customProvider({
   languageModels,
   textEmbeddingModels,
@@ -4496,7 +4563,7 @@ function experimental_customProvider({
       if (fallbackProvider) {
         return fallbackProvider.languageModel(modelId);
       }
-      throw new NoSuchModelError({ modelId, modelType: "languageModel" });
+      throw new NoSuchModelError2({ modelId, modelType: "languageModel" });
     },
     textEmbeddingModel(modelId) {
       if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -4505,18 +4572,18 @@ function experimental_customProvider({
       if (fallbackProvider) {
         return fallbackProvider.textEmbeddingModel(modelId);
       }
-      throw new NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+      throw new NoSuchModelError2({ modelId, modelType: "textEmbeddingModel" });
     }
   };
 }
 
 // core/registry/no-such-provider-error.ts
-import { AISDKError as AISDKError10, NoSuchModelError as NoSuchModelError2 } from "@ai-sdk/provider";
+import { AISDKError as AISDKError11, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
 var name10 = "AI_NoSuchProviderError";
 var marker10 = `vercel.ai.error.${name10}`;
 var symbol10 = Symbol.for(marker10);
 var _a10;
-var NoSuchProviderError = class extends NoSuchModelError2 {
+var NoSuchProviderError = class extends NoSuchModelError3 {
   constructor({
     modelId,
     modelType,
@@ -4530,7 +4597,7 @@ var NoSuchProviderError = class extends NoSuchModelError2 {
     this.availableProviders = availableProviders;
   }
   static isInstance(error) {
-    return AISDKError10.hasMarker(error, marker10);
+    return AISDKError11.hasMarker(error, marker10);
   }
   /**
   * @deprecated use `isInstance` instead
@@ -4556,7 +4623,7 @@ var NoSuchProviderError = class extends NoSuchModelError2 {
 _a10 = symbol10;
 
 // core/registry/provider-registry.ts
-import { NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
+import { NoSuchModelError as NoSuchModelError4 } from "@ai-sdk/provider";
 function experimental_createProviderRegistry(providers) {
   const registry = new DefaultProviderRegistry();
   for (const [id, provider] of Object.entries(providers)) {
@@ -4590,7 +4657,7 @@ var DefaultProviderRegistry = class {
   splitId(id, modelType) {
     const index = id.indexOf(":");
     if (index === -1) {
-      throw new NoSuchModelError3({
+      throw new NoSuchModelError4({
         modelId: id,
         modelType,
         message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId:modelId")`
@@ -4603,7 +4670,7 @@ var DefaultProviderRegistry = class {
     const [providerId, modelId] = this.splitId(id, "languageModel");
     const model = (_b = (_a11 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a11, modelId);
     if (model == null) {
-      throw new NoSuchModelError3({ modelId: id, modelType: "languageModel" });
+      throw new NoSuchModelError4({ modelId: id, modelType: "languageModel" });
     }
     return model;
   }
@@ -4613,7 +4680,7 @@ var DefaultProviderRegistry = class {
     const provider = this.getProvider(providerId);
     const model = (_c = (_a11 = provider.textEmbeddingModel) == null ? void 0 : _a11.call(provider, modelId)) != null ? _c : "textEmbedding" in provider ? (_b = provider.textEmbedding) == null ? void 0 : _b.call(provider, modelId) : void 0;
     if (model == null) {
-      throw new NoSuchModelError3({
+      throw new NoSuchModelError4({
         modelId: id,
         modelType: "textEmbeddingModel"
       });
@@ -4652,21 +4719,6 @@ function magnitude(vector) {
   return Math.sqrt(dotProduct(vector, vector));
 }
 
-// errors/index.ts
-import {
-  AISDKError as AISDKError11,
-  APICallError as APICallError2,
-  EmptyResponseBodyError,
-  InvalidPromptError as InvalidPromptError2,
-  InvalidResponseDataError,
-  JSONParseError,
-  LoadAPIKeyError,
-  NoContentGeneratedError,
-  NoSuchModelError as NoSuchModelError4,
-  TypeValidationError as TypeValidationError2,
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
-} from "@ai-sdk/provider";
-
 // streams/ai-stream.ts
 import {
   createParser
@@ -5720,7 +5772,7 @@ var StreamingTextResponse = class extends Response {
 var generateId2 = generateIdImpl;
 var nanoid = generateIdImpl;
 export {
-  AISDKError11 as AISDKError,
+  AISDKError10 as AISDKError,
   AIStream,
   APICallError2 as APICallError,
   AWSBedrockAnthropicMessagesStream,
@@ -5750,7 +5802,7 @@ export {
   MistralStream,
   NoContentGeneratedError,
   NoObjectGeneratedError,
-  NoSuchModelError4 as NoSuchModelError,
+  NoSuchModelError,
   NoSuchProviderError,
   NoSuchToolError,
   OpenAIStream,
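The export-list change at the bottom is the visible effect of moving `errors/index.ts` earlier in the bundle: `NoSuchModelError` from `@ai-sdk/provider` is now re-exported under its plain name rather than through a renamed alias. User code can import and narrow it directly; a sketch using the custom-provider path from this diff:

```js
import { NoSuchModelError, experimental_customProvider as customProvider } from "ai";

const myProvider = customProvider({ languageModels: {} }); // no fallback provider

try {
  myProvider.languageModel("missing-model");
} catch (error) {
  if (NoSuchModelError.isInstance(error)) {
    console.log(error.modelId, error.modelType); // "missing-model" "languageModel"
  }
}
```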