ai 3.3.34 → 3.3.36
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- package/CHANGELOG.md +27 -0
- package/dist/index.d.mts +106 -66
- package/dist/index.d.ts +106 -66
- package/dist/index.js +283 -224
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +286 -227
- package/dist/index.mjs.map +1 -1
- package/package.json +6 -6
- package/rsc/dist/index.d.ts +0 -4
- package/rsc/dist/rsc-server.d.mts +0 -4
- package/rsc/dist/rsc-server.mjs +11 -0
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/svelte/dist/index.d.mts +13 -16
- package/svelte/dist/index.d.ts +13 -16
- package/svelte/dist/index.js +5 -4
- package/svelte/dist/index.js.map +1 -1
- package/svelte/dist/index.mjs +5 -4
- package/svelte/dist/index.mjs.map +1 -1
package/dist/index.js
CHANGED
@@ -20,9 +20,9 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 // streams/index.ts
 var streams_exports = {};
 __export(streams_exports, {
-  AISDKError: () =>
+  AISDKError: () => import_provider13.AISDKError,
   AIStream: () => AIStream,
-  APICallError: () =>
+  APICallError: () => import_provider13.APICallError,
   AWSBedrockAnthropicMessagesStream: () => AWSBedrockAnthropicMessagesStream,
   AWSBedrockAnthropicStream: () => AWSBedrockAnthropicStream,
   AWSBedrockCohereStream: () => AWSBedrockCohereStream,
@@ -32,25 +32,25 @@ __export(streams_exports, {
   AssistantResponse: () => AssistantResponse,
   CohereStream: () => CohereStream,
   DownloadError: () => DownloadError,
-  EmptyResponseBodyError: () =>
+  EmptyResponseBodyError: () => import_provider13.EmptyResponseBodyError,
   GoogleGenerativeAIStream: () => GoogleGenerativeAIStream,
   HuggingFaceStream: () => HuggingFaceStream,
   InkeepStream: () => InkeepStream,
   InvalidArgumentError: () => InvalidArgumentError,
   InvalidDataContentError: () => InvalidDataContentError,
   InvalidMessageRoleError: () => InvalidMessageRoleError,
-  InvalidPromptError: () =>
-  InvalidResponseDataError: () =>
+  InvalidPromptError: () => import_provider13.InvalidPromptError,
+  InvalidResponseDataError: () => import_provider13.InvalidResponseDataError,
   InvalidToolArgumentsError: () => InvalidToolArgumentsError,
-  JSONParseError: () =>
+  JSONParseError: () => import_provider13.JSONParseError,
   LangChainAdapter: () => langchain_adapter_exports,
   LangChainStream: () => LangChainStream,
-  LoadAPIKeyError: () =>
+  LoadAPIKeyError: () => import_provider13.LoadAPIKeyError,
   MessageConversionError: () => MessageConversionError,
   MistralStream: () => MistralStream,
-  NoContentGeneratedError: () =>
+  NoContentGeneratedError: () => import_provider13.NoContentGeneratedError,
   NoObjectGeneratedError: () => NoObjectGeneratedError,
-  NoSuchModelError: () =>
+  NoSuchModelError: () => import_provider13.NoSuchModelError,
   NoSuchProviderError: () => NoSuchProviderError,
   NoSuchToolError: () => NoSuchToolError,
   OpenAIStream: () => OpenAIStream,
@@ -58,8 +58,8 @@ __export(streams_exports, {
   RetryError: () => RetryError,
   StreamData: () => StreamData2,
   StreamingTextResponse: () => StreamingTextResponse,
-  TypeValidationError: () =>
-  UnsupportedFunctionalityError: () =>
+  TypeValidationError: () => import_provider13.TypeValidationError,
+  UnsupportedFunctionalityError: () => import_provider13.UnsupportedFunctionalityError,
   convertToCoreMessages: () => convertToCoreMessages,
   cosineSimilarity: () => cosineSimilarity,
   createCallbacksTransformer: () => createCallbacksTransformer,
@@ -1148,6 +1148,7 @@ function prepareCallSettings({
   maxTokens,
   temperature,
   topP,
+  topK,
   presencePenalty,
   frequencyPenalty,
   stopSequences,
@@ -1188,6 +1189,15 @@ function prepareCallSettings({
       });
     }
   }
+  if (topK != null) {
+    if (typeof topK !== "number") {
+      throw new InvalidArgumentError({
+        parameter: "topK",
+        value: topP,
+        message: "topP must be a number"
+      });
+    }
+  }
   if (presencePenalty != null) {
     if (typeof presencePenalty !== "number") {
       throw new InvalidArgumentError({
@@ -1235,6 +1245,7 @@ function prepareCallSettings({
     maxTokens,
     temperature: temperature != null ? temperature : 0,
     topP,
+    topK,
     presencePenalty,
     frequencyPenalty,
     stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
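The three hunks above thread a new `topK` call setting through `prepareCallSettings`, next to `temperature` and `topP`: it is destructured, type-checked, and included in the prepared settings (as published, the added check reports `value: topP` with the message "topP must be a number"). A minimal sketch of passing the option through the public API, assuming a provider model that supports top-k sampling; `model` is a placeholder:

import { generateText, type LanguageModel } from "ai";

declare const model: LanguageModel; // placeholder: any provider model instance

const { text } = await generateText({
  model,
  prompt: "Name three rivers.",
  topK: 40, // validated by prepareCallSettings and forwarded to the provider
});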
@@ -2825,64 +2836,8 @@ var experimental_streamObject = streamObject;
 // core/generate-text/generate-text.ts
 var import_provider_utils9 = require("@ai-sdk/provider-utils");
 
-//
-var
-
-// core/util/is-non-empty-object.ts
-function isNonEmptyObject(object) {
-  return object != null && Object.keys(object).length > 0;
-}
-
-// core/prompt/prepare-tools-and-tool-choice.ts
-function prepareToolsAndToolChoice({
-  tools,
-  toolChoice
-}) {
-  if (!isNonEmptyObject(tools)) {
-    return {
-      tools: void 0,
-      toolChoice: void 0
-    };
-  }
-  return {
-    tools: Object.entries(tools).map(([name11, tool2]) => ({
-      type: "function",
-      name: name11,
-      description: tool2.description,
-      parameters: (0, import_ui_utils3.asSchema)(tool2.parameters).jsonSchema
-    })),
-    toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
-  };
-}
-
-// core/generate-text/to-response-messages.ts
-function toResponseMessages({
-  text = "",
-  toolCalls,
-  toolResults
-}) {
-  const responseMessages = [];
-  responseMessages.push({
-    role: "assistant",
-    content: [{ type: "text", text }, ...toolCalls]
-  });
-  if (toolResults.length > 0) {
-    responseMessages.push({
-      role: "tool",
-      content: toolResults.map((result) => ({
-        type: "tool-result",
-        toolCallId: result.toolCallId,
-        toolName: result.toolName,
-        result: result.result
-      }))
-    });
-  }
-  return responseMessages;
-}
-
-// core/generate-text/tool-call.ts
-var import_provider_utils8 = require("@ai-sdk/provider-utils");
-var import_ui_utils4 = require("@ai-sdk/ui-utils");
+// errors/index.ts
+var import_provider13 = require("@ai-sdk/provider");
 
 // errors/invalid-tool-arguments-error.ts
 var import_provider10 = require("@ai-sdk/provider");
@@ -2970,7 +2925,85 @@ var NoSuchToolError = class extends import_provider11.AISDKError {
 };
 _a8 = symbol8;
 
+// core/prompt/message-conversion-error.ts
+var import_provider12 = require("@ai-sdk/provider");
+var name9 = "AI_MessageConversionError";
+var marker9 = `vercel.ai.error.${name9}`;
+var symbol9 = Symbol.for(marker9);
+var _a9;
+var MessageConversionError = class extends import_provider12.AISDKError {
+  constructor({
+    originalMessage,
+    message
+  }) {
+    super({ name: name9, message });
+    this[_a9] = true;
+    this.originalMessage = originalMessage;
+  }
+  static isInstance(error) {
+    return import_provider12.AISDKError.hasMarker(error, marker9);
+  }
+};
+_a9 = symbol9;
+
+// core/prompt/prepare-tools-and-tool-choice.ts
+var import_ui_utils3 = require("@ai-sdk/ui-utils");
+
+// core/util/is-non-empty-object.ts
+function isNonEmptyObject(object) {
+  return object != null && Object.keys(object).length > 0;
+}
+
+// core/prompt/prepare-tools-and-tool-choice.ts
+function prepareToolsAndToolChoice({
+  tools,
+  toolChoice
+}) {
+  if (!isNonEmptyObject(tools)) {
+    return {
+      tools: void 0,
+      toolChoice: void 0
+    };
+  }
+  return {
+    tools: Object.entries(tools).map(([name11, tool2]) => ({
+      type: "function",
+      name: name11,
+      description: tool2.description,
+      parameters: (0, import_ui_utils3.asSchema)(tool2.parameters).jsonSchema
+    })),
+    toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
+  };
+}
+
+// core/generate-text/to-response-messages.ts
+function toResponseMessages({
+  text = "",
+  toolCalls,
+  toolResults
+}) {
+  const responseMessages = [];
+  responseMessages.push({
+    role: "assistant",
+    content: [{ type: "text", text }, ...toolCalls]
+  });
+  if (toolResults.length > 0) {
+    responseMessages.push({
+      role: "tool",
+      content: toolResults.map((result) => ({
+        type: "tool-result",
+        toolCallId: result.toolCallId,
+        toolName: result.toolName,
+        result: result.result
+      }))
+    });
+  }
+  return responseMessages;
+}
+
 // core/generate-text/tool-call.ts
+var import_provider_utils8 = require("@ai-sdk/provider-utils");
+var import_ui_utils4 = require("@ai-sdk/ui-utils");
 function parseToolCall({
   toolCall,
   tools
@@ -3019,6 +3052,7 @@ async function generateText({
   headers,
   maxAutomaticRoundtrips = 0,
   maxToolRoundtrips = maxAutomaticRoundtrips,
+  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
   experimental_telemetry: telemetry,
   experimental_providerMetadata: providerMetadata,
   _internal: {
@@ -3028,6 +3062,13 @@ async function generateText({
   ...settings
 }) {
   var _a11;
+  if (maxSteps < 1) {
+    throw new InvalidArgumentError({
+      parameter: "maxSteps",
+      value: maxSteps,
+      message: "maxSteps must be at least 1"
+    });
+  }
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
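`generateText` now takes a `maxSteps` parameter that supersedes `maxToolRoundtrips`: it defaults to `maxToolRoundtrips + 1` (one step is one model generation, so n tool roundtrips need n + 1 steps), and anything below 1 is rejected with `InvalidArgumentError`. A sketch of the new option; the model and tool definition are illustrative placeholders:

import { generateText, tool, type LanguageModel } from "ai";
import { z } from "zod";

declare const model: LanguageModel; // placeholder

const result = await generateText({
  model,
  tools: {
    weather: tool({
      description: "Get the weather for a city",
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempC: 21 }), // stubbed tool result
    }),
  },
  maxSteps: 3, // up to 3 generations, i.e. up to 2 tool roundtrips
  prompt: "What is the weather in Paris?",
});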
@@ -3049,7 +3090,7 @@ async function generateText({
       "ai.prompt": {
         input: () => JSON.stringify({ system, prompt, messages })
       },
-      "ai.settings.
+      "ai.settings.maxSteps": maxSteps
     }
   }),
   tracer,
@@ -3073,16 +3114,16 @@ async function generateText({
     let currentModelResponse;
     let currentToolCalls = [];
     let currentToolResults = [];
-    let
+    let stepCount = 0;
     const responseMessages = [];
-    const
+    const steps = [];
     const usage = {
       completionTokens: 0,
       promptTokens: 0,
       totalTokens: 0
     };
     do {
-      const currentInputFormat =
+      const currentInputFormat = stepCount === 0 ? validatedPrompt.type : "messages";
       currentModelResponse = await retry(
         () => recordSpan({
           name: "ai.generateText.doGenerate",
@@ -3179,7 +3220,7 @@ async function generateText({
       usage.completionTokens += currentUsage.completionTokens;
       usage.promptTokens += currentUsage.promptTokens;
       usage.totalTokens += currentUsage.totalTokens;
-
+      steps.push({
         text: (_b = currentModelResponse.text) != null ? _b : "",
         toolCalls: currentToolCalls,
         toolResults: currentToolResults,
@@ -3206,8 +3247,8 @@ async function generateText({
     } while (
       // there are tool calls:
       currentToolCalls.length > 0 && // all current tool calls have results:
-      currentToolResults.length === currentToolCalls.length && // the number of
-
+      currentToolResults.length === currentToolCalls.length && // the number of steps is less than the maximum:
+      ++stepCount < maxSteps
     );
     span.setAttributes(
       selectTelemetryAttributes({
@@ -3249,7 +3290,7 @@ async function generateText({
       },
       logprobs: currentModelResponse.logprobs,
       responseMessages,
-
+      steps,
       providerMetadata: currentModelResponse.providerMetadata
     });
   }
@@ -3324,7 +3365,8 @@ var DefaultGenerateTextResult = class {
     this.warnings = options.warnings;
     this.response = options.response;
     this.responseMessages = options.responseMessages;
-    this.roundtrips = options.
+    this.roundtrips = options.steps;
+    this.steps = options.steps;
     this.experimental_providerMetadata = options.providerMetadata;
     this.rawResponse = {
       headers: options.response.headers
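The generation loop now counts `stepCount` against `maxSteps` and collects one record per generation into `steps`, which `DefaultGenerateTextResult` exposes as `result.steps` while keeping the older `roundtrips` property as an alias for the same array. A sketch of reading it, continuing the example above and using only fields visible in the pushed step objects:

for (const step of result.steps) {
  // each step records the text, tool calls, and tool results of one generation
  console.log(step.text, step.toolCalls.length, step.toolResults.length);
}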
@@ -3689,6 +3731,7 @@ async function streamText({
   abortSignal,
   headers,
   maxToolRoundtrips = 0,
+  maxSteps = maxToolRoundtrips != null ? maxToolRoundtrips + 1 : 1,
   experimental_telemetry: telemetry,
   experimental_providerMetadata: providerMetadata,
   experimental_toolCallStreaming: toolCallStreaming = false,
@@ -3702,6 +3745,13 @@ async function streamText({
   ...settings
 }) {
   var _a11;
+  if (maxSteps < 1) {
+    throw new InvalidArgumentError({
+      parameter: "maxSteps",
+      value: maxSteps,
+      message: "maxSteps must be at least 1"
+    });
+  }
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
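`streamText` gains the same `maxSteps` option (again defaulting to `maxToolRoundtrips + 1`) and the same minimum-of-1 validation. A minimal sketch, reusing the placeholder `model` and `tools` from the sketches above:

import { streamText } from "ai";

const result = await streamText({
  model,
  tools,
  maxSteps: 2, // one tool roundtrip, then a final text answer
  prompt: "Look up the weather in Paris, then summarize it.",
});

for await (const textDelta of result.textStream) {
  process.stdout.write(textDelta);
}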
@@ -3719,14 +3769,15 @@ async function streamText({
       // specific settings that only make sense on the outer level:
       "ai.prompt": {
         input: () => JSON.stringify({ system, prompt, messages })
-      }
+      },
+      "ai.settings.maxSteps": maxSteps
     }
   }),
   tracer,
   endWhenDone: false,
   fn: async (rootSpan) => {
     const retry = retryWithExponentialBackoff({ maxRetries });
-    const
+    const startStep = async ({
       promptMessages: promptMessages2,
       promptType
     }) => {
@@ -3808,7 +3859,7 @@ async function streamText({
       result: { stream, warnings, rawResponse },
       doStreamSpan,
       startTimestampMs
-    } = await
+    } = await startStep({
       promptType: validatePrompt({ system, prompt, messages }).type,
       promptMessages
     });
@@ -3822,8 +3873,8 @@ async function streamText({
       doStreamSpan,
       telemetry,
       startTimestampMs,
-
-
+      maxSteps,
+      startStep,
       promptMessages,
       modelId: model.modelId,
       now: now2,
@@ -3844,8 +3895,8 @@ var DefaultStreamTextResult = class {
     doStreamSpan,
     telemetry,
     startTimestampMs,
-
-
+    maxSteps,
+    startStep,
     promptMessages,
     modelId,
     now: now2,
@@ -3864,6 +3915,8 @@ var DefaultStreamTextResult = class {
     this.toolCalls = toolCallsPromise;
     const { resolve: resolveToolResults, promise: toolResultsPromise } = createResolvablePromise();
     this.toolResults = toolResultsPromise;
+    const { resolve: resolveSteps, promise: stepsPromise } = createResolvablePromise();
+    this.steps = stepsPromise;
     const {
       resolve: resolveProviderMetadata,
       promise: providerMetadataPromise
@@ -3877,12 +3930,13 @@ var DefaultStreamTextResult = class {
       close: closeStitchableStream
     } = createStitchableStream();
     this.originalStream = stitchableStream;
+    const stepResults = [];
     const self = this;
-    function
+    function addStepStream({
       stream: stream2,
       startTimestamp,
       doStreamSpan: doStreamSpan2,
-
+      currentStep,
       promptMessages: promptMessages2,
       usage = {
         promptTokens: 0,
@@ -3890,19 +3944,19 @@ var DefaultStreamTextResult = class {
         totalTokens: 0
       }
     }) {
-      const
-      const
-      let
-      let
+      const stepToolCalls = [];
+      const stepToolResults = [];
+      let stepFinishReason = "unknown";
+      let stepUsage = {
        promptTokens: 0,
        completionTokens: 0,
        totalTokens: 0
      };
-      let
-      let
-      let
-      let
-      let
+      let stepProviderMetadata;
+      let stepFirstChunk = true;
+      let stepText = "";
+      let stepLogProbs;
+      let stepResponse = {
        id: generateId3(),
        timestamp: currentDate(),
        modelId
@@ -3912,9 +3966,9 @@ var DefaultStreamTextResult = class {
       new TransformStream({
         async transform(chunk, controller) {
           var _a11, _b, _c;
-          if (
+          if (stepFirstChunk) {
             const msToFirstChunk = now2() - startTimestamp;
-
+            stepFirstChunk = false;
             doStreamSpan2.addEvent("ai.stream.firstChunk", {
               "ai.response.msToFirstChunk": msToFirstChunk,
               // deprecated:
@@ -3933,40 +3987,40 @@ var DefaultStreamTextResult = class {
           switch (chunkType) {
             case "text-delta": {
               controller.enqueue(chunk);
-
+              stepText += chunk.textDelta;
               await (onChunk == null ? void 0 : onChunk({ chunk }));
               break;
             }
             case "tool-call": {
               controller.enqueue(chunk);
-
+              stepToolCalls.push(chunk);
               await (onChunk == null ? void 0 : onChunk({ chunk }));
               break;
             }
             case "tool-result": {
               controller.enqueue(chunk);
-
+              stepToolResults.push(chunk);
               await (onChunk == null ? void 0 : onChunk({ chunk }));
               break;
             }
             case "response-metadata": {
-
-              id: (_a11 = chunk.id) != null ? _a11 :
-              timestamp: (_b = chunk.timestamp) != null ? _b :
-              modelId: (_c = chunk.modelId) != null ? _c :
+              stepResponse = {
+                id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
+                timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
+                modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
               };
               break;
             }
             case "finish": {
-
-
-
-
+              stepUsage = chunk.usage;
+              stepFinishReason = chunk.finishReason;
+              stepProviderMetadata = chunk.experimental_providerMetadata;
+              stepLogProbs = chunk.logprobs;
               const msToFinish = now2() - startTimestamp;
               doStreamSpan2.addEvent("ai.stream.finish");
               doStreamSpan2.setAttributes({
                 "ai.response.msToFinish": msToFinish,
-                "ai.response.avgCompletionTokensPerSecond": 1e3 *
+                "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
               });
               break;
             }
@@ -3978,7 +4032,7 @@ var DefaultStreamTextResult = class {
             }
             case "error": {
               controller.enqueue(chunk);
-
+              stepFinishReason = "error";
               break;
             }
             default: {
@@ -3990,41 +4044,52 @@ var DefaultStreamTextResult = class {
         // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
         async flush(controller) {
           controller.enqueue({
-            type: "
-            finishReason:
-            usage:
-            experimental_providerMetadata:
-            logprobs:
-            response:
+            type: "step-finish",
+            finishReason: stepFinishReason,
+            usage: stepUsage,
+            experimental_providerMetadata: stepProviderMetadata,
+            logprobs: stepLogProbs,
+            response: stepResponse
           });
-
+          stepResults.push({
+            text: stepText,
+            toolCalls: stepToolCalls,
+            toolResults: stepToolResults,
+            finishReason: stepFinishReason,
+            usage: stepUsage,
+            warnings: self.warnings,
+            logprobs: stepLogProbs,
+            response: stepResponse,
+            rawResponse: self.rawResponse
+          });
+          const telemetryToolCalls = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
           try {
             doStreamSpan2.setAttributes(
               selectTelemetryAttributes({
                 telemetry,
                 attributes: {
-                  "ai.response.finishReason":
-                  "ai.response.text": { output: () =>
+                  "ai.response.finishReason": stepFinishReason,
+                  "ai.response.text": { output: () => stepText },
                   "ai.response.toolCalls": {
                     output: () => telemetryToolCalls
                   },
-                  "ai.response.id":
-                  "ai.response.model":
-                  "ai.response.timestamp":
-                  "ai.usage.promptTokens":
-                  "ai.usage.completionTokens":
+                  "ai.response.id": stepResponse.id,
+                  "ai.response.model": stepResponse.modelId,
+                  "ai.response.timestamp": stepResponse.timestamp.toISOString(),
+                  "ai.usage.promptTokens": stepUsage.promptTokens,
+                  "ai.usage.completionTokens": stepUsage.completionTokens,
                   // deprecated
-                  "ai.finishReason":
-                  "ai.result.text": { output: () =>
+                  "ai.finishReason": stepFinishReason,
+                  "ai.result.text": { output: () => stepText },
                   "ai.result.toolCalls": {
                     output: () => telemetryToolCalls
                   },
                   // standardized gen-ai llm span attributes:
-                  "gen_ai.response.finish_reasons": [
-                  "gen_ai.response.id":
-                  "gen_ai.response.model":
-                  "gen_ai.usage.input_tokens":
-                  "gen_ai.usage.output_tokens":
+                  "gen_ai.response.finish_reasons": [stepFinishReason],
+                  "gen_ai.response.id": stepResponse.id,
+                  "gen_ai.response.model": stepResponse.modelId,
+                  "gen_ai.usage.input_tokens": stepUsage.promptTokens,
+                  "gen_ai.usage.output_tokens": stepUsage.completionTokens
                 }
               })
             );
@@ -4033,21 +4098,21 @@ var DefaultStreamTextResult = class {
             doStreamSpan2.end();
           }
           const combinedUsage = {
-            promptTokens: usage.promptTokens +
-            completionTokens: usage.completionTokens +
-            totalTokens: usage.totalTokens +
+            promptTokens: usage.promptTokens + stepUsage.promptTokens,
+            completionTokens: usage.completionTokens + stepUsage.completionTokens,
+            totalTokens: usage.totalTokens + stepUsage.totalTokens
           };
           if (
             // there are tool calls:
-
-
-
+            stepToolCalls.length > 0 && // all current tool calls have results:
+            stepToolResults.length === stepToolCalls.length && // the number of steps is less than the maximum:
+            currentStep + 1 < maxSteps
           ) {
             promptMessages2.push(
               ...toResponseMessages({
-                text:
-                toolCalls:
-                toolResults:
+                text: stepText,
+                toolCalls: stepToolCalls,
+                toolResults: stepToolResults
               }).map(
                 (message) => convertToLanguageModelMessage(message, null)
               )
@@ -4056,17 +4121,17 @@ var DefaultStreamTextResult = class {
               result,
               doStreamSpan: doStreamSpan3,
               startTimestampMs: startTimestamp2
-            } = await
+            } = await startStep({
               promptType: "messages",
               promptMessages: promptMessages2
             });
             self.warnings = result.warnings;
             self.rawResponse = result.rawResponse;
-
+            addStepStream({
               stream: result.stream,
               startTimestamp: startTimestamp2,
               doStreamSpan: doStreamSpan3,
-
+              currentStep: currentStep + 1,
               promptMessages: promptMessages2,
               usage: combinedUsage
             });
@@ -4075,27 +4140,27 @@ var DefaultStreamTextResult = class {
           try {
             controller.enqueue({
               type: "finish",
-              finishReason:
+              finishReason: stepFinishReason,
               usage: combinedUsage,
-              experimental_providerMetadata:
-              logprobs:
-              response:
+              experimental_providerMetadata: stepProviderMetadata,
+              logprobs: stepLogProbs,
+              response: stepResponse
             });
             closeStitchableStream();
             rootSpan.setAttributes(
               selectTelemetryAttributes({
                 telemetry,
                 attributes: {
-                  "ai.response.finishReason":
-                  "ai.response.text": { output: () =>
+                  "ai.response.finishReason": stepFinishReason,
+                  "ai.response.text": { output: () => stepText },
                   "ai.response.toolCalls": {
                     output: () => telemetryToolCalls
                   },
                   "ai.usage.promptTokens": combinedUsage.promptTokens,
                   "ai.usage.completionTokens": combinedUsage.completionTokens,
                   // deprecated
-                  "ai.finishReason":
-                  "ai.result.text": { output: () =>
+                  "ai.finishReason": stepFinishReason,
+                  "ai.result.text": { output: () => stepText },
                   "ai.result.toolCalls": {
                     output: () => telemetryToolCalls
                   }
@@ -4103,32 +4168,35 @@ var DefaultStreamTextResult = class {
               })
             );
             resolveUsage(combinedUsage);
-            resolveFinishReason(
-            resolveText(
-            resolveToolCalls(
-            resolveProviderMetadata(
-            resolveToolResults(
+            resolveFinishReason(stepFinishReason);
+            resolveText(stepText);
+            resolveToolCalls(stepToolCalls);
+            resolveProviderMetadata(stepProviderMetadata);
+            resolveToolResults(stepToolResults);
             resolveResponse({
-              ...
+              ...stepResponse,
               headers: rawResponse == null ? void 0 : rawResponse.headers
             });
+            resolveSteps(stepResults);
             await (onFinish == null ? void 0 : onFinish({
-              finishReason:
+              finishReason: stepFinishReason,
               usage: combinedUsage,
-              text:
-              toolCalls:
+              text: stepText,
+              toolCalls: stepToolCalls,
               // The tool results are inferred as a never[] type, because they are
               // optional and the execute method with an inferred result type is
               // optional as well. Therefore we need to cast the toolResults to any.
               // The type exposed to the users will be correctly inferred.
-              toolResults:
+              toolResults: stepToolResults,
               rawResponse,
               response: {
-                ...
+                ...stepResponse,
                 headers: rawResponse == null ? void 0 : rawResponse.headers
               },
               warnings,
-              experimental_providerMetadata:
+              experimental_providerMetadata: stepProviderMetadata,
+              steps: stepResults
+              // see tool results comment above
             }));
           } catch (error) {
             controller.error(error);
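Within `DefaultStreamTextResult`, each step's stream is spliced in through `addStepStream`, every finished step is pushed onto `stepResults`, and on the final flush those results are resolved into a `steps` promise on the result object and handed to `onFinish` as `steps`. A sketch of observing them, under the same placeholder assumptions as above:

const result = await streamText({
  model,
  tools,
  maxSteps: 2,
  prompt: "What is the weather in Paris?",
  onFinish({ steps, finishReason, usage }) {
    // one entry per model generation, mirroring generateText's result.steps
    console.log(steps.length, finishReason, usage.totalTokens);
  },
});

const steps = await result.steps; // the same array, exposed as a promise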
@@ -4140,11 +4208,11 @@ var DefaultStreamTextResult = class {
         )
       );
     }
-
+    addStepStream({
       stream,
       startTimestamp: startTimestampMs,
       doStreamSpan,
-
+      currentStep: 0,
       promptMessages,
       usage: void 0
     });
@@ -4185,8 +4253,9 @@ var DefaultStreamTextResult = class {
   }
   toDataStreamInternal({
     callbacks = {},
-    getErrorMessage: getErrorMessage4 = () => ""
+    getErrorMessage: getErrorMessage4 = () => "",
     // mask error messages for safety by default
+    sendUsage = true
   } = {}) {
     let aggregatedResponse = "";
     const callbackTransformer = new TransformStream({
@@ -4216,10 +4285,11 @@ var DefaultStreamTextResult = class {
       transform: async (chunk, controller) => {
         const chunkType = chunk.type;
         switch (chunkType) {
-          case "text-delta":
+          case "text-delta": {
             controller.enqueue((0, import_ui_utils10.formatStreamPart)("text", chunk.textDelta));
             break;
-
+          }
+          case "tool-call-streaming-start": {
             controller.enqueue(
               (0, import_ui_utils10.formatStreamPart)("tool_call_streaming_start", {
                 toolCallId: chunk.toolCallId,
@@ -4227,7 +4297,8 @@ var DefaultStreamTextResult = class {
               })
             );
             break;
-
+          }
+          case "tool-call-delta": {
             controller.enqueue(
               (0, import_ui_utils10.formatStreamPart)("tool_call_delta", {
                 toolCallId: chunk.toolCallId,
@@ -4235,7 +4306,8 @@ var DefaultStreamTextResult = class {
               })
             );
             break;
-
+          }
+          case "tool-call": {
             controller.enqueue(
               (0, import_ui_utils10.formatStreamPart)("tool_call", {
                 toolCallId: chunk.toolCallId,
@@ -4244,7 +4316,8 @@ var DefaultStreamTextResult = class {
               })
             );
             break;
-
+          }
+          case "tool-result": {
             controller.enqueue(
               (0, import_ui_utils10.formatStreamPart)("tool_result", {
                 toolCallId: chunk.toolCallId,
@@ -4252,33 +4325,37 @@ var DefaultStreamTextResult = class {
               })
             );
             break;
-
+          }
+          case "error": {
             controller.enqueue(
               (0, import_ui_utils10.formatStreamPart)("error", getErrorMessage4(chunk.error))
             );
             break;
-
+          }
+          case "step-finish": {
             controller.enqueue(
-              (0, import_ui_utils10.formatStreamPart)("
+              (0, import_ui_utils10.formatStreamPart)("finish_step", {
                 finishReason: chunk.finishReason,
-                usage: {
+                usage: sendUsage ? {
                   promptTokens: chunk.usage.promptTokens,
                   completionTokens: chunk.usage.completionTokens
-                }
+                } : void 0
               })
             );
             break;
-
+          }
+          case "finish": {
             controller.enqueue(
               (0, import_ui_utils10.formatStreamPart)("finish_message", {
                 finishReason: chunk.finishReason,
-                usage: {
+                usage: sendUsage ? {
                   promptTokens: chunk.usage.promptTokens,
                   completionTokens: chunk.usage.completionTokens
-                }
+                } : void 0
               })
             );
             break;
+          }
           default: {
             const exhaustiveCheck = chunkType;
             throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
@@ -4299,6 +4376,7 @@ var DefaultStreamTextResult = class {
     };
     const data = options == null ? void 0 : "data" in options ? options.data : void 0;
     const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
+    const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
     writeToServerResponse({
       response,
       status: init == null ? void 0 : init.status,
@@ -4307,7 +4385,7 @@ var DefaultStreamTextResult = class {
         contentType: "text/plain; charset=utf-8",
         dataStreamVersion: "v1"
       }),
-      stream: this.toDataStream({ data, getErrorMessage: getErrorMessage4 })
+      stream: this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage })
     });
   }
   pipeTextStreamToResponse(response, init) {
@@ -4326,7 +4404,8 @@ var DefaultStreamTextResult = class {
   }
   toDataStream(options) {
     const stream = this.toDataStreamInternal({
-      getErrorMessage: options == null ? void 0 : options.getErrorMessage
+      getErrorMessage: options == null ? void 0 : options.getErrorMessage,
+      sendUsage: options == null ? void 0 : options.sendUsage
    });
     return (options == null ? void 0 : options.data) ? mergeStreams(options == null ? void 0 : options.data.stream, stream) : stream;
   }
@@ -4339,14 +4418,18 @@ var DefaultStreamTextResult = class {
     };
     const data = options == null ? void 0 : "data" in options ? options.data : void 0;
     const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
-
-
-
-
-
-
-
-
+    const sendUsage = options == null ? void 0 : "sendUsage" in options ? options.sendUsage : void 0;
+    return new Response(
+      this.toDataStream({ data, getErrorMessage: getErrorMessage4, sendUsage }),
+      {
+        status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
+        statusText: init == null ? void 0 : init.statusText,
+        headers: prepareResponseHeaders(init, {
+          contentType: "text/plain; charset=utf-8",
+          dataStreamVersion: "v1"
+        })
+      }
+    );
   }
   toTextStreamResponse(init) {
     var _a11;
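The data-stream serializers now emit a dedicated `finish_step` part per step and accept a `sendUsage` flag (default `true`), threaded through `toDataStream`, `pipeDataStreamToResponse`, and `toDataStreamResponse`; when it is false, the usage payload of `finish_step` and `finish_message` parts is omitted. A sketch of opting out, assuming a server route handler with a `streamText` result in scope:

// "result" is a streamText result; the route returns the streamed response
return result.toDataStreamResponse({
  sendUsage: false, // omit token counts from finish_step / finish_message parts
});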
@@ -4448,27 +4531,6 @@ function attachmentsToParts(attachments) {
   return parts;
 }
 
-// core/prompt/message-conversion-error.ts
-var import_provider12 = require("@ai-sdk/provider");
-var name9 = "AI_MessageConversionError";
-var marker9 = `vercel.ai.error.${name9}`;
-var symbol9 = Symbol.for(marker9);
-var _a9;
-var MessageConversionError = class extends import_provider12.AISDKError {
-  constructor({
-    originalMessage,
-    message
-  }) {
-    super({ name: name9, message });
-    this[_a9] = true;
-    this.originalMessage = originalMessage;
-  }
-  static isInstance(error) {
-    return import_provider12.AISDKError.hasMarker(error, marker9);
-  }
-};
-_a9 = symbol9;
-
 // core/prompt/convert-to-core-messages.ts
 function convertToCoreMessages(messages) {
   const coreMessages = [];
@@ -4548,7 +4610,7 @@ function convertToCoreMessages(messages) {
 }
 
 // core/registry/custom-provider.ts
-var
+var import_provider14 = require("@ai-sdk/provider");
 function experimental_customProvider({
   languageModels,
   textEmbeddingModels,
@@ -4562,7 +4624,7 @@ function experimental_customProvider({
       if (fallbackProvider) {
         return fallbackProvider.languageModel(modelId);
       }
-      throw new
+      throw new import_provider14.NoSuchModelError({ modelId, modelType: "languageModel" });
     },
     textEmbeddingModel(modelId) {
       if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -4571,18 +4633,18 @@ function experimental_customProvider({
       if (fallbackProvider) {
         return fallbackProvider.textEmbeddingModel(modelId);
       }
-      throw new
+      throw new import_provider14.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
     }
   };
 }
 
 // core/registry/no-such-provider-error.ts
-var
+var import_provider15 = require("@ai-sdk/provider");
 var name10 = "AI_NoSuchProviderError";
 var marker10 = `vercel.ai.error.${name10}`;
 var symbol10 = Symbol.for(marker10);
 var _a10;
-var NoSuchProviderError = class extends
+var NoSuchProviderError = class extends import_provider15.NoSuchModelError {
   constructor({
     modelId,
     modelType,
@@ -4596,7 +4658,7 @@ var NoSuchProviderError = class extends import_provider14.NoSuchModelError {
     this.availableProviders = availableProviders;
   }
   static isInstance(error) {
-    return
+    return import_provider15.AISDKError.hasMarker(error, marker10);
   }
   /**
    * @deprecated use `isInstance` instead
@@ -4622,7 +4684,7 @@ var NoSuchProviderError = class extends import_provider14.NoSuchModelError {
 _a10 = symbol10;
 
 // core/registry/provider-registry.ts
-var
+var import_provider16 = require("@ai-sdk/provider");
 function experimental_createProviderRegistry(providers) {
   const registry = new DefaultProviderRegistry();
   for (const [id, provider] of Object.entries(providers)) {
@@ -4656,7 +4718,7 @@ var DefaultProviderRegistry = class {
   splitId(id, modelType) {
     const index = id.indexOf(":");
     if (index === -1) {
-      throw new
+      throw new import_provider16.NoSuchModelError({
         modelId: id,
         modelType,
         message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId:modelId")`
@@ -4669,7 +4731,7 @@ var DefaultProviderRegistry = class {
     const [providerId, modelId] = this.splitId(id, "languageModel");
     const model = (_b = (_a11 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a11, modelId);
     if (model == null) {
-      throw new
+      throw new import_provider16.NoSuchModelError({ modelId: id, modelType: "languageModel" });
     }
     return model;
   }
@@ -4679,7 +4741,7 @@ var DefaultProviderRegistry = class {
     const provider = this.getProvider(providerId);
     const model = (_c = (_a11 = provider.textEmbeddingModel) == null ? void 0 : _a11.call(provider, modelId)) != null ? _c : "textEmbedding" in provider ? (_b = provider.textEmbedding) == null ? void 0 : _b.call(provider, modelId) : void 0;
     if (model == null) {
-      throw new
+      throw new import_provider16.NoSuchModelError({
         modelId: id,
         modelType: "textEmbeddingModel"
       });
@@ -4718,9 +4780,6 @@ function magnitude(vector) {
   return Math.sqrt(dotProduct(vector, vector));
 }
 
-// errors/index.ts
-var import_provider16 = require("@ai-sdk/provider");
-
 // streams/ai-stream.ts
 var import_eventsource_parser = require("eventsource-parser");
 function createEventStreamTransformer(customParser) {
|