ai 5.0.0-alpha.1 → 5.0.0-alpha.11
This diff shows the contents of publicly available package versions as published to their respective public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +159 -0
- package/dist/index.d.mts +441 -563
- package/dist/index.d.ts +441 -563
- package/dist/index.js +1534 -1490
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1410 -1355
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +25 -5
- package/dist/internal/index.d.ts +25 -5
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/package.json +6 -5
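The headline change in this range is the chat UI layer: the `ChatStore`/`callChatApi` machinery is replaced by an `AbstractChat` class with pluggable transports (`DefaultChatTransport`, `TextStreamChatTransport`), the `source` stream part is split into `source-url` and `source-document`, and `SerialJobExecutor`, `stepCountIs`, and `GLOBAL_DEFAULT_PROVIDER` are newly exported while `callChatApi`, `defaultChatStore`, `maxSteps`, `shouldResubmitMessages`, `extractMaxToolInvocationStep`, and `updateToolCallResult` leave the public surface. A minimal sketch of the new transport wiring, inferred from the compiled output below; the endpoint and header values are hypothetical placeholders, and the option names come from the constructor destructuring visible in the diff:

```ts
import { DefaultChatTransport } from 'ai';

// Hypothetical configuration; "/api/chat" is the default shown in the diff.
const transport = new DefaultChatTransport({
  api: '/api/chat',
  headers: { 'x-example': 'placeholder' }, // hypothetical header
  // prepareRequest receives { id, messages, body, headers, credentials,
  // requestMetadata } and may return overrides for body/headers/credentials:
  prepareRequest: ({ id, messages, body }) => ({
    body: { id, messages, ...body },
  }),
});
```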
package/dist/index.js (CHANGED)
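The hunks below also contain the new `SerialJobExecutor`, which `AbstractChat` uses to serialize message-state updates and which is now exported from `ai`. A minimal usage sketch (illustrative only; the class body appears verbatim in the diff):

```ts
import { SerialJobExecutor } from 'ai';

const executor = new SerialJobExecutor();

// run() queues the job and resolves once it has completed;
// jobs execute strictly one at a time, in submission order.
void executor.run(async () => {
  // first job
});
void executor.run(async () => {
  // starts only after the first job has settled
});
```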
@@ -22,10 +22,11 @@ var src_exports = {};
 __export(src_exports, {
   AISDKError: () => import_provider16.AISDKError,
   APICallError: () => import_provider16.APICallError,
-
+  AbstractChat: () => AbstractChat,
   DefaultChatTransport: () => DefaultChatTransport,
   DownloadError: () => DownloadError,
   EmptyResponseBodyError: () => import_provider16.EmptyResponseBodyError,
+  GLOBAL_DEFAULT_PROVIDER: () => GLOBAL_DEFAULT_PROVIDER,
   InvalidArgumentError: () => InvalidArgumentError,
   InvalidDataContentError: () => InvalidDataContentError,
   InvalidMessageRoleError: () => InvalidMessageRoleError,
@@ -47,14 +48,14 @@ __export(src_exports, {
   NoSuchToolError: () => NoSuchToolError,
   Output: () => output_exports,
   RetryError: () => RetryError,
+  SerialJobExecutor: () => SerialJobExecutor,
+  TextStreamChatTransport: () => TextStreamChatTransport,
   ToolCallRepairError: () => ToolCallRepairError,
   ToolExecutionError: () => ToolExecutionError,
   TypeValidationError: () => import_provider16.TypeValidationError,
   UnsupportedFunctionalityError: () => import_provider16.UnsupportedFunctionalityError,
-
-  asSchema: () => import_provider_utils26.asSchema,
+  asSchema: () => import_provider_utils25.asSchema,
   assistantModelMessageSchema: () => assistantModelMessageSchema,
-  callChatApi: () => callChatApi,
   callCompletionApi: () => callCompletionApi,
   convertFileListToFileUIParts: () => convertFileListToFileUIParts,
   convertToCoreMessages: () => convertToCoreMessages,
@@ -65,13 +66,12 @@ __export(src_exports, {
   coreToolMessageSchema: () => coreToolMessageSchema,
   coreUserMessageSchema: () => coreUserMessageSchema,
   cosineSimilarity: () => cosineSimilarity,
-  createIdGenerator: () =>
+  createIdGenerator: () => import_provider_utils25.createIdGenerator,
   createProviderRegistry: () => createProviderRegistry,
   createTextStreamResponse: () => createTextStreamResponse,
   createUIMessageStream: () => createUIMessageStream,
   createUIMessageStreamResponse: () => createUIMessageStreamResponse,
   customProvider: () => customProvider,
-  defaultChatStore: () => defaultChatStore,
   defaultSettingsMiddleware: () => defaultSettingsMiddleware,
   embed: () => embed,
   embedMany: () => embedMany,
@@ -81,37 +81,33 @@ __export(src_exports, {
   experimental_generateImage: () => generateImage,
   experimental_generateSpeech: () => generateSpeech,
   experimental_transcribe: () => transcribe,
-  extractMaxToolInvocationStep: () => extractMaxToolInvocationStep,
   extractReasoningMiddleware: () => extractReasoningMiddleware,
-  generateId: () =>
+  generateId: () => import_provider_utils25.generateId,
   generateObject: () => generateObject,
   generateText: () => generateText,
   getTextFromDataUrl: () => getTextFromDataUrl,
   getToolInvocations: () => getToolInvocations,
   hasToolCall: () => hasToolCall,
-  isAssistantMessageWithCompletedToolCalls: () => isAssistantMessageWithCompletedToolCalls,
   isDeepEqualData: () => isDeepEqualData,
-  jsonSchema: () =>
-  maxSteps: () => maxSteps,
+  jsonSchema: () => import_provider_utils25.jsonSchema,
   modelMessageSchema: () => modelMessageSchema,
   parsePartialJson: () => parsePartialJson,
   pipeTextStreamToResponse: () => pipeTextStreamToResponse,
   pipeUIMessageStreamToResponse: () => pipeUIMessageStreamToResponse,
-  shouldResubmitMessages: () => shouldResubmitMessages,
   simulateReadableStream: () => simulateReadableStream,
   simulateStreamingMiddleware: () => simulateStreamingMiddleware,
   smoothStream: () => smoothStream,
+  stepCountIs: () => stepCountIs,
   streamObject: () => streamObject,
   streamText: () => streamText,
   systemModelMessageSchema: () => systemModelMessageSchema,
   tool: () => tool,
   toolModelMessageSchema: () => toolModelMessageSchema,
-  updateToolCallResult: () => updateToolCallResult,
   userModelMessageSchema: () => userModelMessageSchema,
   wrapLanguageModel: () => wrapLanguageModel
 });
 module.exports = __toCommonJS(src_exports);
-var
+var import_provider_utils25 = require("@ai-sdk/provider-utils");
 
 // src/error/index.ts
 var import_provider16 = require("@ai-sdk/provider");
@@ -538,19 +534,8 @@ function pipeTextStreamToResponse({
   });
 }
 
-// src/ui/
-
-  messages,
-  message
-}) {
-  return [
-    ...messages.length > 0 && messages[messages.length - 1].id === message.id ? messages.slice(0, -1) : messages,
-    message
-  ];
-}
-
-// src/ui/call-chat-api.ts
-var import_provider_utils3 = require("@ai-sdk/provider-utils");
+// src/ui/call-completion-api.ts
+var import_provider_utils = require("@ai-sdk/provider-utils");
 
 // src/ui-message-stream/ui-message-stream-parts.ts
 var import_zod = require("zod");
@@ -591,14 +576,22 @@ var uiMessageStreamPartSchema = import_zod.z.union([
     providerMetadata: import_zod.z.record(import_zod.z.any()).optional()
   }),
   import_zod.z.object({
-    type: import_zod.z.literal("source"),
-
-    id: import_zod.z.string(),
+    type: import_zod.z.literal("source-url"),
+    sourceId: import_zod.z.string(),
     url: import_zod.z.string(),
     title: import_zod.z.string().optional(),
     providerMetadata: import_zod.z.any().optional()
     // Use z.any() for generic metadata
   }),
+  import_zod.z.object({
+    type: import_zod.z.literal("source-document"),
+    sourceId: import_zod.z.string(),
+    mediaType: import_zod.z.string(),
+    title: import_zod.z.string(),
+    filename: import_zod.z.string().optional(),
+    providerMetadata: import_zod.z.any().optional()
+    // Use z.any() for generic metadata
+  }),
   import_zod.z.object({
     type: import_zod.z.literal("file"),
     url: import_zod.z.string(),
@@ -657,8 +650,170 @@ async function consumeStream({
   }
 }
 
+// src/ui/process-text-stream.ts
+async function processTextStream({
+  stream,
+  onTextPart
+}) {
+  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
+  while (true) {
+    const { done, value } = await reader.read();
+    if (done) {
+      break;
+    }
+    await onTextPart(value);
+  }
+}
+
+// src/ui/call-completion-api.ts
+var getOriginalFetch = () => fetch;
+async function callCompletionApi({
+  api,
+  prompt,
+  credentials,
+  headers,
+  body,
+  streamProtocol = "data",
+  setCompletion,
+  setLoading,
+  setError,
+  setAbortController,
+  onFinish,
+  onError,
+  fetch: fetch2 = getOriginalFetch()
+}) {
+  var _a17;
+  try {
+    setLoading(true);
+    setError(void 0);
+    const abortController = new AbortController();
+    setAbortController(abortController);
+    setCompletion("");
+    const response = await fetch2(api, {
+      method: "POST",
+      body: JSON.stringify({
+        prompt,
+        ...body
+      }),
+      credentials,
+      headers: {
+        "Content-Type": "application/json",
+        ...headers
+      },
+      signal: abortController.signal
+    }).catch((err) => {
+      throw err;
+    });
+    if (!response.ok) {
+      throw new Error(
+        (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+      );
+    }
+    if (!response.body) {
+      throw new Error("The response body is empty.");
+    }
+    let result = "";
+    switch (streamProtocol) {
+      case "text": {
+        await processTextStream({
+          stream: response.body,
+          onTextPart: (chunk) => {
+            result += chunk;
+            setCompletion(result);
+          }
+        });
+        break;
+      }
+      case "data": {
+        await consumeStream({
+          stream: (0, import_provider_utils.parseJsonEventStream)({
+            stream: response.body,
+            schema: uiMessageStreamPartSchema
+          }).pipeThrough(
+            new TransformStream({
+              async transform(part) {
+                if (!part.success) {
+                  throw part.error;
+                }
+                const streamPart = part.value;
+                if (streamPart.type === "text") {
+                  result += streamPart.text;
+                  setCompletion(result);
+                } else if (streamPart.type === "error") {
+                  throw new Error(streamPart.errorText);
+                }
+              }
+            })
+          ),
+          onError: (error) => {
+            throw error;
+          }
+        });
+        break;
+      }
+      default: {
+        const exhaustiveCheck = streamProtocol;
+        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
+      }
+    }
+    if (onFinish) {
+      onFinish(prompt, result);
+    }
+    setAbortController(null);
+    return result;
+  } catch (err) {
+    if (err.name === "AbortError") {
+      setAbortController(null);
+      return null;
+    }
+    if (err instanceof Error) {
+      if (onError) {
+        onError(err);
+      }
+    }
+    setError(err);
+  } finally {
+    setLoading(false);
+  }
+}
+
+// src/ui/chat.ts
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
+
+// src/util/serial-job-executor.ts
+var SerialJobExecutor = class {
+  constructor() {
+    this.queue = [];
+    this.isProcessing = false;
+  }
+  async processQueue() {
+    if (this.isProcessing) {
+      return;
+    }
+    this.isProcessing = true;
+    while (this.queue.length > 0) {
+      await this.queue[0]();
+      this.queue.shift();
+    }
+    this.isProcessing = false;
+  }
+  async run(job) {
+    return new Promise((resolve, reject) => {
+      this.queue.push(async () => {
+        try {
+          await job();
+          resolve();
+        } catch (error) {
+          reject(error);
+        }
+      });
+      void this.processQueue();
+    });
+  }
+};
+
 // src/ui/process-ui-message-stream.ts
-var
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
 
 // src/util/merge-objects.ts
 function mergeObjects(base, overrides) {
@@ -694,7 +849,7 @@ function mergeObjects(base, overrides) {
 }
 
 // src/util/parse-partial-json.ts
-var
+var import_provider_utils2 = require("@ai-sdk/provider-utils");
 
 // src/util/fix-json.ts
 function fixJson(input) {
@@ -1019,25 +1174,17 @@ async function parsePartialJson(jsonText) {
   if (jsonText === void 0) {
     return { value: void 0, state: "undefined-input" };
   }
-  let result = await (0,
+  let result = await (0, import_provider_utils2.safeParseJSON)({ text: jsonText });
   if (result.success) {
     return { value: result.value, state: "successful-parse" };
   }
-  result = await (0,
+  result = await (0, import_provider_utils2.safeParseJSON)({ text: fixJson(jsonText) });
   if (result.success) {
     return { value: result.value, state: "repaired-parse" };
   }
   return { value: void 0, state: "failed-parse" };
 }
 
-// src/ui/extract-max-tool-invocation-step.ts
-function extractMaxToolInvocationStep(toolInvocations) {
-  return toolInvocations == null ? void 0 : toolInvocations.reduce((max, toolInvocation) => {
-    var _a17;
-    return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
-  }, 0);
-}
-
 // src/ui/get-tool-invocations.ts
 function getToolInvocations(message) {
   return message.parts.filter(
@@ -1048,12 +1195,10 @@ function getToolInvocations(message) {
 // src/ui/process-ui-message-stream.ts
 function createStreamingUIMessageState({
   lastMessage,
-  newMessageId = "
+  newMessageId = ""
 } = {}) {
-  var _a17;
   const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
-  const
-  const message = isContinuation ? structuredClone(lastMessage) : {
+  const message = isContinuation ? lastMessage : {
     id: newMessageId,
     metadata: {},
     role: "assistant",
@@ -1063,8 +1208,7 @@ function createStreamingUIMessageState({
     message,
     activeTextPart: void 0,
     activeReasoningPart: void 0,
-    partialToolCalls: {}
-    step
+    partialToolCalls: {}
   };
 }
 function processUIMessageStream({
@@ -1095,7 +1239,7 @@ function processUIMessageStream({
       if (metadata != null) {
         const mergedMetadata = state.message.metadata != null ? mergeObjects(state.message.metadata, metadata) : metadata;
         if (messageMetadataSchema != null) {
-          await (0,
+          await (0, import_provider_utils3.validateTypes)({
             value: mergedMetadata,
             schema: messageMetadataSchema
           });
@@ -1147,16 +1291,25 @@ function processUIMessageStream({
           write();
           break;
         }
-        case "source": {
+        case "source-url": {
           state.message.parts.push({
-            type: "source",
-
-
-
-
-
-
-
+            type: "source-url",
+            sourceId: part.sourceId,
+            url: part.url,
+            title: part.title,
+            providerMetadata: part.providerMetadata
+          });
+          write();
+          break;
+        }
+        case "source-document": {
+          state.message.parts.push({
+            type: "source-document",
+            sourceId: part.sourceId,
+            mediaType: part.mediaType,
+            title: part.title,
+            filename: part.filename,
+            providerMetadata: part.providerMetadata
           });
           write();
           break;
@@ -1165,13 +1318,11 @@ function processUIMessageStream({
           const toolInvocations = getToolInvocations(state.message);
           state.partialToolCalls[part.toolCallId] = {
             text: "",
-            step: state.step,
             toolName: part.toolName,
             index: toolInvocations.length
           };
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: void 0
@@ -1187,7 +1338,6 @@ function processUIMessageStream({
           );
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: partialToolCall.step,
             toolCallId: part.toolCallId,
             toolName: partialToolCall.toolName,
             args: partialArgs
@@ -1198,7 +1348,6 @@ function processUIMessageStream({
         case "tool-call": {
           updateToolInvocationPart(part.toolCallId, {
             state: "call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: part.args
@@ -1211,7 +1360,6 @@ function processUIMessageStream({
           if (result != null) {
             updateToolInvocationPart(part.toolCallId, {
               state: "result",
-              step: state.step,
               toolCallId: part.toolCallId,
               toolName: part.toolName,
               args: part.args,
@@ -1250,7 +1398,6 @@ function processUIMessageStream({
           break;
         }
         case "finish-step": {
-          state.step += 1;
           state.activeTextPart = void 0;
           state.activeReasoningPart = void 0;
           await updateMessageMetadata(part.metadata);
@@ -1292,14 +1439,7 @@ function processUIMessageStream({
             (partArg) => part.type === partArg.type && part.id === partArg.id
           ) : void 0;
           if (existingPart != null) {
-
-            existingPart.value = mergeObjects(
-              existingPart.data,
-              part.data
-            );
-          } else {
-            existingPart.data = part.data;
-          }
+            existingPart.data = isObject(existingPart.data) && isObject(part.data) ? mergeObjects(existingPart.data, part.data) : part.data;
           } else {
             state.message.parts.push(part);
           }
@@ -1320,47 +1460,60 @@ function isObject(value) {
   return typeof value === "object" && value !== null;
 }
 
-// src/ui/
-function
-
+// src/ui/should-resubmit-messages.ts
+function shouldResubmitMessages({
+  originalMaxToolInvocationStep,
+  originalMessageCount,
+  maxSteps,
+  messages
 }) {
-
-
-
-
-
-
-
-
-
-
-
-      controller.enqueue({ type: "finish" });
-    }
-  })
+  const lastMessage = messages[messages.length - 1];
+  const lastMessageStepStartCount = lastMessage.parts.filter(
+    (part) => part.type === "step-start"
+  ).length;
+  return (
+    // check if the feature is enabled:
+    maxSteps > 1 && // ensure there is a last message:
+    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
+    (messages.length > originalMessageCount || lastMessageStepStartCount !== originalMaxToolInvocationStep) && // check that next step is possible:
+    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
+    lastMessageStepStartCount < maxSteps
   );
 }
+function isAssistantMessageWithCompletedToolCalls(message) {
+  if (!message) {
+    return false;
+  }
+  if (message.role !== "assistant") {
+    return false;
+  }
+  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
+    return part.type === "step-start" ? index : lastIndex;
+  }, -1);
+  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
+  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
+}
 
-// src/ui/
-var
+// src/ui/default-chat-transport.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var getOriginalFetch2 = () => fetch;
 async function fetchUIMessageStream({
   api,
   body,
-  streamProtocol = "ui-message",
   credentials,
   headers,
-
-  fetch: fetch2 =
+  abortSignal,
+  fetch: fetch2 = getOriginalFetch2(),
   requestType = "generate"
 }) {
-  var _a17
-  const response = requestType === "resume" ? await fetch2(`${api}?
+  var _a17;
+  const response = requestType === "resume" ? await fetch2(`${api}?id=${body.id}`, {
     method: "GET",
     headers: {
       "Content-Type": "application/json",
       ...headers
     },
-    signal:
+    signal: abortSignal,
     credentials
   }) : await fetch2(api, {
     method: "POST",
@@ -1369,20 +1522,18 @@ async function fetchUIMessageStream({
       "Content-Type": "application/json",
       ...headers
     },
-    signal:
+    signal: abortSignal,
     credentials
   });
   if (!response.ok) {
     throw new Error(
-      (
+      (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
     );
   }
   if (!response.body) {
     throw new Error("The response body is empty.");
   }
-  return
-    stream: response.body.pipeThrough(new TextDecoderStream())
-  }) : (0, import_provider_utils3.parseJsonEventStream)({
+  return (0, import_provider_utils4.parseJsonEventStream)({
     stream: response.body,
     schema: uiMessageStreamPartSchema
   }).pipeThrough(
@@ -1396,552 +1547,291 @@ async function fetchUIMessageStream({
     })
   );
 }
-
-
-
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  lastMessage,
-  messageMetadataSchema
-}) {
-  const state = createStreamingUIMessageState({
-    lastMessage,
-    newMessageId: generateId3()
-  });
-  const runUpdateMessageJob = async (job) => {
-    await job({
-      state,
-      write: () => {
-        onUpdate({ message: state.message });
-      }
-    });
-  };
-  await consumeStream({
-    stream: processUIMessageStream({
-      stream,
-      onToolCall,
-      messageMetadataSchema,
-      runUpdateMessageJob
-    }),
-    onError: (error) => {
-      throw error;
-    }
-  });
-  onFinish == null ? void 0 : onFinish({ message: state.message });
-}
-async function callChatApi({
-  api,
-  body,
-  streamProtocol = "ui-message",
-  credentials,
-  headers,
-  abortController,
-  onUpdate,
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  fetch: fetch2 = getOriginalFetch(),
-  lastMessage,
-  requestType = "generate",
-  messageMetadataSchema
-}) {
-  const stream = await fetchUIMessageStream({
-    api,
-    body,
-    streamProtocol,
+var DefaultChatTransport = class {
+  constructor({
+    api = "/api/chat",
     credentials,
     headers,
-
+    body,
     fetch: fetch2,
-
-  })
-
-
-
-
-
-    lastMessage,
-    messageMetadataSchema
-  });
-}
-
-// src/ui/call-completion-api.ts
-var import_provider_utils4 = require("@ai-sdk/provider-utils");
-
-// src/ui/process-text-stream.ts
-async function processTextStream({
-  stream,
-  onTextPart
-}) {
-  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
-  while (true) {
-    const { done, value } = await reader.read();
-    if (done) {
-      break;
-    }
-    await onTextPart(value);
+    prepareRequest
+  } = {}) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequest = prepareRequest;
   }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  fetch: fetch2 = getOriginalFetch2()
-}) {
-  var _a17;
-  try {
-    setLoading(true);
-    setError(void 0);
-    const abortController = new AbortController();
-    setAbortController(abortController);
-    setCompletion("");
-    const response = await fetch2(api, {
-      method: "POST",
-      body: JSON.stringify({
-        prompt,
-        ...body
-      }),
-      credentials,
-      headers: {
-        "Content-Type": "application/json",
-        ...headers
-      },
-      signal: abortController.signal
-    }).catch((err) => {
-      throw err;
+  submitMessages({
+    chatId,
+    messages,
+    abortSignal,
+    metadata,
+    headers,
+    body,
+    requestType
+  }) {
+    var _a17, _b;
+    const preparedRequest = (_a17 = this.prepareRequest) == null ? void 0 : _a17.call(this, {
+      id: chatId,
+      messages,
+      body: { ...this.body, ...body },
+      headers: { ...this.headers, ...headers },
+      credentials: this.credentials,
+      requestMetadata: metadata
     });
-
-
-
-    )
-
-
-
-
-    let result = "";
-    switch (streamProtocol) {
-      case "text": {
-        await processTextStream({
-          stream: response.body,
-          onTextPart: (chunk) => {
-            result += chunk;
-            setCompletion(result);
-          }
-        });
-        break;
-      }
-      case "data": {
-        await consumeStream({
-          stream: (0, import_provider_utils4.parseJsonEventStream)({
-            stream: response.body,
-            schema: uiMessageStreamPartSchema
-          }).pipeThrough(
-            new TransformStream({
-              async transform(part) {
-                if (!part.success) {
-                  throw part.error;
-                }
-                const streamPart = part.value;
-                if (streamPart.type === "text") {
-                  result += streamPart.text;
-                  setCompletion(result);
-                } else if (streamPart.type === "error") {
-                  throw new Error(streamPart.errorText);
-                }
-              }
-            })
-          ),
-          onError: (error) => {
-            throw error;
-          }
-        });
-        break;
-      }
-      default: {
-        const exhaustiveCheck = streamProtocol;
-        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
-      }
-    }
-    if (onFinish) {
-      onFinish(prompt, result);
-    }
-    setAbortController(null);
-    return result;
-  } catch (err) {
-    if (err.name === "AbortError") {
-      setAbortController(null);
-      return null;
-    }
-    if (err instanceof Error) {
-      if (onError) {
-        onError(err);
-      }
-    }
-    setError(err);
-  } finally {
-    setLoading(false);
-  }
-}
-
-// src/ui/chat-store.ts
-var import_provider_utils5 = require("@ai-sdk/provider-utils");
-
-// src/util/serial-job-executor.ts
-var SerialJobExecutor = class {
-  constructor() {
-    this.queue = [];
-    this.isProcessing = false;
-  }
-  async processQueue() {
-    if (this.isProcessing) {
-      return;
-    }
-    this.isProcessing = true;
-    while (this.queue.length > 0) {
-      await this.queue[0]();
-      this.queue.shift();
-    }
-    this.isProcessing = false;
-  }
-  async run(job) {
-    return new Promise((resolve, reject) => {
-      this.queue.push(async () => {
-        try {
-          await job();
-          resolve();
-        } catch (error) {
-          reject(error);
-        }
-      });
-      void this.processQueue();
+    return fetchUIMessageStream({
+      api: this.api,
+      body: (preparedRequest == null ? void 0 : preparedRequest.body) !== void 0 ? preparedRequest.body : { ...this.body, ...body, id: chatId, messages },
+      headers: (preparedRequest == null ? void 0 : preparedRequest.headers) !== void 0 ? preparedRequest.headers : { ...this.headers, ...headers },
+      credentials: (_b = preparedRequest == null ? void 0 : preparedRequest.credentials) != null ? _b : this.credentials,
+      abortSignal,
+      fetch: this.fetch,
+      requestType
     });
   }
 };
 
-// src/ui/
-function
-
-
-  maxSteps: maxSteps2,
-  messages
-}) {
-  var _a17;
-  const lastMessage = messages[messages.length - 1];
-  return (
-    // check if the feature is enabled:
-    maxSteps2 > 1 && // ensure there is a last message:
-    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
-    (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
-    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
-    ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps2
-  );
-}
-function isAssistantMessageWithCompletedToolCalls(message) {
-  if (message.role !== "assistant") {
-    return false;
+// src/ui/convert-file-list-to-file-ui-parts.ts
+async function convertFileListToFileUIParts(files) {
+  if (files == null) {
+    return [];
   }
-
-
-  }, -1);
-  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
-  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
-}
-
-// src/ui/update-tool-call-result.ts
-function updateToolCallResult({
-  messages,
-  toolCallId,
-  toolResult: result
-}) {
-  const lastMessage = messages[messages.length - 1];
-  const invocationPart = lastMessage.parts.find(
-    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
-  );
-  if (invocationPart == null) {
-    return;
+  if (!globalThis.FileList || !(files instanceof globalThis.FileList)) {
+    throw new Error("FileList is not supported in the current environment");
   }
-
-
-
-
-
+  return Promise.all(
+    Array.from(files).map(async (file) => {
+      const { name: name17, type } = file;
+      const dataUrl = await new Promise((resolve, reject) => {
+        const reader = new FileReader();
+        reader.onload = (readerEvent) => {
+          var _a17;
+          resolve((_a17 = readerEvent.target) == null ? void 0 : _a17.result);
+        };
+        reader.onerror = (error) => reject(error);
+        reader.readAsDataURL(file);
+      });
+      return {
+        type: "file",
+        mediaType: type,
+        filename: name17,
+        url: dataUrl
+      };
+    })
+  );
 }
 
-// src/ui/chat
-var
+// src/ui/chat.ts
+var AbstractChat = class {
   constructor({
-
-
-    transport,
-    maxSteps
+    generateId: generateId3 = import_provider_utils5.generateId,
+    id = generateId3(),
+    transport = new DefaultChatTransport(),
+    maxSteps = 1,
     messageMetadataSchema,
-    dataPartSchemas
+    dataPartSchemas,
+    state,
+    onError,
+    onToolCall,
+    onFinish
   }) {
-    this.
-
-
-
-
-
-
-
-
+    this.subscribers = /* @__PURE__ */ new Set();
+    this.activeResponse = void 0;
+    this.jobExecutor = new SerialJobExecutor();
+    this.removeAssistantResponse = () => {
+      const lastMessage = this.state.messages[this.state.messages.length - 1];
+      if (lastMessage == null) {
+        throw new Error("Cannot remove assistant response from empty chat");
+      }
+      if (lastMessage.role !== "assistant") {
+        throw new Error("Last message is not an assistant message");
+      }
+      this.state.popMessage();
+      this.emit({ type: "messages-changed" });
+    };
+    /**
+     * Append a user message to the chat list. This triggers the API call to fetch
+     * the assistant's response.
+     */
+    this.sendMessage = async (message, options = {}) => {
+      var _a17, _b;
+      let uiMessage;
+      if ("text" in message || "files" in message) {
+        const fileParts = Array.isArray(message.files) ? message.files : await convertFileListToFileUIParts(message.files);
+        uiMessage = {
+          parts: [
+            ...fileParts,
+            ..."text" in message && message.text != null ? [{ type: "text", text: message.text }] : []
+          ]
+        };
+      } else {
+        uiMessage = message;
+      }
+      this.state.pushMessage({
+        ...uiMessage,
+        id: (_a17 = uiMessage.id) != null ? _a17 : this.generateId(),
+        role: (_b = uiMessage.role) != null ? _b : "user"
+      });
+      this.emit({ type: "messages-changed" });
+      await this.triggerRequest({ requestType: "generate", ...options });
+    };
+    /**
+     * Regenerate the last assistant message.
+     */
+    this.reload = async (options = {}) => {
+      if (this.lastMessage === void 0) {
+        return;
+      }
+      if (this.lastMessage.role === "assistant") {
+        this.state.popMessage();
+        this.emit({ type: "messages-changed" });
+      }
+      await this.triggerRequest({ requestType: "generate", ...options });
+    };
+    /**
+     * Resume an ongoing chat generation stream. This does not resume an aborted generation.
+     */
+    this.experimental_resume = async (options = {}) => {
+      await this.triggerRequest({ requestType: "resume", ...options });
+    };
+    this.addToolResult = async ({
+      toolCallId,
+      result
+    }) => {
+      this.jobExecutor.run(async () => {
+        updateToolCallResult({
+          messages: this.state.messages,
+          toolCallId,
+          toolResult: result
+        });
+        this.messages = this.state.messages;
+        if (this.status === "submitted" || this.status === "streaming") {
+          return;
         }
-
-
-
+        const lastMessage = this.lastMessage;
+        if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
+          this.triggerRequest({
+            requestType: "generate"
+          });
+        }
+      });
+    };
+    /**
+     * Abort the current request immediately, keep the generated tokens if any.
+     */
+    this.stop = async () => {
+      var _a17;
+      if (this.status !== "streaming" && this.status !== "submitted")
+        return;
+      if ((_a17 = this.activeResponse) == null ? void 0 : _a17.abortController) {
+        this.activeResponse.abortController.abort();
+        this.activeResponse.abortController = void 0;
+      }
+    };
+    this.id = id;
+    this.maxSteps = maxSteps;
     this.transport = transport;
-    this.
-    this.generateId = generateId3 != null ? generateId3 : import_provider_utils5.generateId;
+    this.generateId = generateId3;
     this.messageMetadataSchema = messageMetadataSchema;
     this.dataPartSchemas = dataPartSchemas;
+    this.state = state;
+    this.onError = onError;
+    this.onToolCall = onToolCall;
+    this.onFinish = onFinish;
   }
-
-
-
-
-
-
-
-
-
-
-  getChats() {
-    return Array.from(this.chats.entries());
-  }
-  get chatCount() {
-    return this.chats.size;
-  }
-  getStatus(id) {
-    return this.getChat(id).status;
+  /**
+   * Hook status:
+   *
+   * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
+   * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
+   * - `ready`: The full response has been received and processed; a new user message can be submitted.
+   * - `error`: An error occurred during the API request, preventing successful completion.
+   */
+  get status() {
+    return this.state.status;
   }
   setStatus({
-    id,
     status,
     error
   }) {
-
-    if (chat.status === status)
+    if (this.status === status)
       return;
-
-
-    this.emit({ type: "
+    this.state.status = status;
+    this.state.error = error;
+    this.emit({ type: "status-changed" });
   }
-
-    return this.
+  get error() {
+    return this.state.error;
   }
-
-    return this.
+  get messages() {
+    return this.state.messages;
   }
-
-
-    return chat.messages[chat.messages.length - 1];
+  get lastMessage() {
+    return this.state.messages[this.state.messages.length - 1];
   }
   subscribe(subscriber) {
     this.subscribers.add(subscriber);
     return () => this.subscribers.delete(subscriber);
   }
-
-
-    messages
-  }) {
-    this.getChat(id).messages = [...messages];
-    this.emit({ type: "chat-messages-changed", chatId: id });
-  }
-  removeAssistantResponse(id) {
-    const chat = this.getChat(id);
-    const lastMessage = chat.messages[chat.messages.length - 1];
-    if (lastMessage == null) {
-      throw new Error("Cannot remove assistant response from empty chat");
-    }
-    if (lastMessage.role !== "assistant") {
-      throw new Error("Last message is not an assistant message");
-    }
-    this.setMessages({ id, messages: chat.messages.slice(0, -1) });
-  }
-  async submitMessage({
-    chatId,
-    message,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    var _a17;
-    const chat = this.getChat(chatId);
-    const currentMessages = chat.messages;
-    await this.triggerRequest({
-      chatId,
-      messages: currentMessages.concat({
-        ...message,
-        id: (_a17 = message.id) != null ? _a17 : this.generateId()
-      }),
-      headers,
-      body,
-      requestType: "generate",
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resubmitLastUserMessage({
-    chatId,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    const messages = this.getChat(chatId).messages;
-    const messagesToSubmit = messages[messages.length - 1].role === "assistant" ? messages.slice(0, -1) : messages;
-    if (messagesToSubmit.length === 0) {
-      return;
-    }
-    return this.triggerRequest({
-      chatId,
-      requestType: "generate",
-      messages: messagesToSubmit,
-      headers,
-      body,
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resumeStream({
-    chatId,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    const chat = this.getChat(chatId);
-    const currentMessages = chat.messages;
-    return this.triggerRequest({
-      chatId,
-      messages: currentMessages,
-      requestType: "resume",
-      headers,
-      body,
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async addToolResult({
-    chatId,
-    toolCallId,
-    result
-  }) {
-    const chat = this.getChat(chatId);
-    chat.jobExecutor.run(async () => {
-      const currentMessages = chat.messages;
-      updateToolCallResult({
-        messages: currentMessages,
-        toolCallId,
-        toolResult: result
-      });
-      this.setMessages({ id: chatId, messages: currentMessages });
-      if (chat.status === "submitted" || chat.status === "streaming") {
-        return;
-      }
-      const lastMessage = currentMessages[currentMessages.length - 1];
-      if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
-        await this.triggerRequest({
-          messages: currentMessages,
-          requestType: "generate",
-          chatId
-        });
-      }
-    });
-  }
-  async stopStream({ chatId }) {
-    var _a17;
-    const chat = this.getChat(chatId);
-    if (chat.status !== "streaming" && chat.status !== "submitted")
-      return;
-    if ((_a17 = chat.activeResponse) == null ? void 0 : _a17.abortController) {
-      chat.activeResponse.abortController.abort();
-      chat.activeResponse.abortController = void 0;
-    }
+  set messages(messages) {
+    this.state.messages = messages;
+    this.emit({ type: "messages-changed" });
   }
   emit(event) {
     for (const subscriber of this.subscribers) {
-      subscriber.
-    }
-  }
-  getChat(id) {
-    if (!this.hasChat(id)) {
-      throw new Error(`chat '${id}' not found`);
+      subscriber.onChange(event);
     }
-    return this.chats.get(id);
   }
   async triggerRequest({
-    chatId,
-    messages: chatMessages,
     requestType,
+    metadata,
     headers,
-    body
-    onError,
-    onToolCall,
-    onFinish
+    body
   }) {
-
-
-
-
-    const
-    const maxStep = extractMaxToolInvocationStep(
-      getToolInvocations(chatMessages[chatMessages.length - 1])
-    );
+    var _a17, _b;
+    this.setStatus({ status: "submitted", error: void 0 });
+    const messageCount = this.state.messages.length;
+    const lastMessage = this.lastMessage;
+    const maxStep = (_a17 = lastMessage == null ? void 0 : lastMessage.parts.filter((part) => part.type === "step-start").length) != null ? _a17 : 0;
     try {
       const activeResponse = {
         state: createStreamingUIMessageState({
-          lastMessage:
-          newMessageId:
+          lastMessage: this.state.snapshot(lastMessage),
+          newMessageId: this.generateId()
         }),
         abortController: new AbortController()
       };
-
-      const stream = await
-        chatId,
-        messages:
-
+      this.activeResponse = activeResponse;
+      const stream = await this.transport.submitMessages({
+        chatId: this.id,
+        messages: this.state.messages,
+        abortSignal: activeResponse.abortController.signal,
+        metadata,
         headers,
-
+        body,
         requestType
       });
       const runUpdateMessageJob = (job) => (
         // serialize the job execution to avoid race conditions:
-
+        this.jobExecutor.run(
           () => job({
             state: activeResponse.state,
             write: () => {
-
-
-              const
-
-
-
-
-
-
+              var _a18;
+              this.setStatus({ status: "streaming" });
+              const replaceLastMessage = activeResponse.state.message.id === ((_a18 = this.lastMessage) == null ? void 0 : _a18.id);
+              if (replaceLastMessage) {
+                this.state.replaceMessage(
+                  this.state.messages.length - 1,
+                  activeResponse.state.message
+                );
+              } else {
+                this.state.pushMessage(activeResponse.state.message);
+              }
+              this.emit({
+                type: "messages-changed"
              });
            }
          })
@@ -1950,137 +1840,67 @@ var ChatStore = class {
       await consumeStream({
         stream: processUIMessageStream({
           stream,
-          onToolCall,
-          messageMetadataSchema:
-          dataPartSchemas:
+          onToolCall: this.onToolCall,
+          messageMetadataSchema: this.messageMetadataSchema,
+          dataPartSchemas: this.dataPartSchemas,
           runUpdateMessageJob
         }),
         onError: (error) => {
          throw error;
        }
      });
-      onFinish == null ? void 0 :
-      this.setStatus({
+      (_b = this.onFinish) == null ? void 0 : _b.call(this, { message: activeResponse.state.message });
+      this.setStatus({ status: "ready" });
    } catch (err) {
+      console.error(err);
      if (err.name === "AbortError") {
-        this.setStatus({
+        this.setStatus({ status: "ready" });
        return null;
      }
-      if (onError && err instanceof Error) {
-        onError(err);
+      if (this.onError && err instanceof Error) {
+        this.onError(err);
      }
-      this.setStatus({
+      this.setStatus({ status: "error", error: err });
    } finally {
-
+      this.activeResponse = void 0;
    }
-    const currentMessages = self.getMessages(chatId);
    if (shouldResubmitMessages({
      originalMaxToolInvocationStep: maxStep,
      originalMessageCount: messageCount,
-      maxSteps:
-      messages:
+      maxSteps: this.maxSteps,
+      messages: this.state.messages
    })) {
-      await
-        chatId,
+      await this.triggerRequest({
        requestType,
-
-        onToolCall,
-        onFinish,
+        metadata,
        headers,
-        body
-        messages: currentMessages
+        body
      });
    }
  }
};
-
-
-
-
-
-
-
-
-    streamProtocol,
-    fetch: fetch2,
-    prepareRequestBody
-  }) {
-    this.api = api;
-    this.credentials = credentials;
-    this.headers = headers;
-    this.body = body;
-    this.streamProtocol = streamProtocol;
-    this.fetch = fetch2;
-    this.prepareRequestBody = prepareRequestBody;
-  }
-  submitMessages({
-    chatId,
-    messages,
-    abortController,
-    body,
-    headers,
-    requestType
-  }) {
-    var _a17, _b;
-    return fetchUIMessageStream({
-      api: this.api,
-      headers: {
-        ...this.headers,
-        ...headers
-      },
-      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      })) != null ? _b : {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      },
-      streamProtocol: this.streamProtocol,
-      credentials: this.credentials,
-      abortController: () => abortController,
-      fetch: this.fetch,
-      requestType
-    });
-  }
-};
-
-// src/ui/convert-file-list-to-file-ui-parts.ts
-async function convertFileListToFileUIParts(files) {
-  if (files == null) {
-    return [];
-  }
-  if (!globalThis.FileList || !(files instanceof globalThis.FileList)) {
-    throw new Error("FileList is not supported in the current environment");
-  }
-  return Promise.all(
-    Array.from(files).map(async (file) => {
-      const { name: name17, type } = file;
-      const dataUrl = await new Promise((resolve, reject) => {
-        const reader = new FileReader();
-        reader.onload = (readerEvent) => {
-          var _a17;
-          resolve((_a17 = readerEvent.target) == null ? void 0 : _a17.result);
-        };
-        reader.onerror = (error) => reject(error);
-        reader.readAsDataURL(file);
-      });
-      return {
-        type: "file",
-        mediaType: type,
-        filename: name17,
-        url: dataUrl
-      };
-    })
+function updateToolCallResult({
+  messages,
+  toolCallId,
+  toolResult: result
+}) {
+  const lastMessage = messages[messages.length - 1];
+  const invocationPart = lastMessage.parts.find(
+    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
   );
+  if (invocationPart == null) {
+    return;
+  }
+  invocationPart.toolInvocation = {
+    ...invocationPart.toolInvocation,
+    state: "result",
+    result
+  };
 }
 
 // src/ui/convert-to-model-messages.ts
 function convertToModelMessages(messages, options) {
-  var _a17
+  var _a17;
   const tools = (_a17 = options == null ? void 0 : options.tools) != null ? _a17 : {};
   const modelMessages = [];
   for (const message of messages) {
@@ -2111,6 +1931,9 @@ function convertToModelMessages(messages, options) {
       case "assistant": {
         if (message.parts != null) {
           let processBlock2 = function() {
+            if (block.length === 0) {
+              return;
+            }
             const content = [];
             for (const part of block) {
               switch (part.type) {
@@ -2185,33 +2008,20 @@ function convertToModelMessages(messages, options) {
            });
          }
          block = [];
-          blockHasToolInvocations = false;
-          currentStep++;
        };
        var processBlock = processBlock2;
-        let currentStep = 0;
-        let blockHasToolInvocations = false;
        let block = [];
        for (const part of message.parts) {
          switch (part.type) {
-            case "text":
-
-                processBlock2();
-              }
-              block.push(part);
-              break;
-            }
+            case "text":
+            case "reasoning":
            case "file":
-            case "
+            case "tool-invocation": {
              block.push(part);
              break;
            }
-            case "
-
-                processBlock2();
-              }
-              block.push(part);
-              blockHasToolInvocations = true;
+            case "step-start": {
+              processBlock2();
              break;
            }
          }
@@ -2234,45 +2044,166 @@ function convertToModelMessages(messages, options) {
 }
 var convertToCoreMessages = convertToModelMessages;

-// src/ui/
-
-
+// src/ui/transform-text-to-ui-message-stream.ts
+function transformTextToUiMessageStream({
+stream
+}) {
+return stream.pipeThrough(
+new TransformStream({
+start(controller) {
+controller.enqueue({ type: "start" });
+controller.enqueue({ type: "start-step" });
+},
+async transform(part, controller) {
+controller.enqueue({ type: "text", text: part });
+},
+async flush(controller) {
+controller.enqueue({ type: "finish-step" });
+controller.enqueue({ type: "finish" });
+}
+})
+);
+}
+
+// src/ui/text-stream-chat-transport.ts
+var getOriginalFetch3 = () => fetch;
+async function fetchTextStream({
 api,
-
-streamProtocol = "ui-message",
+body,
 credentials,
 headers,
-
-
-
-dataPartSchemas,
-messageMetadataSchema,
-maxSteps: maxSteps2 = 1,
-chats
+abortSignal,
+fetch: fetch2 = getOriginalFetch3(),
+requestType = "generate"
 }) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var _a17;
+const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+method: "GET",
+headers: {
+"Content-Type": "application/json",
+...headers
+},
+signal: abortSignal,
+credentials
+}) : await fetch2(api, {
+method: "POST",
+body: JSON.stringify(body),
+headers: {
+"Content-Type": "application/json",
+...headers
+},
+signal: abortSignal,
+credentials
+});
+if (!response.ok) {
+throw new Error(
+(_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+);
+}
+if (!response.body) {
+throw new Error("The response body is empty.");
+}
+return transformTextToUiMessageStream({
+stream: response.body.pipeThrough(new TextDecoderStream())
+});
+}
+var TextStreamChatTransport = class {
+constructor({
+api,
+credentials,
+headers,
+body,
+fetch: fetch2,
+prepareRequest
+}) {
+this.api = api;
+this.credentials = credentials;
+this.headers = headers;
+this.body = body;
+this.fetch = fetch2;
+this.prepareRequest = prepareRequest;
+}
+submitMessages({
+chatId,
+messages,
+abortSignal,
+metadata,
+headers,
+body,
+requestType
+}) {
+var _a17, _b;
+const preparedRequest = (_a17 = this.prepareRequest) == null ? void 0 : _a17.call(this, {
+id: chatId,
+messages,
+body: { ...this.body, ...body },
+headers: { ...this.headers, ...headers },
+credentials: this.credentials,
+requestMetadata: metadata
+});
+return fetchTextStream({
+api: this.api,
+body: (preparedRequest == null ? void 0 : preparedRequest.body) !== void 0 ? preparedRequest.body : { ...this.body, ...body },
+headers: (preparedRequest == null ? void 0 : preparedRequest.headers) !== void 0 ? preparedRequest.headers : { ...this.headers, ...headers },
+credentials: (_b = preparedRequest == null ? void 0 : preparedRequest.credentials) != null ? _b : this.credentials,
+abortSignal,
+fetch: this.fetch,
+requestType
+});
+}
+};
+
+// src/ui-message-stream/handle-ui-message-stream-finish.ts
+function handleUIMessageStreamFinish({
+newMessageId,
+originalMessages = [],
+onFinish,
+stream
+}) {
+if (onFinish == null) {
+return stream;
+}
+const lastMessage = originalMessages[originalMessages.length - 1];
+const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
+const messageId = isContinuation ? lastMessage.id : newMessageId;
+const state = createStreamingUIMessageState({
+lastMessage: structuredClone(lastMessage),
+newMessageId: messageId
 });
+const runUpdateMessageJob = async (job) => {
+await job({ state, write: () => {
+} });
+};
+return processUIMessageStream({
+stream,
+runUpdateMessageJob
+}).pipeThrough(
+new TransformStream({
+transform(chunk, controller) {
+controller.enqueue(chunk);
+},
+flush() {
+const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
+onFinish({
+isContinuation: isContinuation2,
+responseMessage: state.message,
+messages: [
+...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
+state.message
+]
+});
+}
+})
+);
 }

 // src/ui-message-stream/create-ui-message-stream.ts
 function createUIMessageStream({
 execute,
-onError = () => "An error occurred."
+onError = () => "An error occurred.",
 // mask error messages for safety by default
+originalMessages,
+onFinish
 }) {
 let controller;
 const ongoingStreamPromises = [];
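This hunk introduces `transformTextToUiMessageStream` and `TextStreamChatTransport`, which wrap a plain-text HTTP stream in synthetic `start`/`start-step`/`text`/`finish-step`/`finish` events, and threads the new `originalMessages`/`onFinish` options into `createUIMessageStream`. A client-side sketch, assuming the matching `@ai-sdk/react` alpha accepts a `transport` option as later v5 releases do:

```ts
import { useChat } from '@ai-sdk/react'; // assumption: matching alpha version
import { TextStreamChatTransport } from 'ai';

// Drives a chat from an endpoint that streams raw text; the transport
// synthesizes the UI-message events shown in the hunk above.
const chat = useChat({
  transport: new TextStreamChatTransport({ api: '/api/chat' }),
});
```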
@@ -2289,25 +2220,27 @@ function createUIMessageStream({
 }
 try {
 const result = execute({
-
-
-
-
-
-(
-
-
-
-
-
-
-
-
-
-
-
-
-
+writer: {
+write(part) {
+safeEnqueue(part);
+},
+merge(streamArg) {
+ongoingStreamPromises.push(
+(async () => {
+const reader = streamArg.getReader();
+while (true) {
+const { done, value } = await reader.read();
+if (done)
+break;
+safeEnqueue(value);
+}
+})().catch((error) => {
+safeEnqueue({ type: "error", errorText: onError(error) });
+})
+);
+},
+onError
+}
 });
 if (result) {
 ongoingStreamPromises.push(
@@ -2331,7 +2264,12 @@ function createUIMessageStream({
 } catch (error) {
 }
 });
-return
+return handleUIMessageStreamFinish({
+stream,
+newMessageId: "",
+originalMessages,
+onFinish
+});
 }

 // src/ui-message-stream/ui-message-stream-headers.ts
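`createUIMessageStream` now routes its output through `handleUIMessageStreamFinish`, so passing `originalMessages` and `onFinish` yields the assembled conversation when the stream completes. A server-side sketch (the persistence helper is hypothetical):

```ts
import {
  createUIMessageStream,
  createUIMessageStreamResponse,
  type UIMessage,
} from 'ai';

declare const previousMessages: UIMessage[]; // assumption: loaded from storage

const stream = createUIMessageStream({
  originalMessages: previousMessages,
  execute({ writer }) {
    writer.write({ type: 'text', text: 'Hello!' });
  },
  onFinish({ messages, isContinuation }) {
    // `messages` is the full conversation including the response message.
    // saveChat(messages); // hypothetical persistence helper
  },
});
const response = createUIMessageStreamResponse({ stream });
```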
@@ -2396,6 +2334,32 @@ function pipeUIMessageStreamToResponse({
 });
 }

+// src/util/cosine-similarity.ts
+function cosineSimilarity(vector1, vector2) {
+if (vector1.length !== vector2.length) {
+throw new InvalidArgumentError({
+parameter: "vector1,vector2",
+value: { vector1Length: vector1.length, vector2Length: vector2.length },
+message: `Vectors must have the same length`
+});
+}
+const n = vector1.length;
+if (n === 0) {
+return 0;
+}
+let magnitudeSquared1 = 0;
+let magnitudeSquared2 = 0;
+let dotProduct = 0;
+for (let i = 0; i < n; i++) {
+const value1 = vector1[i];
+const value2 = vector2[i];
+magnitudeSquared1 += value1 * value1;
+magnitudeSquared2 += value2 * value2;
+dotProduct += value1 * value2;
+}
+return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
+}
+
 // src/util/data-url.ts
 function getTextFromDataUrl(dataUrl) {
 const [header, base64Content] = dataUrl.split(",");
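`cosineSimilarity` now sits ahead of the data-url helpers in the bundle (the old copy is deleted in a later hunk) and returns 0 for empty or zero-magnitude vectors instead of producing NaN. Usage sketch, with the embedding model left as an assumption:

```ts
import { cosineSimilarity, embedMany, type EmbeddingModel } from 'ai';

declare const embeddingModel: EmbeddingModel<string>; // assumption

const { embeddings } = await embedMany({
  model: embeddingModel,
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
});
// 1 = same direction, 0 = orthogonal (also returned for zero vectors)
console.log(cosineSimilarity(embeddings[0], embeddings[1]));
```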
@@ -2445,34 +2409,8 @@ function isDeepEqualData(obj1, obj2) {
 return true;
 }

-// src/util/cosine-similarity.ts
-function cosineSimilarity(vector1, vector2) {
-if (vector1.length !== vector2.length) {
-throw new InvalidArgumentError({
-parameter: "vector1,vector2",
-value: { vector1Length: vector1.length, vector2Length: vector2.length },
-message: `Vectors must have the same length`
-});
-}
-const n = vector1.length;
-if (n === 0) {
-return 0;
-}
-let magnitudeSquared1 = 0;
-let magnitudeSquared2 = 0;
-let dotProduct = 0;
-for (let i = 0; i < n; i++) {
-const value1 = vector1[i];
-const value2 = vector2[i];
-magnitudeSquared1 += value1 * value1;
-magnitudeSquared2 += value2 * value2;
-dotProduct += value1 * value2;
-}
-return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
-}
-
 // src/util/simulate-readable-stream.ts
-var
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
 function simulateReadableStream({
 chunks,
 initialDelayInMs = 0,
@@ -2480,7 +2418,7 @@ function simulateReadableStream({
 _internal
 }) {
 var _a17;
-const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 :
+const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils6.delay;
 let index = 0;
 return new ReadableStream({
 async pull(controller) {
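`simulateReadableStream` now pulls `delay` from `@ai-sdk/provider-utils`. A test-fixture sketch; `chunkDelayInMs` is assumed to keep its name from other releases:

```ts
import { simulateReadableStream } from 'ai';

// Emits three chunks: the first after 100 ms, then one every 50 ms.
const stream = simulateReadableStream({
  chunks: ['Hello', ' ', 'world'],
  initialDelayInMs: 100,
  chunkDelayInMs: 50,
});
```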
@@ -2496,7 +2434,7 @@ function simulateReadableStream({

 // src/util/retry-with-exponential-backoff.ts
 var import_provider17 = require("@ai-sdk/provider");
-var
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
 var retryWithExponentialBackoff = ({
 maxRetries = 2,
 initialDelayInMs = 2e3,
@@ -2514,13 +2452,13 @@ async function _retryWithExponentialBackoff(f, {
 try {
 return await f();
 } catch (error) {
-if ((0,
+if ((0, import_provider_utils7.isAbortError)(error)) {
 throw error;
 }
 if (maxRetries === 0) {
 throw error;
 }
-const errorMessage = (0,
+const errorMessage = (0, import_provider_utils7.getErrorMessage)(error);
 const newErrors = [...errors, error];
 const tryNumber = newErrors.length;
 if (tryNumber > maxRetries) {
@@ -2531,7 +2469,7 @@ async function _retryWithExponentialBackoff(f, {
 });
 }
 if (error instanceof Error && import_provider17.APICallError.isInstance(error) && error.isRetryable === true && tryNumber <= maxRetries) {
-await (0,
+await (0, import_provider_utils7.delay)(delayInMs);
 return _retryWithExponentialBackoff(
 f,
 { maxRetries, delayInMs: backoffFactor * delayInMs, backoffFactor },
@@ -3090,7 +3028,7 @@ var DefaultEmbedManyResult = class {
 };

 // src/util/detect-media-type.ts
-var
+var import_provider_utils8 = require("@ai-sdk/provider-utils");
 var imageMediaTypeSignatures = [
 {
 mediaType: "image/gif",
@@ -3197,7 +3135,7 @@ var audioMediaTypeSignatures = [
 }
 ];
 var stripID3 = (data) => {
-const bytes = typeof data === "string" ? (0,
+const bytes = typeof data === "string" ? (0, import_provider_utils8.convertBase64ToUint8Array)(data) : data;
 const id3Size = (bytes[6] & 127) << 21 | (bytes[7] & 127) << 14 | (bytes[8] & 127) << 7 | bytes[9] & 127;
 return bytes.slice(id3Size + 10);
 };
@@ -3223,7 +3161,7 @@ function detectMediaType({
 }

 // core/generate-text/generated-file.ts
-var
+var import_provider_utils9 = require("@ai-sdk/provider-utils");
 var DefaultGeneratedFile = class {
 constructor({
 data,
@@ -3237,14 +3175,14 @@ var DefaultGeneratedFile = class {
 // lazy conversion with caching to avoid unnecessary conversion overhead:
 get base64() {
 if (this.base64Data == null) {
-this.base64Data = (0,
+this.base64Data = (0, import_provider_utils9.convertUint8ArrayToBase64)(this.uint8ArrayData);
 }
 return this.base64Data;
 }
 // lazy conversion with caching to avoid unnecessary conversion overhead:
 get uint8Array() {
 if (this.uint8ArrayData == null) {
-this.uint8ArrayData = (0,
+this.uint8ArrayData = (0, import_provider_utils9.convertBase64ToUint8Array)(this.base64Data);
 }
 return this.uint8ArrayData;
 }
@@ -3359,8 +3297,8 @@ async function invokeModelMaxImagesPerCall(model) {
 }

 // core/generate-object/generate-object.ts
-var
-var
+var import_provider22 = require("@ai-sdk/provider");
+var import_provider_utils14 = require("@ai-sdk/provider-utils");

 // core/generate-text/extract-content-text.ts
 function extractContentText(content) {
@@ -3374,7 +3312,7 @@ function extractContentText(content) {
 }

 // core/prompt/convert-to-language-model-prompt.ts
-var
+var import_provider_utils11 = require("@ai-sdk/provider-utils");

 // src/util/download.ts
 async function download({ url }) {
@@ -3403,7 +3341,7 @@ async function download({ url }) {

 // core/prompt/data-content.ts
 var import_provider18 = require("@ai-sdk/provider");
-var
+var import_provider_utils10 = require("@ai-sdk/provider-utils");
 var import_zod2 = require("zod");

 // core/prompt/split-data-url.ts
@@ -3463,13 +3401,22 @@ function convertToLanguageModelV2DataContent(content) {
 }
 return { data: content, mediaType: void 0 };
 }
+function convertDataContentToBase64String(content) {
+if (typeof content === "string") {
+return content;
+}
+if (content instanceof ArrayBuffer) {
+return (0, import_provider_utils10.convertUint8ArrayToBase64)(new Uint8Array(content));
+}
+return (0, import_provider_utils10.convertUint8ArrayToBase64)(content);
+}
 function convertDataContentToUint8Array(content) {
 if (content instanceof Uint8Array) {
 return content;
 }
 if (typeof content === "string") {
 try {
-return (0,
+return (0, import_provider_utils10.convertBase64ToUint8Array)(content);
 } catch (error) {
 throw new InvalidDataContentError({
 message: "Invalid data content. Content string is not a base64-encoded media.",
@@ -3620,7 +3567,7 @@ async function downloadAssets(messages, downloadImplementation, supportedUrls) {
 }
 return { mediaType, data };
 }).filter(
-(part) => part.data instanceof URL && part.mediaType != null && !(0,
+(part) => part.data instanceof URL && part.mediaType != null && !(0, import_provider_utils11.isUrlSupported)({
 url: part.data.toString(),
 mediaType: part.mediaType,
 supportedUrls
@@ -3789,9 +3736,22 @@ function prepareCallSettings({
 };
 }

+// core/prompt/resolve-language-model.ts
+var import_gateway = require("@ai-sdk/gateway");
+var GLOBAL_DEFAULT_PROVIDER = Symbol(
+"vercel.ai.global.defaultProvider"
+);
+function resolveLanguageModel(model) {
+if (typeof model !== "string") {
+return model;
+}
+const globalProvider = globalThis[GLOBAL_DEFAULT_PROVIDER];
+return (globalProvider != null ? globalProvider : import_gateway.gateway).languageModel(model);
+}
+
 // core/prompt/standardize-prompt.ts
 var import_provider19 = require("@ai-sdk/provider");
-var
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
 var import_zod8 = require("zod");

 // core/prompt/message.ts
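The new `resolveLanguageModel` lets any `model:` option accept a plain string id: it is resolved through a provider stored on `globalThis` under the exported `GLOBAL_DEFAULT_PROVIDER` symbol, falling back to the Vercel AI Gateway. A sketch derived from that implementation; the `@ai-sdk/openai` import is an assumption:

```ts
import { GLOBAL_DEFAULT_PROVIDER, generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: provider package installed

// Register a global default provider; string model ids then resolve
// through it instead of the gateway fallback.
(globalThis as Record<symbol, unknown>)[GLOBAL_DEFAULT_PROVIDER] = openai;

const { text } = await generateText({
  model: 'gpt-4o', // resolved via the registered provider
  prompt: 'Hello!',
});
```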
@@ -3963,7 +3923,7 @@ async function standardizePrompt(prompt) {
 message: "messages must not be empty"
 });
 }
-const validationResult = await (0,
+const validationResult = await (0, import_provider_utils12.safeValidateTypes)({
 value: messages,
 schema: import_zod8.z.array(modelMessageSchema)
 });
@@ -3980,9 +3940,38 @@ async function standardizePrompt(prompt) {
 };
 }

-// core/
+// core/prompt/wrap-gateway-error.ts
+var import_gateway2 = require("@ai-sdk/gateway");
 var import_provider20 = require("@ai-sdk/provider");
-
+function wrapGatewayError(error) {
+if (import_gateway2.GatewayAuthenticationError.isInstance(error) || import_gateway2.GatewayModelNotFoundError.isInstance(error)) {
+return new import_provider20.AISDKError({
+name: "GatewayError",
+message: "Vercel AI Gateway access failed. If you want to use AI SDK providers directly, use the providers, e.g. @ai-sdk/openai, or register a different global default provider.",
+cause: error
+});
+}
+return error;
+}
+
+// core/telemetry/stringify-for-telemetry.ts
+function stringifyForTelemetry(prompt) {
+return JSON.stringify(
+prompt.map((message) => ({
+...message,
+content: typeof message.content === "string" ? message.content : message.content.map(
+(part) => part.type === "file" ? {
+...part,
+data: part.data instanceof Uint8Array ? convertDataContentToBase64String(part.data) : part.data
+} : part
+)
+}))
+);
+}
+
+// core/generate-object/output-strategy.ts
+var import_provider21 = require("@ai-sdk/provider");
+var import_provider_utils13 = require("@ai-sdk/provider-utils");

 // src/util/async-iterable-stream.ts
 function createAsyncIterableStream(source) {
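The internal `stringifyForTelemetry` added above keeps telemetry spans JSON-safe by base64-encoding binary file parts before stringifying prompts. It is not exported; this standalone sketch mirrors the transformation for illustration only:

```ts
// Re-implementation for illustration; mirrors the internal helper, not a public API.
type FilePart = { type: 'file'; data: Uint8Array | string };
type PromptMessage = { role: string; content: string | FilePart[] };

const toBase64 = (bytes: Uint8Array): string =>
  btoa(String.fromCharCode(...bytes));

function stringifyPromptForTelemetry(prompt: PromptMessage[]): string {
  return JSON.stringify(
    prompt.map((message) => ({
      ...message,
      content:
        typeof message.content === 'string'
          ? message.content
          : message.content.map((part) =>
              part.data instanceof Uint8Array
                ? { ...part, data: toBase64(part.data) } // binary -> base64
                : part,
            ),
    })),
  );
}
```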
@@ -4019,7 +4008,7 @@ var noSchemaOutputStrategy = {
 } : { success: true, value };
 },
 createElementStream() {
-throw new
+throw new import_provider21.UnsupportedFunctionalityError({
 functionality: "element streams in no-schema mode"
 });
 }
@@ -4038,10 +4027,10 @@ var objectOutputStrategy = (schema) => ({
 };
 },
 async validateFinalResult(value) {
-return (0,
+return (0, import_provider_utils13.safeValidateTypes)({ value, schema });
 },
 createElementStream() {
-throw new
+throw new import_provider21.UnsupportedFunctionalityError({
 functionality: "element streams in object mode"
 });
 }
@@ -4069,10 +4058,10 @@ var arrayOutputStrategy = (schema) => {
 isFinalDelta
 }) {
 var _a17;
-if (!(0,
+if (!(0, import_provider21.isJSONObject)(value) || !(0, import_provider21.isJSONArray)(value.elements)) {
 return {
 success: false,
-error: new
+error: new import_provider21.TypeValidationError({
 value,
 cause: "value must be an object that contains an array of elements"
 })
@@ -4082,7 +4071,7 @@ var arrayOutputStrategy = (schema) => {
 const resultArray = [];
 for (let i = 0; i < inputArray.length; i++) {
 const element = inputArray[i];
-const result = await (0,
+const result = await (0, import_provider_utils13.safeValidateTypes)({ value: element, schema });
 if (i === inputArray.length - 1 && !isFinalDelta) {
 continue;
 }
@@ -4112,10 +4101,10 @@ var arrayOutputStrategy = (schema) => {
 };
 },
 async validateFinalResult(value) {
-if (!(0,
+if (!(0, import_provider21.isJSONObject)(value) || !(0, import_provider21.isJSONArray)(value.elements)) {
 return {
 success: false,
-error: new
+error: new import_provider21.TypeValidationError({
 value,
 cause: "value must be an object that contains an array of elements"
 })
@@ -4123,7 +4112,7 @@ var arrayOutputStrategy = (schema) => {
 }
 const inputArray = value.elements;
 for (const element of inputArray) {
-const result = await (0,
+const result = await (0, import_provider_utils13.safeValidateTypes)({ value: element, schema });
 if (!result.success) {
 return result;
 }
@@ -4178,10 +4167,10 @@ var enumOutputStrategy = (enumValues) => {
 additionalProperties: false
 },
 async validateFinalResult(value) {
-if (!(0,
+if (!(0, import_provider21.isJSONObject)(value) || typeof value.result !== "string") {
 return {
 success: false,
-error: new
+error: new import_provider21.TypeValidationError({
 value,
 cause: 'value must be an object that contains a string in the "result" property.'
 })
@@ -4190,17 +4179,17 @@ var enumOutputStrategy = (enumValues) => {
 const result = value.result;
 return enumValues.includes(result) ? { success: true, value: result } : {
 success: false,
-error: new
+error: new import_provider21.TypeValidationError({
 value,
 cause: "value must be a string in the enum"
 })
 };
 },
 async validatePartialResult({ value, textDelta }) {
-if (!(0,
+if (!(0, import_provider21.isJSONObject)(value) || typeof value.result !== "string") {
 return {
 success: false,
-error: new
+error: new import_provider21.TypeValidationError({
 value,
 cause: 'value must be an object that contains a string in the "result" property.'
 })
@@ -4213,7 +4202,7 @@ var enumOutputStrategy = (enumValues) => {
 if (value.result.length === 0 || possibleEnumValues.length === 0) {
 return {
 success: false,
-error: new
+error: new import_provider21.TypeValidationError({
 value,
 cause: "value must be a string in the enum"
 })
@@ -4228,7 +4217,7 @@ var enumOutputStrategy = (enumValues) => {
 };
 },
 createElementStream() {
-throw new
+throw new import_provider21.UnsupportedFunctionalityError({
 functionality: "element streams in enum mode"
 });
 }
@@ -4241,9 +4230,9 @@ function getOutputStrategy({
 }) {
 switch (output) {
 case "object":
-return objectOutputStrategy((0,
+return objectOutputStrategy((0, import_provider_utils13.asSchema)(schema));
 case "array":
-return arrayOutputStrategy((0,
+return arrayOutputStrategy((0, import_provider_utils13.asSchema)(schema));
 case "enum":
 return enumOutputStrategy(enumValues);
 case "no-schema":
@@ -4374,10 +4363,10 @@ function validateObjectGenerationInput({
 }

 // core/generate-object/generate-object.ts
-var originalGenerateId = (0,
+var originalGenerateId = (0, import_provider_utils14.createIdGenerator)({ prefix: "aiobj", size: 24 });
 async function generateObject(options) {
 const {
-model,
+model: modelArg,
 output = "object",
 system,
 prompt,
@@ -4394,6 +4383,7 @@ async function generateObject(options) {
 } = {},
 ...settings
 } = options;
+const model = resolveLanguageModel(modelArg);
 const enumValues = "enum" in options ? options.enum : void 0;
 const {
 schema: inputSchema,
@@ -4421,208 +4411,212 @@ async function generateObject(options) {
 settings: { ...callSettings, maxRetries }
 });
 const tracer = getTracer(telemetry);
-
-
-
-
-
-
-
-
-
-...baseTelemetryAttributes,
-// specific settings that only make sense on the outer level:
-"ai.prompt": {
-input: () => JSON.stringify({ system, prompt, messages })
-},
-"ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
-"ai.schema.name": schemaName,
-"ai.schema.description": schemaDescription,
-"ai.settings.output": outputStrategy.type
-}
-}),
-tracer,
-fn: async (span) => {
-var _a17;
-let result;
-let finishReason;
-let usage;
-let warnings;
-let response;
-let request;
-let resultProviderMetadata;
-const standardizedPrompt = await standardizePrompt({
-system,
-prompt,
-messages
-});
-const promptMessages = await convertToLanguageModelPrompt({
-prompt: standardizedPrompt,
-supportedUrls: await model.supportedUrls
-});
-const generateResult = await retry(
-() => recordSpan({
-name: "ai.generateObject.doGenerate",
-attributes: selectTelemetryAttributes({
-telemetry,
-attributes: {
-...assembleOperationName({
-operationId: "ai.generateObject.doGenerate",
-telemetry
-}),
-...baseTelemetryAttributes,
-"ai.prompt.messages": {
-input: () => JSON.stringify(promptMessages)
-},
-// standardized gen-ai llm span attributes:
-"gen_ai.system": model.provider,
-"gen_ai.request.model": model.modelId,
-"gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
-"gen_ai.request.max_tokens": callSettings.maxOutputTokens,
-"gen_ai.request.presence_penalty": callSettings.presencePenalty,
-"gen_ai.request.temperature": callSettings.temperature,
-"gen_ai.request.top_k": callSettings.topK,
-"gen_ai.request.top_p": callSettings.topP
-}
+try {
+return await recordSpan({
+name: "ai.generateObject",
+attributes: selectTelemetryAttributes({
+telemetry,
+attributes: {
+...assembleOperationName({
+operationId: "ai.generateObject",
+telemetry
 }),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+...baseTelemetryAttributes,
+// specific settings that only make sense on the outer level:
+"ai.prompt": {
+input: () => JSON.stringify({ system, prompt, messages })
+},
+"ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+"ai.schema.name": schemaName,
+"ai.schema.description": schemaDescription,
+"ai.settings.output": outputStrategy.type
+}
+}),
+tracer,
+fn: async (span) => {
+var _a17;
+let result;
+let finishReason;
+let usage;
+let warnings;
+let response;
+let request;
+let resultProviderMetadata;
+const standardizedPrompt = await standardizePrompt({
+system,
+prompt,
+messages
+});
+const promptMessages = await convertToLanguageModelPrompt({
+prompt: standardizedPrompt,
+supportedUrls: await model.supportedUrls
+});
+const generateResult = await retry(
+() => recordSpan({
+name: "ai.generateObject.doGenerate",
+attributes: selectTelemetryAttributes({
+telemetry,
+attributes: {
+...assembleOperationName({
+operationId: "ai.generateObject.doGenerate",
+telemetry
+}),
+...baseTelemetryAttributes,
+"ai.prompt.messages": {
+input: () => stringifyForTelemetry(promptMessages)
+},
+// standardized gen-ai llm span attributes:
+"gen_ai.system": model.provider,
+"gen_ai.request.model": model.modelId,
+"gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+"gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+"gen_ai.request.presence_penalty": callSettings.presencePenalty,
+"gen_ai.request.temperature": callSettings.temperature,
+"gen_ai.request.top_k": callSettings.topK,
+"gen_ai.request.top_p": callSettings.topP
+}
+}),
+tracer,
+fn: async (span2) => {
+var _a18, _b, _c, _d, _e, _f, _g, _h;
+const result2 = await model.doGenerate({
+responseFormat: {
+type: "json",
+schema: outputStrategy.jsonSchema,
+name: schemaName,
+description: schemaDescription
+},
+...prepareCallSettings(settings),
+prompt: promptMessages,
+providerOptions,
+abortSignal,
+headers
 });
+const responseData = {
+id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+headers: (_g = result2.response) == null ? void 0 : _g.headers,
+body: (_h = result2.response) == null ? void 0 : _h.body
+};
+const text2 = extractContentText(result2.content);
+if (text2 === void 0) {
+throw new NoObjectGeneratedError({
+message: "No object generated: the model did not return a response.",
+response: responseData,
+usage: result2.usage,
+finishReason: result2.finishReason
+});
+}
+span2.setAttributes(
+selectTelemetryAttributes({
+telemetry,
+attributes: {
+"ai.response.finishReason": result2.finishReason,
+"ai.response.object": { output: () => text2 },
+"ai.response.id": responseData.id,
+"ai.response.model": responseData.modelId,
+"ai.response.timestamp": responseData.timestamp.toISOString(),
+// TODO rename telemetry attributes to inputTokens and outputTokens
+"ai.usage.promptTokens": result2.usage.inputTokens,
+"ai.usage.completionTokens": result2.usage.outputTokens,
+// standardized gen-ai llm span attributes:
+"gen_ai.response.finish_reasons": [result2.finishReason],
+"gen_ai.response.id": responseData.id,
+"gen_ai.response.model": responseData.modelId,
+"gen_ai.usage.input_tokens": result2.usage.inputTokens,
+"gen_ai.usage.output_tokens": result2.usage.outputTokens
+}
+})
+);
+return { ...result2, objectText: text2, responseData };
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-);
-return { ...result2, objectText: text2, responseData };
+})
+);
+result = generateResult.objectText;
+finishReason = generateResult.finishReason;
+usage = generateResult.usage;
+warnings = generateResult.warnings;
+resultProviderMetadata = generateResult.providerMetadata;
+request = (_a17 = generateResult.request) != null ? _a17 : {};
+response = generateResult.responseData;
+async function processResult(result2) {
+const parseResult = await (0, import_provider_utils14.safeParseJSON)({ text: result2 });
+if (!parseResult.success) {
+throw new NoObjectGeneratedError({
+message: "No object generated: could not parse the response.",
+cause: parseResult.error,
+text: result2,
+response,
+usage,
+finishReason
+});
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-usage,
-finishReason
-});
-}
-const validationResult = await outputStrategy.validateFinalResult(
-parseResult.value,
-{
-text: result2,
-response,
-usage
+const validationResult = await outputStrategy.validateFinalResult(
+parseResult.value,
+{
+text: result2,
+response,
+usage
+}
+);
+if (!validationResult.success) {
+throw new NoObjectGeneratedError({
+message: "No object generated: response did not match schema.",
+cause: validationResult.error,
+text: result2,
+response,
+usage,
+finishReason
+});
 }
-
-if (!validationResult.success) {
-throw new NoObjectGeneratedError({
-message: "No object generated: response did not match schema.",
-cause: validationResult.error,
-text: result2,
-response,
-usage,
-finishReason
-});
+return validationResult.value;
 }
-
-
-
-
-
-
-
-
-
-
-
-
+let object2;
+try {
+object2 = await processResult(result);
+} catch (error) {
+if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider22.JSONParseError.isInstance(error.cause) || import_provider22.TypeValidationError.isInstance(error.cause))) {
+const repairedText = await repairText({
+text: result,
+error: error.cause
+});
+if (repairedText === null) {
+throw error;
+}
+object2 = await processResult(repairedText);
+} else {
 throw error;
 }
-object2 = await processResult(repairedText);
-} else {
-throw error;
 }
+span.setAttributes(
+selectTelemetryAttributes({
+telemetry,
+attributes: {
+"ai.response.finishReason": finishReason,
+"ai.response.object": {
+output: () => JSON.stringify(object2)
+},
+// TODO rename telemetry attributes to inputTokens and outputTokens
+"ai.usage.promptTokens": usage.inputTokens,
+"ai.usage.completionTokens": usage.outputTokens
+}
+})
+);
+return new DefaultGenerateObjectResult({
+object: object2,
+finishReason,
+usage,
+warnings,
+request,
+response,
+providerMetadata: resultProviderMetadata
+});
 }
-
-
-
-
-"ai.response.finishReason": finishReason,
-"ai.response.object": {
-output: () => JSON.stringify(object2)
-},
-// TODO rename telemetry attributes to inputTokens and outputTokens
-"ai.usage.promptTokens": usage.inputTokens,
-"ai.usage.completionTokens": usage.outputTokens
-}
-})
-);
-return new DefaultGenerateObjectResult({
-object: object2,
-finishReason,
-usage,
-warnings,
-request,
-response,
-providerMetadata: resultProviderMetadata
-});
-}
-});
+});
+} catch (error) {
+throw wrapGatewayError(error);
+}
 }
 var DefaultGenerateObjectResult = class {
 constructor(options) {
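`generateObject` is now wrapped in a `try/catch` that rethrows gateway failures through `wrapGatewayError`, resolves string model ids via `resolveLanguageModel`, and reports prompts with `stringifyForTelemetry`. Caller-side error handling is unchanged; a sketch:

```ts
import { generateObject, NoObjectGeneratedError, type LanguageModel } from 'ai';
import { z } from 'zod';

declare const model: LanguageModel; // assumption

try {
  const { object } = await generateObject({
    model,
    schema: z.object({ name: z.string(), age: z.number() }),
    prompt: 'Generate a fictional user.',
  });
  console.log(object);
} catch (error) {
  if (NoObjectGeneratedError.isInstance(error)) {
    // the raw model text and finish reason survive on the error
    console.error(error.text, error.finishReason);
  } else {
    throw error;
  }
}
```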
@@ -4646,7 +4640,7 @@ var DefaultGenerateObjectResult = class {
 };

 // core/generate-object/stream-object.ts
-var
+var import_provider_utils15 = require("@ai-sdk/provider-utils");

 // src/util/create-resolvable-promise.ts
 function createResolvablePromise() {
@@ -4752,11 +4746,11 @@ var DelayedPromise = class {
 this._resolve = void 0;
 this._reject = void 0;
 }
-get
-if (this.
-return this.
+get promise() {
+if (this._promise) {
+return this._promise;
 }
-this.
+this._promise = new Promise((resolve, reject) => {
 if (this.status.type === "resolved") {
 resolve(this.status.value);
 } else if (this.status.type === "rejected") {
@@ -4765,19 +4759,19 @@ var DelayedPromise = class {
 this._resolve = resolve;
 this._reject = reject;
 });
-return this.
+return this._promise;
 }
 resolve(value) {
 var _a17;
 this.status = { type: "resolved", value };
-if (this.
+if (this._promise) {
 (_a17 = this._resolve) == null ? void 0 : _a17.call(this, value);
 }
 }
 reject(error) {
 var _a17;
 this.status = { type: "rejected", error };
-if (this.
+if (this._promise) {
 (_a17 = this._reject) == null ? void 0 : _a17.call(this, error);
 }
 }
@@ -4790,7 +4784,7 @@ function now() {
 }

 // core/generate-object/stream-object.ts
-var originalGenerateId2 = (0,
+var originalGenerateId2 = (0, import_provider_utils15.createIdGenerator)({ prefix: "aiobj", size: 24 });
 function streamObject(options) {
 const {
 model,
@@ -4803,7 +4797,9 @@ function streamObject(options) {
 headers,
 experimental_telemetry: telemetry,
 providerOptions,
-onError
+onError = ({ error }) => {
+console.error(error);
+},
 onFinish,
 _internal: {
 generateId: generateId3 = originalGenerateId2,
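`streamObject` now defaults `onError` to logging via `console.error` instead of silently swallowing stream errors; pass a handler to override it. A sketch (`reportError` is a hypothetical logger):

```ts
import { streamObject, type LanguageModel } from 'ai';
import { z } from 'zod';

declare const model: LanguageModel; // assumption
declare function reportError(error: unknown): void; // hypothetical logger

const result = streamObject({
  model,
  schema: z.object({ title: z.string() }),
  prompt: 'Suggest a blog post title.',
  onError({ error }) {
    reportError(error); // replaces the new console.error default
  },
});
for await (const partial of result.partialObjectStream) {
  console.log(partial);
}
```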
@@ -4853,7 +4849,7 @@ function streamObject(options) {
 }
 var DefaultStreamObjectResult = class {
 constructor({
-model,
+model: modelArg,
 headers,
 telemetry,
 settings,
@@ -4872,12 +4868,13 @@ var DefaultStreamObjectResult = class {
 currentDate,
 now: now2
 }) {
-this.
-this.
-this.
-this.
-this.
-this.
+this._object = new DelayedPromise();
+this._usage = new DelayedPromise();
+this._providerMetadata = new DelayedPromise();
+this._warnings = new DelayedPromise();
+this._request = new DelayedPromise();
+this._response = new DelayedPromise();
+const model = resolveLanguageModel(modelArg);
 const { maxRetries, retry } = prepareRetries({
 maxRetries: maxRetriesArg
 });
@@ -4895,7 +4892,7 @@ var DefaultStreamObjectResult = class {
 transform(chunk, controller) {
 controller.enqueue(chunk);
 if (chunk.type === "error") {
-onError
+onError({ error: wrapGatewayError(chunk.error) });
 }
 }
 });
@@ -4974,7 +4971,7 @@ var DefaultStreamObjectResult = class {
 }),
 ...baseTelemetryAttributes,
 "ai.prompt.messages": {
-input: () =>
+input: () => stringifyForTelemetry(callOptions.prompt)
 },
 // standardized gen-ai llm span attributes:
 "gen_ai.system": model.provider,
@@ -4996,7 +4993,7 @@ var DefaultStreamObjectResult = class {
 })
 })
 );
-self.
+self._request.resolve(request != null ? request : {});
 let warnings;
 let usage = {
 inputTokens: void 0,
@@ -5089,9 +5086,9 @@ var DefaultStreamObjectResult = class {
 usage,
 response: fullResponse
 });
-self.
-self.
-self.
+self._usage.resolve(usage);
+self._providerMetadata.resolve(providerMetadata);
+self._response.resolve({
 ...fullResponse,
 headers: response == null ? void 0 : response.headers
 });
@@ -5105,7 +5102,7 @@ var DefaultStreamObjectResult = class {
 );
 if (validationResult.success) {
 object2 = validationResult.value;
-self.
+self._object.resolve(object2);
 } else {
 error = new NoObjectGeneratedError({
 message: "No object generated: response did not match schema.",
@@ -5115,7 +5112,7 @@ var DefaultStreamObjectResult = class {
 usage,
 finishReason
 });
-self.
+self._object.reject(error);
 }
 break;
 }
@@ -5210,22 +5207,22 @@ var DefaultStreamObjectResult = class {
 this.outputStrategy = outputStrategy;
 }
 get object() {
-return this.
+return this._object.promise;
 }
 get usage() {
-return this.
+return this._usage.promise;
 }
 get providerMetadata() {
-return this.
+return this._providerMetadata.promise;
 }
 get warnings() {
-return this.
+return this._warnings.promise;
 }
 get request() {
-return this.
+return this._request.promise;
 }
 get response() {
-return this.
+return this._response.promise;
 }
 get partialObjectStream() {
 return createAsyncIterableStream(
@@ -5295,8 +5292,8 @@ var DefaultStreamObjectResult = class {
 };

 // src/error/no-speech-generated-error.ts
-var
-var NoSpeechGeneratedError = class extends
+var import_provider23 = require("@ai-sdk/provider");
+var NoSpeechGeneratedError = class extends import_provider23.AISDKError {
 constructor(options) {
 super({
 name: "AI_NoSpeechGeneratedError",
@@ -5385,10 +5382,15 @@ var DefaultSpeechResult = class {
 };

 // core/generate-text/generate-text.ts
-var
+var import_provider_utils18 = require("@ai-sdk/provider-utils");
+
+// src/util/as-array.ts
+function asArray(value) {
+return value === void 0 ? [] : Array.isArray(value) ? value : [value];
+}

 // core/prompt/prepare-tools-and-tool-choice.ts
-var
+var import_provider_utils16 = require("@ai-sdk/provider-utils");

 // src/util/is-non-empty-object.ts
 function isNonEmptyObject(object2) {
@@ -5420,7 +5422,7 @@ function prepareToolsAndToolChoice({
 type: "function",
 name: name17,
 description: tool2.description,
-parameters: (0,
+parameters: (0, import_provider_utils16.asSchema)(tool2.parameters).jsonSchema
 };
 case "provider-defined":
 return {
@@ -5490,7 +5492,7 @@ function asContent({
 }

 // core/generate-text/parse-tool-call.ts
-var
+var import_provider_utils17 = require("@ai-sdk/provider-utils");
 async function parseToolCall({
 toolCall,
 tools,
@@ -5514,7 +5516,7 @@ async function parseToolCall({
 tools,
 parameterSchema: ({ toolName }) => {
 const { parameters } = tools[toolName];
-return (0,
+return (0, import_provider_utils17.asSchema)(parameters).jsonSchema;
 },
 system,
 messages,
@@ -5544,8 +5546,8 @@ async function doParseToolCall({
 availableTools: Object.keys(tools)
 });
 }
-const schema = (0,
-const parseResult = toolCall.args.trim() === "" ? await (0,
+const schema = (0, import_provider_utils17.asSchema)(tool2.parameters);
+const parseResult = toolCall.args.trim() === "" ? await (0, import_provider_utils17.safeValidateTypes)({ value: {}, schema }) : await (0, import_provider_utils17.safeParseJSON)({ text: toolCall.args, schema });
 if (parseResult.success === false) {
 throw new InvalidToolArgumentsError({
 toolName,
@@ -5604,8 +5606,8 @@ var DefaultStepResult = class {
 };

 // core/generate-text/stop-condition.ts
-function
-return ({ steps }) => steps.length
+function stepCountIs(stepCount) {
+return ({ steps }) => steps.length === stepCount;
 }
 function hasToolCall(toolName) {
 return ({ steps }) => {
@@ -5615,6 +5617,12 @@ function hasToolCall(toolName) {
 )) != null ? _c : false;
 };
 }
+async function isStopConditionMet({
+stopConditions,
+steps
+}) {
+return (await Promise.all(stopConditions.map((condition) => condition({ steps })))).some((result) => result);
+}

 // core/generate-text/to-response-messages.ts
 function toResponseMessages({
|
@@ -5654,300 +5662,310 @@ function toResponseMessages({
|
|
5654
5662
|
type: "tool-result",
|
5655
5663
|
toolCallId: toolResult.toolCallId,
|
5656
5664
|
toolName: toolResult.toolName,
|
5657
|
-
result: tool2.experimental_toToolResultContent(toolResult.result),
|
5658
|
-
experimental_content: tool2.experimental_toToolResultContent(
|
5659
|
-
toolResult.result
|
5660
|
-
)
|
5661
|
-
} : {
|
5662
|
-
type: "tool-result",
|
5663
|
-
toolCallId: toolResult.toolCallId,
|
5664
|
-
toolName: toolResult.toolName,
|
5665
|
-
result: toolResult.result
|
5666
|
-
};
|
5667
|
-
});
|
5668
|
-
if (toolResultContent.length > 0) {
|
5669
|
-
responseMessages.push({
|
5670
|
-
role: "tool",
|
5671
|
-
content: toolResultContent
|
5672
|
-
});
|
5673
|
-
}
|
5674
|
-
return responseMessages;
|
5675
|
-
}
|
5676
|
-
|
5677
|
-
// core/generate-text/generate-text.ts
|
5678
|
-
var originalGenerateId3 = (0,
|
5679
|
-
prefix: "aitxt",
|
5680
|
-
size: 24
|
5681
|
-
});
|
5682
|
-
async function generateText({
|
5683
|
-
model,
|
5684
|
-
tools,
|
5685
|
-
toolChoice,
|
5686
|
-
system,
|
5687
|
-
prompt,
|
5688
|
-
messages,
|
5689
|
-
maxRetries: maxRetriesArg,
|
5690
|
-
abortSignal,
|
5691
|
-
headers,
|
5692
|
-
|
5693
|
-
experimental_output: output,
|
5694
|
-
experimental_telemetry: telemetry,
|
5695
|
-
providerOptions,
|
5696
|
-
experimental_activeTools
|
5697
|
-
|
5698
|
-
|
5699
|
-
|
5700
|
-
|
5701
|
-
|
5702
|
-
|
5703
|
-
|
5704
|
-
|
5705
|
-
|
5706
|
-
|
5707
|
-
|
5708
|
-
const
|
5709
|
-
|
5710
|
-
|
5711
|
-
|
5712
|
-
|
5713
|
-
|
5714
|
-
|
5715
|
-
|
5716
|
-
|
5717
|
-
|
5718
|
-
|
5719
|
-
|
5720
|
-
|
5721
|
-
|
5722
|
-
|
5723
|
-
|
5724
|
-
|
5725
|
-
|
5726
|
-
|
5727
|
-
|
5728
|
-
|
5729
|
-
|
5730
|
-
|
5731
|
-
|
5732
|
-
|
5733
|
-
|
5734
|
-
|
5735
|
-
|
5736
|
-
|
5737
|
-
|
5738
|
-
|
5739
|
-
|
5740
|
-
|
5741
|
-
var _a17, _b, _c, _d;
|
5742
|
-
const callSettings2 = prepareCallSettings(settings);
|
5743
|
-
let currentModelResponse;
|
5744
|
-
let currentToolCalls = [];
|
5745
|
-
let currentToolResults = [];
|
5746
|
-
const responseMessages = [];
|
5747
|
-
const steps = [];
|
5748
|
-
do {
|
5749
|
-
const stepInputMessages = [
|
5750
|
-
...initialPrompt.messages,
|
5751
|
-
...responseMessages
|
5752
|
-
];
|
5753
|
-
const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
|
5754
|
-
model,
|
5755
|
-
steps,
|
5756
|
-
stepNumber: steps.length
|
5757
|
-
}));
|
5758
|
-
const promptMessages = await convertToLanguageModelPrompt({
|
5759
|
-
prompt: {
|
5760
|
-
system: initialPrompt.system,
|
5761
|
-
messages: stepInputMessages
|
5762
|
-
},
|
5763
|
-
supportedUrls: await model.supportedUrls
|
5764
|
-
});
|
5765
|
-
const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
|
5766
|
-
const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
|
5767
|
-
tools,
|
5768
|
-
toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
|
5769
|
-
activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
|
5770
|
-
});
|
5771
|
-
currentModelResponse = await retry(
|
5772
|
-
() => {
|
5773
|
-
var _a18;
|
5774
|
-
return recordSpan({
|
5775
|
-
name: "ai.generateText.doGenerate",
|
5776
|
-
attributes: selectTelemetryAttributes({
|
5777
|
-
telemetry,
|
5778
|
-
attributes: {
|
5779
|
-
...assembleOperationName({
|
5780
|
-
operationId: "ai.generateText.doGenerate",
|
5781
|
-
telemetry
|
5782
|
-
}),
|
5783
|
-
...baseTelemetryAttributes,
|
5784
|
-
// model:
|
5785
|
-
"ai.model.provider": stepModel.provider,
|
5786
|
-
"ai.model.id": stepModel.modelId,
|
5787
|
-
// prompt:
|
5788
|
-
"ai.prompt.messages": {
|
5789
|
-
input: () => JSON.stringify(promptMessages)
|
5790
|
-
},
|
5791
|
-
"ai.prompt.tools": {
|
5792
|
-
// convert the language model level tools:
|
5793
|
-
input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
|
5794
|
-
},
|
5795
|
-
"ai.prompt.toolChoice": {
|
5796
|
-
input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
|
5797
|
-
},
|
5798
|
-
// standardized gen-ai llm span attributes:
|
5799
|
-
"gen_ai.system": stepModel.provider,
|
5800
|
-
"gen_ai.request.model": stepModel.modelId,
|
5801
|
-
"gen_ai.request.frequency_penalty": settings.frequencyPenalty,
|
5802
|
-
"gen_ai.request.max_tokens": settings.maxOutputTokens,
|
5803
|
-
"gen_ai.request.presence_penalty": settings.presencePenalty,
|
5804
|
-
"gen_ai.request.stop_sequences": settings.stopSequences,
|
5805
|
-
"gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
|
5806
|
-
"gen_ai.request.top_k": settings.topK,
|
5807
|
-
"gen_ai.request.top_p": settings.topP
|
5808
|
-
}
|
5809
|
-
}),
|
5810
|
-
tracer,
|
5811
|
-
fn: async (span2) => {
|
5812
|
-
var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
|
5813
|
-
const result = await stepModel.doGenerate({
|
5814
|
-
...callSettings2,
|
5815
|
-
tools: stepTools,
|
5816
|
-
toolChoice: stepToolChoice,
|
5817
|
-
responseFormat: output == null ? void 0 : output.responseFormat,
|
5818
|
-
prompt: promptMessages,
|
5819
|
-
providerOptions,
|
5820
|
-
abortSignal,
|
5821
|
-
headers
|
5822
|
-
});
|
5823
|
-
const responseData = {
|
5824
|
-
id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
|
5825
|
-
timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
|
5826
|
-
modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : stepModel.modelId,
|
5827
|
-
headers: (_g = result.response) == null ? void 0 : _g.headers,
|
5828
|
-
body: (_h = result.response) == null ? void 0 : _h.body
|
5829
|
-
};
|
5830
|
-
span2.setAttributes(
|
5831
|
-
selectTelemetryAttributes({
|
5832
|
-
telemetry,
|
5833
|
-
attributes: {
|
5834
|
-
"ai.response.finishReason": result.finishReason,
|
5835
|
-
"ai.response.text": {
|
5836
|
-
output: () => extractContentText(result.content)
|
5837
|
-
},
|
5838
|
-
"ai.response.toolCalls": {
|
5839
|
-
output: () => {
|
5840
|
-
const toolCalls = asToolCalls(result.content);
|
5841
|
-
return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
|
5842
|
-
}
|
5843
|
-
},
|
5844
|
-
"ai.response.id": responseData.id,
|
5845
|
-
"ai.response.model": responseData.modelId,
|
5846
|
-
"ai.response.timestamp": responseData.timestamp.toISOString(),
|
5847
|
-
// TODO rename telemetry attributes to inputTokens and outputTokens
|
5848
|
-
"ai.usage.promptTokens": result.usage.inputTokens,
|
5849
|
-
"ai.usage.completionTokens": result.usage.outputTokens,
|
5850
|
-
// standardized gen-ai llm span attributes:
|
5851
|
-
"gen_ai.response.finish_reasons": [result.finishReason],
|
5852
|
-
"gen_ai.response.id": responseData.id,
|
5853
|
-
"gen_ai.response.model": responseData.modelId,
|
5854
|
-
"gen_ai.usage.input_tokens": result.usage.inputTokens,
|
5855
|
-
"gen_ai.usage.output_tokens": result.usage.outputTokens
|
5856
|
-
}
|
5857
|
-
})
|
5858
|
-
);
|
5859
|
-
return { ...result, response: responseData };
|
5860
|
-
}
|
5861
|
-
});
|
5665
|
+
result: tool2.experimental_toToolResultContent(toolResult.result),
|
5666
|
+
experimental_content: tool2.experimental_toToolResultContent(
|
5667
|
+
toolResult.result
|
5668
|
+
)
|
5669
|
+
} : {
|
5670
|
+
type: "tool-result",
|
5671
|
+
toolCallId: toolResult.toolCallId,
|
5672
|
+
toolName: toolResult.toolName,
|
5673
|
+
result: toolResult.result
|
5674
|
+
};
|
5675
|
+
});
|
5676
|
+
if (toolResultContent.length > 0) {
|
5677
|
+
responseMessages.push({
|
5678
|
+
role: "tool",
|
5679
|
+
content: toolResultContent
|
5680
|
+
});
|
5681
|
+
}
|
5682
|
+
return responseMessages;
|
5683
|
+
}
|
5684
|
+
|
5685
|
+
// core/generate-text/generate-text.ts
|
5686
|
+
var originalGenerateId3 = (0, import_provider_utils18.createIdGenerator)({
|
5687
|
+
prefix: "aitxt",
|
5688
|
+
size: 24
|
5689
|
+
});
|
5690
|
+
async function generateText({
|
5691
|
+
model: modelArg,
|
5692
|
+
tools,
|
5693
|
+
toolChoice,
|
5694
|
+
system,
|
5695
|
+
prompt,
|
5696
|
+
messages,
|
5697
|
+
maxRetries: maxRetriesArg,
|
5698
|
+
abortSignal,
|
5699
|
+
headers,
|
5700
|
+
stopWhen = stepCountIs(1),
|
5701
|
+
experimental_output: output,
|
5702
|
+
experimental_telemetry: telemetry,
|
5703
|
+
providerOptions,
|
5704
|
+
experimental_activeTools,
|
5705
|
+
activeTools = experimental_activeTools,
|
5706
|
+
experimental_prepareStep,
|
5707
|
+
prepareStep = experimental_prepareStep,
|
5708
|
+
experimental_repairToolCall: repairToolCall,
|
5709
|
+
_internal: {
|
5710
|
+
generateId: generateId3 = originalGenerateId3,
|
5711
|
+
currentDate = () => /* @__PURE__ */ new Date()
|
5712
|
+
} = {},
|
5713
|
+
onStepFinish,
|
5714
|
+
...settings
|
5715
|
+
}) {
|
5716
|
+
const model = resolveLanguageModel(modelArg);
|
5717
|
+
const stopConditions = asArray(stopWhen);
|
5718
|
+
const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
|
5719
|
+
const callSettings = prepareCallSettings(settings);
|
5720
|
+
const baseTelemetryAttributes = getBaseTelemetryAttributes({
|
5721
|
+
model,
|
5722
|
+
telemetry,
|
5723
|
+
headers,
|
5724
|
+
settings: { ...callSettings, maxRetries }
|
5725
|
+
});
|
5726
|
+
const initialPrompt = await standardizePrompt({
|
5727
|
+
system,
|
5728
|
+
prompt,
|
5729
|
+
messages
|
5730
|
+
});
|
5731
|
+
const tracer = getTracer(telemetry);
|
5732
|
+
try {
|
5733
|
+
return await recordSpan({
|
5734
|
+
name: "ai.generateText",
|
5735
|
+
attributes: selectTelemetryAttributes({
|
5736
|
+
telemetry,
|
5737
|
+
attributes: {
|
5738
|
+
...assembleOperationName({
|
5739
|
+
operationId: "ai.generateText",
|
5740
|
+
telemetry
|
5741
|
+
}),
|
5742
|
+
...baseTelemetryAttributes,
|
5743
|
+
// model:
|
5744
|
+
"ai.model.provider": model.provider,
|
5745
|
+
"ai.model.id": model.modelId,
|
5746
|
+
// specific settings that only make sense on the outer level:
|
5747
|
+
"ai.prompt": {
|
5748
|
+
input: () => JSON.stringify({ system, prompt, messages })
|
5862
5749
|
}
|
5863
|
-
|
5864
|
-
|
5865
|
-
|
5866
|
-
|
5867
|
-
|
5868
|
-
|
5869
|
-
|
5870
|
-
|
5871
|
-
|
5872
|
-
|
5750
|
+
}
|
5751
|
+
}),
|
5752
|
+
tracer,
|
5753
|
+
fn: async (span) => {
|
5754
|
+
var _a17, _b, _c, _d, _e;
|
5755
|
+
const callSettings2 = prepareCallSettings(settings);
|
5756
|
+
let currentModelResponse;
|
5757
|
+
let currentToolCalls = [];
|
5758
|
+
let currentToolResults = [];
|
5759
|
+
const responseMessages = [];
|
5760
|
+
const steps = [];
|
5761
|
+
do {
|
5762
|
+
const stepInputMessages = [
|
5763
|
+
...initialPrompt.messages,
|
5764
|
+
...responseMessages
|
5765
|
+
];
|
5766
|
+
const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
|
5767
|
+
model,
|
5768
|
+
steps,
|
5769
|
+
stepNumber: steps.length
|
5770
|
+
}));
|
5771
|
+
const promptMessages = await convertToLanguageModelPrompt({
|
5772
|
+
prompt: {
|
5773
|
+
system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
|
5873
5774
|
messages: stepInputMessages
|
5775
|
+
},
|
5776
|
+
supportedUrls: await model.supportedUrls
|
5777
|
+
});
|
5778
|
+
const stepModel = resolveLanguageModel(
|
5779
|
+
(_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
|
5780
|
+
);
|
5781
|
+
const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
|
5782
|
+
tools,
|
5783
|
+
toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
|
5784
|
+
activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
|
5785
|
+
});
|
5786
|
+
currentModelResponse = await retry(
|
5787
|
+
() => {
|
5788
|
+
var _a18;
|
5789
|
+
return recordSpan({
|
5790
|
+
name: "ai.generateText.doGenerate",
|
5791
|
+
attributes: selectTelemetryAttributes({
|
5792
|
+
telemetry,
|
5793
|
+
attributes: {
|
5794
|
+
...assembleOperationName({
|
5795
|
+
operationId: "ai.generateText.doGenerate",
|
5796
|
+
telemetry
|
5797
|
+
}),
|
5798
|
+
...baseTelemetryAttributes,
|
5799
|
+
// model:
|
5800
|
+
"ai.model.provider": stepModel.provider,
|
5801
|
+
"ai.model.id": stepModel.modelId,
|
5802
|
+
// prompt:
|
5803
|
+
"ai.prompt.messages": {
|
5804
|
+
input: () => stringifyForTelemetry(promptMessages)
|
5805
|
+
},
|
5806
|
+
"ai.prompt.tools": {
|
5807
|
+
// convert the language model level tools:
|
5808
|
+
input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
|
5809
|
+
},
|
5810
|
+
"ai.prompt.toolChoice": {
|
5811
|
+
input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
|
5812
|
+
},
|
5813
|
+
// standardized gen-ai llm span attributes:
|
5814
|
+
"gen_ai.system": stepModel.provider,
|
5815
|
+
"gen_ai.request.model": stepModel.modelId,
|
5816
|
+
"gen_ai.request.frequency_penalty": settings.frequencyPenalty,
|
5817
|
+
"gen_ai.request.max_tokens": settings.maxOutputTokens,
|
5818
|
+
"gen_ai.request.presence_penalty": settings.presencePenalty,
|
5819
|
+
"gen_ai.request.stop_sequences": settings.stopSequences,
|
5820
|
+
"gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
|
5821
|
+
"gen_ai.request.top_k": settings.topK,
|
5822
|
+
"gen_ai.request.top_p": settings.topP
|
5823
|
+
}
|
5824
|
+
}),
|
5825
|
+
tracer,
|
5826
|
+
fn: async (span2) => {
|
5827
|
+
var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
|
5828
|
+
const result = await stepModel.doGenerate({
|
5829
|
+
...callSettings2,
|
5830
|
+
tools: stepTools,
|
5831
|
+
toolChoice: stepToolChoice,
|
5832
|
+
responseFormat: output == null ? void 0 : output.responseFormat,
|
5833
|
+
prompt: promptMessages,
|
5834
|
+
providerOptions,
|
5835
|
+
abortSignal,
|
5836
|
+
headers
|
5837
|
+
});
|
5838
|
+
const responseData = {
|
5839
|
+
id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
|
5840
|
+
timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
|
5841
|
+
modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
|
5842
|
+
headers: (_g = result.response) == null ? void 0 : _g.headers,
|
5843
|
+
body: (_h = result.response) == null ? void 0 : _h.body
|
5844
|
+
};
|
5845
|
+
span2.setAttributes(
|
5846
|
+
selectTelemetryAttributes({
|
5847
|
+
telemetry,
|
5848
|
+
attributes: {
|
5849
|
+
"ai.response.finishReason": result.finishReason,
|
5850
|
+
"ai.response.text": {
|
5851
|
+
output: () => extractContentText(result.content)
|
5852
|
+
},
|
5853
|
+
"ai.response.toolCalls": {
|
5854
|
+
output: () => {
|
5855
|
+
const toolCalls = asToolCalls(result.content);
|
5856
|
+
return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
|
5857
|
+
}
|
5858
|
+
},
|
5859
|
+
"ai.response.id": responseData.id,
|
5860
|
+
"ai.response.model": responseData.modelId,
|
5861
|
+
"ai.response.timestamp": responseData.timestamp.toISOString(),
|
5862
|
+
// TODO rename telemetry attributes to inputTokens and outputTokens
|
5863
|
+
"ai.usage.promptTokens": result.usage.inputTokens,
|
5864
|
+
"ai.usage.completionTokens": result.usage.outputTokens,
|
5865
|
+
// standardized gen-ai llm span attributes:
|
5866
|
+
"gen_ai.response.finish_reasons": [result.finishReason],
|
5867
|
+
"gen_ai.response.id": responseData.id,
|
5868
|
+
"gen_ai.response.model": responseData.modelId,
|
5869
|
+
"gen_ai.usage.input_tokens": result.usage.inputTokens,
|
5870
|
+
"gen_ai.usage.output_tokens": result.usage.outputTokens
|
5871
|
+
}
|
5872
|
+
})
|
5873
|
+
);
|
5874
|
+
return { ...result, response: responseData };
|
5875
|
+
}
|
5876
|
+
});
|
5877
|
+
}
|
5878
|
+
);
|
5879
|
+
currentToolCalls = await Promise.all(
|
5880
|
+
currentModelResponse.content.filter(
|
5881
|
+
(part) => part.type === "tool-call"
|
5882
|
+
).map(
|
5883
|
+
(toolCall) => parseToolCall({
|
5884
|
+
toolCall,
|
5885
|
+
tools,
|
5886
|
+
repairToolCall,
|
5887
|
+
system,
|
5888
|
+
messages: stepInputMessages
|
5889
|
+
})
|
5890
|
+
)
|
5891
|
+
);
|
5892
|
+
currentToolResults = tools == null ? [] : await executeTools({
|
5893
|
+
toolCalls: currentToolCalls,
|
5894
|
+
tools,
|
5895
|
+
tracer,
|
5896
|
+
telemetry,
|
5897
|
+
messages: stepInputMessages,
|
5898
|
+
abortSignal
|
5899
|
+
});
|
5900
|
+
const stepContent = asContent({
|
5901
|
+
content: currentModelResponse.content,
|
5902
|
+
toolCalls: currentToolCalls,
|
5903
|
+
toolResults: currentToolResults
|
5904
|
+
});
|
5905
|
+
responseMessages.push(
|
5906
|
+
...toResponseMessages({
|
5907
|
+
content: stepContent,
|
5908
|
+
tools: tools != null ? tools : {}
|
5874
5909
|
})
|
5875
|
-
)
|
5876
|
-
|
5877
|
-
currentToolResults = tools == null ? [] : await executeTools({
|
5878
|
-
toolCalls: currentToolCalls,
|
5879
|
-
tools,
|
5880
|
-
tracer,
|
5881
|
-
telemetry,
|
5882
|
-
messages: stepInputMessages,
|
5883
|
-
abortSignal
|
5884
|
-
});
|
5885
|
-
const stepContent = asContent({
|
5886
|
-
content: currentModelResponse.content,
|
5887
|
-
toolCalls: currentToolCalls,
|
5888
|
-
toolResults: currentToolResults
|
5889
|
-
});
|
5890
|
-
responseMessages.push(
|
5891
|
-
...toResponseMessages({
|
5910
|
+
);
|
5911
|
+
const currentStepResult = new DefaultStepResult({
|
5892
5912
|
content: stepContent,
|
5893
|
-
|
5913
|
+
finishReason: currentModelResponse.finishReason,
|
5914
|
+
usage: currentModelResponse.usage,
|
5915
|
+
warnings: currentModelResponse.warnings,
|
5916
|
+
providerMetadata: currentModelResponse.providerMetadata,
|
5917
|
+
request: (_e = currentModelResponse.request) != null ? _e : {},
|
5918
|
+
response: {
|
5919
|
+
...currentModelResponse.response,
|
5920
|
+
// deep clone msgs to avoid mutating past messages in multi-step:
|
5921
|
+
messages: structuredClone(responseMessages)
|
5922
|
+
}
|
5923
|
+
});
|
5924
|
+
steps.push(currentStepResult);
|
5925
|
+
await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
|
5926
|
+
} while (
|
5927
|
+
// there are tool calls:
|
5928
|
+
currentToolCalls.length > 0 && // all current tool calls have results:
|
5929
|
+
currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
|
5930
|
+
!await isStopConditionMet({ stopConditions, steps })
|
5931
|
+
);
|
5932
|
+
span.setAttributes(
|
5933
|
+
selectTelemetryAttributes({
|
5934
|
+
telemetry,
|
5935
|
+
attributes: {
|
5936
|
+
"ai.response.finishReason": currentModelResponse.finishReason,
|
5937
|
+
"ai.response.text": {
|
5938
|
+
output: () => extractContentText(currentModelResponse.content)
|
5939
|
+
},
|
5940
|
+
"ai.response.toolCalls": {
|
5941
|
+
output: () => {
|
5942
|
+
const toolCalls = asToolCalls(currentModelResponse.content);
|
5943
|
+
return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
|
5944
|
+
}
|
5945
|
+
},
|
5946
|
+
// TODO rename telemetry attributes to inputTokens and outputTokens
|
5947
|
+
"ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
|
5948
|
+
"ai.usage.completionTokens": currentModelResponse.usage.outputTokens
|
5949
|
+
}
|
5894
5950
|
})
|
5895
5951
|
);
|
5896
|
-
const
|
5897
|
-
|
5898
|
-
|
5899
|
-
|
5900
|
-
|
5901
|
-
|
5902
|
-
|
5903
|
-
|
5904
|
-
|
5905
|
-
|
5906
|
-
|
5907
|
-
}
|
5952
|
+
const lastStep = steps[steps.length - 1];
|
5953
|
+
return new DefaultGenerateTextResult({
|
5954
|
+
steps,
|
5955
|
+
resolvedOutput: await (output == null ? void 0 : output.parseOutput(
|
5956
|
+
{ text: lastStep.text },
|
5957
|
+
{
|
5958
|
+
response: lastStep.response,
|
5959
|
+
usage: lastStep.usage,
|
5960
|
+
finishReason: lastStep.finishReason
|
5961
|
+
}
|
5962
|
+
))
|
5908
5963
|
});
|
5909
|
-
|
5910
|
-
|
5911
|
-
|
5912
|
-
|
5913
|
-
|
5914
|
-
currentToolResults.length === currentToolCalls.length && // continue until the stop condition is met:
|
5915
|
-
!await continueUntil({ steps })
|
5916
|
-
);
|
5917
|
-
span.setAttributes(
|
5918
|
-
selectTelemetryAttributes({
|
5919
|
-
telemetry,
|
5920
|
-
attributes: {
|
5921
|
-
"ai.response.finishReason": currentModelResponse.finishReason,
|
5922
|
-
"ai.response.text": {
|
5923
|
-
output: () => extractContentText(currentModelResponse.content)
|
5924
|
-
},
|
5925
|
-
"ai.response.toolCalls": {
|
5926
|
-
output: () => {
|
5927
|
-
const toolCalls = asToolCalls(currentModelResponse.content);
|
5928
|
-
return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
|
5929
|
-
}
|
5930
|
-
},
|
5931
|
-
// TODO rename telemetry attributes to inputTokens and outputTokens
|
5932
|
-
"ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
|
5933
|
-
"ai.usage.completionTokens": currentModelResponse.usage.outputTokens
|
5934
|
-
}
|
5935
|
-
})
|
5936
|
-
);
|
5937
|
-
const lastStep = steps[steps.length - 1];
|
5938
|
-
return new DefaultGenerateTextResult({
|
5939
|
-
steps,
|
5940
|
-
resolvedOutput: await (output == null ? void 0 : output.parseOutput(
|
5941
|
-
{ text: lastStep.text },
|
5942
|
-
{
|
5943
|
-
response: lastStep.response,
|
5944
|
-
usage: lastStep.usage,
|
5945
|
-
finishReason: lastStep.finishReason
|
5946
|
-
}
|
5947
|
-
))
|
5948
|
-
});
|
5949
|
-
}
|
5950
|
-
});
|
5964
|
+
}
|
5965
|
+
});
|
5966
|
+
} catch (error) {
|
5967
|
+
throw wrapGatewayError(error);
|
5968
|
+
}
|
5951
5969
|
}
|
5952
5970
|
async function executeTools({
|
5953
5971
|
toolCalls,
|
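Note: the rewritten `generateText` replaces the removed `maxSteps` loop bound with `stopWhen` stop conditions (default `stepCountIs(1)`) and stabilizes `prepareStep` for per-step overrides. A minimal usage sketch against this alpha surface; the model binding, tool, and prompt are placeholders, not part of the diff:

```ts
import { generateText, stepCountIs, tool, type LanguageModel } from 'ai';
import { z } from 'zod';

declare const model: LanguageModel; // placeholder: any resolved language model

const result = await generateText({
  model,
  tools: {
    weather: tool({
      description: 'Get the weather for a city',
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => `Sunny in ${city}`, // stub result
    }),
  },
  // was `maxSteps: 5`: keep running tool-call steps until 5 steps finished
  stopWhen: stepCountIs(5),
  // optional per-step overrides (system, model, toolChoice, activeTools):
  prepareStep: ({ stepNumber }) =>
    stepNumber === 0 ? { toolChoice: 'required' } : undefined,
  prompt: 'What is the weather in Berlin?',
});
console.log(result.text);
```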
@@ -5960,6 +5978,14 @@ async function executeTools({
  const toolResults = await Promise.all(
  toolCalls.map(async ({ toolCallId, toolName, args }) => {
  const tool2 = tools[toolName];
+ if ((tool2 == null ? void 0 : tool2.onArgsAvailable) != null) {
+ await tool2.onArgsAvailable({
+ args,
+ toolCallId,
+ messages,
+ abortSignal
+ });
+ }
  if ((tool2 == null ? void 0 : tool2.execute) == null) {
  return void 0;
  }
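Note: `executeTools` now awaits an optional `onArgsAvailable` hook on the tool before checking for `execute`, so it fires even for tools without an execute function. A sketch of a tool using it; the tool itself is illustrative, and the hook's typings may lag the runtime in this alpha:

```ts
import { tool } from 'ai';
import { z } from 'zod';

const search = tool({
  description: 'Search the documentation',
  parameters: z.object({ query: z.string() }),
  // called once complete, parsed arguments exist for a tool call:
  onArgsAvailable: async ({ args, toolCallId }) => {
    console.log(`tool call ${toolCallId} args:`, args);
  },
  execute: async ({ query }) => `results for ${query}`, // stub
});
```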
@@ -6116,7 +6142,7 @@ __export(output_exports, {
  object: () => object,
  text: () => text
  });
- var
+ var import_provider_utils19 = require("@ai-sdk/provider-utils");
  var text = () => ({
  type: "text",
  responseFormat: { type: "text" },
@@ -6130,7 +6156,7 @@ var text = () => ({
  var object = ({
  schema: inputSchema
  }) => {
- const schema = (0,
+ const schema = (0, import_provider_utils19.asSchema)(inputSchema);
  return {
  type: "object",
  responseFormat: {
@@ -6156,7 +6182,7 @@ var object = ({
  }
  },
  async parseOutput({ text: text2 }, context) {
- const parseResult = await (0,
+ const parseResult = await (0, import_provider_utils19.safeParseJSON)({ text: text2 });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -6167,7 +6193,7 @@ var object = ({
  finishReason: context.finishReason
  });
  }
- const validationResult = await (0,
+ const validationResult = await (0, import_provider_utils19.safeValidateTypes)({
  value: parseResult.value,
  schema
  });
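Note: the `Output` helpers now go through the renumbered `import_provider_utils19` binding for `asSchema`, `safeParseJSON`, and `safeValidateTypes`; parse or validation failures still surface as `NoObjectGeneratedError` carrying the response, usage, and finish reason. A usage sketch, assuming the `experimental_output` option as wired into `generateText` above (model is a placeholder):

```ts
import { generateText, Output } from 'ai';
import { z } from 'zod';

const { experimental_output } = await generateText({
  model, // placeholder
  experimental_output: Output.object({
    schema: z.object({ name: z.string(), age: z.number() }),
  }),
  prompt: 'Generate an example person.',
});
// experimental_output is the parsed object, validated against the schema
```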
@@ -6187,8 +6213,8 @@ var object = ({
  };

  // core/generate-text/smooth-stream.ts
- var
- var
+ var import_provider_utils20 = require("@ai-sdk/provider-utils");
+ var import_provider24 = require("@ai-sdk/provider");
  var CHUNKING_REGEXPS = {
  word: /\S+\s+/m,
  line: /\n+/m
@@ -6196,7 +6222,7 @@ var CHUNKING_REGEXPS = {
  function smoothStream({
  delayInMs = 10,
  chunking = "word",
- _internal: { delay: delay2 =
+ _internal: { delay: delay2 = import_provider_utils20.delay } = {}
  } = {}) {
  let detectChunk;
  if (typeof chunking === "function") {
@@ -6218,7 +6244,7 @@ function smoothStream({
  } else {
  const chunkingRegex = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
  if (chunkingRegex == null) {
- throw new
+ throw new import_provider24.InvalidArgumentError({
  argument: "chunking",
  message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
  });
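Note: `smoothStream` keeps its chunking contract ("word", "line", a RegExp, or a custom detector function) and now throws the provider package's `InvalidArgumentError` on anything else. Sketch (model is a placeholder):

```ts
import { smoothStream, streamText } from 'ai';

const result = streamText({
  model, // placeholder
  prompt: 'Write a short story.',
  experimental_transform: smoothStream({
    delayInMs: 20,
    chunking: 'line', // or e.g. /[.!?]\s+/m for sentence-like chunks
  }),
});
```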
@@ -6256,15 +6282,10 @@ function smoothStream({
  }

  // core/generate-text/stream-text.ts
- var
-
- // src/util/as-array.ts
- function asArray(value) {
- return value === void 0 ? [] : Array.isArray(value) ? value : [value];
- }
+ var import_provider_utils22 = require("@ai-sdk/provider-utils");

  // core/generate-text/run-tools-transformation.ts
- var
+ var import_provider_utils21 = require("@ai-sdk/provider-utils");
  function runToolsTransformation({
  tools,
  generatorStream,
@@ -6349,8 +6370,16 @@ function runToolsTransformation({
  });
  controller.enqueue(toolCall);
  const tool2 = tools[toolCall.toolName];
+ if (tool2.onArgsAvailable != null) {
+ await tool2.onArgsAvailable({
+ args: toolCall.args,
+ toolCallId: toolCall.toolCallId,
+ messages,
+ abortSignal
+ });
+ }
  if (tool2.execute != null) {
- const toolExecutionId = (0,
+ const toolExecutionId = (0, import_provider_utils21.generateId)();
  outstandingToolResults.add(toolExecutionId);
  recordSpan({
  name: "ai.toolCall",
@@ -6459,7 +6488,7 @@ function runToolsTransformation({
  }

  // core/generate-text/stream-text.ts
- var originalGenerateId4 = (0,
+ var originalGenerateId4 = (0, import_provider_utils22.createIdGenerator)({
  prefix: "aitxt",
  size: 24
  });
@@ -6473,17 +6502,21 @@ function streamText({
  maxRetries,
  abortSignal,
  headers,
-
+ stopWhen = stepCountIs(1),
  experimental_output: output,
  experimental_telemetry: telemetry,
+ prepareStep,
  providerOptions,
  experimental_toolCallStreaming = false,
  toolCallStreaming = experimental_toolCallStreaming,
- experimental_activeTools
+ experimental_activeTools,
+ activeTools = experimental_activeTools,
  experimental_repairToolCall: repairToolCall,
  experimental_transform: transform,
  onChunk,
- onError
+ onError = ({ error }) => {
+ console.error(error);
+ },
  onFinish,
  onStepFinish,
  _internal: {
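Note: `streamText` gains the same control surface as `generateText` (`stopWhen` defaulting to `stepCountIs(1)`, `prepareStep`, promoted `activeTools`) and its `onError` callback now defaults to `({ error }) => console.error(error)` instead of silently dropping stream errors. Sketch of overriding the new default; the error sink is a placeholder:

```ts
import { stepCountIs, streamText } from 'ai';

const result = streamText({
  model, // placeholder
  prompt: 'Summarize the release notes.',
  stopWhen: stepCountIs(3),
  onError: ({ error }) => {
    reportError(error); // placeholder for your own error sink
  },
});
```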
@@ -6494,7 +6527,7 @@ function streamText({
  ...settings
  }) {
  return new DefaultStreamTextResult({
- model,
+ model: resolveLanguageModel(model),
  telemetry,
  headers,
  settings,
@@ -6509,9 +6542,10 @@ function streamText({
  transforms: asArray(transform),
  activeTools,
  repairToolCall,
-
+ stopConditions: asArray(stopWhen),
  output,
  providerOptions,
+ prepareStep,
  onChunk,
  onError,
  onFinish,
@@ -6586,9 +6620,10 @@ var DefaultStreamTextResult = class {
  transforms,
  activeTools,
  repairToolCall,
-
+ stopConditions,
  output,
  providerOptions,
+ prepareStep,
  now: now2,
  currentDate,
  generateId: generateId3,
@@ -6597,18 +6632,12 @@ var DefaultStreamTextResult = class {
  onFinish,
  onStepFinish
  }) {
- this.
- this.
- this.
- if (maxSteps2 < 1) {
- throw new InvalidArgumentError({
- parameter: "maxSteps",
- value: maxSteps2,
- message: "maxSteps must be at least 1"
- });
- }
+ this._totalUsage = new DelayedPromise();
+ this._finishReason = new DelayedPromise();
+ this._steps = new DelayedPromise();
  this.output = output;
  this.generateId = generateId3;
+ let stepFinish;
  let activeReasoningPart = void 0;
  let recordedContent = [];
  const recordedResponseMessages = [];
@@ -6626,7 +6655,7 @@ var DefaultStreamTextResult = class {
  await (onChunk == null ? void 0 : onChunk({ chunk: part }));
  }
  if (part.type === "error") {
- await
+ await onError({ error: wrapGatewayError(part.error) });
  }
  if (part.type === "text") {
  const latestContent = recordedContent[recordedContent.length - 1];
@@ -6690,6 +6719,7 @@ var DefaultStreamTextResult = class {
  recordedContent = [];
  activeReasoningPart = void 0;
  recordedResponseMessages.push(...stepMessages);
+ stepFinish.resolve();
  }
  if (part.type === "finish") {
  recordedTotalUsage = part.totalUsage;
@@ -6707,9 +6737,9 @@ var DefaultStreamTextResult = class {
  outputTokens: void 0,
  totalTokens: void 0
  };
- self.
- self.
- self.
+ self._finishReason.resolve(finishReason);
+ self._totalUsage.resolve(totalUsage);
+ self._steps.resolve(recordedSteps);
  const finalStep = recordedSteps[recordedSteps.length - 1];
  await (onFinish == null ? void 0 : onFinish({
  finishReason,
@@ -6800,8 +6830,7 @@ var DefaultStreamTextResult = class {
  // specific settings that only make sense on the outer level:
  "ai.prompt": {
  input: () => JSON.stringify({ system, prompt, messages })
- }
- "ai.settings.maxSteps": maxSteps2
+ }
  }
  }),
  tracer,
@@ -6813,6 +6842,8 @@ var DefaultStreamTextResult = class {
  responseMessages,
  usage
  }) {
+ var _a17, _b, _c, _d;
+ stepFinish = new DelayedPromise();
  const initialPrompt = await standardizePrompt({
  system,
  prompt,
@@ -6822,16 +6853,26 @@ var DefaultStreamTextResult = class {
  ...initialPrompt.messages,
  ...responseMessages
  ];
+ const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+ model,
+ steps: recordedSteps,
+ stepNumber: recordedSteps.length
+ }));
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
- system: initialPrompt.system,
+ system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
  messages: stepInputMessages
  },
  supportedUrls: await model.supportedUrls
  });
- const
-
-
+ const stepModel = resolveLanguageModel(
+ (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
+ );
+ const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+ tools,
+ toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
+ activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
+ });
  const {
  result: { stream: stream2, response, request },
  doStreamSpan,
@@ -6847,24 +6888,23 @@ var DefaultStreamTextResult = class {
  telemetry
  }),
  ...baseTelemetryAttributes,
+ // model:
+ "ai.model.provider": stepModel.provider,
+ "ai.model.id": stepModel.modelId,
+ // prompt:
  "ai.prompt.messages": {
- input: () =>
+ input: () => stringifyForTelemetry(promptMessages)
  },
  "ai.prompt.tools": {
  // convert the language model level tools:
- input: () =>
- var _a17;
- return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
- (tool2) => JSON.stringify(tool2)
- );
- }
+ input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
  },
  "ai.prompt.toolChoice": {
- input: () =>
+ input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
  },
  // standardized gen-ai llm span attributes:
- "gen_ai.system":
- "gen_ai.request.model":
+ "gen_ai.system": stepModel.provider,
+ "gen_ai.request.model": stepModel.modelId,
  "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
  "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
  "gen_ai.request.presence_penalty": callSettings.presencePenalty,
@@ -6881,9 +6921,10 @@ var DefaultStreamTextResult = class {
  startTimestampMs: now2(),
  // get before the call
  doStreamSpan: doStreamSpan2,
- result: await
+ result: await stepModel.doStream({
  ...callSettings,
-
+ tools: stepTools,
+ toolChoice: stepToolChoice,
  responseFormat: output == null ? void 0 : output.responseFormat,
  prompt: promptMessages,
  providerOptions,
@@ -6894,7 +6935,7 @@ var DefaultStreamTextResult = class {
  }
  })
  );
- const
+ const streamWithToolResults = runToolsTransformation({
  tools,
  generatorStream: stream2,
  toolCallStreaming,
@@ -6933,10 +6974,10 @@ var DefaultStreamTextResult = class {
  stepText += chunk.text;
  }
  self.addStream(
-
+ streamWithToolResults.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var
+ var _a18, _b2, _c2, _d2;
  if (chunk.type === "stream-start") {
  warnings = chunk.warnings;
  return;
@@ -6999,9 +7040,9 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (
- timestamp: (
- modelId: (
+ id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
+ timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
+ modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
  };
  break;
  }
@@ -7013,7 +7054,7 @@ var DefaultStreamTextResult = class {
  doStreamSpan.addEvent("ai.stream.finish");
  doStreamSpan.setAttributes({
  "ai.response.msToFinish": msToFinish,
- "ai.response.avgOutputTokensPerSecond": 1e3 * ((
+ "ai.response.avgOutputTokensPerSecond": 1e3 * ((_d2 = stepUsage.outputTokens) != null ? _d2 : 0) / msToFinish
  });
  break;
  }
@@ -7027,8 +7068,28 @@ var DefaultStreamTextResult = class {
  controller.enqueue(chunk);
  break;
  }
- case "tool-call-streaming-start":
+ case "tool-call-streaming-start": {
+ const tool2 = tools == null ? void 0 : tools[chunk.toolName];
+ if ((tool2 == null ? void 0 : tool2.onArgsStreamingStart) != null) {
+ await tool2.onArgsStreamingStart({
+ toolCallId: chunk.toolCallId,
+ messages: stepInputMessages,
+ abortSignal
+ });
+ }
+ controller.enqueue(chunk);
+ break;
+ }
  case "tool-call-delta": {
+ const tool2 = tools == null ? void 0 : tools[chunk.toolName];
+ if ((tool2 == null ? void 0 : tool2.onArgsStreamingDelta) != null) {
+ await tool2.onArgsStreamingDelta({
+ argsTextDelta: chunk.argsTextDelta,
+ toolCallId: chunk.toolCallId,
+ messages: stepInputMessages,
+ abortSignal
+ });
+ }
  controller.enqueue(chunk);
  break;
  }
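Note: the stream transform above adds two more optional tool hooks that fire while arguments are still streaming; they are only reached when tool-call streaming emits `tool-call-streaming-start` and `tool-call-delta` chunks. Illustrative sketch:

```ts
import { tool } from 'ai';
import { z } from 'zod';

const draw = tool({
  description: 'Render a diagram',
  parameters: z.object({ spec: z.string() }),
  onArgsStreamingStart: async ({ toolCallId }) => {
    console.log(`argument streaming started for ${toolCallId}`);
  },
  onArgsStreamingDelta: async ({ toolCallId, argsTextDelta }) => {
    process.stdout.write(argsTextDelta); // incremental argument text
  },
  execute: async ({ spec }) => `rendered: ${spec}`, // stub
});
```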
@@ -7088,9 +7149,13 @@ var DefaultStreamTextResult = class {
  }
  });
  const combinedUsage = addLanguageModelUsage(usage, stepUsage);
-
- stepToolCalls.length > 0 && // all current tool calls have results:
- stepToolResults.length === stepToolCalls.length
+ await stepFinish.promise;
+ if (stepToolCalls.length > 0 && // all current tool calls have results:
+ stepToolResults.length === stepToolCalls.length && // continue until a stop condition is met:
+ !await isStopConditionMet({
+ stopConditions,
+ steps: recordedSteps
+ })) {
  responseMessages.push(
  ...toResponseMessages({
  content: stepContent,
@@ -7138,7 +7203,7 @@ var DefaultStreamTextResult = class {
  });
  }
  get steps() {
- return this.
+ return this._steps.promise;
  }
  get finalStep() {
  return this.steps.then((steps) => steps[steps.length - 1]);
@@ -7183,10 +7248,10 @@ var DefaultStreamTextResult = class {
  return this.finalStep.then((step) => step.response);
  }
  get totalUsage() {
- return this.
+ return this._totalUsage.promise;
  }
  get finishReason() {
- return this.
+ return this._finishReason.promise;
  }
  /**
  Split out a new stream from the original stream.
@@ -7259,8 +7324,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning = false,
  sendSources = false,
-
-
+ sendStart = true,
+ sendFinish = true,
  onError = () => "An error occurred."
  // mask error messages for safety by default
  } = {}) {
@@ -7304,16 +7369,25 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "source": {
- if (sendSources) {
+ if (sendSources && part.sourceType === "url") {
  controller.enqueue({
- type: "source",
-
- id: part.id,
+ type: "source-url",
+ sourceId: part.id,
  url: part.url,
  title: part.title,
  providerMetadata: part.providerMetadata
  });
  }
+ if (sendSources && part.sourceType === "document") {
+ controller.enqueue({
+ type: "source-document",
+ sourceId: part.id,
+ mediaType: part.mediaType,
+ title: part.title,
+ filename: part.filename,
+ providerMetadata: part.providerMetadata
+ });
+ }
  break;
  }
  case "tool-call-streaming-start": {
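Note: with `sendSources` enabled, the UI message stream now splits sources into `source-url` and `source-document` parts and renames `id` to `sourceId`; consumers matching on `type: "source"` must handle both shapes. The fields below are taken from the enqueue calls above; which of them are optional is an assumption:

```ts
type SourceUrlPart = {
  type: 'source-url';
  sourceId: string;
  url: string;
  title?: string;
  providerMetadata?: unknown;
};

type SourceDocumentPart = {
  type: 'source-document';
  sourceId: string;
  mediaType: string;
  title?: string;
  filename?: string;
  providerMetadata?: unknown;
};
```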
@@ -7373,7 +7447,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "start": {
- if (
+ if (sendStart) {
  const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "start",
@@ -7384,7 +7458,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "finish": {
- if (
+ if (sendFinish) {
  const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "finish",
@@ -7401,38 +7475,12 @@ var DefaultStreamTextResult = class {
  }
  })
  );
-
- return baseStream;
- }
- const state = createStreamingUIMessageState({
- lastMessage,
- newMessageId: messageId != null ? messageId : this.generateId()
- });
- const runUpdateMessageJob = async (job) => {
- await job({ state, write: () => {
- } });
- };
- return processUIMessageStream({
+ return handleUIMessageStreamFinish({
  stream: baseStream,
-
-
-
-
- controller.enqueue(chunk);
- },
- flush() {
- const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
- onFinish({
- isContinuation: isContinuation2,
- responseMessage: state.message,
- messages: [
- ...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
- state.message
- ]
- });
- }
- })
- );
+ newMessageId: messageId != null ? messageId : this.generateId(),
+ originalMessages,
+ onFinish
+ });
  }
  pipeUIMessageStreamToResponse(response, {
  newMessageId,
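Note: `sendStart` and `sendFinish` now default to `true`, and the hand-rolled `processUIMessageStream` flush is replaced by `handleUIMessageStreamFinish`. Turning the flags off is what allows stitching several results into a single UI message; a sketch, assuming the `toUIMessageStreamResponse` method this class exposes (`result` and `followUp` are placeholder `streamText` results):

```ts
// first part of a multi-part answer: leave the UI message open
const first = result.toUIMessageStreamResponse({ sendFinish: false });

// a continuation written into the same UI message: skip the 'start' part
const next = followUp.toUIMessageStreamResponse({ sendStart: false });
```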
@@ -7441,8 +7489,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError,
  ...init
  } = {}) {
@@ -7455,8 +7503,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError
  }),
  ...init
@@ -7476,8 +7524,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError,
  ...init
  } = {}) {
@@ -7489,8 +7537,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError
  }),
  ...init
@@ -7733,7 +7781,7 @@ var doWrap = ({
  };

  // core/registry/custom-provider.ts
- var
+ var import_provider25 = require("@ai-sdk/provider");
  function customProvider({
  languageModels,
  textEmbeddingModels,
@@ -7748,7 +7796,7 @@ function customProvider({
  if (fallbackProvider) {
  return fallbackProvider.languageModel(modelId);
  }
- throw new
+ throw new import_provider25.NoSuchModelError({ modelId, modelType: "languageModel" });
  },
  textEmbeddingModel(modelId) {
  if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -7757,7 +7805,7 @@ function customProvider({
  if (fallbackProvider) {
  return fallbackProvider.textEmbeddingModel(modelId);
  }
- throw new
+ throw new import_provider25.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
  },
  imageModel(modelId) {
  if (imageModels != null && modelId in imageModels) {
@@ -7766,19 +7814,19 @@ function customProvider({
  if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
  return fallbackProvider.imageModel(modelId);
  }
- throw new
+ throw new import_provider25.NoSuchModelError({ modelId, modelType: "imageModel" });
  }
  };
  }
  var experimental_customProvider = customProvider;

  // core/registry/no-such-provider-error.ts
- var
+ var import_provider26 = require("@ai-sdk/provider");
  var name16 = "AI_NoSuchProviderError";
  var marker16 = `vercel.ai.error.${name16}`;
  var symbol16 = Symbol.for(marker16);
  var _a16;
- var NoSuchProviderError = class extends
+ var NoSuchProviderError = class extends import_provider26.NoSuchModelError {
  constructor({
  modelId,
  modelType,
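Note: `customProvider` keeps its alias-plus-fallback lookup and now throws the re-imported `NoSuchModelError` when neither matches. Sketch; the provider package and model ids are illustrative, not part of this diff:

```ts
import { customProvider } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed companion provider

export const myProvider = customProvider({
  languageModels: { fast: openai('gpt-4o-mini') },
  fallbackProvider: openai,
});

myProvider.languageModel('fast');   // resolves the alias above
myProvider.languageModel('gpt-4o'); // falls through to the fallback
// without fallbackProvider, unknown ids throw NoSuchModelError
```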
@@ -7792,13 +7840,13 @@ var NoSuchProviderError = class extends import_provider25.NoSuchModelError {
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return
+ return import_provider26.AISDKError.hasMarker(error, marker16);
  }
  };
  _a16 = symbol16;

  // core/registry/provider-registry.ts
- var
+ var import_provider27 = require("@ai-sdk/provider");
  function createProviderRegistry(providers, {
  separator = ":"
  } = {}) {
@@ -7837,7 +7885,7 @@ var DefaultProviderRegistry = class {
  splitId(id, modelType) {
  const index = id.indexOf(this.separator);
  if (index === -1) {
- throw new
+ throw new import_provider27.NoSuchModelError({
  modelId: id,
  modelType,
  message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
@@ -7850,7 +7898,7 @@ var DefaultProviderRegistry = class {
  const [providerId, modelId] = this.splitId(id, "languageModel");
  const model = (_b = (_a17 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a17, modelId);
  if (model == null) {
- throw new
+ throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "languageModel" });
  }
  return model;
  }
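Note: `createProviderRegistry` still resolves ids as providerId + separator + modelId (separator defaults to ":") and raises `NoSuchModelError` for malformed or unknown ids. Sketch with assumed provider packages:

```ts
import { createProviderRegistry } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed
import { anthropic } from '@ai-sdk/anthropic'; // assumed

const registry = createProviderRegistry({ openai, anthropic });
const model = registry.languageModel('openai:gpt-4o'); // "providerId:modelId"

// a custom separator changes the expected id format:
const spaced = createProviderRegistry({ openai }, { separator: ' > ' });
const model2 = spaced.languageModel('openai > gpt-4o');
```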
@@ -7860,7 +7908,7 @@ var DefaultProviderRegistry = class {
  const provider = this.getProvider(providerId);
  const model = (_a17 = provider.textEmbeddingModel) == null ? void 0 : _a17.call(provider, modelId);
  if (model == null) {
- throw new
+ throw new import_provider27.NoSuchModelError({
  modelId: id,
  modelType: "textEmbeddingModel"
  });
@@ -7873,14 +7921,14 @@ var DefaultProviderRegistry = class {
  const provider = this.getProvider(providerId);
  const model = (_a17 = provider.imageModel) == null ? void 0 : _a17.call(provider, modelId);
  if (model == null) {
- throw new
+ throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "imageModel" });
  }
  return model;
  }
  };

  // core/tool/mcp/mcp-client.ts
- var
+ var import_provider_utils24 = require("@ai-sdk/provider-utils");

  // core/tool/tool.ts
  function tool(tool2) {
@@ -7888,7 +7936,7 @@ function tool(tool2) {
  }

  // core/tool/mcp/mcp-sse-transport.ts
- var
+ var import_provider_utils23 = require("@ai-sdk/provider-utils");

  // core/tool/mcp/json-rpc-message.ts
  var import_zod10 = require("zod");
@@ -8059,7 +8107,7 @@ var SseMCPTransport = class {
  (_b = this.onerror) == null ? void 0 : _b.call(this, error);
  return reject(error);
  }
- const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0,
+ const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils23.createEventSourceParserStream)());
  const reader = stream.getReader();
  const processEvents = async () => {
  var _a18, _b2, _c2;
@@ -8383,7 +8431,7 @@ var MCPClient = class {
  if (schemas !== "automatic" && !(name17 in schemas)) {
  continue;
  }
- const parameters = schemas === "automatic" ? (0,
+ const parameters = schemas === "automatic" ? (0, import_provider_utils24.jsonSchema)({
  ...inputSchema,
  properties: (_a17 = inputSchema.properties) != null ? _a17 : {},
  additionalProperties: false
@@ -8447,8 +8495,8 @@ var MCPClient = class {
  };

  // src/error/no-transcript-generated-error.ts
- var
- var NoTranscriptGeneratedError = class extends
+ var import_provider28 = require("@ai-sdk/provider");
+ var NoTranscriptGeneratedError = class extends import_provider28.AISDKError {
  constructor(options) {
  super({
  name: "AI_NoTranscriptGeneratedError",
@@ -8513,10 +8561,11 @@ var DefaultTranscriptionResult = class {
  0 && (module.exports = {
  AISDKError,
  APICallError,
-
+ AbstractChat,
  DefaultChatTransport,
  DownloadError,
  EmptyResponseBodyError,
+ GLOBAL_DEFAULT_PROVIDER,
  InvalidArgumentError,
  InvalidDataContentError,
  InvalidMessageRoleError,
@@ -8538,14 +8587,14 @@ var DefaultTranscriptionResult = class {
  NoSuchToolError,
  Output,
  RetryError,
+ SerialJobExecutor,
+ TextStreamChatTransport,
  ToolCallRepairError,
  ToolExecutionError,
  TypeValidationError,
  UnsupportedFunctionalityError,
- appendClientMessage,
  asSchema,
  assistantModelMessageSchema,
- callChatApi,
  callCompletionApi,
  convertFileListToFileUIParts,
  convertToCoreMessages,
@@ -8562,7 +8611,6 @@ var DefaultTranscriptionResult = class {
  createUIMessageStream,
  createUIMessageStreamResponse,
  customProvider,
- defaultChatStore,
  defaultSettingsMiddleware,
  embed,
  embedMany,
@@ -8572,7 +8620,6 @@ var DefaultTranscriptionResult = class {
  experimental_generateImage,
  experimental_generateSpeech,
  experimental_transcribe,
- extractMaxToolInvocationStep,
  extractReasoningMiddleware,
  generateId,
  generateObject,
@@ -8580,24 +8627,21 @@ var DefaultTranscriptionResult = class {
  getTextFromDataUrl,
  getToolInvocations,
  hasToolCall,
- isAssistantMessageWithCompletedToolCalls,
  isDeepEqualData,
  jsonSchema,
- maxSteps,
  modelMessageSchema,
  parsePartialJson,
  pipeTextStreamToResponse,
  pipeUIMessageStreamToResponse,
- shouldResubmitMessages,
  simulateReadableStream,
  simulateStreamingMiddleware,
  smoothStream,
+ stepCountIs,
  streamObject,
  streamText,
  systemModelMessageSchema,
  tool,
  toolModelMessageSchema,
- updateToolCallResult,
  userModelMessageSchema,
  wrapLanguageModel
  });
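Note: the updated export map summarizes the breaking surface of this range: the chat-store helpers (`appendClientMessage`, `callChatApi`, `defaultChatStore`, `shouldResubmitMessages`, `updateToolCallResult`, ...) are gone in favor of `AbstractChat` and `TextStreamChatTransport`, and `maxSteps` gives way to stop conditions. A migration sketch for step control (model is a placeholder; the tool name is illustrative):

```ts
import { generateText, hasToolCall, stepCountIs } from 'ai';

await generateText({
  model, // placeholder
  // before: maxSteps: 5
  stopWhen: [stepCountIs(5), hasToolCall('finalAnswer')],
  prompt: '...',
});
```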