ai 5.0.0-alpha.1 → 5.0.0-alpha.11
This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +159 -0
- package/dist/index.d.mts +441 -563
- package/dist/index.d.ts +441 -563
- package/dist/index.js +1534 -1490
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1410 -1355
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +25 -5
- package/dist/internal/index.d.ts +25 -5
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/package.json +6 -5
package/dist/index.mjs
CHANGED
@@ -449,21 +449,8 @@ function pipeTextStreamToResponse({
   });
 }
 
-// src/ui/
-
-  messages,
-  message
-}) {
-  return [
-    ...messages.length > 0 && messages[messages.length - 1].id === message.id ? messages.slice(0, -1) : messages,
-    message
-  ];
-}
-
-// src/ui/call-chat-api.ts
-import {
-  parseJsonEventStream
-} from "@ai-sdk/provider-utils";
+// src/ui/call-completion-api.ts
+import { parseJsonEventStream } from "@ai-sdk/provider-utils";
 
 // src/ui-message-stream/ui-message-stream-parts.ts
 import { z } from "zod";
@@ -504,14 +491,22 @@ var uiMessageStreamPartSchema = z.union([
     providerMetadata: z.record(z.any()).optional()
   }),
   z.object({
-    type: z.literal("source"),
-
-    id: z.string(),
+    type: z.literal("source-url"),
+    sourceId: z.string(),
     url: z.string(),
     title: z.string().optional(),
     providerMetadata: z.any().optional()
     // Use z.any() for generic metadata
   }),
+  z.object({
+    type: z.literal("source-document"),
+    sourceId: z.string(),
+    mediaType: z.string(),
+    title: z.string(),
+    filename: z.string().optional(),
+    providerMetadata: z.any().optional()
+    // Use z.any() for generic metadata
+  }),
   z.object({
     type: z.literal("file"),
     url: z.string(),
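The former `source` part is split into two variants here. Transcribed by hand from the zod schema above (these type aliases are illustrative only, not names exported by the package):

```ts
// Shapes of the two new source parts, read off the schema above.
type SourceUrlUIPart = {
  type: "source-url";
  sourceId: string;
  url: string;
  title?: string;
  providerMetadata?: unknown; // z.any() in the schema
};

type SourceDocumentUIPart = {
  type: "source-document";
  sourceId: string;
  mediaType: string;
  title: string;
  filename?: string;
  providerMetadata?: unknown; // z.any() in the schema
};
```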
@@ -570,6 +565,170 @@ async function consumeStream({
   }
 }
 
+// src/ui/process-text-stream.ts
+async function processTextStream({
+  stream,
+  onTextPart
+}) {
+  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
+  while (true) {
+    const { done, value } = await reader.read();
+    if (done) {
+      break;
+    }
+    await onTextPart(value);
+  }
+}
+
+// src/ui/call-completion-api.ts
+var getOriginalFetch = () => fetch;
+async function callCompletionApi({
+  api,
+  prompt,
+  credentials,
+  headers,
+  body,
+  streamProtocol = "data",
+  setCompletion,
+  setLoading,
+  setError,
+  setAbortController,
+  onFinish,
+  onError,
+  fetch: fetch2 = getOriginalFetch()
+}) {
+  var _a17;
+  try {
+    setLoading(true);
+    setError(void 0);
+    const abortController = new AbortController();
+    setAbortController(abortController);
+    setCompletion("");
+    const response = await fetch2(api, {
+      method: "POST",
+      body: JSON.stringify({
+        prompt,
+        ...body
+      }),
+      credentials,
+      headers: {
+        "Content-Type": "application/json",
+        ...headers
+      },
+      signal: abortController.signal
+    }).catch((err) => {
+      throw err;
+    });
+    if (!response.ok) {
+      throw new Error(
+        (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+      );
+    }
+    if (!response.body) {
+      throw new Error("The response body is empty.");
+    }
+    let result = "";
+    switch (streamProtocol) {
+      case "text": {
+        await processTextStream({
+          stream: response.body,
+          onTextPart: (chunk) => {
+            result += chunk;
+            setCompletion(result);
+          }
+        });
+        break;
+      }
+      case "data": {
+        await consumeStream({
+          stream: parseJsonEventStream({
+            stream: response.body,
+            schema: uiMessageStreamPartSchema
+          }).pipeThrough(
+            new TransformStream({
+              async transform(part) {
+                if (!part.success) {
+                  throw part.error;
+                }
+                const streamPart = part.value;
+                if (streamPart.type === "text") {
+                  result += streamPart.text;
+                  setCompletion(result);
+                } else if (streamPart.type === "error") {
+                  throw new Error(streamPart.errorText);
+                }
+              }
+            })
+          ),
+          onError: (error) => {
+            throw error;
+          }
+        });
+        break;
+      }
+      default: {
+        const exhaustiveCheck = streamProtocol;
+        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
+      }
+    }
+    if (onFinish) {
+      onFinish(prompt, result);
+    }
+    setAbortController(null);
+    return result;
+  } catch (err) {
+    if (err.name === "AbortError") {
+      setAbortController(null);
+      return null;
+    }
+    if (err instanceof Error) {
+      if (onError) {
+        onError(err);
+      }
+    }
+    setError(err);
+  } finally {
+    setLoading(false);
+  }
+}
+
+// src/ui/chat.ts
+import {
+  generateId as generateIdFunc
+} from "@ai-sdk/provider-utils";
+
+// src/util/serial-job-executor.ts
+var SerialJobExecutor = class {
+  constructor() {
+    this.queue = [];
+    this.isProcessing = false;
+  }
+  async processQueue() {
+    if (this.isProcessing) {
+      return;
+    }
+    this.isProcessing = true;
+    while (this.queue.length > 0) {
+      await this.queue[0]();
+      this.queue.shift();
+    }
+    this.isProcessing = false;
+  }
+  async run(job) {
+    return new Promise((resolve, reject) => {
+      this.queue.push(async () => {
+        try {
+          await job();
+          resolve();
+        } catch (error) {
+          reject(error);
+        }
+      });
+      void this.processQueue();
+    });
+  }
+};
+
 // src/ui/process-ui-message-stream.ts
 import {
   validateTypes
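A minimal sketch of driving the relocated `callCompletionApi` with plain variables; the setter-based option names come from the destructuring above, while the `/api/completion` endpoint and the export from the package root are assumptions:

```ts
import { callCompletionApi } from "ai"; // assumed export

let completion = "";
let loading = false;
let lastError: Error | undefined;

const result = await callCompletionApi({
  api: "/api/completion",            // assumed endpoint
  prompt: "Summarize this diff.",
  streamProtocol: "data",            // default; "text" reads a plain text stream
  setCompletion: (text: string) => { completion = text; },
  setLoading: (value: boolean) => { loading = value; },
  setError: (error: Error | undefined) => { lastError = error; },
  setAbortController: () => {},
  onFinish: (prompt: string, text: string) => console.log(text),
  onError: (error: Error) => console.error(error),
});
```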
@@ -945,14 +1104,6 @@ async function parsePartialJson(jsonText) {
   return { value: void 0, state: "failed-parse" };
 }
 
-// src/ui/extract-max-tool-invocation-step.ts
-function extractMaxToolInvocationStep(toolInvocations) {
-  return toolInvocations == null ? void 0 : toolInvocations.reduce((max, toolInvocation) => {
-    var _a17;
-    return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
-  }, 0);
-}
-
 // src/ui/get-tool-invocations.ts
 function getToolInvocations(message) {
   return message.parts.filter(
@@ -963,12 +1114,10 @@ function getToolInvocations(message) {
 // src/ui/process-ui-message-stream.ts
 function createStreamingUIMessageState({
   lastMessage,
-  newMessageId = "
+  newMessageId = ""
 } = {}) {
-  var _a17;
   const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
-  const
-  const message = isContinuation ? structuredClone(lastMessage) : {
+  const message = isContinuation ? lastMessage : {
     id: newMessageId,
     metadata: {},
     role: "assistant",
@@ -978,8 +1127,7 @@ function createStreamingUIMessageState({
     message,
     activeTextPart: void 0,
     activeReasoningPart: void 0,
-    partialToolCalls: {}
-    step
+    partialToolCalls: {}
   };
 }
 function processUIMessageStream({
@@ -1062,16 +1210,25 @@ function processUIMessageStream({
           write();
           break;
         }
-        case "source": {
+        case "source-url": {
           state.message.parts.push({
-            type: "source",
-
-
-
-
-
-
-
+            type: "source-url",
+            sourceId: part.sourceId,
+            url: part.url,
+            title: part.title,
+            providerMetadata: part.providerMetadata
+          });
+          write();
+          break;
+        }
+        case "source-document": {
+          state.message.parts.push({
+            type: "source-document",
+            sourceId: part.sourceId,
+            mediaType: part.mediaType,
+            title: part.title,
+            filename: part.filename,
+            providerMetadata: part.providerMetadata
           });
           write();
           break;
@@ -1080,13 +1237,11 @@ function processUIMessageStream({
           const toolInvocations = getToolInvocations(state.message);
           state.partialToolCalls[part.toolCallId] = {
             text: "",
-            step: state.step,
             toolName: part.toolName,
             index: toolInvocations.length
           };
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: void 0
@@ -1102,7 +1257,6 @@ function processUIMessageStream({
           );
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: partialToolCall.step,
             toolCallId: part.toolCallId,
             toolName: partialToolCall.toolName,
             args: partialArgs
@@ -1113,7 +1267,6 @@ function processUIMessageStream({
         case "tool-call": {
           updateToolInvocationPart(part.toolCallId, {
             state: "call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: part.args
@@ -1126,7 +1279,6 @@ function processUIMessageStream({
           if (result != null) {
             updateToolInvocationPart(part.toolCallId, {
               state: "result",
-              step: state.step,
               toolCallId: part.toolCallId,
               toolName: part.toolName,
               args: part.args,
@@ -1165,7 +1317,6 @@ function processUIMessageStream({
           break;
         }
         case "finish-step": {
-          state.step += 1;
           state.activeTextPart = void 0;
           state.activeReasoningPart = void 0;
           await updateMessageMetadata(part.metadata);
@@ -1207,14 +1358,7 @@ function processUIMessageStream({
             (partArg) => part.type === partArg.type && part.id === partArg.id
           ) : void 0;
           if (existingPart != null) {
-
-            existingPart.value = mergeObjects(
-              existingPart.data,
-              part.data
-            );
-          } else {
-            existingPart.data = part.data;
-          }
+            existingPart.data = isObject(existingPart.data) && isObject(part.data) ? mergeObjects(existingPart.data, part.data) : part.data;
           } else {
             state.message.parts.push(part);
           }
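A worked example of the new reconciliation rule above for data parts with a matching type and id: object payloads are deep-merged via `mergeObjects`, while any non-object payload simply replaces the previous value. All values are hypothetical:

```ts
const existingPart = { type: "data-weather", id: "w1", data: { city: "Berlin", tempC: 18 } };
const incoming = { type: "data-weather", id: "w1", data: { tempC: 21 } };

// Both sides are objects, so the payloads merge:
//   existingPart.data -> { city: "Berlin", tempC: 21 }
// Had incoming.data been e.g. "n/a" (not an object), the ternary above
// would assign it directly:
//   existingPart.data -> "n/a"
```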
@@ -1235,47 +1379,62 @@ function isObject(value) {
   return typeof value === "object" && value !== null;
 }
 
-// src/ui/
-function
-
+// src/ui/should-resubmit-messages.ts
+function shouldResubmitMessages({
+  originalMaxToolInvocationStep,
+  originalMessageCount,
+  maxSteps,
+  messages
 }) {
-
-
-
-
-
-
-
-
-
-
-
-      controller.enqueue({ type: "finish" });
-    }
-  })
+  const lastMessage = messages[messages.length - 1];
+  const lastMessageStepStartCount = lastMessage.parts.filter(
+    (part) => part.type === "step-start"
+  ).length;
+  return (
+    // check if the feature is enabled:
+    maxSteps > 1 && // ensure there is a last message:
+    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
+    (messages.length > originalMessageCount || lastMessageStepStartCount !== originalMaxToolInvocationStep) && // check that next step is possible:
+    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
+    lastMessageStepStartCount < maxSteps
   );
 }
+function isAssistantMessageWithCompletedToolCalls(message) {
+  if (!message) {
+    return false;
+  }
+  if (message.role !== "assistant") {
+    return false;
+  }
+  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
+    return part.type === "step-start" ? index : lastIndex;
+  }, -1);
+  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
+  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
+}
 
-// src/ui/
-
+// src/ui/default-chat-transport.ts
+import {
+  parseJsonEventStream as parseJsonEventStream2
+} from "@ai-sdk/provider-utils";
+var getOriginalFetch2 = () => fetch;
 async function fetchUIMessageStream({
   api,
   body,
-  streamProtocol = "ui-message",
   credentials,
   headers,
-
-  fetch: fetch2 =
+  abortSignal,
+  fetch: fetch2 = getOriginalFetch2(),
   requestType = "generate"
 }) {
-  var _a17
-  const response = requestType === "resume" ? await fetch2(`${api}?
+  var _a17;
+  const response = requestType === "resume" ? await fetch2(`${api}?id=${body.id}`, {
     method: "GET",
     headers: {
       "Content-Type": "application/json",
       ...headers
     },
-    signal:
+    signal: abortSignal,
     credentials
   }) : await fetch2(api, {
     method: "POST",
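A sketch of the new resubmission check above: steps are now counted as the number of `step-start` parts on the last message, replacing the removed `extractMaxToolInvocationStep` bookkeeping. The message below is hypothetical:

```ts
const lastMessage = {
  role: "assistant",
  parts: [
    { type: "step-start" },
    {
      type: "tool-invocation",
      toolInvocation: { state: "result", toolCallId: "t1", toolName: "weather", result: "sunny" },
    },
  ],
};

const stepStartCount = lastMessage.parts.filter(
  (part) => part.type === "step-start",
).length; // 1

// With maxSteps = 3 and an unchanged message count, shouldResubmitMessages
// returns true: the last step's tool invocations all carry a result, and
// stepStartCount (1) differs from the original step count while staying
// below maxSteps.
```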
@@ -1284,20 +1443,18 @@ async function fetchUIMessageStream({
       "Content-Type": "application/json",
       ...headers
     },
-    signal:
+    signal: abortSignal,
     credentials
   });
   if (!response.ok) {
     throw new Error(
-      (
+      (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
     );
   }
   if (!response.body) {
     throw new Error("The response body is empty.");
   }
-  return
-    stream: response.body.pipeThrough(new TextDecoderStream())
-  }) : parseJsonEventStream({
+  return parseJsonEventStream2({
     stream: response.body,
     schema: uiMessageStreamPartSchema
   }).pipeThrough(
@@ -1311,554 +1468,291 @@ async function fetchUIMessageStream({
     })
   );
 }
-
-
-
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  lastMessage,
-  messageMetadataSchema
-}) {
-  const state = createStreamingUIMessageState({
-    lastMessage,
-    newMessageId: generateId3()
-  });
-  const runUpdateMessageJob = async (job) => {
-    await job({
-      state,
-      write: () => {
-        onUpdate({ message: state.message });
-      }
-    });
-  };
-  await consumeStream({
-    stream: processUIMessageStream({
-      stream,
-      onToolCall,
-      messageMetadataSchema,
-      runUpdateMessageJob
-    }),
-    onError: (error) => {
-      throw error;
-    }
-  });
-  onFinish == null ? void 0 : onFinish({ message: state.message });
-}
-async function callChatApi({
-  api,
-  body,
-  streamProtocol = "ui-message",
-  credentials,
-  headers,
-  abortController,
-  onUpdate,
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  fetch: fetch2 = getOriginalFetch(),
-  lastMessage,
-  requestType = "generate",
-  messageMetadataSchema
-}) {
-  const stream = await fetchUIMessageStream({
-    api,
-    body,
-    streamProtocol,
+var DefaultChatTransport = class {
+  constructor({
+    api = "/api/chat",
     credentials,
     headers,
-
+    body,
     fetch: fetch2,
-
-  })
-
-
-
-
-
-
-    lastMessage,
-    messageMetadataSchema
-  });
-}
-
-// src/ui/call-completion-api.ts
-import { parseJsonEventStream as parseJsonEventStream2 } from "@ai-sdk/provider-utils";
-
-// src/ui/process-text-stream.ts
-async function processTextStream({
-  stream,
-  onTextPart
-}) {
-  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
-  while (true) {
-    const { done, value } = await reader.read();
-    if (done) {
-      break;
-    }
-    await onTextPart(value);
+    prepareRequest
+  } = {}) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequest = prepareRequest;
   }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  fetch: fetch2 = getOriginalFetch2()
-}) {
-  var _a17;
-  try {
-    setLoading(true);
-    setError(void 0);
-    const abortController = new AbortController();
-    setAbortController(abortController);
-    setCompletion("");
-    const response = await fetch2(api, {
-      method: "POST",
-      body: JSON.stringify({
-        prompt,
-        ...body
-      }),
-      credentials,
-      headers: {
-        "Content-Type": "application/json",
-        ...headers
-      },
-      signal: abortController.signal
-    }).catch((err) => {
-      throw err;
+  submitMessages({
+    chatId,
+    messages,
+    abortSignal,
+    metadata,
+    headers,
+    body,
+    requestType
+  }) {
+    var _a17, _b;
+    const preparedRequest = (_a17 = this.prepareRequest) == null ? void 0 : _a17.call(this, {
+      id: chatId,
+      messages,
+      body: { ...this.body, ...body },
+      headers: { ...this.headers, ...headers },
+      credentials: this.credentials,
+      requestMetadata: metadata
     });
-
-
-
-    )
-
-
-
-
-    let result = "";
-    switch (streamProtocol) {
-      case "text": {
-        await processTextStream({
-          stream: response.body,
-          onTextPart: (chunk) => {
-            result += chunk;
-            setCompletion(result);
-          }
-        });
-        break;
-      }
-      case "data": {
-        await consumeStream({
-          stream: parseJsonEventStream2({
-            stream: response.body,
-            schema: uiMessageStreamPartSchema
-          }).pipeThrough(
-            new TransformStream({
-              async transform(part) {
-                if (!part.success) {
-                  throw part.error;
-                }
-                const streamPart = part.value;
-                if (streamPart.type === "text") {
-                  result += streamPart.text;
-                  setCompletion(result);
-                } else if (streamPart.type === "error") {
-                  throw new Error(streamPart.errorText);
-                }
-              }
-            })
-          ),
-          onError: (error) => {
-            throw error;
-          }
-        });
-        break;
-      }
-      default: {
-        const exhaustiveCheck = streamProtocol;
-        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
-      }
-    }
-    if (onFinish) {
-      onFinish(prompt, result);
-    }
-    setAbortController(null);
-    return result;
-  } catch (err) {
-    if (err.name === "AbortError") {
-      setAbortController(null);
-      return null;
-    }
-    if (err instanceof Error) {
-      if (onError) {
-        onError(err);
-      }
-    }
-    setError(err);
-  } finally {
-    setLoading(false);
-  }
-}
-
-// src/ui/chat-store.ts
-import {
-  generateId as generateIdFunc
-} from "@ai-sdk/provider-utils";
-
-// src/util/serial-job-executor.ts
-var SerialJobExecutor = class {
-  constructor() {
-    this.queue = [];
-    this.isProcessing = false;
-  }
-  async processQueue() {
-    if (this.isProcessing) {
-      return;
-    }
-    this.isProcessing = true;
-    while (this.queue.length > 0) {
-      await this.queue[0]();
-      this.queue.shift();
-    }
-    this.isProcessing = false;
-  }
-  async run(job) {
-    return new Promise((resolve, reject) => {
-      this.queue.push(async () => {
-        try {
-          await job();
-          resolve();
-        } catch (error) {
-          reject(error);
-        }
-      });
-      void this.processQueue();
+    return fetchUIMessageStream({
+      api: this.api,
+      body: (preparedRequest == null ? void 0 : preparedRequest.body) !== void 0 ? preparedRequest.body : { ...this.body, ...body, id: chatId, messages },
+      headers: (preparedRequest == null ? void 0 : preparedRequest.headers) !== void 0 ? preparedRequest.headers : { ...this.headers, ...headers },
+      credentials: (_b = preparedRequest == null ? void 0 : preparedRequest.credentials) != null ? _b : this.credentials,
+      abortSignal,
+      fetch: this.fetch,
+      requestType
     });
   }
 };
 
-// src/ui/
-function
-
-
-  maxSteps: maxSteps2,
-  messages
-}) {
-  var _a17;
-  const lastMessage = messages[messages.length - 1];
-  return (
-    // check if the feature is enabled:
-    maxSteps2 > 1 && // ensure there is a last message:
-    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
-    (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
-    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
-    ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps2
-  );
-}
-function isAssistantMessageWithCompletedToolCalls(message) {
-  if (message.role !== "assistant") {
-    return false;
+// src/ui/convert-file-list-to-file-ui-parts.ts
+async function convertFileListToFileUIParts(files) {
+  if (files == null) {
+    return [];
   }
-
-
-  }, -1);
-  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
-  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
-}
-
-// src/ui/update-tool-call-result.ts
-function updateToolCallResult({
-  messages,
-  toolCallId,
-  toolResult: result
-}) {
-  const lastMessage = messages[messages.length - 1];
-  const invocationPart = lastMessage.parts.find(
-    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
-  );
-  if (invocationPart == null) {
-    return;
+  if (!globalThis.FileList || !(files instanceof globalThis.FileList)) {
+    throw new Error("FileList is not supported in the current environment");
   }
-
-
-
-
-
+  return Promise.all(
+    Array.from(files).map(async (file) => {
+      const { name: name17, type } = file;
+      const dataUrl = await new Promise((resolve, reject) => {
+        const reader = new FileReader();
+        reader.onload = (readerEvent) => {
+          var _a17;
+          resolve((_a17 = readerEvent.target) == null ? void 0 : _a17.result);
+        };
+        reader.onerror = (error) => reject(error);
+        reader.readAsDataURL(file);
+      });
+      return {
+        type: "file",
+        mediaType: type,
+        filename: name17,
+        url: dataUrl
+      };
+    })
+  );
 }
 
-// src/ui/chat
-var
+// src/ui/chat.ts
+var AbstractChat = class {
   constructor({
-
-
-    transport,
-    maxSteps
+    generateId: generateId3 = generateIdFunc,
+    id = generateId3(),
+    transport = new DefaultChatTransport(),
+    maxSteps = 1,
     messageMetadataSchema,
-    dataPartSchemas
-
-    this.chats = new Map(
-      Object.entries(chats).map(([id, state]) => [
-        id,
-        {
-          messages: [...state.messages],
-          status: "ready",
-          activeResponse: void 0,
-          error: void 0,
-          jobExecutor: new SerialJobExecutor()
-        }
-      ])
-    );
-    this.maxSteps = maxSteps2;
-    this.transport = transport;
-    this.subscribers = /* @__PURE__ */ new Set();
-    this.generateId = generateId3 != null ? generateId3 : generateIdFunc;
-    this.messageMetadataSchema = messageMetadataSchema;
-    this.dataPartSchemas = dataPartSchemas;
-  }
-  hasChat(id) {
-    return this.chats.has(id);
-  }
-  addChat(id, messages) {
-    this.chats.set(id, {
-      messages,
-      status: "ready",
-      jobExecutor: new SerialJobExecutor()
-    });
-  }
-  getChats() {
-    return Array.from(this.chats.entries());
-  }
-  get chatCount() {
-    return this.chats.size;
-  }
-  getStatus(id) {
-    return this.getChat(id).status;
-  }
-  setStatus({
-    id,
-    status,
-    error
-  }) {
-    const chat = this.getChat(id);
-    if (chat.status === status)
-      return;
-    chat.status = status;
-    chat.error = error;
-    this.emit({ type: "chat-status-changed", chatId: id, error });
-  }
-  getError(id) {
-    return this.getChat(id).error;
-  }
-  getMessages(id) {
-    return this.getChat(id).messages;
-  }
-  getLastMessage(id) {
-    const chat = this.getChat(id);
-    return chat.messages[chat.messages.length - 1];
-  }
-  subscribe(subscriber) {
-    this.subscribers.add(subscriber);
-    return () => this.subscribers.delete(subscriber);
-  }
-  setMessages({
-    id,
-    messages
-  }) {
-    this.getChat(id).messages = [...messages];
-    this.emit({ type: "chat-messages-changed", chatId: id });
-  }
-  removeAssistantResponse(id) {
-    const chat = this.getChat(id);
-    const lastMessage = chat.messages[chat.messages.length - 1];
-    if (lastMessage == null) {
-      throw new Error("Cannot remove assistant response from empty chat");
-    }
-    if (lastMessage.role !== "assistant") {
-      throw new Error("Last message is not an assistant message");
-    }
-    this.setMessages({ id, messages: chat.messages.slice(0, -1) });
-  }
-  async submitMessage({
-    chatId,
-    message,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    var _a17;
-    const chat = this.getChat(chatId);
-    const currentMessages = chat.messages;
-    await this.triggerRequest({
-      chatId,
-      messages: currentMessages.concat({
-        ...message,
-        id: (_a17 = message.id) != null ? _a17 : this.generateId()
-      }),
-      headers,
-      body,
-      requestType: "generate",
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resubmitLastUserMessage({
-    chatId,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    const messages = this.getChat(chatId).messages;
-    const messagesToSubmit = messages[messages.length - 1].role === "assistant" ? messages.slice(0, -1) : messages;
-    if (messagesToSubmit.length === 0) {
-      return;
-    }
-    return this.triggerRequest({
-      chatId,
-      requestType: "generate",
-      messages: messagesToSubmit,
-      headers,
-      body,
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resumeStream({
-    chatId,
-    headers,
-    body,
+    dataPartSchemas,
+    state,
     onError,
     onToolCall,
     onFinish
   }) {
-
-
-
-
-    messages
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    this.subscribers = /* @__PURE__ */ new Set();
+    this.activeResponse = void 0;
+    this.jobExecutor = new SerialJobExecutor();
+    this.removeAssistantResponse = () => {
+      const lastMessage = this.state.messages[this.state.messages.length - 1];
+      if (lastMessage == null) {
+        throw new Error("Cannot remove assistant response from empty chat");
+      }
+      if (lastMessage.role !== "assistant") {
+        throw new Error("Last message is not an assistant message");
+      }
+      this.state.popMessage();
+      this.emit({ type: "messages-changed" });
+    };
+    /**
+     * Append a user message to the chat list. This triggers the API call to fetch
+     * the assistant's response.
+     */
+    this.sendMessage = async (message, options = {}) => {
+      var _a17, _b;
+      let uiMessage;
+      if ("text" in message || "files" in message) {
+        const fileParts = Array.isArray(message.files) ? message.files : await convertFileListToFileUIParts(message.files);
+        uiMessage = {
+          parts: [
+            ...fileParts,
+            ..."text" in message && message.text != null ? [{ type: "text", text: message.text }] : []
+          ]
+        };
+      } else {
+        uiMessage = message;
+      }
+      this.state.pushMessage({
+        ...uiMessage,
+        id: (_a17 = uiMessage.id) != null ? _a17 : this.generateId(),
+        role: (_b = uiMessage.role) != null ? _b : "user"
       });
-      this.
-
+      this.emit({ type: "messages-changed" });
+      await this.triggerRequest({ requestType: "generate", ...options });
+    };
+    /**
+     * Regenerate the last assistant message.
+     */
+    this.reload = async (options = {}) => {
+      if (this.lastMessage === void 0) {
         return;
       }
-
-
-
-
-
+      if (this.lastMessage.role === "assistant") {
+        this.state.popMessage();
+        this.emit({ type: "messages-changed" });
+      }
+      await this.triggerRequest({ requestType: "generate", ...options });
+    };
+    /**
+     * Resume an ongoing chat generation stream. This does not resume an aborted generation.
+     */
+    this.experimental_resume = async (options = {}) => {
+      await this.triggerRequest({ requestType: "resume", ...options });
+    };
+    this.addToolResult = async ({
+      toolCallId,
+      result
+    }) => {
+      this.jobExecutor.run(async () => {
+        updateToolCallResult({
+          messages: this.state.messages,
+          toolCallId,
+          toolResult: result
        });
+        this.messages = this.state.messages;
+        if (this.status === "submitted" || this.status === "streaming") {
+          return;
+        }
+        const lastMessage = this.lastMessage;
+        if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
+          this.triggerRequest({
+            requestType: "generate"
+          });
+        }
+      });
+    };
+    /**
+     * Abort the current request immediately, keep the generated tokens if any.
+     */
+    this.stop = async () => {
+      var _a17;
+      if (this.status !== "streaming" && this.status !== "submitted")
+        return;
+      if ((_a17 = this.activeResponse) == null ? void 0 : _a17.abortController) {
+        this.activeResponse.abortController.abort();
+        this.activeResponse.abortController = void 0;
       }
-    }
+    };
+    this.id = id;
+    this.maxSteps = maxSteps;
+    this.transport = transport;
+    this.generateId = generateId3;
+    this.messageMetadataSchema = messageMetadataSchema;
+    this.dataPartSchemas = dataPartSchemas;
+    this.state = state;
+    this.onError = onError;
+    this.onToolCall = onToolCall;
+    this.onFinish = onFinish;
   }
-
-
-
-
+  /**
+   * Hook status:
+   *
+   * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
+   * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
+   * - `ready`: The full response has been received and processed; a new user message can be submitted.
+   * - `error`: An error occurred during the API request, preventing successful completion.
+   */
+  get status() {
+    return this.state.status;
+  }
+  setStatus({
+    status,
+    error
+  }) {
+    if (this.status === status)
       return;
-
-
-
-
+    this.state.status = status;
+    this.state.error = error;
+    this.emit({ type: "status-changed" });
+  }
+  get error() {
+    return this.state.error;
+  }
+  get messages() {
+    return this.state.messages;
+  }
+  get lastMessage() {
+    return this.state.messages[this.state.messages.length - 1];
+  }
+  subscribe(subscriber) {
+    this.subscribers.add(subscriber);
+    return () => this.subscribers.delete(subscriber);
+  }
+  set messages(messages) {
+    this.state.messages = messages;
+    this.emit({ type: "messages-changed" });
   }
   emit(event) {
     for (const subscriber of this.subscribers) {
-      subscriber.
-    }
-  }
-  getChat(id) {
-    if (!this.hasChat(id)) {
-      throw new Error(`chat '${id}' not found`);
+      subscriber.onChange(event);
     }
-    return this.chats.get(id);
   }
   async triggerRequest({
-    chatId,
-    messages: chatMessages,
     requestType,
+    metadata,
     headers,
-    body
-    onError,
-    onToolCall,
-    onFinish
+    body
   }) {
-
-
-
-
-    const
-    const maxStep = extractMaxToolInvocationStep(
-      getToolInvocations(chatMessages[chatMessages.length - 1])
-    );
+    var _a17, _b;
+    this.setStatus({ status: "submitted", error: void 0 });
+    const messageCount = this.state.messages.length;
+    const lastMessage = this.lastMessage;
+    const maxStep = (_a17 = lastMessage == null ? void 0 : lastMessage.parts.filter((part) => part.type === "step-start").length) != null ? _a17 : 0;
     try {
       const activeResponse = {
         state: createStreamingUIMessageState({
-          lastMessage:
-          newMessageId:
+          lastMessage: this.state.snapshot(lastMessage),
+          newMessageId: this.generateId()
        }),
        abortController: new AbortController()
      };
-
-      const stream = await
-        chatId,
-        messages:
-
+      this.activeResponse = activeResponse;
+      const stream = await this.transport.submitMessages({
+        chatId: this.id,
+        messages: this.state.messages,
+        abortSignal: activeResponse.abortController.signal,
+        metadata,
        headers,
-
+        body,
        requestType
      });
      const runUpdateMessageJob = (job) => (
        // serialize the job execution to avoid race conditions:
-
+        this.jobExecutor.run(
          () => job({
            state: activeResponse.state,
            write: () => {
-
-
-              const
-
-
-
-
-
-
+              var _a18;
+              this.setStatus({ status: "streaming" });
+              const replaceLastMessage = activeResponse.state.message.id === ((_a18 = this.lastMessage) == null ? void 0 : _a18.id);
+              if (replaceLastMessage) {
+                this.state.replaceMessage(
+                  this.state.messages.length - 1,
+                  activeResponse.state.message
+                );
+              } else {
+                this.state.pushMessage(activeResponse.state.message);
+              }
+              this.emit({
+                type: "messages-changed"
              });
            }
          })
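Taken together, the `ChatStore` removal and the additions above reshape the client chat API around a single `AbstractChat` instance with a pluggable transport and state. A rough usage sketch, assuming `AbstractChat`, `DefaultChatTransport`, and `UIMessage` are exported from the package root and that the state contract is exactly the methods the bundle calls (`messages`/`status`/`error`, `pushMessage`/`popMessage`/`replaceMessage`/`snapshot`); framework packages would normally provide this state:

```ts
import { AbstractChat, DefaultChatTransport, type UIMessage } from "ai";

// Minimal in-memory state implementing the methods the bundle calls:
const state = {
  status: "ready" as "submitted" | "streaming" | "ready" | "error",
  error: undefined as Error | undefined,
  messages: [] as UIMessage[],
  pushMessage(message: UIMessage) { this.messages = [...this.messages, message]; },
  popMessage() { this.messages = this.messages.slice(0, -1); },
  replaceMessage(index: number, message: UIMessage) {
    this.messages = [
      ...this.messages.slice(0, index),
      message,
      ...this.messages.slice(index + 1),
    ];
  },
  snapshot<T>(value: T): T { return structuredClone(value); },
};

const chat = new AbstractChat({
  state,
  transport: new DefaultChatTransport({ api: "/api/chat" }), // also the default
  maxSteps: 3, // allow automatic follow-up requests after completed tool calls
});

await chat.sendMessage({ text: "Hello!" }); // pushes a user message, then streams the reply
```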
@@ -1867,137 +1761,67 @@ var ChatStore = class {
       await consumeStream({
         stream: processUIMessageStream({
           stream,
-          onToolCall,
-          messageMetadataSchema:
-          dataPartSchemas:
+          onToolCall: this.onToolCall,
+          messageMetadataSchema: this.messageMetadataSchema,
+          dataPartSchemas: this.dataPartSchemas,
           runUpdateMessageJob
         }),
         onError: (error) => {
           throw error;
         }
       });
-      onFinish == null ? void 0 :
-      this.setStatus({
+      (_b = this.onFinish) == null ? void 0 : _b.call(this, { message: activeResponse.state.message });
+      this.setStatus({ status: "ready" });
     } catch (err) {
+      console.error(err);
       if (err.name === "AbortError") {
-        this.setStatus({
+        this.setStatus({ status: "ready" });
         return null;
       }
-      if (onError && err instanceof Error) {
-        onError(err);
+      if (this.onError && err instanceof Error) {
+        this.onError(err);
       }
-      this.setStatus({
+      this.setStatus({ status: "error", error: err });
     } finally {
-
+      this.activeResponse = void 0;
     }
-    const currentMessages = self.getMessages(chatId);
     if (shouldResubmitMessages({
       originalMaxToolInvocationStep: maxStep,
       originalMessageCount: messageCount,
-      maxSteps:
-      messages:
+      maxSteps: this.maxSteps,
+      messages: this.state.messages
     })) {
-      await
-      chatId,
+      await this.triggerRequest({
        requestType,
-
-        onToolCall,
-        onFinish,
+        metadata,
        headers,
-        body
-        messages: currentMessages
+        body
      });
    }
  }
};
 
-
-
-
-
-
-
-
-
-    streamProtocol,
-    fetch: fetch2,
-    prepareRequestBody
-  }) {
-    this.api = api;
-    this.credentials = credentials;
-    this.headers = headers;
-    this.body = body;
-    this.streamProtocol = streamProtocol;
-    this.fetch = fetch2;
-    this.prepareRequestBody = prepareRequestBody;
-  }
-  submitMessages({
-    chatId,
-    messages,
-    abortController,
-    body,
-    headers,
-    requestType
-  }) {
-    var _a17, _b;
-    return fetchUIMessageStream({
-      api: this.api,
-      headers: {
-        ...this.headers,
-        ...headers
-      },
-      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      })) != null ? _b : {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      },
-      streamProtocol: this.streamProtocol,
-      credentials: this.credentials,
-      abortController: () => abortController,
-      fetch: this.fetch,
-      requestType
-    });
-  }
-};
-
-// src/ui/convert-file-list-to-file-ui-parts.ts
-async function convertFileListToFileUIParts(files) {
-  if (files == null) {
-    return [];
-  }
-  if (!globalThis.FileList || !(files instanceof globalThis.FileList)) {
-    throw new Error("FileList is not supported in the current environment");
-  }
-  return Promise.all(
-    Array.from(files).map(async (file) => {
-      const { name: name17, type } = file;
-      const dataUrl = await new Promise((resolve, reject) => {
-        const reader = new FileReader();
-        reader.onload = (readerEvent) => {
-          var _a17;
-          resolve((_a17 = readerEvent.target) == null ? void 0 : _a17.result);
-        };
-        reader.onerror = (error) => reject(error);
-        reader.readAsDataURL(file);
-      });
-      return {
-        type: "file",
-        mediaType: type,
-        filename: name17,
-        url: dataUrl
-      };
-    })
+function updateToolCallResult({
+  messages,
+  toolCallId,
+  toolResult: result
+}) {
+  const lastMessage = messages[messages.length - 1];
+  const invocationPart = lastMessage.parts.find(
+    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
   );
+  if (invocationPart == null) {
+    return;
+  }
+  invocationPart.toolInvocation = {
+    ...invocationPart.toolInvocation,
+    state: "result",
+    result
+  };
 }
 
 // src/ui/convert-to-model-messages.ts
 function convertToModelMessages(messages, options) {
-  var _a17
+  var _a17;
   const tools = (_a17 = options == null ? void 0 : options.tools) != null ? _a17 : {};
   const modelMessages = [];
   for (const message of messages) {
@@ -2028,6 +1852,9 @@ function convertToModelMessages(messages, options) {
       case "assistant": {
         if (message.parts != null) {
           let processBlock2 = function() {
+            if (block.length === 0) {
+              return;
+            }
             const content = [];
             for (const part of block) {
               switch (part.type) {
@@ -2102,33 +1929,20 @@ function convertToModelMessages(messages, options) {
             });
           }
           block = [];
-          blockHasToolInvocations = false;
-          currentStep++;
         };
         var processBlock = processBlock2;
-        let currentStep = 0;
-        let blockHasToolInvocations = false;
        let block = [];
        for (const part of message.parts) {
          switch (part.type) {
-            case "text":
-
-              processBlock2();
-              }
-              block.push(part);
-              break;
-            }
+            case "text":
+            case "reasoning":
             case "file":
-            case "
+            case "tool-invocation": {
               block.push(part);
               break;
             }
-            case "
-
-              processBlock2();
-              }
-              block.push(part);
-              blockHasToolInvocations = true;
+            case "step-start": {
+              processBlock2();
               break;
             }
           }
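An illustration of the simplified block splitting above: assistant parts accumulate until a `step-start` part flushes the current block, and the new length guard makes a leading `step-start` a no-op. The input below is hypothetical (and loosely typed for this sketch):

```ts
import { convertToModelMessages } from "ai";

const modelMessages = convertToModelMessages([
  {
    id: "m1",
    role: "assistant",
    parts: [
      { type: "step-start" },                           // flushes nothing (empty block)
      { type: "text", text: "Checking the weather..." },
      { type: "step-start" },                           // flushes the first text block
      { type: "text", text: "It is sunny." },
    ],
  },
] as any); // cast: full UIMessage part typings elided in this sketch
```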
@@ -2151,47 +1965,166 @@ function convertToModelMessages(messages, options) {
   }
 }
 var convertToCoreMessages = convertToModelMessages;
 
-// src/ui/
-
-
-}
-
+// src/ui/transform-text-to-ui-message-stream.ts
+function transformTextToUiMessageStream({
+  stream
+}) {
+  return stream.pipeThrough(
+    new TransformStream({
+      start(controller) {
+        controller.enqueue({ type: "start" });
+        controller.enqueue({ type: "start-step" });
+      },
+      async transform(part, controller) {
+        controller.enqueue({ type: "text", text: part });
+      },
+      async flush(controller) {
+        controller.enqueue({ type: "finish-step" });
+        controller.enqueue({ type: "finish" });
+      }
+    })
+  );
+}
+
+// src/ui/text-stream-chat-transport.ts
+var getOriginalFetch3 = () => fetch;
+async function fetchTextStream({
   api,
-
-  streamProtocol = "ui-message",
+  body,
   credentials,
   headers,
-
-
-
-
-
-
-
+  abortSignal,
+  fetch: fetch2 = getOriginalFetch3(),
+  requestType = "generate"
+}) {
+  var _a17;
+  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: abortSignal,
+    credentials
+  }) : await fetch2(api, {
+    method: "POST",
+    body: JSON.stringify(body),
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: abortSignal,
+    credentials
+  });
+  if (!response.ok) {
+    throw new Error(
+      (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+    );
+  }
+  if (!response.body) {
+    throw new Error("The response body is empty.");
+  }
+  return transformTextToUiMessageStream({
+    stream: response.body.pipeThrough(new TextDecoderStream())
+  });
+}
+var TextStreamChatTransport = class {
+  constructor({
+    api,
+    credentials,
+    headers,
+    body,
+    fetch: fetch2,
+    prepareRequest
+  }) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequest = prepareRequest;
+  }
+  submitMessages({
+    chatId,
+    messages,
+    abortSignal,
+    metadata,
+    headers,
+    body,
+    requestType
+  }) {
+    var _a17, _b;
+    const preparedRequest = (_a17 = this.prepareRequest) == null ? void 0 : _a17.call(this, {
+      id: chatId,
+      messages,
+      body: { ...this.body, ...body },
+      headers: { ...this.headers, ...headers },
+      credentials: this.credentials,
+      requestMetadata: metadata
+    });
+    return fetchTextStream({
+      api: this.api,
+      body: (preparedRequest == null ? void 0 : preparedRequest.body) !== void 0 ? preparedRequest.body : { ...this.body, ...body },
+      headers: (preparedRequest == null ? void 0 : preparedRequest.headers) !== void 0 ? preparedRequest.headers : { ...this.headers, ...headers },
+      credentials: (_b = preparedRequest == null ? void 0 : preparedRequest.credentials) != null ? _b : this.credentials,
+      abortSignal,
+      fetch: this.fetch,
+      requestType
+    });
+  }
+};
+
+// src/ui-message-stream/handle-ui-message-stream-finish.ts
+function handleUIMessageStreamFinish({
+  newMessageId,
+  originalMessages = [],
+  onFinish,
+  stream
 }) {
-
-
-
-
-
-
-
-
-
-    }),
-    generateId: generateId3,
-    messageMetadataSchema,
-    dataPartSchemas,
-    maxSteps: maxSteps2,
-    chats
+  if (onFinish == null) {
+    return stream;
+  }
+  const lastMessage = originalMessages[originalMessages.length - 1];
+  const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
+  const messageId = isContinuation ? lastMessage.id : newMessageId;
+  const state = createStreamingUIMessageState({
+    lastMessage: structuredClone(lastMessage),
+    newMessageId: messageId
  });
+  const runUpdateMessageJob = async (job) => {
+    await job({ state, write: () => {
+    } });
+  };
+  return processUIMessageStream({
+    stream,
+    runUpdateMessageJob
+  }).pipeThrough(
+    new TransformStream({
+      transform(chunk, controller) {
+        controller.enqueue(chunk);
+      },
+      flush() {
+        const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
+        onFinish({
+          isContinuation: isContinuation2,
+          responseMessage: state.message,
+          messages: [
+            ...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
+            state.message
+          ]
+        });
+      }
+    })
+  );
 }
 
 // src/ui-message-stream/create-ui-message-stream.ts
 function createUIMessageStream({
   execute,
-  onError = () => "An error occurred."
+  onError = () => "An error occurred.",
   // mask error messages for safety by default
+  originalMessages,
+  onFinish
 }) {
   let controller;
   const ongoingStreamPromises = [];
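A brief sketch of the new `TextStreamChatTransport` above: it consumes a plain-text response body and rewrites it into UI message stream parts via `transformTextToUiMessageStream` (`start`/`start-step`, one text part per decoded chunk, then `finish-step`/`finish`). The export from the package root and the endpoint are assumptions:

```ts
import { TextStreamChatTransport } from "ai";

const transport = new TextStreamChatTransport({
  api: "/api/chat-text", // assumed endpoint returning raw streamed text
});
// Used in place of DefaultChatTransport wherever a transport is accepted,
// e.g. new AbstractChat({ state, transport }).
```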
@@ -2208,25 +2141,27 @@ function createUIMessageStream({
   }
   try {
     const result = execute({
-
-
-
-
-
-      (
-
-
-
-
-
-
-
-
-
-
-
-
-
+      writer: {
+        write(part) {
+          safeEnqueue(part);
+        },
+        merge(streamArg) {
+          ongoingStreamPromises.push(
+            (async () => {
+              const reader = streamArg.getReader();
+              while (true) {
+                const { done, value } = await reader.read();
+                if (done)
+                  break;
+                safeEnqueue(value);
+              }
+            })().catch((error) => {
+              safeEnqueue({ type: "error", errorText: onError(error) });
+            })
+          );
+        },
+        onError
+      }
     });
     if (result) {
       ongoingStreamPromises.push(
@@ -2250,7 +2185,12 @@ function createUIMessageStream({
     } catch (error) {
     }
   });
-  return
+  return handleUIMessageStreamFinish({
+    stream,
+    newMessageId: "",
+    originalMessages,
+    onFinish
+  });
 }
 
 // src/ui-message-stream/ui-message-stream-headers.ts
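A sketch of the reshaped `createUIMessageStream`: `execute` now receives a `writer` object (`write`/`merge`/`onError`), and completed messages can be observed via `originalMessages`/`onFinish`, which the code above wires through `handleUIMessageStreamFinish`. Part shapes follow the stream-part schema earlier in this file; the export from the package root is assumed:

```ts
import { createUIMessageStream } from "ai";

const stream = createUIMessageStream({
  execute({ writer }) {
    writer.write({ type: "text", text: "Hello" });
    // writer.merge(otherUIMessageStream) forwards parts from a sub-stream.
  },
  originalMessages: [], // prior UI messages, used for continuation detection
  onFinish({ responseMessage, isContinuation, messages }) {
    console.log(responseMessage.id, isContinuation, messages.length);
  },
});
```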
@@ -2315,6 +2255,32 @@ function pipeUIMessageStreamToResponse({
   });
 }
 
+// src/util/cosine-similarity.ts
+function cosineSimilarity(vector1, vector2) {
+  if (vector1.length !== vector2.length) {
+    throw new InvalidArgumentError({
+      parameter: "vector1,vector2",
+      value: { vector1Length: vector1.length, vector2Length: vector2.length },
+      message: `Vectors must have the same length`
+    });
+  }
+  const n = vector1.length;
+  if (n === 0) {
+    return 0;
+  }
+  let magnitudeSquared1 = 0;
+  let magnitudeSquared2 = 0;
+  let dotProduct = 0;
+  for (let i = 0; i < n; i++) {
+    const value1 = vector1[i];
+    const value2 = vector2[i];
+    magnitudeSquared1 += value1 * value1;
+    magnitudeSquared2 += value2 * value2;
+    dotProduct += value1 * value2;
+  }
+  return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
+}
+
 // src/util/data-url.ts
 function getTextFromDataUrl(dataUrl) {
   const [header, base64Content] = dataUrl.split(",");
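A worked example for the relocated `cosineSimilarity` above (the function itself is unchanged, only moved):

```ts
//   dot([1, 2], [2, 3]) = 1*2 + 2*3 = 8
//   |[1, 2]| = sqrt(5), |[2, 3]| = sqrt(13)
//   similarity = 8 / sqrt(5 * 13) = 8 / sqrt(65) ≈ 0.9923
import { cosineSimilarity } from "ai";

console.log(cosineSimilarity([1, 2], [2, 3])); // ≈ 0.9923
console.log(cosineSimilarity([1, 0], [0, 1])); // 0 (orthogonal vectors)
console.log(cosineSimilarity([], []));         // 0 (zero-length guard above)
// cosineSimilarity([1], [1, 2]) throws InvalidArgumentError (length mismatch)
```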
@@ -2364,32 +2330,6 @@ function isDeepEqualData(obj1, obj2) {
   return true;
 }
 
-// src/util/cosine-similarity.ts
-function cosineSimilarity(vector1, vector2) {
-  if (vector1.length !== vector2.length) {
-    throw new InvalidArgumentError({
-      parameter: "vector1,vector2",
-      value: { vector1Length: vector1.length, vector2Length: vector2.length },
-      message: `Vectors must have the same length`
-    });
-  }
-  const n = vector1.length;
-  if (n === 0) {
-    return 0;
-  }
-  let magnitudeSquared1 = 0;
-  let magnitudeSquared2 = 0;
-  let dotProduct = 0;
-  for (let i = 0; i < n; i++) {
-    const value1 = vector1[i];
-    const value2 = vector2[i];
-    magnitudeSquared1 += value1 * value1;
-    magnitudeSquared2 += value2 * value2;
-    dotProduct += value1 * value2;
-  }
-  return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
-}
-
 // src/util/simulate-readable-stream.ts
 import { delay as delayFunction } from "@ai-sdk/provider-utils";
 function simulateReadableStream({
@@ -3394,6 +3334,15 @@ function convertToLanguageModelV2DataContent(content) {
|
|
3394
3334
|
}
|
3395
3335
|
return { data: content, mediaType: void 0 };
|
3396
3336
|
}
|
3337
|
+
function convertDataContentToBase64String(content) {
|
3338
|
+
if (typeof content === "string") {
|
3339
|
+
return content;
|
3340
|
+
}
|
3341
|
+
if (content instanceof ArrayBuffer) {
|
3342
|
+
return convertUint8ArrayToBase642(new Uint8Array(content));
|
3343
|
+
}
|
3344
|
+
return convertUint8ArrayToBase642(content);
|
3345
|
+
}
|
3397
3346
|
function convertDataContentToUint8Array(content) {
|
3398
3347
|
if (content instanceof Uint8Array) {
|
3399
3348
|
return content;
|
@@ -3720,6 +3669,19 @@ function prepareCallSettings({
|
|
3720
3669
|
};
|
3721
3670
|
}
|
3722
3671
|
|
3672
|
+
// core/prompt/resolve-language-model.ts
|
3673
|
+
import { gateway } from "@ai-sdk/gateway";
|
3674
|
+
var GLOBAL_DEFAULT_PROVIDER = Symbol(
|
3675
|
+
"vercel.ai.global.defaultProvider"
|
3676
|
+
);
|
3677
|
+
function resolveLanguageModel(model) {
|
3678
|
+
if (typeof model !== "string") {
|
3679
|
+
return model;
|
3680
|
+
}
|
3681
|
+
const globalProvider = globalThis[GLOBAL_DEFAULT_PROVIDER];
|
3682
|
+
return (globalProvider != null ? globalProvider : gateway).languageModel(model);
|
3683
|
+
}
|
3684
|
+
|
3723
3685
|
// core/prompt/standardize-prompt.ts
|
3724
3686
|
import { InvalidPromptError as InvalidPromptError2 } from "@ai-sdk/provider";
|
3725
3687
|
import { safeValidateTypes } from "@ai-sdk/provider-utils";
|
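With `resolveLanguageModel`, a plain string model id is resolved through a global default provider (looked up via the symbol above) and otherwise falls back to the Vercel AI Gateway. A sketch of what this enables; the model id is an assumption, and running it requires Gateway credentials or a registered global default provider:

```ts
import { generateText } from 'ai';

// Passing a string instead of a provider model instance now works:
const { text } = await generateText({
  model: 'openai/gpt-4o', // assumed id; resolved via global provider or gateway
  prompt: 'Say hello.',
});
```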
@@ -3911,6 +3873,38 @@ async function standardizePrompt(prompt) {
 };
 }

+// core/prompt/wrap-gateway-error.ts
+import {
+GatewayAuthenticationError,
+GatewayModelNotFoundError
+} from "@ai-sdk/gateway";
+import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
+function wrapGatewayError(error) {
+if (GatewayAuthenticationError.isInstance(error) || GatewayModelNotFoundError.isInstance(error)) {
+return new AISDKError18({
+name: "GatewayError",
+message: "Vercel AI Gateway access failed. If you want to use AI SDK providers directly, use the providers, e.g. @ai-sdk/openai, or register a different global default provider.",
+cause: error
+});
+}
+return error;
+}
+
+// core/telemetry/stringify-for-telemetry.ts
+function stringifyForTelemetry(prompt) {
+return JSON.stringify(
+prompt.map((message) => ({
+...message,
+content: typeof message.content === "string" ? message.content : message.content.map(
+(part) => part.type === "file" ? {
+...part,
+data: part.data instanceof Uint8Array ? convertDataContentToBase64String(part.data) : part.data
+} : part
+)
+}))
+);
+}
+
 // core/generate-object/output-strategy.ts
 import {
 isJSONArray,
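Telemetry span attributes now serialize prompts through `stringifyForTelemetry`, which base64-encodes binary file parts instead of JSON-dumping raw `Uint8Array`s. A behavior sketch (this is an internal helper, not a public export; the message shape is inferred from the diff):

```ts
// A Uint8Array file part becomes a base64 string, keeping the recorded
// span attribute compact and JSON-safe.
const prompt = [
  {
    role: 'user' as const,
    content: [
      { type: 'file' as const, mediaType: 'image/png', data: new Uint8Array([137, 80, 78, 71]) },
    ],
  },
];
// stringifyForTelemetry(prompt) would yield:
// '[{"role":"user","content":[{"type":"file","mediaType":"image/png","data":"iVBORw=="}]}]'
```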
@@ -4316,7 +4310,7 @@ function validateObjectGenerationInput({
 var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
 async function generateObject(options) {
 const {
-model,
+model: modelArg,
 output = "object",
 system,
 prompt,
@@ -4333,6 +4327,7 @@ async function generateObject(options) {
 } = {},
 ...settings
 } = options;
+const model = resolveLanguageModel(modelArg);
 const enumValues = "enum" in options ? options.enum : void 0;
 const {
 schema: inputSchema,
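Because `generateObject` now routes its `model` option through `resolveLanguageModel`, a string id works here too. A sketch (model id assumed):

```ts
import { generateObject } from 'ai';
import { z } from 'zod';

const { object } = await generateObject({
  model: 'openai/gpt-4o', // assumed id; a provider model instance also works
  schema: z.object({ city: z.string() }),
  prompt: 'Name a city in France.',
});
```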
@@ -4360,208 +4355,212 @@ async function generateObject(options) {
 settings: { ...callSettings, maxRetries }
 });
 const tracer = getTracer(telemetry);
-…
-...baseTelemetryAttributes,
-// specific settings that only make sense on the outer level:
-"ai.prompt": {
-input: () => JSON.stringify({ system, prompt, messages })
-},
-"ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
-"ai.schema.name": schemaName,
-"ai.schema.description": schemaDescription,
-"ai.settings.output": outputStrategy.type
-}
-}),
-tracer,
-fn: async (span) => {
-var _a17;
-let result;
-let finishReason;
-let usage;
-let warnings;
-let response;
-let request;
-let resultProviderMetadata;
-const standardizedPrompt = await standardizePrompt({
-system,
-prompt,
-messages
-});
-const promptMessages = await convertToLanguageModelPrompt({
-prompt: standardizedPrompt,
-supportedUrls: await model.supportedUrls
-});
-const generateResult = await retry(
-() => recordSpan({
-name: "ai.generateObject.doGenerate",
-attributes: selectTelemetryAttributes({
-telemetry,
-attributes: {
-...assembleOperationName({
-operationId: "ai.generateObject.doGenerate",
-telemetry
-}),
-...baseTelemetryAttributes,
-"ai.prompt.messages": {
-input: () => JSON.stringify(promptMessages)
-},
-// standardized gen-ai llm span attributes:
-"gen_ai.system": model.provider,
-"gen_ai.request.model": model.modelId,
-"gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
-"gen_ai.request.max_tokens": callSettings.maxOutputTokens,
-"gen_ai.request.presence_penalty": callSettings.presencePenalty,
-"gen_ai.request.temperature": callSettings.temperature,
-"gen_ai.request.top_k": callSettings.topK,
-"gen_ai.request.top_p": callSettings.topP
-}
+try {
+return await recordSpan({
+name: "ai.generateObject",
+attributes: selectTelemetryAttributes({
+telemetry,
+attributes: {
+...assembleOperationName({
+operationId: "ai.generateObject",
+telemetry
 }),
-…
+...baseTelemetryAttributes,
+// specific settings that only make sense on the outer level:
+"ai.prompt": {
+input: () => JSON.stringify({ system, prompt, messages })
+},
+"ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+"ai.schema.name": schemaName,
+"ai.schema.description": schemaDescription,
+"ai.settings.output": outputStrategy.type
+}
+}),
+tracer,
+fn: async (span) => {
+var _a17;
+let result;
+let finishReason;
+let usage;
+let warnings;
+let response;
+let request;
+let resultProviderMetadata;
+const standardizedPrompt = await standardizePrompt({
+system,
+prompt,
+messages
+});
+const promptMessages = await convertToLanguageModelPrompt({
+prompt: standardizedPrompt,
+supportedUrls: await model.supportedUrls
+});
+const generateResult = await retry(
+() => recordSpan({
+name: "ai.generateObject.doGenerate",
+attributes: selectTelemetryAttributes({
+telemetry,
+attributes: {
+...assembleOperationName({
+operationId: "ai.generateObject.doGenerate",
+telemetry
+}),
+...baseTelemetryAttributes,
+"ai.prompt.messages": {
+input: () => stringifyForTelemetry(promptMessages)
+},
+// standardized gen-ai llm span attributes:
+"gen_ai.system": model.provider,
+"gen_ai.request.model": model.modelId,
+"gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+"gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+"gen_ai.request.presence_penalty": callSettings.presencePenalty,
+"gen_ai.request.temperature": callSettings.temperature,
+"gen_ai.request.top_k": callSettings.topK,
+"gen_ai.request.top_p": callSettings.topP
+}
+}),
+tracer,
+fn: async (span2) => {
+var _a18, _b, _c, _d, _e, _f, _g, _h;
+const result2 = await model.doGenerate({
+responseFormat: {
+type: "json",
+schema: outputStrategy.jsonSchema,
+name: schemaName,
+description: schemaDescription
+},
+...prepareCallSettings(settings),
+prompt: promptMessages,
+providerOptions,
+abortSignal,
+headers
 });
+const responseData = {
+id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+headers: (_g = result2.response) == null ? void 0 : _g.headers,
+body: (_h = result2.response) == null ? void 0 : _h.body
+};
+const text2 = extractContentText(result2.content);
+if (text2 === void 0) {
+throw new NoObjectGeneratedError({
+message: "No object generated: the model did not return a response.",
+response: responseData,
+usage: result2.usage,
+finishReason: result2.finishReason
+});
+}
+span2.setAttributes(
+selectTelemetryAttributes({
+telemetry,
+attributes: {
+"ai.response.finishReason": result2.finishReason,
+"ai.response.object": { output: () => text2 },
+"ai.response.id": responseData.id,
+"ai.response.model": responseData.modelId,
+"ai.response.timestamp": responseData.timestamp.toISOString(),
+// TODO rename telemetry attributes to inputTokens and outputTokens
+"ai.usage.promptTokens": result2.usage.inputTokens,
+"ai.usage.completionTokens": result2.usage.outputTokens,
+// standardized gen-ai llm span attributes:
+"gen_ai.response.finish_reasons": [result2.finishReason],
+"gen_ai.response.id": responseData.id,
+"gen_ai.response.model": responseData.modelId,
+"gen_ai.usage.input_tokens": result2.usage.inputTokens,
+"gen_ai.usage.output_tokens": result2.usage.outputTokens
+}
+})
+);
+return { ...result2, objectText: text2, responseData };
 }
-…
-);
-return { ...result2, objectText: text2, responseData };
+})
+);
+result = generateResult.objectText;
+finishReason = generateResult.finishReason;
+usage = generateResult.usage;
+warnings = generateResult.warnings;
+resultProviderMetadata = generateResult.providerMetadata;
+request = (_a17 = generateResult.request) != null ? _a17 : {};
+response = generateResult.responseData;
+async function processResult(result2) {
+const parseResult = await safeParseJSON2({ text: result2 });
+if (!parseResult.success) {
+throw new NoObjectGeneratedError({
+message: "No object generated: could not parse the response.",
+cause: parseResult.error,
+text: result2,
+response,
+usage,
+finishReason
+});
 }
-…
-usage,
-finishReason
-});
-}
-const validationResult = await outputStrategy.validateFinalResult(
-parseResult.value,
-{
-text: result2,
-response,
-usage
+const validationResult = await outputStrategy.validateFinalResult(
+parseResult.value,
+{
+text: result2,
+response,
+usage
+}
+);
+if (!validationResult.success) {
+throw new NoObjectGeneratedError({
+message: "No object generated: response did not match schema.",
+cause: validationResult.error,
+text: result2,
+response,
+usage,
+finishReason
+});
 }
-…
-if (!validationResult.success) {
-throw new NoObjectGeneratedError({
-message: "No object generated: response did not match schema.",
-cause: validationResult.error,
-text: result2,
-response,
-usage,
-finishReason
-});
+return validationResult.value;
 }
-…
+let object2;
+try {
+object2 = await processResult(result);
+} catch (error) {
+if (repairText != null && NoObjectGeneratedError.isInstance(error) && (JSONParseError2.isInstance(error.cause) || TypeValidationError3.isInstance(error.cause))) {
+const repairedText = await repairText({
+text: result,
+error: error.cause
+});
+if (repairedText === null) {
+throw error;
+}
+object2 = await processResult(repairedText);
+} else {
 throw error;
 }
-object2 = await processResult(repairedText);
-} else {
-throw error;
 }
+span.setAttributes(
+selectTelemetryAttributes({
+telemetry,
+attributes: {
+"ai.response.finishReason": finishReason,
+"ai.response.object": {
+output: () => JSON.stringify(object2)
+},
+// TODO rename telemetry attributes to inputTokens and outputTokens
+"ai.usage.promptTokens": usage.inputTokens,
+"ai.usage.completionTokens": usage.outputTokens
+}
+})
+);
+return new DefaultGenerateObjectResult({
+object: object2,
+finishReason,
+usage,
+warnings,
+request,
+response,
+providerMetadata: resultProviderMetadata
+});
 }
-…
-"ai.response.finishReason": finishReason,
-"ai.response.object": {
-output: () => JSON.stringify(object2)
-},
-// TODO rename telemetry attributes to inputTokens and outputTokens
-"ai.usage.promptTokens": usage.inputTokens,
-"ai.usage.completionTokens": usage.outputTokens
-}
-})
-);
-return new DefaultGenerateObjectResult({
-object: object2,
-finishReason,
-usage,
-warnings,
-request,
-response,
-providerMetadata: resultProviderMetadata
-});
-}
-});
+});
+} catch (error) {
+throw wrapGatewayError(error);
+}
 }
 var DefaultGenerateObjectResult = class {
 constructor(options) {
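The parse/validate path above retries through a repair callback before giving up. Internally it is destructured as `repairText`; in the public API it is exposed with an `experimental_` prefix (an assumption based on the SDK's naming convention, not shown in this diff). A sketch:

```ts
import { generateObject } from 'ai';
import { z } from 'zod';

const { object } = await generateObject({
  model: 'openai/gpt-4o', // assumed model id
  schema: z.object({ answer: z.string() }),
  prompt: 'Answer as JSON.',
  // Return repaired JSON text, or null to rethrow NoObjectGeneratedError:
  experimental_repairText: async ({ text, error }) => {
    // e.g. strip markdown fences around the JSON payload
    return text.replace(/^```json\n?|```$/g, '');
  },
});
```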
@@ -4585,7 +4584,9 @@ var DefaultGenerateObjectResult = class {
 };

 // core/generate-object/stream-object.ts
-import {
+import {
+createIdGenerator as createIdGenerator2
+} from "@ai-sdk/provider-utils";

 // src/util/create-resolvable-promise.ts
 function createResolvablePromise() {
@@ -4691,11 +4692,11 @@ var DelayedPromise = class {
 this._resolve = void 0;
 this._reject = void 0;
 }
-get
-if (this.
-return this.
+get promise() {
+if (this._promise) {
+return this._promise;
 }
-this.
+this._promise = new Promise((resolve, reject) => {
 if (this.status.type === "resolved") {
 resolve(this.status.value);
 } else if (this.status.type === "rejected") {
@@ -4704,19 +4705,19 @@ var DelayedPromise = class {
 this._resolve = resolve;
 this._reject = reject;
 });
-return this.
+return this._promise;
 }
 resolve(value) {
 var _a17;
 this.status = { type: "resolved", value };
-if (this.
+if (this._promise) {
 (_a17 = this._resolve) == null ? void 0 : _a17.call(this, value);
 }
 }
 reject(error) {
 var _a17;
 this.status = { type: "rejected", error };
-if (this.
+if (this._promise) {
 (_a17 = this._reject) == null ? void 0 : _a17.call(this, error);
 }
 }
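`DelayedPromise` now stores its backing promise in a private `_promise` field and only materializes it on first access, so a result nobody awaits never produces an unhandled-rejection warning. A generic sketch of the pattern (not the SDK's exact class):

```ts
// Lazy-promise pattern: the Promise is created on first .promise access.
class Lazy<T> {
  private _promise?: Promise<T>;
  private state: { type: 'pending' } | { type: 'resolved'; value: T } = { type: 'pending' };
  private _resolve?: (value: T) => void;

  get promise(): Promise<T> {
    return (this._promise ??= new Promise((resolve) => {
      if (this.state.type === 'resolved') resolve(this.state.value);
      else this._resolve = resolve;
    }));
  }

  resolve(value: T) {
    this.state = { type: 'resolved', value };
    this._resolve?.(value); // no-op if nobody requested the promise yet
  }
}
```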
@@ -4742,7 +4743,9 @@ function streamObject(options) {
 headers,
 experimental_telemetry: telemetry,
 providerOptions,
-onError
+onError = ({ error }) => {
+console.error(error);
+},
 onFinish,
 _internal: {
 generateId: generateId3 = originalGenerateId2,
@@ -4792,7 +4795,7 @@ function streamObject(options) {
 }
 var DefaultStreamObjectResult = class {
 constructor({
-model,
+model: modelArg,
 headers,
 telemetry,
 settings,
@@ -4811,12 +4814,13 @@ var DefaultStreamObjectResult = class {
 currentDate,
 now: now2
 }) {
-this.
-this.
-this.
-this.
-this.
-this.
+this._object = new DelayedPromise();
+this._usage = new DelayedPromise();
+this._providerMetadata = new DelayedPromise();
+this._warnings = new DelayedPromise();
+this._request = new DelayedPromise();
+this._response = new DelayedPromise();
+const model = resolveLanguageModel(modelArg);
 const { maxRetries, retry } = prepareRetries({
 maxRetries: maxRetriesArg
 });
@@ -4834,7 +4838,7 @@ var DefaultStreamObjectResult = class {
 transform(chunk, controller) {
 controller.enqueue(chunk);
 if (chunk.type === "error") {
-onError
+onError({ error: wrapGatewayError(chunk.error) });
 }
 }
 });
@@ -4913,7 +4917,7 @@ var DefaultStreamObjectResult = class {
 }),
 ...baseTelemetryAttributes,
 "ai.prompt.messages": {
-input: () =>
+input: () => stringifyForTelemetry(callOptions.prompt)
 },
 // standardized gen-ai llm span attributes:
 "gen_ai.system": model.provider,
@@ -4935,7 +4939,7 @@ var DefaultStreamObjectResult = class {
 })
 })
 );
-self.
+self._request.resolve(request != null ? request : {});
 let warnings;
 let usage = {
 inputTokens: void 0,
@@ -5028,9 +5032,9 @@ var DefaultStreamObjectResult = class {
 usage,
 response: fullResponse
 });
-self.
-self.
-self.
+self._usage.resolve(usage);
+self._providerMetadata.resolve(providerMetadata);
+self._response.resolve({
 ...fullResponse,
 headers: response == null ? void 0 : response.headers
 });
@@ -5044,7 +5048,7 @@ var DefaultStreamObjectResult = class {
 );
 if (validationResult.success) {
 object2 = validationResult.value;
-self.
+self._object.resolve(object2);
 } else {
 error = new NoObjectGeneratedError({
 message: "No object generated: response did not match schema.",
@@ -5054,7 +5058,7 @@ var DefaultStreamObjectResult = class {
 usage,
 finishReason
 });
-self.
+self._object.reject(error);
 }
 break;
 }
@@ -5149,22 +5153,22 @@ var DefaultStreamObjectResult = class {
 this.outputStrategy = outputStrategy;
 }
 get object() {
-return this.
+return this._object.promise;
 }
 get usage() {
-return this.
+return this._usage.promise;
 }
 get providerMetadata() {
-return this.
+return this._providerMetadata.promise;
 }
 get warnings() {
-return this.
+return this._warnings.promise;
 }
 get request() {
-return this.
+return this._request.promise;
 }
 get response() {
-return this.
+return this._response.promise;
 }
 get partialObjectStream() {
 return createAsyncIterableStream(
@@ -5234,8 +5238,8 @@ var DefaultStreamObjectResult = class {
 };

 // src/error/no-speech-generated-error.ts
-import { AISDKError as
-var NoSpeechGeneratedError = class extends
+import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
+var NoSpeechGeneratedError = class extends AISDKError19 {
 constructor(options) {
 super({
 name: "AI_NoSpeechGeneratedError",
@@ -5326,6 +5330,11 @@ var DefaultSpeechResult = class {
 // core/generate-text/generate-text.ts
 import { createIdGenerator as createIdGenerator3 } from "@ai-sdk/provider-utils";

+// src/util/as-array.ts
+function asArray(value) {
+return value === void 0 ? [] : Array.isArray(value) ? value : [value];
+}
+
 // core/prompt/prepare-tools-and-tool-choice.ts
 import { asSchema as asSchema2 } from "@ai-sdk/provider-utils";

@@ -5547,8 +5556,8 @@ var DefaultStepResult = class {
 };

 // core/generate-text/stop-condition.ts
-function
-return ({ steps }) => steps.length
+function stepCountIs(stepCount) {
+return ({ steps }) => steps.length === stepCount;
 }
 function hasToolCall(toolName) {
 return ({ steps }) => {
@@ -5558,6 +5567,12 @@ function hasToolCall(toolName) {
 )) != null ? _c : false;
 };
 }
+async function isStopConditionMet({
+stopConditions,
+steps
+}) {
+return (await Promise.all(stopConditions.map((condition) => condition({ steps })))).some((result) => result);
+}

 // core/generate-text/to-response-messages.ts
 function toResponseMessages({
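Stop conditions replace the old `maxSteps` mechanism: the multi-step loop continues until any condition in `stopWhen` matches. A usage sketch with the `stepCountIs`/`hasToolCall` helpers shown above (the tool set and model id are assumptions):

```ts
import { generateText, stepCountIs, hasToolCall } from 'ai';

const result = await generateText({
  model: 'openai/gpt-4o', // assumed id
  tools: { /* tool definitions */ },
  // stop after 5 steps, or earlier if the finalAnswer tool was called:
  stopWhen: [stepCountIs(5), hasToolCall('finalAnswer')],
  prompt: 'Research the topic and call finalAnswer when done.',
});
```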
@@ -5623,7 +5638,7 @@ var originalGenerateId3 = createIdGenerator3({
 size: 24
 });
 async function generateText({
-model,
+model: modelArg,
 tools,
 toolChoice,
 system,
@@ -5632,12 +5647,14 @@ async function generateText({
 maxRetries: maxRetriesArg,
 abortSignal,
 headers,
-…
+stopWhen = stepCountIs(1),
 experimental_output: output,
 experimental_telemetry: telemetry,
 providerOptions,
-experimental_activeTools
-…
+experimental_activeTools,
+activeTools = experimental_activeTools,
+experimental_prepareStep,
+prepareStep = experimental_prepareStep,
 experimental_repairToolCall: repairToolCall,
 _internal: {
 generateId: generateId3 = originalGenerateId3,
@@ -5646,6 +5663,8 @@ async function generateText({
 onStepFinish,
 ...settings
 }) {
+const model = resolveLanguageModel(modelArg);
+const stopConditions = asArray(stopWhen);
 const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
 const callSettings = prepareCallSettings(settings);
 const baseTelemetryAttributes = getBaseTelemetryAttributes({
@@ -5660,237 +5679,243 @@ async function generateText({
 messages
 });
 const tracer = getTracer(telemetry);
-…
-}
-}),
-tracer,
-fn: async (span) => {
-var _a17, _b, _c, _d;
-const callSettings2 = prepareCallSettings(settings);
-let currentModelResponse;
-let currentToolCalls = [];
-let currentToolResults = [];
-const responseMessages = [];
-const steps = [];
-do {
-const stepInputMessages = [
-...initialPrompt.messages,
-...responseMessages
-];
-const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
-model,
-steps,
-stepNumber: steps.length
-}));
-const promptMessages = await convertToLanguageModelPrompt({
-prompt: {
-system: initialPrompt.system,
-messages: stepInputMessages
-},
-supportedUrls: await model.supportedUrls
-});
-const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
-const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
-tools,
-toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
-activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
-});
-currentModelResponse = await retry(
-() => {
-var _a18;
-return recordSpan({
-name: "ai.generateText.doGenerate",
-attributes: selectTelemetryAttributes({
-telemetry,
-attributes: {
-...assembleOperationName({
-operationId: "ai.generateText.doGenerate",
-telemetry
-}),
-...baseTelemetryAttributes,
-// model:
-"ai.model.provider": stepModel.provider,
-"ai.model.id": stepModel.modelId,
-// prompt:
-"ai.prompt.messages": {
-input: () => JSON.stringify(promptMessages)
-},
-"ai.prompt.tools": {
-// convert the language model level tools:
-input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
-},
-"ai.prompt.toolChoice": {
-input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
-},
-// standardized gen-ai llm span attributes:
-"gen_ai.system": stepModel.provider,
-"gen_ai.request.model": stepModel.modelId,
-"gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-"gen_ai.request.max_tokens": settings.maxOutputTokens,
-"gen_ai.request.presence_penalty": settings.presencePenalty,
-"gen_ai.request.stop_sequences": settings.stopSequences,
-"gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
-"gen_ai.request.top_k": settings.topK,
-"gen_ai.request.top_p": settings.topP
-}
-}),
-tracer,
-fn: async (span2) => {
-var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
-const result = await stepModel.doGenerate({
-...callSettings2,
-tools: stepTools,
-toolChoice: stepToolChoice,
-responseFormat: output == null ? void 0 : output.responseFormat,
-prompt: promptMessages,
-providerOptions,
-abortSignal,
-headers
-});
-const responseData = {
-id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
-timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : stepModel.modelId,
-headers: (_g = result.response) == null ? void 0 : _g.headers,
-body: (_h = result.response) == null ? void 0 : _h.body
-};
-span2.setAttributes(
-selectTelemetryAttributes({
-telemetry,
-attributes: {
-"ai.response.finishReason": result.finishReason,
-"ai.response.text": {
-output: () => extractContentText(result.content)
-},
-"ai.response.toolCalls": {
-output: () => {
-const toolCalls = asToolCalls(result.content);
-return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-}
-},
-"ai.response.id": responseData.id,
-"ai.response.model": responseData.modelId,
-"ai.response.timestamp": responseData.timestamp.toISOString(),
-// TODO rename telemetry attributes to inputTokens and outputTokens
-"ai.usage.promptTokens": result.usage.inputTokens,
-"ai.usage.completionTokens": result.usage.outputTokens,
-// standardized gen-ai llm span attributes:
-"gen_ai.response.finish_reasons": [result.finishReason],
-"gen_ai.response.id": responseData.id,
-"gen_ai.response.model": responseData.modelId,
-"gen_ai.usage.input_tokens": result.usage.inputTokens,
-"gen_ai.usage.output_tokens": result.usage.outputTokens
-}
-})
-);
-return { ...result, response: responseData };
-}
-});
+try {
+return await recordSpan({
+name: "ai.generateText",
+attributes: selectTelemetryAttributes({
+telemetry,
+attributes: {
+...assembleOperationName({
+operationId: "ai.generateText",
+telemetry
+}),
+...baseTelemetryAttributes,
+// model:
+"ai.model.provider": model.provider,
+"ai.model.id": model.modelId,
+// specific settings that only make sense on the outer level:
+"ai.prompt": {
+input: () => JSON.stringify({ system, prompt, messages })
 }
-…
+}
+}),
+tracer,
+fn: async (span) => {
+var _a17, _b, _c, _d, _e;
+const callSettings2 = prepareCallSettings(settings);
+let currentModelResponse;
+let currentToolCalls = [];
+let currentToolResults = [];
+const responseMessages = [];
+const steps = [];
+do {
+const stepInputMessages = [
+...initialPrompt.messages,
+...responseMessages
+];
+const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+model,
+steps,
+stepNumber: steps.length
+}));
+const promptMessages = await convertToLanguageModelPrompt({
+prompt: {
+system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
 messages: stepInputMessages
+},
+supportedUrls: await model.supportedUrls
+});
+const stepModel = resolveLanguageModel(
+(_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
+);
+const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+tools,
+toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
+activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
+});
+currentModelResponse = await retry(
+() => {
+var _a18;
+return recordSpan({
+name: "ai.generateText.doGenerate",
+attributes: selectTelemetryAttributes({
+telemetry,
+attributes: {
+...assembleOperationName({
+operationId: "ai.generateText.doGenerate",
+telemetry
+}),
+...baseTelemetryAttributes,
+// model:
+"ai.model.provider": stepModel.provider,
+"ai.model.id": stepModel.modelId,
+// prompt:
+"ai.prompt.messages": {
+input: () => stringifyForTelemetry(promptMessages)
+},
+"ai.prompt.tools": {
+// convert the language model level tools:
+input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
+},
+"ai.prompt.toolChoice": {
+input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
+},
+// standardized gen-ai llm span attributes:
+"gen_ai.system": stepModel.provider,
+"gen_ai.request.model": stepModel.modelId,
+"gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+"gen_ai.request.max_tokens": settings.maxOutputTokens,
+"gen_ai.request.presence_penalty": settings.presencePenalty,
+"gen_ai.request.stop_sequences": settings.stopSequences,
+"gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
+"gen_ai.request.top_k": settings.topK,
+"gen_ai.request.top_p": settings.topP
+}
+}),
+tracer,
+fn: async (span2) => {
+var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
+const result = await stepModel.doGenerate({
+...callSettings2,
+tools: stepTools,
+toolChoice: stepToolChoice,
+responseFormat: output == null ? void 0 : output.responseFormat,
+prompt: promptMessages,
+providerOptions,
+abortSignal,
+headers
+});
+const responseData = {
+id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
+timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
+headers: (_g = result.response) == null ? void 0 : _g.headers,
+body: (_h = result.response) == null ? void 0 : _h.body
+};
+span2.setAttributes(
+selectTelemetryAttributes({
+telemetry,
+attributes: {
+"ai.response.finishReason": result.finishReason,
+"ai.response.text": {
+output: () => extractContentText(result.content)
+},
+"ai.response.toolCalls": {
+output: () => {
+const toolCalls = asToolCalls(result.content);
+return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+}
+},
+"ai.response.id": responseData.id,
+"ai.response.model": responseData.modelId,
+"ai.response.timestamp": responseData.timestamp.toISOString(),
+// TODO rename telemetry attributes to inputTokens and outputTokens
+"ai.usage.promptTokens": result.usage.inputTokens,
+"ai.usage.completionTokens": result.usage.outputTokens,
+// standardized gen-ai llm span attributes:
+"gen_ai.response.finish_reasons": [result.finishReason],
+"gen_ai.response.id": responseData.id,
+"gen_ai.response.model": responseData.modelId,
+"gen_ai.usage.input_tokens": result.usage.inputTokens,
+"gen_ai.usage.output_tokens": result.usage.outputTokens
+}
+})
+);
+return { ...result, response: responseData };
+}
+});
+}
+);
+currentToolCalls = await Promise.all(
+currentModelResponse.content.filter(
+(part) => part.type === "tool-call"
+).map(
+(toolCall) => parseToolCall({
+toolCall,
+tools,
+repairToolCall,
+system,
+messages: stepInputMessages
+})
+)
+);
+currentToolResults = tools == null ? [] : await executeTools({
+toolCalls: currentToolCalls,
+tools,
+tracer,
+telemetry,
+messages: stepInputMessages,
+abortSignal
+});
+const stepContent = asContent({
+content: currentModelResponse.content,
+toolCalls: currentToolCalls,
+toolResults: currentToolResults
+});
+responseMessages.push(
+...toResponseMessages({
+content: stepContent,
+tools: tools != null ? tools : {}
 })
-)
-…
-currentToolResults = tools == null ? [] : await executeTools({
-toolCalls: currentToolCalls,
-tools,
-tracer,
-telemetry,
-messages: stepInputMessages,
-abortSignal
-});
-const stepContent = asContent({
-content: currentModelResponse.content,
-toolCalls: currentToolCalls,
-toolResults: currentToolResults
-});
-responseMessages.push(
-...toResponseMessages({
+);
+const currentStepResult = new DefaultStepResult({
 content: stepContent,
-…
+finishReason: currentModelResponse.finishReason,
+usage: currentModelResponse.usage,
+warnings: currentModelResponse.warnings,
+providerMetadata: currentModelResponse.providerMetadata,
+request: (_e = currentModelResponse.request) != null ? _e : {},
+response: {
+...currentModelResponse.response,
+// deep clone msgs to avoid mutating past messages in multi-step:
+messages: structuredClone(responseMessages)
+}
+});
+steps.push(currentStepResult);
+await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+} while (
+// there are tool calls:
+currentToolCalls.length > 0 && // all current tool calls have results:
+currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
+!await isStopConditionMet({ stopConditions, steps })
+);
+span.setAttributes(
+selectTelemetryAttributes({
+telemetry,
+attributes: {
+"ai.response.finishReason": currentModelResponse.finishReason,
+"ai.response.text": {
+output: () => extractContentText(currentModelResponse.content)
+},
+"ai.response.toolCalls": {
+output: () => {
+const toolCalls = asToolCalls(currentModelResponse.content);
+return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+}
+},
+// TODO rename telemetry attributes to inputTokens and outputTokens
+"ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
+"ai.usage.completionTokens": currentModelResponse.usage.outputTokens
+}
 })
 );
-const
-…
-}
+const lastStep = steps[steps.length - 1];
+return new DefaultGenerateTextResult({
+steps,
+resolvedOutput: await (output == null ? void 0 : output.parseOutput(
+{ text: lastStep.text },
+{
+response: lastStep.response,
+usage: lastStep.usage,
+finishReason: lastStep.finishReason
+}
+))
 });
-…
-currentToolResults.length === currentToolCalls.length && // continue until the stop condition is met:
-!await continueUntil({ steps })
-);
-span.setAttributes(
-selectTelemetryAttributes({
-telemetry,
-attributes: {
-"ai.response.finishReason": currentModelResponse.finishReason,
-"ai.response.text": {
-output: () => extractContentText(currentModelResponse.content)
-},
-"ai.response.toolCalls": {
-output: () => {
-const toolCalls = asToolCalls(currentModelResponse.content);
-return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-}
-},
-// TODO rename telemetry attributes to inputTokens and outputTokens
-"ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
-"ai.usage.completionTokens": currentModelResponse.usage.outputTokens
-}
-})
-);
-const lastStep = steps[steps.length - 1];
-return new DefaultGenerateTextResult({
-steps,
-resolvedOutput: await (output == null ? void 0 : output.parseOutput(
-{ text: lastStep.text },
-{
-response: lastStep.response,
-usage: lastStep.usage,
-finishReason: lastStep.finishReason
-}
-))
-});
-}
-});
+}
+});
+} catch (error) {
+throw wrapGatewayError(error);
+}
 }
 async function executeTools({
 toolCalls,
@@ -5903,6 +5928,14 @@ async function executeTools({
 const toolResults = await Promise.all(
 toolCalls.map(async ({ toolCallId, toolName, args }) => {
 const tool2 = tools[toolName];
+if ((tool2 == null ? void 0 : tool2.onArgsAvailable) != null) {
+await tool2.onArgsAvailable({
+args,
+toolCallId,
+messages,
+abortSignal
+});
+}
 if ((tool2 == null ? void 0 : tool2.execute) == null) {
 return void 0;
 }
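Tools gain an `onArgsAvailable` hook, invoked once a call's arguments are fully parsed and before `execute` runs. A sketch under the assumption that the `tool()` helper passes these properties through unchanged (the weather tool itself is illustrative):

```ts
import { tool } from 'ai';
import { z } from 'zod';

const weather = tool({
  description: 'Get the weather for a city',
  parameters: z.object({ city: z.string() }),
  // Called with the complete args, before execute:
  onArgsAvailable: async ({ args, toolCallId }) => {
    console.log(`tool call ${toolCallId} ready:`, args);
  },
  execute: async ({ city }) => ({ city, temperature: 21 }),
});
```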
@@ -6205,11 +6238,6 @@ function smoothStream({
 // core/generate-text/stream-text.ts
 import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";

-// src/util/as-array.ts
-function asArray(value) {
-return value === void 0 ? [] : Array.isArray(value) ? value : [value];
-}
-
 // core/generate-text/run-tools-transformation.ts
 import { generateId } from "@ai-sdk/provider-utils";
 function runToolsTransformation({
@@ -6296,6 +6324,14 @@ function runToolsTransformation({
 });
 controller.enqueue(toolCall);
 const tool2 = tools[toolCall.toolName];
+if (tool2.onArgsAvailable != null) {
+await tool2.onArgsAvailable({
+args: toolCall.args,
+toolCallId: toolCall.toolCallId,
+messages,
+abortSignal
+});
+}
 if (tool2.execute != null) {
 const toolExecutionId = generateId();
 outstandingToolResults.add(toolExecutionId);
@@ -6420,17 +6456,21 @@ function streamText({
 maxRetries,
 abortSignal,
 headers,
-…
+stopWhen = stepCountIs(1),
 experimental_output: output,
 experimental_telemetry: telemetry,
+prepareStep,
 providerOptions,
 experimental_toolCallStreaming = false,
 toolCallStreaming = experimental_toolCallStreaming,
-experimental_activeTools
+experimental_activeTools,
+activeTools = experimental_activeTools,
 experimental_repairToolCall: repairToolCall,
 experimental_transform: transform,
 onChunk,
-onError
+onError = ({ error }) => {
+console.error(error);
+},
 onFinish,
 onStepFinish,
 _internal: {
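`streamText` (like `streamObject` above) now defaults `onError` to logging via `console.error` instead of silently swallowing stream errors; override it to route errors elsewhere. A sketch (`myLogger` and the model id are illustrative):

```ts
import { streamText } from 'ai';

const result = streamText({
  model: 'openai/gpt-4o', // assumed id
  prompt: 'Tell me a story.',
  // Default is now ({ error }) => console.error(error); override as needed:
  onError: ({ error }) => {
    myLogger.error('stream failed', error); // myLogger is hypothetical
  },
});
```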
@@ -6441,7 +6481,7 @@ function streamText({
|
|
6441
6481
|
...settings
|
6442
6482
|
}) {
|
6443
6483
|
return new DefaultStreamTextResult({
|
6444
|
-
model,
|
6484
|
+
model: resolveLanguageModel(model),
|
6445
6485
|
telemetry,
|
6446
6486
|
headers,
|
6447
6487
|
settings,
|
@@ -6456,9 +6496,10 @@ function streamText({
|
|
6456
6496
|
transforms: asArray(transform),
|
6457
6497
|
activeTools,
|
6458
6498
|
repairToolCall,
|
6459
|
-
|
6499
|
+
stopConditions: asArray(stopWhen),
|
6460
6500
|
output,
|
6461
6501
|
providerOptions,
|
6502
|
+
prepareStep,
|
6462
6503
|
onChunk,
|
6463
6504
|
onError,
|
6464
6505
|
onFinish,
|
@@ -6533,9 +6574,10 @@ var DefaultStreamTextResult = class {
|
|
6533
6574
|
transforms,
|
6534
6575
|
activeTools,
|
6535
6576
|
repairToolCall,
|
6536
|
-
|
6577
|
+
stopConditions,
|
6537
6578
|
output,
|
6538
6579
|
providerOptions,
|
6580
|
+
prepareStep,
|
6539
6581
|
now: now2,
|
6540
6582
|
currentDate,
|
6541
6583
|
generateId: generateId3,
|
@@ -6544,18 +6586,12 @@ var DefaultStreamTextResult = class {
|
|
6544
6586
|
onFinish,
|
6545
6587
|
onStepFinish
|
6546
6588
|
}) {
|
6547
|
-
this.
|
6548
|
-
this.
|
6549
|
-
this.
|
6550
|
-
if (maxSteps2 < 1) {
|
6551
|
-
throw new InvalidArgumentError({
|
6552
|
-
parameter: "maxSteps",
|
6553
|
-
value: maxSteps2,
|
6554
|
-
message: "maxSteps must be at least 1"
|
6555
|
-
});
|
6556
|
-
}
|
6589
|
+
this._totalUsage = new DelayedPromise();
|
6590
|
+
this._finishReason = new DelayedPromise();
|
6591
|
+
this._steps = new DelayedPromise();
|
6557
6592
|
this.output = output;
|
6558
6593
|
this.generateId = generateId3;
|
6594
|
+
let stepFinish;
|
6559
6595
|
let activeReasoningPart = void 0;
|
6560
6596
|
let recordedContent = [];
|
6561
6597
|
const recordedResponseMessages = [];
|
@@ -6573,7 +6609,7 @@ var DefaultStreamTextResult = class {
|
|
6573
6609
|
await (onChunk == null ? void 0 : onChunk({ chunk: part }));
|
6574
6610
|
}
|
6575
6611
|
if (part.type === "error") {
|
6576
|
-
await
|
6612
|
+
await onError({ error: wrapGatewayError(part.error) });
|
6577
6613
|
}
|
6578
6614
|
if (part.type === "text") {
|
6579
6615
|
const latestContent = recordedContent[recordedContent.length - 1];
|
@@ -6637,6 +6673,7 @@ var DefaultStreamTextResult = class {
|
|
6637
6673
|
recordedContent = [];
|
6638
6674
|
activeReasoningPart = void 0;
|
6639
6675
|
recordedResponseMessages.push(...stepMessages);
|
6676
|
+
stepFinish.resolve();
|
6640
6677
|
}
|
6641
6678
|
if (part.type === "finish") {
|
6642
6679
|
recordedTotalUsage = part.totalUsage;
|
@@ -6654,9 +6691,9 @@ var DefaultStreamTextResult = class {
|
|
6654
6691
|
outputTokens: void 0,
|
6655
6692
|
totalTokens: void 0
|
6656
6693
|
};
|
6657
|
-
self.
|
6658
|
-
self.
|
6659
|
-
self.
|
6694
|
+
self._finishReason.resolve(finishReason);
|
6695
|
+
self._totalUsage.resolve(totalUsage);
|
6696
|
+
self._steps.resolve(recordedSteps);
|
6660
6697
|
const finalStep = recordedSteps[recordedSteps.length - 1];
|
6661
6698
|
await (onFinish == null ? void 0 : onFinish({
|
6662
6699
|
finishReason,
|
@@ -6747,8 +6784,7 @@ var DefaultStreamTextResult = class {
|
|
6747
6784
|
// specific settings that only make sense on the outer level:
|
6748
6785
|
"ai.prompt": {
|
6749
6786
|
input: () => JSON.stringify({ system, prompt, messages })
|
6750
|
-
}
|
6751
|
-
"ai.settings.maxSteps": maxSteps2
|
6787
|
+
}
|
6752
6788
|
}
|
6753
6789
|
}),
|
6754
6790
|
tracer,
|
@@ -6760,6 +6796,8 @@ var DefaultStreamTextResult = class {
|
|
6760
6796
|
responseMessages,
|
6761
6797
|
usage
|
6762
6798
|
}) {
|
6799
|
+
var _a17, _b, _c, _d;
|
6800
|
+
stepFinish = new DelayedPromise();
|
6763
6801
|
const initialPrompt = await standardizePrompt({
|
6764
6802
|
system,
|
6765
6803
|
prompt,
|
@@ -6769,16 +6807,26 @@ var DefaultStreamTextResult = class {
|
|
6769
6807
|
...initialPrompt.messages,
|
6770
6808
|
...responseMessages
|
6771
6809
|
];
|
6810
|
+
const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
|
6811
|
+
model,
|
6812
|
+
steps: recordedSteps,
|
6813
|
+
stepNumber: recordedSteps.length
|
6814
|
+
}));
|
6772
6815
|
const promptMessages = await convertToLanguageModelPrompt({
|
6773
6816
|
prompt: {
|
6774
|
-
system: initialPrompt.system,
|
6817
|
+
system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
|
6775
6818
|
messages: stepInputMessages
|
6776
6819
|
},
|
6777
6820
|
supportedUrls: await model.supportedUrls
|
6778
6821
|
});
|
6779
|
-
const
|
6780
|
-
|
6781
|
-
|
6822
|
+
const stepModel = resolveLanguageModel(
|
6823
|
+
(_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
|
6824
|
+
);
|
6825
|
+
const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
|
6826
|
+
tools,
|
6827
|
+
toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
|
6828
|
+
activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
|
6829
|
+
});
|
6782
6830
|
const {
|
6783
6831
|
result: { stream: stream2, response, request },
|
6784
6832
|
doStreamSpan,
|
@@ -6794,24 +6842,23 @@ var DefaultStreamTextResult = class {
|
|
6794
6842
|
telemetry
|
6795
6843
|
}),
|
6796
6844
|
...baseTelemetryAttributes,
|
6845
|
+
// model:
|
6846
|
+
"ai.model.provider": stepModel.provider,
|
6847
|
+
"ai.model.id": stepModel.modelId,
|
6848
|
+
// prompt:
|
6797
6849
|
"ai.prompt.messages": {
|
6798
|
-
input: () =>
|
6850
|
+
input: () => stringifyForTelemetry(promptMessages)
|
6799
6851
|
},
|
6800
6852
|
"ai.prompt.tools": {
|
6801
6853
|
// convert the language model level tools:
|
6802
|
-
input: () =>
|
6803
|
-
var _a17;
|
6804
|
-
return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
|
6805
|
-
(tool2) => JSON.stringify(tool2)
|
6806
|
-
);
|
6807
|
-
}
|
6854
|
+
input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
|
6808
6855
|
},
|
6809
6856
|
"ai.prompt.toolChoice": {
|
6810
|
-
input: () =>
|
6857
|
+
input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
|
6811
6858
|
},
|
6812
6859
|
// standardized gen-ai llm span attributes:
|
6813
|
-
"gen_ai.system":
|
6814
|
-
"gen_ai.request.model":
|
6860
|
+
"gen_ai.system": stepModel.provider,
|
6861
|
+
"gen_ai.request.model": stepModel.modelId,
|
6815
6862
|
"gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
|
6816
6863
|
"gen_ai.request.max_tokens": callSettings.maxOutputTokens,
|
6817
6864
|
"gen_ai.request.presence_penalty": callSettings.presencePenalty,
|
@@ -6828,9 +6875,10 @@ var DefaultStreamTextResult = class {
  startTimestampMs: now2(),
  // get before the call
  doStreamSpan: doStreamSpan2,
- result: await
+ result: await stepModel.doStream({
  ...callSettings,
-
+ tools: stepTools,
+ toolChoice: stepToolChoice,
  responseFormat: output == null ? void 0 : output.responseFormat,
  prompt: promptMessages,
  providerOptions,
@@ -6841,7 +6889,7 @@ var DefaultStreamTextResult = class {
  }
  })
  );
- const
+ const streamWithToolResults = runToolsTransformation({
  tools,
  generatorStream: stream2,
  toolCallStreaming,
@@ -6880,10 +6928,10 @@ var DefaultStreamTextResult = class {
  stepText += chunk.text;
  }
  self.addStream(
-
+ streamWithToolResults.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var
+ var _a18, _b2, _c2, _d2;
  if (chunk.type === "stream-start") {
  warnings = chunk.warnings;
  return;
@@ -6946,9 +6994,9 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (
- timestamp: (
- modelId: (
+ id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
+ timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
+ modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
  };
  break;
  }
@@ -6960,7 +7008,7 @@ var DefaultStreamTextResult = class {
  doStreamSpan.addEvent("ai.stream.finish");
  doStreamSpan.setAttributes({
  "ai.response.msToFinish": msToFinish,
- "ai.response.avgOutputTokensPerSecond": 1e3 * ((
+ "ai.response.avgOutputTokensPerSecond": 1e3 * ((_d2 = stepUsage.outputTokens) != null ? _d2 : 0) / msToFinish
  });
  break;
  }
@@ -6974,8 +7022,28 @@ var DefaultStreamTextResult = class {
  controller.enqueue(chunk);
  break;
  }
- case "tool-call-streaming-start":
+ case "tool-call-streaming-start": {
+ const tool2 = tools == null ? void 0 : tools[chunk.toolName];
+ if ((tool2 == null ? void 0 : tool2.onArgsStreamingStart) != null) {
+ await tool2.onArgsStreamingStart({
+ toolCallId: chunk.toolCallId,
+ messages: stepInputMessages,
+ abortSignal
+ });
+ }
+ controller.enqueue(chunk);
+ break;
+ }
  case "tool-call-delta": {
+ const tool2 = tools == null ? void 0 : tools[chunk.toolName];
+ if ((tool2 == null ? void 0 : tool2.onArgsStreamingDelta) != null) {
+ await tool2.onArgsStreamingDelta({
+ argsTextDelta: chunk.argsTextDelta,
+ toolCallId: chunk.toolCallId,
+ messages: stepInputMessages,
+ abortSignal
+ });
+ }
  controller.enqueue(chunk);
  break;
  }
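The two rewritten `case` branches forward streaming tool-call arguments to optional per-tool hooks: `onArgsStreamingStart({ toolCallId, messages, abortSignal })` once per call, then `onArgsStreamingDelta({ argsTextDelta, ... })` for each chunk. A sketch of a tool definition using them; the weather tool is illustrative, and the `parameters` field name follows the stable tool shape, which may differ in this alpha:

```ts
import { tool } from "ai";
import { z } from "zod";

const weatherTool = tool({
  description: "Get the weather for a city",
  parameters: z.object({ city: z.string() }),
  // invoked once when the model starts streaming arguments for a call
  onArgsStreamingStart: async ({ toolCallId }) => {
    console.log(`arguments streaming started for ${toolCallId}`);
  },
  // invoked for every partial-arguments text chunk
  onArgsStreamingDelta: async ({ toolCallId, argsTextDelta }) => {
    console.log(`partial args for ${toolCallId}:`, argsTextDelta);
  },
  execute: async ({ city }) => ({ city, temperatureCelsius: 21 }),
});
```

Note that the surrounding code only emits these chunks on the tool-call streaming path (`toolCallStreaming` appears as an option in the hunks above).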
@@ -7035,9 +7103,13 @@ var DefaultStreamTextResult = class {
  }
  });
  const combinedUsage = addLanguageModelUsage(usage, stepUsage);
-
- stepToolCalls.length > 0 && // all current tool calls have results:
- stepToolResults.length === stepToolCalls.length
+ await stepFinish.promise;
+ if (stepToolCalls.length > 0 && // all current tool calls have results:
+ stepToolResults.length === stepToolCalls.length && // continue until a stop condition is met:
+ !await isStopConditionMet({
+ stopConditions,
+ steps: recordedSteps
+ })) {
  responseMessages.push(
  ...toResponseMessages({
  content: stepContent,
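This hunk replaces the old fixed "all tool calls have results" loop check with an additional `isStopConditionMet({ stopConditions, steps })` guard, which lines up with `maxSteps` being dropped from the export list below and `stepCountIs` being added (`hasToolCall` is already exported). A sketch of the calling side, assuming the conditions are passed through a `stopWhen`-style option on `streamText`:

```ts
import { streamText, stepCountIs, hasToolCall, tool } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

const bookHotel = tool({
  description: "Book a hotel by id",
  parameters: z.object({ hotelId: z.string() }),
  execute: async ({ hotelId }) => ({ confirmed: hotelId }),
});

const result = streamText({
  model: openai("gpt-4o"),
  prompt: "Find and book a hotel in Berlin.",
  tools: { bookHotel },
  // keep running tool-call steps until either condition is met
  stopWhen: [stepCountIs(5), hasToolCall("bookHotel")],
});
```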
@@ -7085,7 +7157,7 @@ var DefaultStreamTextResult = class {
  });
  }
  get steps() {
- return this.
+ return this._steps.promise;
  }
  get finalStep() {
  return this.steps.then((steps) => steps[steps.length - 1]);
@@ -7130,10 +7202,10 @@ var DefaultStreamTextResult = class {
  return this.finalStep.then((step) => step.response);
  }
  get totalUsage() {
- return this.
+ return this._totalUsage.promise;
  }
  get finishReason() {
- return this.
+ return this._finishReason.promise;
  }
  /**
  Split out a new stream from the original stream.
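The accessors now resolve deferred promises (`_steps`, `_totalUsage`, `_finishReason`) that settle when the stream completes, so they should be awaited after the stream has been consumed. A minimal usage sketch:

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({ model: openai("gpt-4o"), prompt: "Hello!" });

// drain the stream; the deferred promises below settle at stream end
for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}

console.log(await result.finishReason); // e.g. "stop"
console.log(await result.totalUsage); // usage combined across all steps
console.log((await result.steps).length); // one entry per step
```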
@@ -7206,8 +7278,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning = false,
  sendSources = false,
-
-
+ sendStart = true,
+ sendFinish = true,
  onError = () => "An error occurred."
  // mask error messages for safety by default
  } = {}) {
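The content of the two removed option lines was lost in extraction (presumably the previous spellings of these options); the replacements are plain `sendStart` and `sendFinish`, both defaulting to `true`. They matter when several UI message streams are concatenated into one response; a sketch, assuming the surrounding method is `toUIMessageStream` and that `resultA`/`resultB` are prior `streamText` results:

```ts
// first segment: emit the "start" part but suppress "finish"
const firstPart = resultA.toUIMessageStream({ sendFinish: false });

// final segment: suppress the duplicate "start", emit the closing "finish"
const lastPart = resultB.toUIMessageStream({ sendStart: false });
```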
@@ -7251,16 +7323,25 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "source": {
- if (sendSources) {
+ if (sendSources && part.sourceType === "url") {
  controller.enqueue({
- type: "source",
-
- id: part.id,
+ type: "source-url",
+ sourceId: part.id,
  url: part.url,
  title: part.title,
  providerMetadata: part.providerMetadata
  });
  }
+ if (sendSources && part.sourceType === "document") {
+ controller.enqueue({
+ type: "source-document",
+ sourceId: part.id,
+ mediaType: part.mediaType,
+ title: part.title,
+ filename: part.filename,
+ providerMetadata: part.providerMetadata
+ });
+ }
  break;
  }
  case "tool-call-streaming-start": {
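The single `source` case is split into two UI stream parts: `source-url` (with `sourceId` and `url`) and `source-document` (with `sourceId`, `mediaType`, and an optional `filename`). Client code that matched `part.type === "source"` needs to handle both; a self-contained sketch using only the shapes visible in the enqueued parts above:

```ts
type SourceUrlPart = {
  type: "source-url";
  sourceId: string;
  url: string;
  title?: string;
};

type SourceDocumentPart = {
  type: "source-document";
  sourceId: string;
  mediaType: string;
  title: string;
  filename?: string;
};

function renderSource(part: SourceUrlPart | SourceDocumentPart): string {
  switch (part.type) {
    case "source-url":
      return `[${part.title ?? part.url}](${part.url})`;
    case "source-document":
      return `${part.title} (${part.mediaType}${
        part.filename ? `, ${part.filename}` : ""
      })`;
  }
}
```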
@@ -7320,7 +7401,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "start": {
- if (
+ if (sendStart) {
  const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "start",
@@ -7331,7 +7412,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "finish": {
- if (
+ if (sendFinish) {
  const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "finish",
@@ -7348,38 +7429,12 @@ var DefaultStreamTextResult = class {
  }
  })
  );
-
- return baseStream;
- }
- const state = createStreamingUIMessageState({
- lastMessage,
- newMessageId: messageId != null ? messageId : this.generateId()
- });
- const runUpdateMessageJob = async (job) => {
- await job({ state, write: () => {
- } });
- };
- return processUIMessageStream({
+ return handleUIMessageStreamFinish({
  stream: baseStream,
-
-
-
-
- controller.enqueue(chunk);
- },
- flush() {
- const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
- onFinish({
- isContinuation: isContinuation2,
- responseMessage: state.message,
- messages: [
- ...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
- state.message
- ]
- });
- }
- })
- );
+ newMessageId: messageId != null ? messageId : this.generateId(),
+ originalMessages,
+ onFinish
+ });
  }
  pipeUIMessageStreamToResponse(response, {
  newMessageId,
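The hand-rolled `flush()` bookkeeping is replaced by `handleUIMessageStreamFinish`, which receives `originalMessages` and `onFinish` and computes the continuation logic itself. The removed code shows the callback contract: `isContinuation`, `responseMessage`, and a merged `messages` array. A sketch of a route handler relying on that callback, assuming the contract is preserved and using a hypothetical `saveChat` helper:

```ts
import { convertToCoreMessages, streamText } from "ai";
import { openai } from "@ai-sdk/openai";

// hypothetical persistence helper, not part of this package
declare function saveChat(messages: unknown[]): Promise<void>;

export async function POST(req: Request) {
  const { messages } = await req.json();

  const result = streamText({
    model: openai("gpt-4o"),
    messages: convertToCoreMessages(messages),
  });

  return result.toUIMessageStreamResponse({
    originalMessages: messages,
    onFinish: ({ messages, isContinuation, responseMessage }) => {
      // `messages` is already merged: continuations replace the last
      // message instead of appending a duplicate
      void saveChat(messages);
    },
  });
}
```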
@@ -7388,8 +7443,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError,
  ...init
  } = {}) {
@@ -7402,8 +7457,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError
  }),
  ...init
@@ -7423,8 +7478,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError,
  ...init
  } = {}) {
@@ -7436,8 +7491,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError
  }),
  ...init
@@ -7680,7 +7735,9 @@ var doWrap = ({
  };

  // core/registry/custom-provider.ts
- import {
+ import {
+ NoSuchModelError as NoSuchModelError2
+ } from "@ai-sdk/provider";
  function customProvider({
  languageModels,
  textEmbeddingModels,
@@ -7720,7 +7777,7 @@ function customProvider({
  var experimental_customProvider = customProvider;

  // core/registry/no-such-provider-error.ts
- import { AISDKError as
+ import { AISDKError as AISDKError20, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
  var name16 = "AI_NoSuchProviderError";
  var marker16 = `vercel.ai.error.${name16}`;
  var symbol16 = Symbol.for(marker16);
@@ -7739,13 +7796,15 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return
+ return AISDKError20.hasMarker(error, marker16);
  }
  };
  _a16 = symbol16;

  // core/registry/provider-registry.ts
- import {
+ import {
+ NoSuchModelError as NoSuchModelError4
+ } from "@ai-sdk/provider";
  function createProviderRegistry(providers, {
  separator = ":"
  } = {}) {
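The registry hunks are import churn (re-aliased `NoSuchModelError`/`AISDKError` imports from `@ai-sdk/provider`), not behavior changes: `createProviderRegistry` still takes a provider map plus an optional `separator` (defaulting to `":"`, as the hunk shows), and `NoSuchProviderError.isInstance` still works via the error marker. A sketch of that API surface, with the concrete provider assumed:

```ts
import {
  createProviderRegistry,
  customProvider,
  NoSuchProviderError,
} from "ai";
import { openai } from "@ai-sdk/openai";

const registry = createProviderRegistry(
  {
    openai,
    // customProvider can alias concrete models under stable names
    custom: customProvider({
      languageModels: { fast: openai("gpt-4o-mini") },
    }),
  },
  { separator: ":" } // the default shown above
);

try {
  const model = registry.languageModel("custom:fast");
} catch (error) {
  if (NoSuchProviderError.isInstance(error)) {
    console.error("unknown provider; available:", error.availableProviders);
  }
}
```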
@@ -8394,8 +8453,8 @@ var MCPClient = class {
  };

  // src/error/no-transcript-generated-error.ts
- import { AISDKError as
- var NoTranscriptGeneratedError = class extends
+ import { AISDKError as AISDKError21 } from "@ai-sdk/provider";
+ var NoTranscriptGeneratedError = class extends AISDKError21 {
  constructor(options) {
  super({
  name: "AI_NoTranscriptGeneratedError",
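`NoTranscriptGeneratedError` only changes its `AISDKError` import alias here. It is the error thrown when a transcription call yields no transcript; a sketch of catching that case via `experimental_transcribe` (exported below), with the provider model id assumed:

```ts
import { experimental_transcribe as transcribe } from "ai";
import { openai } from "@ai-sdk/openai";
import { readFile } from "node:fs/promises";

try {
  const { text } = await transcribe({
    model: openai.transcription("whisper-1"), // assumed model id
    audio: await readFile("meeting.mp3"),
  });
  console.log(text);
} catch (error) {
  // matches the name set in the constructor above
  if ((error as Error).name === "AI_NoTranscriptGeneratedError") {
    console.error("provider returned an empty transcript");
  }
}
```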
@@ -8459,10 +8518,11 @@ var DefaultTranscriptionResult = class {
  export {
  AISDKError16 as AISDKError,
  APICallError,
-
+ AbstractChat,
  DefaultChatTransport,
  DownloadError,
  EmptyResponseBodyError,
+ GLOBAL_DEFAULT_PROVIDER,
  InvalidArgumentError,
  InvalidDataContentError,
  InvalidMessageRoleError,
@@ -8484,14 +8544,14 @@ export {
  NoSuchToolError,
  output_exports as Output,
  RetryError,
+ SerialJobExecutor,
+ TextStreamChatTransport,
  ToolCallRepairError,
  ToolExecutionError,
  TypeValidationError,
  UnsupportedFunctionalityError,
- appendClientMessage,
  asSchema5 as asSchema,
  assistantModelMessageSchema,
- callChatApi,
  callCompletionApi,
  convertFileListToFileUIParts,
  convertToCoreMessages,
@@ -8508,7 +8568,6 @@ export {
  createUIMessageStream,
  createUIMessageStreamResponse,
  customProvider,
- defaultChatStore,
  defaultSettingsMiddleware,
  embed,
  embedMany,
@@ -8518,7 +8577,6 @@ export {
  generateImage as experimental_generateImage,
  generateSpeech as experimental_generateSpeech,
  transcribe as experimental_transcribe,
- extractMaxToolInvocationStep,
  extractReasoningMiddleware,
  generateId2 as generateId,
  generateObject,
@@ -8526,24 +8584,21 @@ export {
  getTextFromDataUrl,
  getToolInvocations,
  hasToolCall,
- isAssistantMessageWithCompletedToolCalls,
  isDeepEqualData,
  jsonSchema2 as jsonSchema,
- maxSteps,
  modelMessageSchema,
  parsePartialJson,
  pipeTextStreamToResponse,
  pipeUIMessageStreamToResponse,
- shouldResubmitMessages,
  simulateReadableStream,
  simulateStreamingMiddleware,
  smoothStream,
+ stepCountIs,
  streamObject,
  streamText,
  systemModelMessageSchema,
  tool,
  toolModelMessageSchema,
- updateToolCallResult,
  userModelMessageSchema,
  wrapLanguageModel
  };