ai 5.0.0-alpha.1 → 5.0.0-alpha.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +146 -0
- package/dist/index.d.mts +404 -563
- package/dist/index.d.ts +404 -563
- package/dist/index.js +1416 -1439
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1350 -1362
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -2
- package/dist/internal/index.d.ts +3 -2
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/package.json +6 -5
package/dist/index.mjs
CHANGED
@@ -449,21 +449,8 @@ function pipeTextStreamToResponse({
   });
 }
 
-// src/ui/
-
-  messages,
-  message
-}) {
-  return [
-    ...messages.length > 0 && messages[messages.length - 1].id === message.id ? messages.slice(0, -1) : messages,
-    message
-  ];
-}
-
-// src/ui/call-chat-api.ts
-import {
-  parseJsonEventStream
-} from "@ai-sdk/provider-utils";
+// src/ui/call-completion-api.ts
+import { parseJsonEventStream } from "@ai-sdk/provider-utils";
 
 // src/ui-message-stream/ui-message-stream-parts.ts
 import { z } from "zod";
@@ -504,9 +491,8 @@ var uiMessageStreamPartSchema = z.union([
     providerMetadata: z.record(z.any()).optional()
   }),
   z.object({
-    type: z.literal("source"),
-
-    id: z.string(),
+    type: z.literal("source-url"),
+    sourceId: z.string(),
     url: z.string(),
     title: z.string().optional(),
     providerMetadata: z.any().optional()
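The hunk above renames the `source` UI message stream part to `source-url` and replaces its generic `id` with an explicit `sourceId`. A minimal consumer-side sketch of the new part shape, in the same zod style as the bundle (the local names `sourceUrlPartSchema` and `SourceUrlPart` are illustrative, not package exports):

import { z } from "zod";

// Mirrors the schema fields visible in the hunk above.
const sourceUrlPartSchema = z.object({
  type: z.literal("source-url"),
  sourceId: z.string(),
  url: z.string(),
  title: z.string().optional(),
  providerMetadata: z.any().optional()
});

type SourceUrlPart = z.infer<typeof sourceUrlPartSchema>;

// Parsing a part received over the UI message stream:
const part: SourceUrlPart = sourceUrlPartSchema.parse({
  type: "source-url",
  sourceId: "src-1",
  url: "https://example.com"
});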
@@ -570,6 +556,170 @@ async function consumeStream({
   }
 }
 
+// src/ui/process-text-stream.ts
+async function processTextStream({
+  stream,
+  onTextPart
+}) {
+  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
+  while (true) {
+    const { done, value } = await reader.read();
+    if (done) {
+      break;
+    }
+    await onTextPart(value);
+  }
+}
+
+// src/ui/call-completion-api.ts
+var getOriginalFetch = () => fetch;
+async function callCompletionApi({
+  api,
+  prompt,
+  credentials,
+  headers,
+  body,
+  streamProtocol = "data",
+  setCompletion,
+  setLoading,
+  setError,
+  setAbortController,
+  onFinish,
+  onError,
+  fetch: fetch2 = getOriginalFetch()
+}) {
+  var _a17;
+  try {
+    setLoading(true);
+    setError(void 0);
+    const abortController = new AbortController();
+    setAbortController(abortController);
+    setCompletion("");
+    const response = await fetch2(api, {
+      method: "POST",
+      body: JSON.stringify({
+        prompt,
+        ...body
+      }),
+      credentials,
+      headers: {
+        "Content-Type": "application/json",
+        ...headers
+      },
+      signal: abortController.signal
+    }).catch((err) => {
+      throw err;
+    });
+    if (!response.ok) {
+      throw new Error(
+        (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+      );
+    }
+    if (!response.body) {
+      throw new Error("The response body is empty.");
+    }
+    let result = "";
+    switch (streamProtocol) {
+      case "text": {
+        await processTextStream({
+          stream: response.body,
+          onTextPart: (chunk) => {
+            result += chunk;
+            setCompletion(result);
+          }
+        });
+        break;
+      }
+      case "data": {
+        await consumeStream({
+          stream: parseJsonEventStream({
+            stream: response.body,
+            schema: uiMessageStreamPartSchema
+          }).pipeThrough(
+            new TransformStream({
+              async transform(part) {
+                if (!part.success) {
+                  throw part.error;
+                }
+                const streamPart = part.value;
+                if (streamPart.type === "text") {
+                  result += streamPart.text;
+                  setCompletion(result);
+                } else if (streamPart.type === "error") {
+                  throw new Error(streamPart.errorText);
+                }
+              }
+            })
+          ),
+          onError: (error) => {
+            throw error;
+          }
+        });
+        break;
+      }
+      default: {
+        const exhaustiveCheck = streamProtocol;
+        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
+      }
+    }
+    if (onFinish) {
+      onFinish(prompt, result);
+    }
+    setAbortController(null);
+    return result;
+  } catch (err) {
+    if (err.name === "AbortError") {
+      setAbortController(null);
+      return null;
+    }
+    if (err instanceof Error) {
+      if (onError) {
+        onError(err);
+      }
+    }
+    setError(err);
+  } finally {
+    setLoading(false);
+  }
+}
+
+// src/ui/chat.ts
+import {
+  generateId as generateIdFunc
+} from "@ai-sdk/provider-utils";
+
+// src/util/serial-job-executor.ts
+var SerialJobExecutor = class {
+  constructor() {
+    this.queue = [];
+    this.isProcessing = false;
+  }
+  async processQueue() {
+    if (this.isProcessing) {
+      return;
+    }
+    this.isProcessing = true;
+    while (this.queue.length > 0) {
+      await this.queue[0]();
+      this.queue.shift();
+    }
+    this.isProcessing = false;
+  }
+  async run(job) {
+    return new Promise((resolve, reject) => {
+      this.queue.push(async () => {
+        try {
+          await job();
+          resolve();
+        } catch (error) {
+          reject(error);
+        }
+      });
+      void this.processQueue();
+    });
+  }
+};
+
 // src/ui/process-ui-message-stream.ts
 import {
   validateTypes
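The new SerialJobExecutor (added above) accepts async jobs and runs them strictly one at a time in FIFO order; run resolves once its own job has finished. A small usage sketch, independent of the chat code (the jobs and delay are illustrative):

const executor = new SerialJobExecutor();
const log: string[] = [];

// Both jobs are queued immediately, but never run concurrently,
// so the log entries below cannot interleave.
await Promise.all([
  executor.run(async () => {
    log.push("job 1 start");
    await new Promise((resolve) => setTimeout(resolve, 10));
    log.push("job 1 end");
  }),
  executor.run(async () => {
    log.push("job 2");
  })
]);
// log: ["job 1 start", "job 1 end", "job 2"]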
@@ -945,14 +1095,6 @@ async function parsePartialJson(jsonText) {
   return { value: void 0, state: "failed-parse" };
 }
 
-// src/ui/extract-max-tool-invocation-step.ts
-function extractMaxToolInvocationStep(toolInvocations) {
-  return toolInvocations == null ? void 0 : toolInvocations.reduce((max, toolInvocation) => {
-    var _a17;
-    return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
-  }, 0);
-}
-
 // src/ui/get-tool-invocations.ts
 function getToolInvocations(message) {
   return message.parts.filter(
@@ -963,12 +1105,10 @@ function getToolInvocations(message) {
 // src/ui/process-ui-message-stream.ts
 function createStreamingUIMessageState({
   lastMessage,
-  newMessageId = "
+  newMessageId = ""
 } = {}) {
-  var _a17;
   const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
-  const
-  const message = isContinuation ? structuredClone(lastMessage) : {
+  const message = isContinuation ? lastMessage : {
     id: newMessageId,
     metadata: {},
     role: "assistant",
@@ -978,8 +1118,7 @@ function createStreamingUIMessageState({
     message,
     activeTextPart: void 0,
     activeReasoningPart: void 0,
-    partialToolCalls: {}
-    step
+    partialToolCalls: {}
   };
 }
 function processUIMessageStream({
@@ -1062,16 +1201,13 @@ function processUIMessageStream({
           write();
           break;
         }
-        case "source": {
+        case "source-url": {
           state.message.parts.push({
-            type: "source",
-
-
-
-
-            title: part.title,
-            providerMetadata: part.providerMetadata
-          }
+            type: "source-url",
+            sourceId: part.sourceId,
+            url: part.url,
+            title: part.title,
+            providerMetadata: part.providerMetadata
           });
           write();
           break;
@@ -1080,13 +1216,11 @@
           const toolInvocations = getToolInvocations(state.message);
           state.partialToolCalls[part.toolCallId] = {
             text: "",
-            step: state.step,
             toolName: part.toolName,
             index: toolInvocations.length
           };
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: void 0
@@ -1102,7 +1236,6 @@
           );
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: partialToolCall.step,
             toolCallId: part.toolCallId,
             toolName: partialToolCall.toolName,
             args: partialArgs
@@ -1113,7 +1246,6 @@
         case "tool-call": {
           updateToolInvocationPart(part.toolCallId, {
             state: "call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: part.args
@@ -1126,7 +1258,6 @@
           if (result != null) {
             updateToolInvocationPart(part.toolCallId, {
               state: "result",
-              step: state.step,
               toolCallId: part.toolCallId,
               toolName: part.toolName,
               args: part.args,
@@ -1165,7 +1296,6 @@
           break;
         }
         case "finish-step": {
-          state.step += 1;
           state.activeTextPart = void 0;
           state.activeReasoningPart = void 0;
           await updateMessageMetadata(part.metadata);
@@ -1207,14 +1337,7 @@
             (partArg) => part.type === partArg.type && part.id === partArg.id
           ) : void 0;
           if (existingPart != null) {
-
-            existingPart.value = mergeObjects(
-              existingPart.data,
-              part.data
-            );
-          } else {
-            existingPart.data = part.data;
-          }
+            existingPart.data = isObject(existingPart.data) && isObject(part.data) ? mergeObjects(existingPart.data, part.data) : part.data;
           } else {
             state.message.parts.push(part);
           }
@@ -1235,47 +1358,62 @@ function isObject(value) {
   return typeof value === "object" && value !== null;
 }
 
-// src/ui/
-function
-
+// src/ui/should-resubmit-messages.ts
+function shouldResubmitMessages({
+  originalMaxToolInvocationStep,
+  originalMessageCount,
+  maxSteps,
+  messages
 }) {
-
-
-
-
-
-
-
-
-
-
-
-      controller.enqueue({ type: "finish" });
-    }
-  })
+  const lastMessage = messages[messages.length - 1];
+  const lastMessageStepStartCount = lastMessage.parts.filter(
+    (part) => part.type === "step-start"
+  ).length;
+  return (
+    // check if the feature is enabled:
+    maxSteps > 1 && // ensure there is a last message:
+    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
+    (messages.length > originalMessageCount || lastMessageStepStartCount !== originalMaxToolInvocationStep) && // check that next step is possible:
+    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
+    lastMessageStepStartCount < maxSteps
   );
 }
+function isAssistantMessageWithCompletedToolCalls(message) {
+  if (!message) {
+    return false;
+  }
+  if (message.role !== "assistant") {
+    return false;
+  }
+  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
+    return part.type === "step-start" ? index : lastIndex;
+  }, -1);
+  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
+  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
+}
 
-// src/ui/
-
+// src/ui/default-chat-transport.ts
+import {
+  parseJsonEventStream as parseJsonEventStream2
+} from "@ai-sdk/provider-utils";
+var getOriginalFetch2 = () => fetch;
 async function fetchUIMessageStream({
   api,
   body,
-  streamProtocol = "ui-message",
   credentials,
   headers,
-
-  fetch: fetch2 =
+  abortSignal,
+  fetch: fetch2 = getOriginalFetch2(),
   requestType = "generate"
 }) {
-  var _a17
-  const response = requestType === "resume" ? await fetch2(`${api}?
+  var _a17;
+  const response = requestType === "resume" ? await fetch2(`${api}?id=${body.id}`, {
     method: "GET",
     headers: {
       "Content-Type": "application/json",
       ...headers
     },
-    signal:
+    signal: abortSignal,
     credentials
   }) : await fetch2(api, {
     method: "POST",
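should-resubmit-messages.ts (added above) drops the old per-tool-invocation step counter in favor of counting step-start parts on the last message. A sketch of that counting rule in isolation (the UIMessagePart type here is a deliberately abbreviated stand-in for the package's richer part types):

type UIMessagePart =
  | { type: "step-start" }
  | { type: "text"; text: string }
  | { type: "tool-invocation"; toolInvocation: { toolCallId: string; result?: unknown } };

// Mirrors lastMessageStepStartCount above: each "step-start" part
// marks the beginning of one assistant step.
function countSteps(parts: UIMessagePart[]): number {
  return parts.filter((part) => part.type === "step-start").length;
}

// Two steps: a tool-calling step followed by a text step.
countSteps([
  { type: "step-start" },
  { type: "tool-invocation", toolInvocation: { toolCallId: "t1", result: "ok" } },
  { type: "step-start" },
  { type: "text", text: "done" }
]); // => 2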
@@ -1284,20 +1422,18 @@ async function fetchUIMessageStream({
       "Content-Type": "application/json",
       ...headers
     },
-    signal:
+    signal: abortSignal,
     credentials
   });
   if (!response.ok) {
     throw new Error(
-      (
+      (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
     );
   }
   if (!response.body) {
     throw new Error("The response body is empty.");
   }
-  return
-    stream: response.body.pipeThrough(new TextDecoderStream())
-  }) : parseJsonEventStream({
+  return parseJsonEventStream2({
     stream: response.body,
     schema: uiMessageStreamPartSchema
   }).pipeThrough(
@@ -1311,554 +1447,291 @@ async function fetchUIMessageStream({
     })
   );
 }
-
-
-
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  lastMessage,
-  messageMetadataSchema
-}) {
-  const state = createStreamingUIMessageState({
-    lastMessage,
-    newMessageId: generateId3()
-  });
-  const runUpdateMessageJob = async (job) => {
-    await job({
-      state,
-      write: () => {
-        onUpdate({ message: state.message });
-      }
-    });
-  };
-  await consumeStream({
-    stream: processUIMessageStream({
-      stream,
-      onToolCall,
-      messageMetadataSchema,
-      runUpdateMessageJob
-    }),
-    onError: (error) => {
-      throw error;
-    }
-  });
-  onFinish == null ? void 0 : onFinish({ message: state.message });
-}
-async function callChatApi({
-  api,
-  body,
-  streamProtocol = "ui-message",
-  credentials,
-  headers,
-  abortController,
-  onUpdate,
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  fetch: fetch2 = getOriginalFetch(),
-  lastMessage,
-  requestType = "generate",
-  messageMetadataSchema
-}) {
-  const stream = await fetchUIMessageStream({
-    api,
-    body,
-    streamProtocol,
+var DefaultChatTransport = class {
+  constructor({
+    api = "/api/chat",
     credentials,
     headers,
-
+    body,
     fetch: fetch2,
-
-  })
-
-
-
-
-
-    lastMessage,
-    messageMetadataSchema
-  });
-}
-
-// src/ui/call-completion-api.ts
-import { parseJsonEventStream as parseJsonEventStream2 } from "@ai-sdk/provider-utils";
-
-// src/ui/process-text-stream.ts
-async function processTextStream({
-  stream,
-  onTextPart
-}) {
-  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
-  while (true) {
-    const { done, value } = await reader.read();
-    if (done) {
-      break;
-    }
-    await onTextPart(value);
+    prepareRequest
+  } = {}) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequest = prepareRequest;
   }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  fetch: fetch2 = getOriginalFetch2()
-}) {
-  var _a17;
-  try {
-    setLoading(true);
-    setError(void 0);
-    const abortController = new AbortController();
-    setAbortController(abortController);
-    setCompletion("");
-    const response = await fetch2(api, {
-      method: "POST",
-      body: JSON.stringify({
-        prompt,
-        ...body
-      }),
-      credentials,
-      headers: {
-        "Content-Type": "application/json",
-        ...headers
-      },
-      signal: abortController.signal
-    }).catch((err) => {
-      throw err;
+  submitMessages({
+    chatId,
+    messages,
+    abortSignal,
+    metadata,
+    headers,
+    body,
+    requestType
+  }) {
+    var _a17, _b;
+    const preparedRequest = (_a17 = this.prepareRequest) == null ? void 0 : _a17.call(this, {
+      id: chatId,
+      messages,
+      body: { ...this.body, ...body },
+      headers: { ...this.headers, ...headers },
+      credentials: this.credentials,
+      requestMetadata: metadata
     });
-
-
-
-    )
-
-
-
-
-    let result = "";
-    switch (streamProtocol) {
-      case "text": {
-        await processTextStream({
-          stream: response.body,
-          onTextPart: (chunk) => {
-            result += chunk;
-            setCompletion(result);
-          }
-        });
-        break;
-      }
-      case "data": {
-        await consumeStream({
-          stream: parseJsonEventStream2({
-            stream: response.body,
-            schema: uiMessageStreamPartSchema
-          }).pipeThrough(
-            new TransformStream({
-              async transform(part) {
-                if (!part.success) {
-                  throw part.error;
-                }
-                const streamPart = part.value;
-                if (streamPart.type === "text") {
-                  result += streamPart.text;
-                  setCompletion(result);
-                } else if (streamPart.type === "error") {
-                  throw new Error(streamPart.errorText);
-                }
-              }
-            })
-          ),
-          onError: (error) => {
-            throw error;
-          }
-        });
-        break;
-      }
-      default: {
-        const exhaustiveCheck = streamProtocol;
-        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
-      }
-    }
-    if (onFinish) {
-      onFinish(prompt, result);
-    }
-    setAbortController(null);
-    return result;
-  } catch (err) {
-    if (err.name === "AbortError") {
-      setAbortController(null);
-      return null;
-    }
-    if (err instanceof Error) {
-      if (onError) {
-        onError(err);
-      }
-    }
-    setError(err);
-  } finally {
-    setLoading(false);
-  }
-}
-
-// src/ui/chat-store.ts
-import {
-  generateId as generateIdFunc
-} from "@ai-sdk/provider-utils";
-
-// src/util/serial-job-executor.ts
-var SerialJobExecutor = class {
-  constructor() {
-    this.queue = [];
-    this.isProcessing = false;
-  }
-  async processQueue() {
-    if (this.isProcessing) {
-      return;
-    }
-    this.isProcessing = true;
-    while (this.queue.length > 0) {
-      await this.queue[0]();
-      this.queue.shift();
-    }
-    this.isProcessing = false;
-  }
-  async run(job) {
-    return new Promise((resolve, reject) => {
-      this.queue.push(async () => {
-        try {
-          await job();
-          resolve();
-        } catch (error) {
-          reject(error);
-        }
-      });
-      void this.processQueue();
+    return fetchUIMessageStream({
+      api: this.api,
+      body: (preparedRequest == null ? void 0 : preparedRequest.body) !== void 0 ? preparedRequest.body : { ...this.body, ...body, id: chatId, messages },
+      headers: (preparedRequest == null ? void 0 : preparedRequest.headers) !== void 0 ? preparedRequest.headers : { ...this.headers, ...headers },
+      credentials: (_b = preparedRequest == null ? void 0 : preparedRequest.credentials) != null ? _b : this.credentials,
+      abortSignal,
+      fetch: this.fetch,
+      requestType
     });
   }
 };
 
-// src/ui/
-function
-
-
-  maxSteps: maxSteps2,
-  messages
-}) {
-  var _a17;
-  const lastMessage = messages[messages.length - 1];
-  return (
-    // check if the feature is enabled:
-    maxSteps2 > 1 && // ensure there is a last message:
-    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
-    (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
-    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
-    ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps2
-  );
-}
-function isAssistantMessageWithCompletedToolCalls(message) {
-  if (message.role !== "assistant") {
-    return false;
+// src/ui/convert-file-list-to-file-ui-parts.ts
+async function convertFileListToFileUIParts(files) {
+  if (files == null) {
+    return [];
   }
-
-
-  }, -1);
-  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
-  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
-}
-
-// src/ui/update-tool-call-result.ts
-function updateToolCallResult({
-  messages,
-  toolCallId,
-  toolResult: result
-}) {
-  const lastMessage = messages[messages.length - 1];
-  const invocationPart = lastMessage.parts.find(
-    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
-  );
-  if (invocationPart == null) {
-    return;
+  if (!globalThis.FileList || !(files instanceof globalThis.FileList)) {
+    throw new Error("FileList is not supported in the current environment");
   }
-
-
-
-
-
+  return Promise.all(
+    Array.from(files).map(async (file) => {
+      const { name: name17, type } = file;
+      const dataUrl = await new Promise((resolve, reject) => {
+        const reader = new FileReader();
+        reader.onload = (readerEvent) => {
+          var _a17;
+          resolve((_a17 = readerEvent.target) == null ? void 0 : _a17.result);
+        };
+        reader.onerror = (error) => reject(error);
+        reader.readAsDataURL(file);
+      });
+      return {
+        type: "file",
+        mediaType: type,
+        filename: name17,
+        url: dataUrl
+      };
+    })
+  );
 }
 
-// src/ui/chat
-var
+// src/ui/chat.ts
+var AbstractChat = class {
   constructor({
-
-
-    transport,
-    maxSteps
+    generateId: generateId3 = generateIdFunc,
+    id = generateId3(),
+    transport = new DefaultChatTransport(),
+    maxSteps = 1,
     messageMetadataSchema,
-    dataPartSchemas
-
-
-
-
-      {
-        messages: [...state.messages],
-        status: "ready",
-        activeResponse: void 0,
-        error: void 0,
-        jobExecutor: new SerialJobExecutor()
-      }
-    ])
-  );
-  this.maxSteps = maxSteps2;
-  this.transport = transport;
-  this.subscribers = /* @__PURE__ */ new Set();
-  this.generateId = generateId3 != null ? generateId3 : generateIdFunc;
-  this.messageMetadataSchema = messageMetadataSchema;
-  this.dataPartSchemas = dataPartSchemas;
-  }
-  hasChat(id) {
-    return this.chats.has(id);
-  }
-  addChat(id, messages) {
-    this.chats.set(id, {
-      messages,
-      status: "ready",
-      jobExecutor: new SerialJobExecutor()
-    });
-  }
-  getChats() {
-    return Array.from(this.chats.entries());
-  }
-  get chatCount() {
-    return this.chats.size;
-  }
-  getStatus(id) {
-    return this.getChat(id).status;
-  }
-  setStatus({
-    id,
-    status,
-    error
-  }) {
-    const chat = this.getChat(id);
-    if (chat.status === status)
-      return;
-    chat.status = status;
-    chat.error = error;
-    this.emit({ type: "chat-status-changed", chatId: id, error });
-  }
-  getError(id) {
-    return this.getChat(id).error;
-  }
-  getMessages(id) {
-    return this.getChat(id).messages;
-  }
-  getLastMessage(id) {
-    const chat = this.getChat(id);
-    return chat.messages[chat.messages.length - 1];
-  }
-  subscribe(subscriber) {
-    this.subscribers.add(subscriber);
-    return () => this.subscribers.delete(subscriber);
-  }
-  setMessages({
-    id,
-    messages
-  }) {
-    this.getChat(id).messages = [...messages];
-    this.emit({ type: "chat-messages-changed", chatId: id });
-  }
-  removeAssistantResponse(id) {
-    const chat = this.getChat(id);
-    const lastMessage = chat.messages[chat.messages.length - 1];
-    if (lastMessage == null) {
-      throw new Error("Cannot remove assistant response from empty chat");
-    }
-    if (lastMessage.role !== "assistant") {
-      throw new Error("Last message is not an assistant message");
-    }
-    this.setMessages({ id, messages: chat.messages.slice(0, -1) });
-  }
-  async submitMessage({
-    chatId,
-    message,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    var _a17;
-    const chat = this.getChat(chatId);
-    const currentMessages = chat.messages;
-    await this.triggerRequest({
-      chatId,
-      messages: currentMessages.concat({
-        ...message,
-        id: (_a17 = message.id) != null ? _a17 : this.generateId()
-      }),
-      headers,
-      body,
-      requestType: "generate",
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resubmitLastUserMessage({
-    chatId,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    const messages = this.getChat(chatId).messages;
-    const messagesToSubmit = messages[messages.length - 1].role === "assistant" ? messages.slice(0, -1) : messages;
-    if (messagesToSubmit.length === 0) {
-      return;
-    }
-    return this.triggerRequest({
-      chatId,
-      requestType: "generate",
-      messages: messagesToSubmit,
-      headers,
-      body,
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resumeStream({
-    chatId,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    const chat = this.getChat(chatId);
-    const currentMessages = chat.messages;
-    return this.triggerRequest({
-      chatId,
-      messages: currentMessages,
-      requestType: "resume",
-      headers,
-      body,
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async addToolResult({
-    chatId,
-    toolCallId,
-    result
+    dataPartSchemas,
+    state,
+    onError,
+    onToolCall,
+    onFinish
   }) {
-
-
-
-
-
-
-
+    this.subscribers = /* @__PURE__ */ new Set();
+    this.activeResponse = void 0;
+    this.jobExecutor = new SerialJobExecutor();
+    this.removeAssistantResponse = () => {
+      const lastMessage = this.state.messages[this.state.messages.length - 1];
+      if (lastMessage == null) {
+        throw new Error("Cannot remove assistant response from empty chat");
+      }
+      if (lastMessage.role !== "assistant") {
+        throw new Error("Last message is not an assistant message");
+      }
+      this.state.popMessage();
+      this.emit({ type: "messages-changed" });
+    };
+    /**
+     * Append a user message to the chat list. This triggers the API call to fetch
+     * the assistant's response.
+     */
+    this.sendMessage = async (message, options = {}) => {
+      var _a17, _b;
+      let uiMessage;
+      if ("text" in message || "files" in message) {
+        const fileParts = Array.isArray(message.files) ? message.files : await convertFileListToFileUIParts(message.files);
+        uiMessage = {
+          parts: [
+            ...fileParts,
+            ..."text" in message && message.text != null ? [{ type: "text", text: message.text }] : []
+          ]
+        };
+      } else {
+        uiMessage = message;
+      }
+      this.state.pushMessage({
+        ...uiMessage,
+        id: (_a17 = uiMessage.id) != null ? _a17 : this.generateId(),
+        role: (_b = uiMessage.role) != null ? _b : "user"
       });
-      this.
-
+      this.emit({ type: "messages-changed" });
+      await this.triggerRequest({ requestType: "generate", ...options });
+    };
+    /**
+     * Regenerate the last assistant message.
+     */
+    this.reload = async (options = {}) => {
+      if (this.lastMessage === void 0) {
         return;
       }
-
-
-
-
-
+      if (this.lastMessage.role === "assistant") {
+        this.state.popMessage();
+        this.emit({ type: "messages-changed" });
+      }
+      await this.triggerRequest({ requestType: "generate", ...options });
+    };
+    /**
+     * Resume an ongoing chat generation stream. This does not resume an aborted generation.
+     */
+    this.experimental_resume = async (options = {}) => {
+      await this.triggerRequest({ requestType: "resume", ...options });
+    };
+    this.addToolResult = async ({
+      toolCallId,
+      result
+    }) => {
+      this.jobExecutor.run(async () => {
+        updateToolCallResult({
+          messages: this.state.messages,
+          toolCallId,
+          toolResult: result
         });
+        this.messages = this.state.messages;
+        if (this.status === "submitted" || this.status === "streaming") {
+          return;
+        }
+        const lastMessage = this.lastMessage;
+        if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
+          this.triggerRequest({
+            requestType: "generate"
+          });
+        }
+      });
+    };
+    /**
+     * Abort the current request immediately, keep the generated tokens if any.
+     */
+    this.stop = async () => {
+      var _a17;
+      if (this.status !== "streaming" && this.status !== "submitted")
+        return;
+      if ((_a17 = this.activeResponse) == null ? void 0 : _a17.abortController) {
+        this.activeResponse.abortController.abort();
+        this.activeResponse.abortController = void 0;
       }
-    }
+    };
+    this.id = id;
+    this.maxSteps = maxSteps;
+    this.transport = transport;
+    this.generateId = generateId3;
+    this.messageMetadataSchema = messageMetadataSchema;
+    this.dataPartSchemas = dataPartSchemas;
+    this.state = state;
+    this.onError = onError;
+    this.onToolCall = onToolCall;
+    this.onFinish = onFinish;
   }
-
-
-
-
+  /**
+   * Hook status:
+   *
+   * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
+   * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
+   * - `ready`: The full response has been received and processed; a new user message can be submitted.
+   * - `error`: An error occurred during the API request, preventing successful completion.
+   */
+  get status() {
+    return this.state.status;
+  }
+  setStatus({
+    status,
+    error
+  }) {
+    if (this.status === status)
       return;
-
-
-
-
+    this.state.status = status;
+    this.state.error = error;
+    this.emit({ type: "status-changed" });
+  }
+  get error() {
+    return this.state.error;
+  }
+  get messages() {
+    return this.state.messages;
+  }
+  get lastMessage() {
+    return this.state.messages[this.state.messages.length - 1];
+  }
+  subscribe(subscriber) {
+    this.subscribers.add(subscriber);
+    return () => this.subscribers.delete(subscriber);
+  }
+  set messages(messages) {
+    this.state.messages = messages;
+    this.emit({ type: "messages-changed" });
   }
   emit(event) {
     for (const subscriber of this.subscribers) {
-      subscriber.
-    }
-  }
-  getChat(id) {
-    if (!this.hasChat(id)) {
-      throw new Error(`chat '${id}' not found`);
+      subscriber.onChange(event);
     }
-    return this.chats.get(id);
   }
   async triggerRequest({
-    chatId,
-    messages: chatMessages,
     requestType,
+    metadata,
     headers,
-    body
-    onError,
-    onToolCall,
-    onFinish
+    body
  }) {
-
-
-
-
-    const
-    const maxStep = extractMaxToolInvocationStep(
-      getToolInvocations(chatMessages[chatMessages.length - 1])
-    );
+    var _a17, _b;
+    this.setStatus({ status: "submitted", error: void 0 });
+    const messageCount = this.state.messages.length;
+    const lastMessage = this.lastMessage;
+    const maxStep = (_a17 = lastMessage == null ? void 0 : lastMessage.parts.filter((part) => part.type === "step-start").length) != null ? _a17 : 0;
    try {
      const activeResponse = {
        state: createStreamingUIMessageState({
-          lastMessage:
-          newMessageId:
+          lastMessage: this.state.snapshot(lastMessage),
+          newMessageId: this.generateId()
        }),
        abortController: new AbortController()
      };
-
-      const stream = await
-        chatId,
-        messages:
-
+      this.activeResponse = activeResponse;
+      const stream = await this.transport.submitMessages({
+        chatId: this.id,
+        messages: this.state.messages,
+        abortSignal: activeResponse.abortController.signal,
+        metadata,
        headers,
-
+        body,
        requestType
      });
      const runUpdateMessageJob = (job) => (
        // serialize the job execution to avoid race conditions:
-
+        this.jobExecutor.run(
          () => job({
            state: activeResponse.state,
            write: () => {
-
-
-              const
-
-
-
-
-
-
+              var _a18;
+              this.setStatus({ status: "streaming" });
+              const replaceLastMessage = activeResponse.state.message.id === ((_a18 = this.lastMessage) == null ? void 0 : _a18.id);
+              if (replaceLastMessage) {
+                this.state.replaceMessage(
+                  this.state.messages.length - 1,
+                  activeResponse.state.message
+                );
+              } else {
+                this.state.pushMessage(activeResponse.state.message);
+              }
+              this.emit({
+                type: "messages-changed"
              });
            }
          })
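DefaultChatTransport (introduced above) replaces the removed callChatApi/ChatStore request plumbing. Unless prepareRequest overrides them, submitMessages POSTs { ...body, id: chatId, messages } to api with merged headers. A usage sketch based on the constructor and submitMessages signatures visible in the hunk (the header and body values are made up):

const transport = new DefaultChatTransport({
  api: "/api/chat",
  headers: { "x-app-version": "1.0" },
  // Optional per-request hook; fields left undefined in the returned object
  // fall back to the defaults shown in the hunk above.
  prepareRequest: ({ id, messages, body }) => ({
    body: { ...body, id, messages, sentAt: Date.now() }
  })
});

const controller = new AbortController();
const stream = await transport.submitMessages({
  chatId: "chat-1",
  messages: [],
  abortSignal: controller.signal,
  metadata: undefined,
  headers: undefined,
  body: undefined,
  requestType: "generate"
});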
@@ -1867,137 +1740,67 @@ var ChatStore = class {
       await consumeStream({
         stream: processUIMessageStream({
           stream,
-          onToolCall,
-          messageMetadataSchema:
-          dataPartSchemas:
+          onToolCall: this.onToolCall,
+          messageMetadataSchema: this.messageMetadataSchema,
+          dataPartSchemas: this.dataPartSchemas,
           runUpdateMessageJob
         }),
         onError: (error) => {
           throw error;
         }
       });
-      onFinish == null ? void 0 :
-      this.setStatus({
+      (_b = this.onFinish) == null ? void 0 : _b.call(this, { message: activeResponse.state.message });
+      this.setStatus({ status: "ready" });
     } catch (err) {
+      console.error(err);
       if (err.name === "AbortError") {
-        this.setStatus({
+        this.setStatus({ status: "ready" });
         return null;
       }
-      if (onError && err instanceof Error) {
-        onError(err);
+      if (this.onError && err instanceof Error) {
+        this.onError(err);
       }
-      this.setStatus({
+      this.setStatus({ status: "error", error: err });
     } finally {
-
+      this.activeResponse = void 0;
     }
-    const currentMessages = self.getMessages(chatId);
     if (shouldResubmitMessages({
       originalMaxToolInvocationStep: maxStep,
       originalMessageCount: messageCount,
-      maxSteps:
-      messages:
+      maxSteps: this.maxSteps,
+      messages: this.state.messages
     })) {
-      await
-        chatId,
+      await this.triggerRequest({
        requestType,
-
-        onToolCall,
-        onFinish,
+        metadata,
        headers,
-        body
-        messages: currentMessages
+        body
      });
    }
  }
};
-
-
-
-
-
-
-
-
-  streamProtocol,
-  fetch: fetch2,
-  prepareRequestBody
-}) {
-  this.api = api;
-  this.credentials = credentials;
-  this.headers = headers;
-  this.body = body;
-  this.streamProtocol = streamProtocol;
-  this.fetch = fetch2;
-  this.prepareRequestBody = prepareRequestBody;
-}
-submitMessages({
-  chatId,
-  messages,
-  abortController,
-  body,
-  headers,
-  requestType
-}) {
-  var _a17, _b;
-  return fetchUIMessageStream({
-    api: this.api,
-    headers: {
-      ...this.headers,
-      ...headers
-    },
-    body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
-      chatId,
-      messages,
-      ...this.body,
-      ...body
-    })) != null ? _b : {
-      chatId,
-      messages,
-      ...this.body,
-      ...body
-    },
-    streamProtocol: this.streamProtocol,
-    credentials: this.credentials,
-    abortController: () => abortController,
-    fetch: this.fetch,
-    requestType
-  });
-}
-};
-
-// src/ui/convert-file-list-to-file-ui-parts.ts
-async function convertFileListToFileUIParts(files) {
-  if (files == null) {
-    return [];
-  }
-  if (!globalThis.FileList || !(files instanceof globalThis.FileList)) {
-    throw new Error("FileList is not supported in the current environment");
-  }
-  return Promise.all(
-    Array.from(files).map(async (file) => {
-      const { name: name17, type } = file;
-      const dataUrl = await new Promise((resolve, reject) => {
-        const reader = new FileReader();
-        reader.onload = (readerEvent) => {
-          var _a17;
-          resolve((_a17 = readerEvent.target) == null ? void 0 : _a17.result);
-        };
-        reader.onerror = (error) => reject(error);
-        reader.readAsDataURL(file);
-      });
-      return {
-        type: "file",
-        mediaType: type,
-        filename: name17,
-        url: dataUrl
-      };
-    })
+function updateToolCallResult({
+  messages,
+  toolCallId,
+  toolResult: result
+}) {
+  const lastMessage = messages[messages.length - 1];
+  const invocationPart = lastMessage.parts.find(
+    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
   );
+  if (invocationPart == null) {
+    return;
+  }
+  invocationPart.toolInvocation = {
+    ...invocationPart.toolInvocation,
+    state: "result",
+    result
+  };
 }
 
 // src/ui/convert-to-model-messages.ts
 function convertToModelMessages(messages, options) {
-  var _a17
+  var _a17;
   const tools = (_a17 = options == null ? void 0 : options.tools) != null ? _a17 : {};
   const modelMessages = [];
   for (const message of messages) {
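AbstractChat (rewritten above from the removed multi-chat ChatStore) no longer stores messages itself; it delegates to an injected state object. The calls visible in the hunks are pushMessage, popMessage, replaceMessage, and snapshot, plus messages, status, and error fields. A minimal in-memory sketch of such a state, derived from those call sites (this is an assumption for illustration, not the package's exported implementation):

type ChatStatus = "submitted" | "streaming" | "ready" | "error";

class InMemoryChatState<UIMessage extends { id: string }> {
  status: ChatStatus = "ready";
  error: Error | undefined = undefined;
  messages: UIMessage[] = [];

  pushMessage(message: UIMessage) {
    this.messages = [...this.messages, message];
  }
  popMessage() {
    this.messages = this.messages.slice(0, -1);
  }
  replaceMessage(index: number, message: UIMessage) {
    const next = this.messages.slice();
    next[index] = message;
    this.messages = next;
  }
  // Used when seeding a streaming response with the last message.
  snapshot<T>(value: T): T {
    return structuredClone(value);
  }
}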
@@ -2028,6 +1831,9 @@ function convertToModelMessages(messages, options) {
       case "assistant": {
         if (message.parts != null) {
           let processBlock2 = function() {
+            if (block.length === 0) {
+              return;
+            }
             const content = [];
             for (const part of block) {
               switch (part.type) {
@@ -2102,33 +1908,20 @@ function convertToModelMessages(messages, options) {
               });
             }
             block = [];
-            blockHasToolInvocations = false;
-            currentStep++;
           };
           var processBlock = processBlock2;
-          let currentStep = 0;
-          let blockHasToolInvocations = false;
           let block = [];
           for (const part of message.parts) {
             switch (part.type) {
-              case "text":
-
-                  processBlock2();
-                }
-                block.push(part);
-                break;
-              }
+              case "text":
+              case "reasoning":
               case "file":
-              case "
+              case "tool-invocation": {
                 block.push(part);
                 break;
               }
-              case "
-
-                  processBlock2();
-                }
-                block.push(part);
-                blockHasToolInvocations = true;
+              case "step-start": {
+                processBlock2();
                 break;
               }
             }
@@ -2147,51 +1940,170 @@ function convertToModelMessages(messages, options) {
       }
     }
   }
-  return modelMessages;
-}
-var convertToCoreMessages = convertToModelMessages;
+  return modelMessages;
+}
+var convertToCoreMessages = convertToModelMessages;
+
+// src/ui/transform-text-to-ui-message-stream.ts
+function transformTextToUiMessageStream({
+  stream
+}) {
+  return stream.pipeThrough(
+    new TransformStream({
+      start(controller) {
+        controller.enqueue({ type: "start" });
+        controller.enqueue({ type: "start-step" });
+      },
+      async transform(part, controller) {
+        controller.enqueue({ type: "text", text: part });
+      },
+      async flush(controller) {
+        controller.enqueue({ type: "finish-step" });
+        controller.enqueue({ type: "finish" });
+      }
+    })
+  );
+}
+
+// src/ui/text-stream-chat-transport.ts
+var getOriginalFetch3 = () => fetch;
+async function fetchTextStream({
+  api,
+  body,
+  credentials,
+  headers,
+  abortSignal,
+  fetch: fetch2 = getOriginalFetch3(),
+  requestType = "generate"
+}) {
+  var _a17;
+  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: abortSignal,
+    credentials
+  }) : await fetch2(api, {
+    method: "POST",
+    body: JSON.stringify(body),
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: abortSignal,
+    credentials
+  });
+  if (!response.ok) {
+    throw new Error(
+      (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+    );
+  }
+  if (!response.body) {
+    throw new Error("The response body is empty.");
+  }
+  return transformTextToUiMessageStream({
+    stream: response.body.pipeThrough(new TextDecoderStream())
+  });
+}
+var TextStreamChatTransport = class {
+  constructor({
+    api,
+    credentials,
+    headers,
+    body,
+    fetch: fetch2,
+    prepareRequest
+  }) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequest = prepareRequest;
+  }
+  submitMessages({
+    chatId,
+    messages,
+    abortSignal,
+    metadata,
+    headers,
+    body,
+    requestType
+  }) {
+    var _a17, _b;
+    const preparedRequest = (_a17 = this.prepareRequest) == null ? void 0 : _a17.call(this, {
+      id: chatId,
+      messages,
+      body: { ...this.body, ...body },
+      headers: { ...this.headers, ...headers },
+      credentials: this.credentials,
+      requestMetadata: metadata
+    });
+    return fetchTextStream({
+      api: this.api,
+      body: (preparedRequest == null ? void 0 : preparedRequest.body) !== void 0 ? preparedRequest.body : { ...this.body, ...body },
+      headers: (preparedRequest == null ? void 0 : preparedRequest.headers) !== void 0 ? preparedRequest.headers : { ...this.headers, ...headers },
+      credentials: (_b = preparedRequest == null ? void 0 : preparedRequest.credentials) != null ? _b : this.credentials,
+      abortSignal,
+      fetch: this.fetch,
+      requestType
+    });
+  }
+};
 
-// src/ui/
-
-
-
-
-
-  fetch: fetch2,
-  streamProtocol = "ui-message",
-  credentials,
-  headers,
-  body,
-  prepareRequestBody,
-  generateId: generateId3 = generateIdFunc2,
-  dataPartSchemas,
-  messageMetadataSchema,
-  maxSteps: maxSteps2 = 1,
-  chats
+// src/ui-message-stream/handle-ui-message-stream-finish.ts
+function handleUIMessageStreamFinish({
+  newMessageId,
+  originalMessages = [],
+  onFinish,
+  stream
 }) {
-
-
-
-
-
-
-
-
-
-  }),
-  generateId: generateId3,
-  messageMetadataSchema,
-  dataPartSchemas,
-  maxSteps: maxSteps2,
-  chats
+  if (onFinish == null) {
+    return stream;
+  }
+  const lastMessage = originalMessages[originalMessages.length - 1];
+  const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
+  const messageId = isContinuation ? lastMessage.id : newMessageId;
+  const state = createStreamingUIMessageState({
+    lastMessage: structuredClone(lastMessage),
+    newMessageId: messageId
   });
+  const runUpdateMessageJob = async (job) => {
+    await job({ state, write: () => {
+    } });
+  };
+  return processUIMessageStream({
+    stream,
+    runUpdateMessageJob
+  }).pipeThrough(
+    new TransformStream({
+      transform(chunk, controller) {
+        controller.enqueue(chunk);
+      },
+      flush() {
+        const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
+        onFinish({
+          isContinuation: isContinuation2,
+          responseMessage: state.message,
+          messages: [
+            ...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
+            state.message
+          ]
+        });
+      }
+    })
+  );
 }
 
 // src/ui-message-stream/create-ui-message-stream.ts
 function createUIMessageStream({
   execute,
-  onError = () => "An error occurred."
+  onError = () => "An error occurred.",
   // mask error messages for safety by default
+  originalMessages,
+  onFinish
 }) {
   let controller;
   const ongoingStreamPromises = [];
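text-stream-chat-transport.ts (added above) adapts a plain text response into the UI message stream protocol by bracketing the text chunks with start/start-step and finish-step/finish events. A small sketch that feeds the transform a hand-made stream:

const textStream = new ReadableStream<string>({
  start(controller) {
    controller.enqueue("Hello, ");
    controller.enqueue("world!");
    controller.close();
  }
});

// Emits: { type: "start" }, { type: "start-step" },
// { type: "text", text: "Hello, " }, { type: "text", text: "world!" },
// { type: "finish-step" }, { type: "finish" }
const reader = transformTextToUiMessageStream({ stream: textStream }).getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log(value);
}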
@@ -2208,25 +2120,27 @@ function createUIMessageStream({
   }
   try {
     const result = execute({
-
-
-
-
-
-      (
-
-
-
-
-
-
-
-
-
-
-
-
+      writer: {
+        write(part) {
+          safeEnqueue(part);
+        },
+        merge(streamArg) {
+          ongoingStreamPromises.push(
+            (async () => {
+              const reader = streamArg.getReader();
+              while (true) {
+                const { done, value } = await reader.read();
+                if (done)
+                  break;
+                safeEnqueue(value);
+              }
+            })().catch((error) => {
+              safeEnqueue({ type: "error", errorText: onError(error) });
+            })
+          );
+        },
+        onError
+      }
     });
     if (result) {
       ongoingStreamPromises.push(
@@ -2250,7 +2164,12 @@ function createUIMessageStream({
     } catch (error) {
     }
   });
-  return
+  return handleUIMessageStreamFinish({
+    stream,
+    newMessageId: "",
+    originalMessages,
+    onFinish
+  });
 }
 
 // src/ui-message-stream/ui-message-stream-headers.ts
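createUIMessageStream now takes originalMessages and an onFinish callback, routed through handleUIMessageStreamFinish above, so callers can observe the fully assembled response message. A usage sketch following the option shapes in these hunks (previousMessages is assumed to hold the chat's current UIMessage array; alpha APIs may still shift):

const stream = createUIMessageStream({
  originalMessages: previousMessages,
  execute({ writer }) {
    writer.write({ type: "start" });
    writer.write({ type: "start-step" });
    writer.write({ type: "text", text: "Hello!" });
    writer.write({ type: "finish-step" });
    writer.write({ type: "finish" });
  },
  onFinish({ messages, responseMessage, isContinuation }) {
    // e.g. persist the updated conversation
    console.log(isContinuation, responseMessage.id, messages.length);
  }
});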
@@ -2315,6 +2234,32 @@ function pipeUIMessageStreamToResponse({
   });
 }
 
+// src/util/cosine-similarity.ts
+function cosineSimilarity(vector1, vector2) {
+  if (vector1.length !== vector2.length) {
+    throw new InvalidArgumentError({
+      parameter: "vector1,vector2",
+      value: { vector1Length: vector1.length, vector2Length: vector2.length },
+      message: `Vectors must have the same length`
+    });
+  }
+  const n = vector1.length;
+  if (n === 0) {
+    return 0;
+  }
+  let magnitudeSquared1 = 0;
+  let magnitudeSquared2 = 0;
+  let dotProduct = 0;
+  for (let i = 0; i < n; i++) {
+    const value1 = vector1[i];
+    const value2 = vector2[i];
+    magnitudeSquared1 += value1 * value1;
+    magnitudeSquared2 += value2 * value2;
+    dotProduct += value1 * value2;
+  }
+  return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
+}
+
 // src/util/data-url.ts
 function getTextFromDataUrl(dataUrl) {
   const [header, base64Content] = dataUrl.split(",");
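cosineSimilarity (relocated earlier in the bundle here; the implementation is unchanged) computes dot(v1, v2) / (|v1| * |v2|), returning 0 for empty or zero-magnitude vectors and throwing InvalidArgumentError on a length mismatch. Quick reference values:

cosineSimilarity([1, 0], [1, 0]);    // 1  (same direction)
cosineSimilarity([1, 0], [0, 1]);    // 0  (orthogonal)
cosineSimilarity([1, 0], [-1, 0]);   // -1 (opposite direction)
cosineSimilarity([], []);            // 0  (defined as 0 for empty vectors)
cosineSimilarity([1, 2], [1, 2, 3]); // throws InvalidArgumentError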
@@ -2364,32 +2309,6 @@ function isDeepEqualData(obj1, obj2) {
   return true;
 }
 
-// src/util/cosine-similarity.ts
-function cosineSimilarity(vector1, vector2) {
-  if (vector1.length !== vector2.length) {
-    throw new InvalidArgumentError({
-      parameter: "vector1,vector2",
-      value: { vector1Length: vector1.length, vector2Length: vector2.length },
-      message: `Vectors must have the same length`
-    });
-  }
-  const n = vector1.length;
-  if (n === 0) {
-    return 0;
-  }
-  let magnitudeSquared1 = 0;
-  let magnitudeSquared2 = 0;
-  let dotProduct = 0;
-  for (let i = 0; i < n; i++) {
-    const value1 = vector1[i];
-    const value2 = vector2[i];
-    magnitudeSquared1 += value1 * value1;
-    magnitudeSquared2 += value2 * value2;
-    dotProduct += value1 * value2;
-  }
-  return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
-}
-
 // src/util/simulate-readable-stream.ts
 import { delay as delayFunction } from "@ai-sdk/provider-utils";
 function simulateReadableStream({
@@ -3394,6 +3313,15 @@ function convertToLanguageModelV2DataContent(content) {
|
|
3394
3313
|
}
|
3395
3314
|
return { data: content, mediaType: void 0 };
|
3396
3315
|
}
|
3316
|
+
function convertDataContentToBase64String(content) {
|
3317
|
+
if (typeof content === "string") {
|
3318
|
+
return content;
|
3319
|
+
}
|
3320
|
+
if (content instanceof ArrayBuffer) {
|
3321
|
+
return convertUint8ArrayToBase642(new Uint8Array(content));
|
3322
|
+
}
|
3323
|
+
return convertUint8ArrayToBase642(content);
|
3324
|
+
}
|
3397
3325
|
function convertDataContentToUint8Array(content) {
|
3398
3326
|
if (content instanceof Uint8Array) {
|
3399
3327
|
return content;
|
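
Usage note: convertDataContentToBase64String normalizes string, ArrayBuffer,
and Uint8Array file data to a base64 string. It is internal to the bundle; a
standalone equivalent (using Node's Buffer for brevity) would look like this:

    function toBase64String(content: string | Uint8Array | ArrayBuffer): string {
      if (typeof content === "string") {
        return content; // assumed to already be base64-encoded
      }
      const bytes = content instanceof ArrayBuffer ? new Uint8Array(content) : content;
      return Buffer.from(bytes).toString("base64");
    }

    toBase64String(new Uint8Array([72, 105])); // "SGk=" ("Hi")
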
@@ -3720,6 +3648,19 @@ function prepareCallSettings({
   };
 }

+// core/prompt/resolve-language-model.ts
+import { gateway } from "@ai-sdk/gateway";
+var GLOBAL_DEFAULT_PROVIDER = Symbol(
+  "vercel.ai.global.defaultProvider"
+);
+function resolveLanguageModel(model) {
+  if (typeof model !== "string") {
+    return model;
+  }
+  const globalProvider = globalThis[GLOBAL_DEFAULT_PROVIDER];
+  return (globalProvider != null ? globalProvider : gateway).languageModel(model);
+}
+
 // core/prompt/standardize-prompt.ts
 import { InvalidPromptError as InvalidPromptError2 } from "@ai-sdk/provider";
 import { safeValidateTypes } from "@ai-sdk/provider-utils";
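
Usage note: resolveLanguageModel is what lets every generate/stream function
in this release accept a plain string model id. Strings resolve through a
global default provider when one has been registered on globalThis (the
registration helper is not part of this diff), and fall back to the Vercel
AI Gateway otherwise. A sketch with an illustrative gateway model id:

    import { generateText } from "ai";

    const { text } = await generateText({
      model: "openai/gpt-4o", // string id, resolved via resolveLanguageModel
      prompt: "Hello!"
    });
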
@@ -3911,6 +3852,38 @@ async function standardizePrompt(prompt) {
   };
 }

+// core/prompt/wrap-gateway-error.ts
+import {
+  GatewayAuthenticationError,
+  GatewayModelNotFoundError
+} from "@ai-sdk/gateway";
+import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
+function wrapGatewayError(error) {
+  if (GatewayAuthenticationError.isInstance(error) || GatewayModelNotFoundError.isInstance(error)) {
+    return new AISDKError18({
+      name: "GatewayError",
+      message: "Vercel AI Gateway access failed. If you want to use AI SDK providers directly, use the providers, e.g. @ai-sdk/openai, or register a different global default provider.",
+      cause: error
+    });
+  }
+  return error;
+}
+
+// core/telemetry/stringify-for-telemetry.ts
+function stringifyForTelemetry(prompt) {
+  return JSON.stringify(
+    prompt.map((message) => ({
+      ...message,
+      content: typeof message.content === "string" ? message.content : message.content.map(
+        (part) => part.type === "file" ? {
+          ...part,
+          data: part.data instanceof Uint8Array ? convertDataContentToBase64String(part.data) : part.data
+        } : part
+      )
+    }))
+  );
+}
+
 // core/generate-object/output-strategy.ts
 import {
   isJSONArray,
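
Usage note: the two helpers above feed the telemetry and error paths below.
wrapGatewayError folds gateway authentication and model-not-found failures
into a single "GatewayError" AISDKError, and stringifyForTelemetry
base64-encodes binary file parts before serializing a prompt. A sketch of
the difference (the message shape is illustrative):

    const prompt = [{
      role: "user",
      content: [
        { type: "text", text: "Describe this image" },
        { type: "file", mediaType: "image/png", data: new Uint8Array([137, 80]) }
      ]
    }];
    // JSON.stringify(prompt) would expand the bytes to {"0":137,"1":80};
    // stringifyForTelemetry(prompt) emits the file data as base64 instead,
    // keeping recorded span attributes compact.
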
@@ -4316,7 +4289,7 @@ function validateObjectGenerationInput({
 var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
 async function generateObject(options) {
   const {
-    model,
+    model: modelArg,
     output = "object",
     system,
     prompt,
@@ -4333,6 +4306,7 @@ async function generateObject(options) {
     } = {},
     ...settings
   } = options;
+  const model = resolveLanguageModel(modelArg);
   const enumValues = "enum" in options ? options.enum : void 0;
   const {
     schema: inputSchema,
@@ -4360,208 +4334,212 @@ async function generateObject(options) {
     settings: { ...callSettings, maxRetries }
   });
   const tracer = getTracer(telemetry);
-
-
-
-
-
-
-
-
-
-        ...baseTelemetryAttributes,
-        // specific settings that only make sense on the outer level:
-        "ai.prompt": {
-          input: () => JSON.stringify({ system, prompt, messages })
-        },
-        "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
-        "ai.schema.name": schemaName,
-        "ai.schema.description": schemaDescription,
-        "ai.settings.output": outputStrategy.type
-      }
-    }),
-    tracer,
-    fn: async (span) => {
-      var _a17;
-      let result;
-      let finishReason;
-      let usage;
-      let warnings;
-      let response;
-      let request;
-      let resultProviderMetadata;
-      const standardizedPrompt = await standardizePrompt({
-        system,
-        prompt,
-        messages
-      });
-      const promptMessages = await convertToLanguageModelPrompt({
-        prompt: standardizedPrompt,
-        supportedUrls: await model.supportedUrls
-      });
-      const generateResult = await retry(
-        () => recordSpan({
-          name: "ai.generateObject.doGenerate",
-          attributes: selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              ...assembleOperationName({
-                operationId: "ai.generateObject.doGenerate",
-                telemetry
-              }),
-              ...baseTelemetryAttributes,
-              "ai.prompt.messages": {
-                input: () => JSON.stringify(promptMessages)
-              },
-              // standardized gen-ai llm span attributes:
-              "gen_ai.system": model.provider,
-              "gen_ai.request.model": model.modelId,
-              "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
-              "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
-              "gen_ai.request.presence_penalty": callSettings.presencePenalty,
-              "gen_ai.request.temperature": callSettings.temperature,
-              "gen_ai.request.top_k": callSettings.topK,
-              "gen_ai.request.top_p": callSettings.topP
-            }
+  try {
+    return await recordSpan({
+      name: "ai.generateObject",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.generateObject",
+            telemetry
           }),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+          ...baseTelemetryAttributes,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
+          },
+          "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+          "ai.schema.name": schemaName,
+          "ai.schema.description": schemaDescription,
+          "ai.settings.output": outputStrategy.type
+        }
+      }),
+      tracer,
+      fn: async (span) => {
+        var _a17;
+        let result;
+        let finishReason;
+        let usage;
+        let warnings;
+        let response;
+        let request;
+        let resultProviderMetadata;
+        const standardizedPrompt = await standardizePrompt({
+          system,
+          prompt,
+          messages
+        });
+        const promptMessages = await convertToLanguageModelPrompt({
+          prompt: standardizedPrompt,
+          supportedUrls: await model.supportedUrls
+        });
+        const generateResult = await retry(
+          () => recordSpan({
+            name: "ai.generateObject.doGenerate",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.generateObject.doGenerate",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.messages": {
+                  input: () => stringifyForTelemetry(promptMessages)
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": model.provider,
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+                "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+                "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+                "gen_ai.request.temperature": callSettings.temperature,
+                "gen_ai.request.top_k": callSettings.topK,
+                "gen_ai.request.top_p": callSettings.topP
+              }
+            }),
+            tracer,
+            fn: async (span2) => {
+              var _a18, _b, _c, _d, _e, _f, _g, _h;
+              const result2 = await model.doGenerate({
+                responseFormat: {
+                  type: "json",
+                  schema: outputStrategy.jsonSchema,
+                  name: schemaName,
+                  description: schemaDescription
+                },
+                ...prepareCallSettings(settings),
+                prompt: promptMessages,
+                providerOptions,
+                abortSignal,
+                headers
               });
+              const responseData = {
+                id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+                timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+                modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+                headers: (_g = result2.response) == null ? void 0 : _g.headers,
+                body: (_h = result2.response) == null ? void 0 : _h.body
+              };
+              const text2 = extractContentText(result2.content);
+              if (text2 === void 0) {
+                throw new NoObjectGeneratedError({
+                  message: "No object generated: the model did not return a response.",
+                  response: responseData,
+                  usage: result2.usage,
+                  finishReason: result2.finishReason
+                });
+              }
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.response.finishReason": result2.finishReason,
+                    "ai.response.object": { output: () => text2 },
+                    "ai.response.id": responseData.id,
+                    "ai.response.model": responseData.modelId,
+                    "ai.response.timestamp": responseData.timestamp.toISOString(),
+                    // TODO rename telemetry attributes to inputTokens and outputTokens
+                    "ai.usage.promptTokens": result2.usage.inputTokens,
+                    "ai.usage.completionTokens": result2.usage.outputTokens,
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.response.finish_reasons": [result2.finishReason],
+                    "gen_ai.response.id": responseData.id,
+                    "gen_ai.response.model": responseData.modelId,
+                    "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+                    "gen_ai.usage.output_tokens": result2.usage.outputTokens
+                  }
+                })
+              );
+              return { ...result2, objectText: text2, responseData };
             }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      );
-      return { ...result2, objectText: text2, responseData };
+          })
+        );
+        result = generateResult.objectText;
+        finishReason = generateResult.finishReason;
+        usage = generateResult.usage;
+        warnings = generateResult.warnings;
+        resultProviderMetadata = generateResult.providerMetadata;
+        request = (_a17 = generateResult.request) != null ? _a17 : {};
+        response = generateResult.responseData;
+        async function processResult(result2) {
+          const parseResult = await safeParseJSON2({ text: result2 });
+          if (!parseResult.success) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: could not parse the response.",
+              cause: parseResult.error,
+              text: result2,
+              response,
+              usage,
+              finishReason
+            });
           }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-          usage,
-          finishReason
-        });
-      }
-      const validationResult = await outputStrategy.validateFinalResult(
-        parseResult.value,
-        {
-          text: result2,
-          response,
-          usage
+          const validationResult = await outputStrategy.validateFinalResult(
+            parseResult.value,
+            {
+              text: result2,
+              response,
+              usage
+            }
+          );
+          if (!validationResult.success) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: response did not match schema.",
+              cause: validationResult.error,
+              text: result2,
+              response,
+              usage,
+              finishReason
+            });
           }
-
-      if (!validationResult.success) {
-        throw new NoObjectGeneratedError({
-          message: "No object generated: response did not match schema.",
-          cause: validationResult.error,
-          text: result2,
-          response,
-          usage,
-          finishReason
-        });
+          return validationResult.value;
         }
-
-
-
-
-
-
-
-
-
-
-
-
+        let object2;
+        try {
+          object2 = await processResult(result);
+        } catch (error) {
+          if (repairText != null && NoObjectGeneratedError.isInstance(error) && (JSONParseError2.isInstance(error.cause) || TypeValidationError3.isInstance(error.cause))) {
+            const repairedText = await repairText({
+              text: result,
+              error: error.cause
+            });
+            if (repairedText === null) {
+              throw error;
+            }
+            object2 = await processResult(repairedText);
+          } else {
             throw error;
           }
-          object2 = await processResult(repairedText);
-        } else {
-          throw error;
         }
+        span.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.response.finishReason": finishReason,
+              "ai.response.object": {
+                output: () => JSON.stringify(object2)
+              },
+              // TODO rename telemetry attributes to inputTokens and outputTokens
+              "ai.usage.promptTokens": usage.inputTokens,
+              "ai.usage.completionTokens": usage.outputTokens
+            }
+          })
+        );
+        return new DefaultGenerateObjectResult({
+          object: object2,
+          finishReason,
+          usage,
+          warnings,
+          request,
+          response,
+          providerMetadata: resultProviderMetadata
+        });
       }
-
-
-
-
-          "ai.response.finishReason": finishReason,
-          "ai.response.object": {
-            output: () => JSON.stringify(object2)
-          },
-          // TODO rename telemetry attributes to inputTokens and outputTokens
-          "ai.usage.promptTokens": usage.inputTokens,
-          "ai.usage.completionTokens": usage.outputTokens
-        }
-      })
-      );
-      return new DefaultGenerateObjectResult({
-        object: object2,
-        finishReason,
-        usage,
-        warnings,
-        request,
-        response,
-        providerMetadata: resultProviderMetadata
-      });
-    }
-  });
+    });
+  } catch (error) {
+    throw wrapGatewayError(error);
+  }
 }
 var DefaultGenerateObjectResult = class {
   constructor(options) {
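
Usage note: generateObject is now wrapped in try/catch + wrapGatewayError,
resolves string model ids up front, and keeps the parse/validate/repair flow
shown above. A sketch of the repair hook (the public option name
experimental_repairText is an assumption; internally the diff calls it
repairText):

    import { generateObject } from "ai";
    import { z } from "zod";

    const { object } = await generateObject({
      model: "openai/gpt-4o", // illustrative string id
      schema: z.object({ city: z.string(), population: z.number() }),
      prompt: "Give me facts about Berlin as JSON.",
      // called when JSON parsing or schema validation fails; return a
      // fixed-up string to retry processResult, or null to rethrow
      experimental_repairText: async ({ text, error }) =>
        text.replace(/```(?:json)?/g, "")
    });
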
@@ -4585,7 +4563,9 @@ var DefaultGenerateObjectResult = class {
 };

 // core/generate-object/stream-object.ts
-import {
+import {
+  createIdGenerator as createIdGenerator2
+} from "@ai-sdk/provider-utils";

 // src/util/create-resolvable-promise.ts
 function createResolvablePromise() {
@@ -4691,11 +4671,11 @@ var DelayedPromise = class {
     this._resolve = void 0;
     this._reject = void 0;
   }
-  get
-    if (this.
-      return this.
+  get promise() {
+    if (this._promise) {
+      return this._promise;
     }
-    this.
+    this._promise = new Promise((resolve, reject) => {
       if (this.status.type === "resolved") {
         resolve(this.status.value);
       } else if (this.status.type === "rejected") {
@@ -4704,19 +4684,19 @@ var DelayedPromise = class {
       this._resolve = resolve;
       this._reject = reject;
     });
-    return this.
+    return this._promise;
   }
   resolve(value) {
     var _a17;
     this.status = { type: "resolved", value };
-    if (this.
+    if (this._promise) {
       (_a17 = this._resolve) == null ? void 0 : _a17.call(this, value);
     }
   }
   reject(error) {
     var _a17;
     this.status = { type: "rejected", error };
-    if (this.
+    if (this._promise) {
       (_a17 = this._reject) == null ? void 0 : _a17.call(this, error);
     }
   }
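
Usage note: the DelayedPromise fields move to underscore-prefixed properties,
and the backing Promise is created lazily on first access to .promise, so a
value resolved or rejected before anyone subscribes cannot trigger an
unhandled-rejection warning. A self-contained TypeScript sketch of the
pattern:

    class DelayedPromise<T> {
      private status:
        | { type: "pending" }
        | { type: "resolved"; value: T }
        | { type: "rejected"; error: unknown } = { type: "pending" };
      private _promise?: Promise<T>;
      private _resolve?: (value: T) => void;
      private _reject?: (error: unknown) => void;

      get promise(): Promise<T> {
        if (this._promise) return this._promise;
        this._promise = new Promise<T>((resolve, reject) => {
          if (this.status.type === "resolved") resolve(this.status.value);
          else if (this.status.type === "rejected") reject(this.status.error);
          else {
            this._resolve = resolve;
            this._reject = reject;
          }
        });
        return this._promise;
      }

      resolve(value: T): void {
        this.status = { type: "resolved", value };
        this._resolve?.(value); // no-op if nobody has read .promise yet
      }

      reject(error: unknown): void {
        this.status = { type: "rejected", error };
        this._reject?.(error);
      }
    }
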
@@ -4742,7 +4722,9 @@ function streamObject(options) {
   headers,
   experimental_telemetry: telemetry,
   providerOptions,
-  onError
+  onError = ({ error }) => {
+    console.error(error);
+  },
   onFinish,
   _internal: {
     generateId: generateId3 = originalGenerateId2,
@@ -4792,7 +4774,7 @@ function streamObject(options) {
 }
 var DefaultStreamObjectResult = class {
   constructor({
-    model,
+    model: modelArg,
     headers,
     telemetry,
     settings,
@@ -4811,12 +4793,13 @@ var DefaultStreamObjectResult = class {
     currentDate,
     now: now2
   }) {
-    this.
-    this.
-    this.
-    this.
-    this.
-    this.
+    this._object = new DelayedPromise();
+    this._usage = new DelayedPromise();
+    this._providerMetadata = new DelayedPromise();
+    this._warnings = new DelayedPromise();
+    this._request = new DelayedPromise();
+    this._response = new DelayedPromise();
+    const model = resolveLanguageModel(modelArg);
     const { maxRetries, retry } = prepareRetries({
       maxRetries: maxRetriesArg
     });
@@ -4834,7 +4817,7 @@ var DefaultStreamObjectResult = class {
       transform(chunk, controller) {
         controller.enqueue(chunk);
         if (chunk.type === "error") {
-          onError
+          onError({ error: wrapGatewayError(chunk.error) });
         }
       }
     });
@@ -4913,7 +4896,7 @@ var DefaultStreamObjectResult = class {
             }),
             ...baseTelemetryAttributes,
             "ai.prompt.messages": {
-              input: () =>
+              input: () => stringifyForTelemetry(callOptions.prompt)
             },
             // standardized gen-ai llm span attributes:
             "gen_ai.system": model.provider,
@@ -4935,7 +4918,7 @@ var DefaultStreamObjectResult = class {
           })
         })
       );
-      self.
+      self._request.resolve(request != null ? request : {});
       let warnings;
       let usage = {
         inputTokens: void 0,
@@ -5028,9 +5011,9 @@ var DefaultStreamObjectResult = class {
               usage,
               response: fullResponse
             });
-            self.
-            self.
-            self.
+            self._usage.resolve(usage);
+            self._providerMetadata.resolve(providerMetadata);
+            self._response.resolve({
               ...fullResponse,
               headers: response == null ? void 0 : response.headers
             });
@@ -5044,7 +5027,7 @@ var DefaultStreamObjectResult = class {
             );
             if (validationResult.success) {
               object2 = validationResult.value;
-              self.
+              self._object.resolve(object2);
             } else {
               error = new NoObjectGeneratedError({
                 message: "No object generated: response did not match schema.",
@@ -5054,7 +5037,7 @@ var DefaultStreamObjectResult = class {
                 usage,
                 finishReason
               });
-              self.
+              self._object.reject(error);
             }
             break;
           }
@@ -5149,22 +5132,22 @@ var DefaultStreamObjectResult = class {
     this.outputStrategy = outputStrategy;
   }
   get object() {
-    return this.
+    return this._object.promise;
   }
   get usage() {
-    return this.
+    return this._usage.promise;
   }
   get providerMetadata() {
-    return this.
+    return this._providerMetadata.promise;
  }
  get warnings() {
-    return this.
+    return this._warnings.promise;
  }
  get request() {
-    return this.
+    return this._request.promise;
  }
  get response() {
-    return this.
+    return this._response.promise;
  }
  get partialObjectStream() {
    return createAsyncIterableStream(
@@ -5234,8 +5217,8 @@ var DefaultStreamObjectResult = class {
 };

 // src/error/no-speech-generated-error.ts
-import { AISDKError as
-var NoSpeechGeneratedError = class extends
+import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
+var NoSpeechGeneratedError = class extends AISDKError19 {
   constructor(options) {
     super({
       name: "AI_NoSpeechGeneratedError",
@@ -5326,6 +5309,11 @@ var DefaultSpeechResult = class {
 // core/generate-text/generate-text.ts
 import { createIdGenerator as createIdGenerator3 } from "@ai-sdk/provider-utils";

+// src/util/as-array.ts
+function asArray(value) {
+  return value === void 0 ? [] : Array.isArray(value) ? value : [value];
+}
+
 // core/prompt/prepare-tools-and-tool-choice.ts
 import { asSchema as asSchema2 } from "@ai-sdk/provider-utils";

@@ -5547,8 +5535,8 @@ var DefaultStepResult = class {
 };

 // core/generate-text/stop-condition.ts
-function
-  return ({ steps }) => steps.length
+function stepCountIs(stepCount) {
+  return ({ steps }) => steps.length === stepCount;
 }
 function hasToolCall(toolName) {
   return ({ steps }) => {
@@ -5558,6 +5546,12 @@ function hasToolCall(toolName) {
     )) != null ? _c : false;
   };
 }
+async function isStopConditionMet({
+  stopConditions,
+  steps
+}) {
+  return (await Promise.all(stopConditions.map((condition) => condition({ steps })))).some((result) => result);
+}

 // core/generate-text/to-response-messages.ts
 function toResponseMessages({
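
Usage note: stepCountIs returns a condition that is met once steps.length
equals the given count, and isStopConditionMet evaluates an array of such
conditions in parallel, stopping when any of them reports true. Conditions
compose like this (the model id and tools object are illustrative):

    import { generateText, stepCountIs, hasToolCall } from "ai";

    const result = await generateText({
      model: "openai/gpt-4o",
      tools: myTools, // assumed to be defined elsewhere
      // stop after 5 steps, or as soon as the finalAnswer tool is called
      stopWhen: [stepCountIs(5), hasToolCall("finalAnswer")],
      prompt: "Plan and execute the task."
    });
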
@@ -5623,7 +5617,7 @@ var originalGenerateId3 = createIdGenerator3({
   size: 24
 });
 async function generateText({
-  model,
+  model: modelArg,
   tools,
   toolChoice,
   system,
@@ -5632,12 +5626,14 @@ async function generateText({
   maxRetries: maxRetriesArg,
   abortSignal,
   headers,
-
+  stopWhen = stepCountIs(1),
   experimental_output: output,
   experimental_telemetry: telemetry,
   providerOptions,
-  experimental_activeTools
-
+  experimental_activeTools,
+  activeTools = experimental_activeTools,
+  experimental_prepareStep,
+  prepareStep = experimental_prepareStep,
   experimental_repairToolCall: repairToolCall,
   _internal: {
     generateId: generateId3 = originalGenerateId3,
@@ -5646,6 +5642,8 @@ async function generateText({
   onStepFinish,
   ...settings
 }) {
+  const model = resolveLanguageModel(modelArg);
+  const stopConditions = asArray(stopWhen);
   const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
   const callSettings = prepareCallSettings(settings);
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
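
Usage note: generateText now defaults stopWhen to stepCountIs(1), resolves
string model ids up front, and promotes activeTools and prepareStep out of
their experimental_ aliases (the old names keep working as fallbacks). A
sketch of per-step control (identifiers are illustrative):

    import { generateText, stepCountIs } from "ai";

    const result = await generateText({
      model: "openai/gpt-4o",
      tools: myTools, // assumed to be defined elsewhere
      stopWhen: stepCountIs(3),
      // runs before every step; may override model, system, toolChoice,
      // or activeTools for just that step
      prepareStep: ({ steps, stepNumber }) => ({
        activeTools: stepNumber === 0 ? ["search"] : void 0
      }),
      prompt: "Research the topic, then answer."
    });
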
@@ -5660,237 +5658,243 @@ async function generateText({
     messages
   });
   const tracer = getTracer(telemetry);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    }
-  }),
-  tracer,
-  fn: async (span) => {
-    var _a17, _b, _c, _d;
-    const callSettings2 = prepareCallSettings(settings);
-    let currentModelResponse;
-    let currentToolCalls = [];
-    let currentToolResults = [];
-    const responseMessages = [];
-    const steps = [];
-    do {
-      const stepInputMessages = [
-        ...initialPrompt.messages,
-        ...responseMessages
-      ];
-      const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
-        model,
-        steps,
-        stepNumber: steps.length
-      }));
-      const promptMessages = await convertToLanguageModelPrompt({
-        prompt: {
-          system: initialPrompt.system,
-          messages: stepInputMessages
-        },
-        supportedUrls: await model.supportedUrls
-      });
-      const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
-      const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
-        tools,
-        toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
-        activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
-      });
-      currentModelResponse = await retry(
-        () => {
-          var _a18;
-          return recordSpan({
-            name: "ai.generateText.doGenerate",
-            attributes: selectTelemetryAttributes({
-              telemetry,
-              attributes: {
-                ...assembleOperationName({
-                  operationId: "ai.generateText.doGenerate",
-                  telemetry
-                }),
-                ...baseTelemetryAttributes,
-                // model:
-                "ai.model.provider": stepModel.provider,
-                "ai.model.id": stepModel.modelId,
-                // prompt:
-                "ai.prompt.messages": {
-                  input: () => JSON.stringify(promptMessages)
-                },
-                "ai.prompt.tools": {
-                  // convert the language model level tools:
-                  input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
-                },
-                "ai.prompt.toolChoice": {
-                  input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
-                },
-                // standardized gen-ai llm span attributes:
-                "gen_ai.system": stepModel.provider,
-                "gen_ai.request.model": stepModel.modelId,
-                "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-                "gen_ai.request.max_tokens": settings.maxOutputTokens,
-                "gen_ai.request.presence_penalty": settings.presencePenalty,
-                "gen_ai.request.stop_sequences": settings.stopSequences,
-                "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
-                "gen_ai.request.top_k": settings.topK,
-                "gen_ai.request.top_p": settings.topP
-              }
-            }),
-            tracer,
-            fn: async (span2) => {
-              var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
-              const result = await stepModel.doGenerate({
-                ...callSettings2,
-                tools: stepTools,
-                toolChoice: stepToolChoice,
-                responseFormat: output == null ? void 0 : output.responseFormat,
-                prompt: promptMessages,
-                providerOptions,
-                abortSignal,
-                headers
-              });
-              const responseData = {
-                id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
-                timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-                modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : stepModel.modelId,
-                headers: (_g = result.response) == null ? void 0 : _g.headers,
-                body: (_h = result.response) == null ? void 0 : _h.body
-              };
-              span2.setAttributes(
-                selectTelemetryAttributes({
-                  telemetry,
-                  attributes: {
-                    "ai.response.finishReason": result.finishReason,
-                    "ai.response.text": {
-                      output: () => extractContentText(result.content)
-                    },
-                    "ai.response.toolCalls": {
-                      output: () => {
-                        const toolCalls = asToolCalls(result.content);
-                        return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-                      }
-                    },
-                    "ai.response.id": responseData.id,
-                    "ai.response.model": responseData.modelId,
-                    "ai.response.timestamp": responseData.timestamp.toISOString(),
-                    // TODO rename telemetry attributes to inputTokens and outputTokens
-                    "ai.usage.promptTokens": result.usage.inputTokens,
-                    "ai.usage.completionTokens": result.usage.outputTokens,
-                    // standardized gen-ai llm span attributes:
-                    "gen_ai.response.finish_reasons": [result.finishReason],
-                    "gen_ai.response.id": responseData.id,
-                    "gen_ai.response.model": responseData.modelId,
-                    "gen_ai.usage.input_tokens": result.usage.inputTokens,
-                    "gen_ai.usage.output_tokens": result.usage.outputTokens
-                  }
-                })
-              );
-              return { ...result, response: responseData };
-            }
-          });
+  try {
+    return await recordSpan({
+      name: "ai.generateText",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.generateText",
+            telemetry
+          }),
+          ...baseTelemetryAttributes,
+          // model:
+          "ai.model.provider": model.provider,
+          "ai.model.id": model.modelId,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
           }
-
-
-
-
-
-
-
-
-
-
+        }
+      }),
+      tracer,
+      fn: async (span) => {
+        var _a17, _b, _c, _d, _e;
+        const callSettings2 = prepareCallSettings(settings);
+        let currentModelResponse;
+        let currentToolCalls = [];
+        let currentToolResults = [];
+        const responseMessages = [];
+        const steps = [];
+        do {
+          const stepInputMessages = [
+            ...initialPrompt.messages,
+            ...responseMessages
+          ];
+          const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+            model,
+            steps,
+            stepNumber: steps.length
+          }));
+          const promptMessages = await convertToLanguageModelPrompt({
+            prompt: {
+              system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
              messages: stepInputMessages
+            },
+            supportedUrls: await model.supportedUrls
+          });
+          const stepModel = resolveLanguageModel(
+            (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
+          );
+          const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+            tools,
+            toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
+            activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
+          });
+          currentModelResponse = await retry(
+            () => {
+              var _a18;
+              return recordSpan({
+                name: "ai.generateText.doGenerate",
+                attributes: selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    ...assembleOperationName({
+                      operationId: "ai.generateText.doGenerate",
+                      telemetry
+                    }),
+                    ...baseTelemetryAttributes,
+                    // model:
+                    "ai.model.provider": stepModel.provider,
+                    "ai.model.id": stepModel.modelId,
+                    // prompt:
+                    "ai.prompt.messages": {
+                      input: () => stringifyForTelemetry(promptMessages)
+                    },
+                    "ai.prompt.tools": {
+                      // convert the language model level tools:
+                      input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
+                    },
+                    "ai.prompt.toolChoice": {
+                      input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
+                    },
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.system": stepModel.provider,
+                    "gen_ai.request.model": stepModel.modelId,
+                    "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+                    "gen_ai.request.max_tokens": settings.maxOutputTokens,
+                    "gen_ai.request.presence_penalty": settings.presencePenalty,
+                    "gen_ai.request.stop_sequences": settings.stopSequences,
+                    "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
+                    "gen_ai.request.top_k": settings.topK,
+                    "gen_ai.request.top_p": settings.topP
+                  }
+                }),
+                tracer,
+                fn: async (span2) => {
+                  var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
+                  const result = await stepModel.doGenerate({
+                    ...callSettings2,
+                    tools: stepTools,
+                    toolChoice: stepToolChoice,
+                    responseFormat: output == null ? void 0 : output.responseFormat,
+                    prompt: promptMessages,
+                    providerOptions,
+                    abortSignal,
+                    headers
+                  });
+                  const responseData = {
+                    id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
+                    timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+                    modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
+                    headers: (_g = result.response) == null ? void 0 : _g.headers,
+                    body: (_h = result.response) == null ? void 0 : _h.body
+                  };
+                  span2.setAttributes(
+                    selectTelemetryAttributes({
+                      telemetry,
+                      attributes: {
+                        "ai.response.finishReason": result.finishReason,
+                        "ai.response.text": {
+                          output: () => extractContentText(result.content)
+                        },
+                        "ai.response.toolCalls": {
+                          output: () => {
+                            const toolCalls = asToolCalls(result.content);
+                            return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+                          }
+                        },
+                        "ai.response.id": responseData.id,
+                        "ai.response.model": responseData.modelId,
+                        "ai.response.timestamp": responseData.timestamp.toISOString(),
+                        // TODO rename telemetry attributes to inputTokens and outputTokens
+                        "ai.usage.promptTokens": result.usage.inputTokens,
+                        "ai.usage.completionTokens": result.usage.outputTokens,
+                        // standardized gen-ai llm span attributes:
+                        "gen_ai.response.finish_reasons": [result.finishReason],
+                        "gen_ai.response.id": responseData.id,
+                        "gen_ai.response.model": responseData.modelId,
+                        "gen_ai.usage.input_tokens": result.usage.inputTokens,
+                        "gen_ai.usage.output_tokens": result.usage.outputTokens
+                      }
+                    })
+                  );
+                  return { ...result, response: responseData };
+                }
+              });
+            }
+          );
+          currentToolCalls = await Promise.all(
+            currentModelResponse.content.filter(
+              (part) => part.type === "tool-call"
+            ).map(
+              (toolCall) => parseToolCall({
+                toolCall,
+                tools,
+                repairToolCall,
+                system,
+                messages: stepInputMessages
+              })
+            )
+          );
+          currentToolResults = tools == null ? [] : await executeTools({
+            toolCalls: currentToolCalls,
+            tools,
+            tracer,
+            telemetry,
+            messages: stepInputMessages,
+            abortSignal
+          });
+          const stepContent = asContent({
+            content: currentModelResponse.content,
+            toolCalls: currentToolCalls,
+            toolResults: currentToolResults
+          });
+          responseMessages.push(
+            ...toResponseMessages({
+              content: stepContent,
+              tools: tools != null ? tools : {}
             })
-          )
-
-          currentToolResults = tools == null ? [] : await executeTools({
-            toolCalls: currentToolCalls,
-            tools,
-            tracer,
-            telemetry,
-            messages: stepInputMessages,
-            abortSignal
-          });
-          const stepContent = asContent({
-            content: currentModelResponse.content,
-            toolCalls: currentToolCalls,
-            toolResults: currentToolResults
-          });
-          responseMessages.push(
-            ...toResponseMessages({
+          );
+          const currentStepResult = new DefaultStepResult({
             content: stepContent,
-
+            finishReason: currentModelResponse.finishReason,
+            usage: currentModelResponse.usage,
+            warnings: currentModelResponse.warnings,
+            providerMetadata: currentModelResponse.providerMetadata,
+            request: (_e = currentModelResponse.request) != null ? _e : {},
+            response: {
+              ...currentModelResponse.response,
+              // deep clone msgs to avoid mutating past messages in multi-step:
+              messages: structuredClone(responseMessages)
+            }
+          });
+          steps.push(currentStepResult);
+          await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+        } while (
+          // there are tool calls:
+          currentToolCalls.length > 0 && // all current tool calls have results:
+          currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
+          !await isStopConditionMet({ stopConditions, steps })
+        );
+        span.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.response.finishReason": currentModelResponse.finishReason,
+              "ai.response.text": {
+                output: () => extractContentText(currentModelResponse.content)
+              },
+              "ai.response.toolCalls": {
+                output: () => {
+                  const toolCalls = asToolCalls(currentModelResponse.content);
+                  return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+                }
+              },
+              // TODO rename telemetry attributes to inputTokens and outputTokens
+              "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
+              "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
+            }
           })
         );
-        const
-
-
-
-
-
-
-
-
-
-
-        }
+        const lastStep = steps[steps.length - 1];
+        return new DefaultGenerateTextResult({
+          steps,
+          resolvedOutput: await (output == null ? void 0 : output.parseOutput(
+            { text: lastStep.text },
+            {
+              response: lastStep.response,
+              usage: lastStep.usage,
+              finishReason: lastStep.finishReason
+            }
+          ))
        });
-
-
-
-
-
-          currentToolResults.length === currentToolCalls.length && // continue until the stop condition is met:
-          !await continueUntil({ steps })
-        );
-        span.setAttributes(
-          selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              "ai.response.finishReason": currentModelResponse.finishReason,
-              "ai.response.text": {
-                output: () => extractContentText(currentModelResponse.content)
-              },
-              "ai.response.toolCalls": {
-                output: () => {
-                  const toolCalls = asToolCalls(currentModelResponse.content);
-                  return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-                }
-              },
-              // TODO rename telemetry attributes to inputTokens and outputTokens
-              "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
-              "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
-            }
-          })
-        );
-        const lastStep = steps[steps.length - 1];
-        return new DefaultGenerateTextResult({
-          steps,
-          resolvedOutput: await (output == null ? void 0 : output.parseOutput(
-            { text: lastStep.text },
-            {
-              response: lastStep.response,
-              usage: lastStep.usage,
-              finishReason: lastStep.finishReason
-            }
-          ))
-        });
-      }
-    });
+      }
+    });
+  } catch (error) {
+    throw wrapGatewayError(error);
+  }
 }
 async function executeTools({
   toolCalls,
@@ -6205,11 +6209,6 @@ function smoothStream({
 // core/generate-text/stream-text.ts
 import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";

-// src/util/as-array.ts
-function asArray(value) {
-  return value === void 0 ? [] : Array.isArray(value) ? value : [value];
-}
-
 // core/generate-text/run-tools-transformation.ts
 import { generateId } from "@ai-sdk/provider-utils";
 function runToolsTransformation({
@@ -6420,17 +6419,21 @@ function streamText({
   maxRetries,
   abortSignal,
   headers,
-
+  stopWhen = stepCountIs(1),
   experimental_output: output,
   experimental_telemetry: telemetry,
+  prepareStep,
   providerOptions,
   experimental_toolCallStreaming = false,
   toolCallStreaming = experimental_toolCallStreaming,
-  experimental_activeTools
+  experimental_activeTools,
+  activeTools = experimental_activeTools,
   experimental_repairToolCall: repairToolCall,
   experimental_transform: transform,
   onChunk,
-  onError
+  onError = ({ error }) => {
+    console.error(error);
+  },
   onFinish,
   onStepFinish,
   _internal: {
@@ -6441,7 +6444,7 @@ function streamText({
   ...settings
 }) {
   return new DefaultStreamTextResult({
-    model,
+    model: resolveLanguageModel(model),
     telemetry,
     headers,
     settings,
@@ -6456,9 +6459,10 @@
     transforms: asArray(transform),
     activeTools,
     repairToolCall,
-
+    stopConditions: asArray(stopWhen),
     output,
     providerOptions,
+    prepareStep,
     onChunk,
     onError,
     onFinish,
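
Usage note: streamText picks up the same surface as generateText above
(stopWhen, prepareStep, promoted activeTools) and now logs errors through a
console.error default instead of silently dropping them; error chunks pass
through wrapGatewayError before reaching the handler. Illustrative usage:

    import { streamText, stepCountIs } from "ai";

    const result = streamText({
      model: "openai/gpt-4o", // illustrative string id
      prompt: "Write a haiku.",
      stopWhen: stepCountIs(2),
      onError: ({ error }) => {
        console.error("stream error:", error);
      }
    });

    for await (const textPart of result.textStream) {
      process.stdout.write(textPart);
    }
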
@@ -6533,9 +6537,10 @@ var DefaultStreamTextResult = class {
|
|
6533
6537
|
transforms,
|
6534
6538
|
activeTools,
|
6535
6539
|
repairToolCall,
|
6536
|
-
|
6540
|
+
stopConditions,
|
6537
6541
|
output,
|
6538
6542
|
providerOptions,
|
6543
|
+
prepareStep,
|
6539
6544
|
now: now2,
|
6540
6545
|
currentDate,
|
6541
6546
|
generateId: generateId3,
|
@@ -6544,18 +6549,12 @@ var DefaultStreamTextResult = class {
|
|
6544
6549
|
onFinish,
|
6545
6550
|
onStepFinish
|
6546
6551
|
}) {
|
6547
|
-
this.
|
6548
|
-
this.
|
6549
|
-
this.
|
6550
|
-
if (maxSteps2 < 1) {
|
6551
|
-
throw new InvalidArgumentError({
|
6552
|
-
parameter: "maxSteps",
|
6553
|
-
value: maxSteps2,
|
6554
|
-
message: "maxSteps must be at least 1"
|
6555
|
-
});
|
6556
|
-
}
|
6552
|
+
this._totalUsage = new DelayedPromise();
|
6553
|
+
this._finishReason = new DelayedPromise();
|
6554
|
+
this._steps = new DelayedPromise();
|
6557
6555
|
this.output = output;
|
6558
6556
|
this.generateId = generateId3;
|
6557
|
+
let stepFinish;
|
6559
6558
|
let activeReasoningPart = void 0;
|
6560
6559
|
let recordedContent = [];
|
6561
6560
|
const recordedResponseMessages = [];
|
@@ -6573,7 +6572,7 @@ var DefaultStreamTextResult = class {
|
|
6573
6572
|
await (onChunk == null ? void 0 : onChunk({ chunk: part }));
|
6574
6573
|
}
|
6575
6574
|
if (part.type === "error") {
|
6576
|
-
await
|
6575
|
+
await onError({ error: wrapGatewayError(part.error) });
|
6577
6576
|
}
|
6578
6577
|
if (part.type === "text") {
|
6579
6578
|
const latestContent = recordedContent[recordedContent.length - 1];
|
@@ -6637,6 +6636,7 @@ var DefaultStreamTextResult = class {
|
|
6637
6636
|
recordedContent = [];
|
6638
6637
|
activeReasoningPart = void 0;
|
6639
6638
|
recordedResponseMessages.push(...stepMessages);
|
6639
|
+
stepFinish.resolve();
|
6640
6640
|
}
|
6641
6641
|
if (part.type === "finish") {
|
6642
6642
|
recordedTotalUsage = part.totalUsage;
|
@@ -6654,9 +6654,9 @@ var DefaultStreamTextResult = class {
|
|
6654
6654
|
outputTokens: void 0,
|
6655
6655
|
totalTokens: void 0
|
6656
6656
|
};
|
6657
|
-
self.
|
6658
|
-
self.
|
6659
|
-
self.
|
6657
|
+
self._finishReason.resolve(finishReason);
|
6658
|
+
self._totalUsage.resolve(totalUsage);
|
6659
|
+
self._steps.resolve(recordedSteps);
|
6660
6660
|
const finalStep = recordedSteps[recordedSteps.length - 1];
|
6661
6661
|
await (onFinish == null ? void 0 : onFinish({
|
6662
6662
|
finishReason,
|
@@ -6747,8 +6747,7 @@ var DefaultStreamTextResult = class {
|
|
6747
6747
|
// specific settings that only make sense on the outer level:
|
6748
6748
|
"ai.prompt": {
|
6749
6749
|
input: () => JSON.stringify({ system, prompt, messages })
|
6750
|
-
}
|
6751
|
-
"ai.settings.maxSteps": maxSteps2
|
6750
|
+
}
|
6752
6751
|
}
|
6753
6752
|
}),
|
6754
6753
|
tracer,
|
@@ -6760,6 +6759,8 @@ var DefaultStreamTextResult = class {
|
|
6760
6759
|
responseMessages,
|
6761
6760
|
usage
|
6762
6761
|
}) {
|
6762
|
+
var _a17, _b, _c, _d;
|
6763
|
+
stepFinish = new DelayedPromise();
|
6763
6764
|
const initialPrompt = await standardizePrompt({
|
6764
6765
|
system,
|
6765
6766
|
prompt,
|
@@ -6769,16 +6770,26 @@ var DefaultStreamTextResult = class {
|
|
6769
6770
|
...initialPrompt.messages,
|
6770
6771
|
...responseMessages
|
6771
6772
|
];
|
6773
|
+
const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
|
6774
|
+
model,
|
6775
|
+
steps: recordedSteps,
|
6776
|
+
stepNumber: recordedSteps.length
|
6777
|
+
}));
|
6772
6778
|
const promptMessages = await convertToLanguageModelPrompt({
|
6773
6779
|
prompt: {
|
6774
|
-
system: initialPrompt.system,
|
6780
|
+
system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
|
6775
6781
|
messages: stepInputMessages
|
6776
6782
|
},
|
6777
6783
|
supportedUrls: await model.supportedUrls
|
6778
6784
|
});
|
6779
|
-
const
|
6780
|
-
|
6781
|
-
|
6785
|
+
const stepModel = resolveLanguageModel(
|
6786
|
+
(_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
|
6787
|
+
);
|
6788
|
+
const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
|
6789
|
+
tools,
|
6790
|
+
toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
|
6791
|
+
activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
|
6792
|
+
});
|
6782
6793
|
const {
|
6783
6794
|
result: { stream: stream2, response, request },
|
6784
6795
|
doStreamSpan,
|
@@ -6794,24 +6805,23 @@ var DefaultStreamTextResult = class {
|
|
6794
6805
|
telemetry
|
6795
6806
|
}),
|
6796
6807
|
...baseTelemetryAttributes,
|
6808
|
+
// model:
|
6809
|
+
"ai.model.provider": stepModel.provider,
|
6810
|
+
"ai.model.id": stepModel.modelId,
|
6811
|
+
// prompt:
|
6797
6812
|
"ai.prompt.messages": {
|
6798
|
-
input: () =>
|
6813
|
+
input: () => stringifyForTelemetry(promptMessages)
|
6799
6814
|
},
|
6800
6815
|
"ai.prompt.tools": {
|
6801
6816
|
// convert the language model level tools:
|
6802
|
-
input: () =>
|
6803
|
-
var _a17;
|
6804
|
-
return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
|
6805
|
-
(tool2) => JSON.stringify(tool2)
|
6806
|
-
);
|
6807
|
-
}
|
6817
|
+
input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
|
6808
6818
|
},
|
6809
6819
|
"ai.prompt.toolChoice": {
|
6810
|
-
input: () =>
|
6820
|
+
input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
|
6811
6821
|
},
|
6812
6822
|
// standardized gen-ai llm span attributes:
|
6813
|
-
"gen_ai.system":
|
6814
|
-
"gen_ai.request.model":
|
6823
|
+
"gen_ai.system": stepModel.provider,
|
6824
|
+
"gen_ai.request.model": stepModel.modelId,
|
6815
6825
|
"gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
|
6816
6826
|
"gen_ai.request.max_tokens": callSettings.maxOutputTokens,
|
6817
6827
|
"gen_ai.request.presence_penalty": callSettings.presencePenalty,
|
@@ -6828,9 +6838,10 @@ var DefaultStreamTextResult = class {
|
|
6828
6838
|
startTimestampMs: now2(),
|
6829
6839
|
// get before the call
|
6830
6840
|
doStreamSpan: doStreamSpan2,
|
6831
|
-
result: await
|
6841
|
+
result: await stepModel.doStream({
|
6832
6842
|
...callSettings,
|
6833
|
-
|
6843
|
+
tools: stepTools,
|
6844
|
+
toolChoice: stepToolChoice,
|
6834
6845
|
responseFormat: output == null ? void 0 : output.responseFormat,
|
6835
6846
|
prompt: promptMessages,
|
6836
6847
|
providerOptions,
|
@@ -6841,7 +6852,7 @@ var DefaultStreamTextResult = class {
|
|
6841
6852
|
}
|
6842
6853
|
})
|
6843
6854
|
);
|
6844
|
-
const
|
6855
|
+
const streamWithToolResults = runToolsTransformation({
|
6845
6856
|
tools,
|
6846
6857
|
generatorStream: stream2,
|
6847
6858
|
toolCallStreaming,
|
@@ -6880,10 +6891,10 @@ var DefaultStreamTextResult = class {
|
|
6880
6891
|
stepText += chunk.text;
|
6881
6892
|
}
|
6882
6893
|
self.addStream(
|
6883
|
-
|
6894
|
+
streamWithToolResults.pipeThrough(
|
6884
6895
|
new TransformStream({
|
6885
6896
|
async transform(chunk, controller) {
|
6886
|
-
var
|
6897
|
+
var _a18, _b2, _c2, _d2;
|
6887
6898
|
if (chunk.type === "stream-start") {
|
6888
6899
|
warnings = chunk.warnings;
|
6889
6900
|
return;
|
@@ -6946,9 +6957,9 @@ var DefaultStreamTextResult = class {
|
|
6946
6957
|
}
|
6947
6958
|
case "response-metadata": {
|
6948
6959
|
stepResponse = {
|
6949
|
-
id: (
|
6950
|
-
timestamp: (
|
6951
|
-
modelId: (
|
6960
|
+
id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
|
6961
|
+
timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
|
6962
|
+
modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
|
6952
6963
|
};
|
6953
6964
|
break;
|
6954
6965
|
}
|
@@ -6960,7 +6971,7 @@ var DefaultStreamTextResult = class {
|
|
6960
6971
|
doStreamSpan.addEvent("ai.stream.finish");
|
6961
6972
|
doStreamSpan.setAttributes({
|
6962
6973
|
"ai.response.msToFinish": msToFinish,
|
6963
|
-
"ai.response.avgOutputTokensPerSecond": 1e3 * ((
|
6974
|
+
"ai.response.avgOutputTokensPerSecond": 1e3 * ((_d2 = stepUsage.outputTokens) != null ? _d2 : 0) / msToFinish
|
6964
6975
|
});
|
6965
6976
|
break;
|
6966
6977
|
}
|
@@ -7035,9 +7046,13 @@ var DefaultStreamTextResult = class {
|
|
7035
7046
|
}
|
7036
7047
|
});
|
7037
7048
|
const combinedUsage = addLanguageModelUsage(usage, stepUsage);
|
7038
|
-
|
7039
|
-
stepToolCalls.length > 0 && // all current tool calls have results:
|
7040
|
-
stepToolResults.length === stepToolCalls.length
|
7049
|
+
await stepFinish.promise;
|
7050
|
+
if (stepToolCalls.length > 0 && // all current tool calls have results:
|
7051
|
+
stepToolResults.length === stepToolCalls.length && // continue until a stop condition is met:
|
7052
|
+
!await isStopConditionMet({
|
7053
|
+
stopConditions,
|
7054
|
+
steps: recordedSteps
|
7055
|
+
})) {
|
7041
7056
|
responseMessages.push(
|
7042
7057
|
...toResponseMessages({
|
7043
7058
|
content: stepContent,
|
@@ -7085,7 +7100,7 @@ var DefaultStreamTextResult = class {
|
|
7085
7100
|
});
|
7086
7101
|
}
|
7087
7102
|
get steps() {
|
7088
|
-
return this.
|
7103
|
+
return this._steps.promise;
|
7089
7104
|
}
|
7090
7105
|
get finalStep() {
|
7091
7106
|
return this.steps.then((steps) => steps[steps.length - 1]);
|
@@ -7130,10 +7145,10 @@ var DefaultStreamTextResult = class {
|
|
7130
7145
|
return this.finalStep.then((step) => step.response);
|
7131
7146
|
}
|
7132
7147
|
get totalUsage() {
|
7133
|
-
return this.
|
7148
|
+
return this._totalUsage.promise;
|
7134
7149
|
}
|
7135
7150
|
get finishReason() {
|
7136
|
-
return this.
|
7151
|
+
return this._finishReason.promise;
|
7137
7152
|
}
|
7138
7153
|
/**
|
7139
7154
|
Split out a new stream from the original stream.
|
@@ -7206,8 +7221,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning = false,
  sendSources = false,
-
-
+ sendStart = true,
+ sendFinish = true,
  onError = () => "An error occurred."
  // mask error messages for safety by default
  } = {}) {
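sendStart and sendFinish are new options, both defaulting to true; the hunks further down show them gating the start and finish parts of the UI message stream. Their use case is stitching several results into one client-visible message. A sketch, assuming the surrounding method is toUIMessageStream (the method name sits outside this hunk):

import { streamText } from "ai";

declare const model: Parameters<typeof streamText>[0]["model"];

const first = streamText({ model, prompt: "Part one" });
const second = streamText({ model, prompt: "Part two" });

// Suppress `finish` on the first stream and `start` on the second so
// the client renders one continuous message:
const head = first.toUIMessageStream({ sendFinish: false });
const tail = second.toUIMessageStream({ sendStart: false });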
@@ -7253,9 +7268,8 @@ var DefaultStreamTextResult = class {
  case "source": {
  if (sendSources) {
  controller.enqueue({
- type: "source",
-
- id: part.id,
+ type: "source-url",
+ sourceId: part.id,
  url: part.url,
  title: part.title,
  providerMetadata: part.providerMetadata
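The source part is renamed: type "source" with an id field becomes type "source-url" with sourceId. A client-side sketch of the new shape, with the field list read off the enqueue call above (the renderer is hypothetical):

// Shape of the renamed part, per the enqueue call in this hunk:
type SourceUrlPart = {
  type: "source-url";
  sourceId: string;
  url: string;
  title?: string;
  providerMetadata?: unknown;
};

// Hypothetical client-side renderer for streamed source citations:
function renderSource(part: SourceUrlPart): string {
  return `[${part.title ?? part.url}](${part.url})`;
}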
@@ -7320,7 +7334,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "start": {
- if (
+ if (sendStart) {
  const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "start",
@@ -7331,7 +7345,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "finish": {
- if (
+ if (sendFinish) {
  const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "finish",
@@ -7348,38 +7362,12 @@ var DefaultStreamTextResult = class {
  }
  })
  );
-
- return baseStream;
- }
- const state = createStreamingUIMessageState({
- lastMessage,
- newMessageId: messageId != null ? messageId : this.generateId()
- });
- const runUpdateMessageJob = async (job) => {
- await job({ state, write: () => {
- } });
- };
- return processUIMessageStream({
+ return handleUIMessageStreamFinish({
  stream: baseStream,
-
-
-
-
- controller.enqueue(chunk);
- },
- flush() {
- const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
- onFinish({
- isContinuation: isContinuation2,
- responseMessage: state.message,
- messages: [
- ...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
- state.message
- ]
- });
- }
- })
- );
+ newMessageId: messageId != null ? messageId : this.generateId(),
+ originalMessages,
+ onFinish
+ });
  }
  pipeUIMessageStreamToResponse(response, {
  newMessageId,
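The removed block shows what moved: the streaming-UI state (createStreamingUIMessageState) and the flush() that computed isContinuation and assembled the final messages array are now centralized in the shared handleUIMessageStreamFinish helper. The onFinish contract visible in the removed code is preserved; a persistence sketch (saveChat is a hypothetical helper, and the response-method name is assumed):

import { streamText, type UIMessage } from "ai";

declare const result: ReturnType<typeof streamText>;
declare const originalMessages: UIMessage[];
declare function saveChat(args: { messages: UIMessage[] }): Promise<void>;

const response = result.toUIMessageStreamResponse({
  originalMessages,
  onFinish: ({ messages, responseMessage, isContinuation }) => {
    // `messages` is originalMessages (minus the replaced last message
    // when continuing) plus the new response message — exactly what the
    // removed flush() assembled above.
    void saveChat({ messages });
  },
});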
@@ -7388,8 +7376,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError,
  ...init
  } = {}) {
@@ -7402,8 +7390,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError
  }),
  ...init
@@ -7423,8 +7411,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError,
  ...init
  } = {}) {
@@ -7436,8 +7424,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
-
-
+ sendFinish,
+ sendStart,
  onError
  }),
  ...init
@@ -7680,7 +7668,9 @@ var doWrap = ({
  };

  // core/registry/custom-provider.ts
- import {
+ import {
+   NoSuchModelError as NoSuchModelError2
+ } from "@ai-sdk/provider";
  function customProvider({
  languageModels,
  textEmbeddingModels,
@@ -7720,7 +7710,7 @@ function customProvider({
  var experimental_customProvider = customProvider;

  // core/registry/no-such-provider-error.ts
- import { AISDKError as
+ import { AISDKError as AISDKError20, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
  var name16 = "AI_NoSuchProviderError";
  var marker16 = `vercel.ai.error.${name16}`;
  var symbol16 = Symbol.for(marker16);
@@ -7739,13 +7729,15 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return
+ return AISDKError20.hasMarker(error, marker16);
  }
  };
  _a16 = symbol16;

  // core/registry/provider-registry.ts
- import {
+ import {
+   NoSuchModelError as NoSuchModelError4
+ } from "@ai-sdk/provider";
  function createProviderRegistry(providers, {
  separator = ":"
  } = {}) {
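The registry hunks are import bookkeeping only (re-aliased NoSuchModelError imports, AISDKError20.hasMarker in NoSuchProviderError.isInstance), but since they sit in the provider-registry code, here is a usage sketch of the surrounding customProvider / createProviderRegistry API; the provider package and model ids are assumptions, while the ":" separator default is taken from the hunk above:

import { customProvider, createProviderRegistry } from "ai";
import { openai } from "@ai-sdk/openai"; // assumed provider package

const acme = customProvider({
  languageModels: {
    fast: openai("gpt-4o-mini"),
    smart: openai("gpt-4o"),
  },
});

// Model ids are addressed as `${providerId}${separator}${modelId}`;
// separator defaults to ":" per the hunk above.
const registry = createProviderRegistry({ acme });
const model = registry.languageModel("acme:fast");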
@@ -8394,8 +8386,8 @@ var MCPClient = class {
  };

  // src/error/no-transcript-generated-error.ts
- import { AISDKError as
- var NoTranscriptGeneratedError = class extends
+ import { AISDKError as AISDKError21 } from "@ai-sdk/provider";
+ var NoTranscriptGeneratedError = class extends AISDKError21 {
  constructor(options) {
  super({
  name: "AI_NoTranscriptGeneratedError",
@@ -8459,10 +8451,11 @@ var DefaultTranscriptionResult = class {
  export {
  AISDKError16 as AISDKError,
  APICallError,
-
+ AbstractChat,
  DefaultChatTransport,
  DownloadError,
  EmptyResponseBodyError,
+ GLOBAL_DEFAULT_PROVIDER,
  InvalidArgumentError,
  InvalidDataContentError,
  InvalidMessageRoleError,
@@ -8484,14 +8477,14 @@ export {
  NoSuchToolError,
  output_exports as Output,
  RetryError,
+ SerialJobExecutor,
+ TextStreamChatTransport,
  ToolCallRepairError,
  ToolExecutionError,
  TypeValidationError,
  UnsupportedFunctionalityError,
- appendClientMessage,
  asSchema5 as asSchema,
  assistantModelMessageSchema,
- callChatApi,
  callCompletionApi,
  convertFileListToFileUIParts,
  convertToCoreMessages,
@@ -8508,7 +8501,6 @@ export {
  createUIMessageStream,
  createUIMessageStreamResponse,
  customProvider,
- defaultChatStore,
  defaultSettingsMiddleware,
  embed,
  embedMany,
@@ -8518,7 +8510,6 @@ export {
  generateImage as experimental_generateImage,
  generateSpeech as experimental_generateSpeech,
  transcribe as experimental_transcribe,
- extractMaxToolInvocationStep,
  extractReasoningMiddleware,
  generateId2 as generateId,
  generateObject,
@@ -8526,24 +8517,21 @@ export {
  getTextFromDataUrl,
  getToolInvocations,
  hasToolCall,
- isAssistantMessageWithCompletedToolCalls,
  isDeepEqualData,
  jsonSchema2 as jsonSchema,
- maxSteps,
  modelMessageSchema,
  parsePartialJson,
  pipeTextStreamToResponse,
  pipeUIMessageStreamToResponse,
- shouldResubmitMessages,
  simulateReadableStream,
  simulateStreamingMiddleware,
  smoothStream,
+ stepCountIs,
  streamObject,
  streamText,
  systemModelMessageSchema,
  tool,
  toolModelMessageSchema,
- updateToolCallResult,
  userModelMessageSchema,
  wrapLanguageModel
  };