ai 5.0.0-alpha.6 → 5.0.0-alpha.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +44 -0
- package/dist/index.d.mts +282 -437
- package/dist/index.d.ts +282 -437
- package/dist/index.js +957 -1047
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +935 -1015
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -2
- package/dist/internal/index.d.ts +3 -2
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/package.json +6 -6
package/dist/index.mjs
CHANGED
@@ -449,21 +449,8 @@ function pipeTextStreamToResponse({
   });
 }
 
-// src/ui/append-client-message.ts
-function appendClientMessage({
-  messages,
-  message
-}) {
-  return [
-    ...messages.length > 0 && messages[messages.length - 1].id === message.id ? messages.slice(0, -1) : messages,
-    message
-  ];
-}
-
-// src/ui/call-chat-api.ts
-import {
-  parseJsonEventStream
-} from "@ai-sdk/provider-utils";
+// src/ui/call-completion-api.ts
+import { parseJsonEventStream } from "@ai-sdk/provider-utils";
 
 // src/ui-message-stream/ui-message-stream-parts.ts
 import { z } from "zod";
@@ -569,6 +556,138 @@ async function consumeStream({
   }
 }
 
+// src/ui/process-text-stream.ts
+async function processTextStream({
+  stream,
+  onTextPart
+}) {
+  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
+  while (true) {
+    const { done, value } = await reader.read();
+    if (done) {
+      break;
+    }
+    await onTextPart(value);
+  }
+}
+
+// src/ui/call-completion-api.ts
+var getOriginalFetch = () => fetch;
+async function callCompletionApi({
+  api,
+  prompt,
+  credentials,
+  headers,
+  body,
+  streamProtocol = "data",
+  setCompletion,
+  setLoading,
+  setError,
+  setAbortController,
+  onFinish,
+  onError,
+  fetch: fetch2 = getOriginalFetch()
+}) {
+  var _a17;
+  try {
+    setLoading(true);
+    setError(void 0);
+    const abortController = new AbortController();
+    setAbortController(abortController);
+    setCompletion("");
+    const response = await fetch2(api, {
+      method: "POST",
+      body: JSON.stringify({
+        prompt,
+        ...body
+      }),
+      credentials,
+      headers: {
+        "Content-Type": "application/json",
+        ...headers
+      },
+      signal: abortController.signal
+    }).catch((err) => {
+      throw err;
+    });
+    if (!response.ok) {
+      throw new Error(
+        (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+      );
+    }
+    if (!response.body) {
+      throw new Error("The response body is empty.");
+    }
+    let result = "";
+    switch (streamProtocol) {
+      case "text": {
+        await processTextStream({
+          stream: response.body,
+          onTextPart: (chunk) => {
+            result += chunk;
+            setCompletion(result);
+          }
+        });
+        break;
+      }
+      case "data": {
+        await consumeStream({
+          stream: parseJsonEventStream({
+            stream: response.body,
+            schema: uiMessageStreamPartSchema
+          }).pipeThrough(
+            new TransformStream({
+              async transform(part) {
+                if (!part.success) {
+                  throw part.error;
+                }
+                const streamPart = part.value;
+                if (streamPart.type === "text") {
+                  result += streamPart.text;
+                  setCompletion(result);
+                } else if (streamPart.type === "error") {
+                  throw new Error(streamPart.errorText);
+                }
+              }
+            })
+          ),
+          onError: (error) => {
+            throw error;
+          }
+        });
+        break;
+      }
+      default: {
+        const exhaustiveCheck = streamProtocol;
+        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
+      }
+    }
+    if (onFinish) {
+      onFinish(prompt, result);
+    }
+    setAbortController(null);
+    return result;
+  } catch (err) {
+    if (err.name === "AbortError") {
+      setAbortController(null);
+      return null;
+    }
+    if (err instanceof Error) {
+      if (onError) {
+        onError(err);
+      }
+    }
+    setError(err);
+  } finally {
+    setLoading(false);
+  }
+}
+
+// src/ui/chat-store.ts
+import {
+  generateId as generateIdFunc
+} from "@ai-sdk/provider-utils";
+
 // src/ui/process-ui-message-stream.ts
 import {
   validateTypes
@@ -944,14 +1063,6 @@ async function parsePartialJson(jsonText) {
   return { value: void 0, state: "failed-parse" };
 }
 
-// src/ui/extract-max-tool-invocation-step.ts
-function extractMaxToolInvocationStep(toolInvocations) {
-  return toolInvocations == null ? void 0 : toolInvocations.reduce((max, toolInvocation) => {
-    var _a17;
-    return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
-  }, 0);
-}
-
 // src/ui/get-tool-invocations.ts
 function getToolInvocations(message) {
   return message.parts.filter(
@@ -964,9 +1075,7 @@ function createStreamingUIMessageState({
   lastMessage,
   newMessageId = ""
 } = {}) {
-  var _a17;
   const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
-  const step = isContinuation ? 1 + ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) : 0;
   const message = isContinuation ? lastMessage : {
     id: newMessageId,
     metadata: {},
@@ -977,8 +1086,7 @@ function createStreamingUIMessageState({
     message,
     activeTextPart: void 0,
     activeReasoningPart: void 0,
-    partialToolCalls: {},
-    step
+    partialToolCalls: {}
   };
 }
 function processUIMessageStream({
@@ -1076,13 +1184,11 @@ function processUIMessageStream({
           const toolInvocations = getToolInvocations(state.message);
           state.partialToolCalls[part.toolCallId] = {
             text: "",
-            step: state.step,
             toolName: part.toolName,
             index: toolInvocations.length
           };
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: void 0
@@ -1098,7 +1204,6 @@ function processUIMessageStream({
           );
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: partialToolCall.step,
             toolCallId: part.toolCallId,
             toolName: partialToolCall.toolName,
             args: partialArgs
@@ -1109,7 +1214,6 @@ function processUIMessageStream({
         case "tool-call": {
           updateToolInvocationPart(part.toolCallId, {
             state: "call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: part.args
@@ -1122,7 +1226,6 @@ function processUIMessageStream({
           if (result != null) {
             updateToolInvocationPart(part.toolCallId, {
               state: "result",
-              step: state.step,
               toolCallId: part.toolCallId,
               toolName: part.toolName,
               args: part.args,
@@ -1161,7 +1264,6 @@ function processUIMessageStream({
           break;
         }
         case "finish-step": {
-          state.step += 1;
           state.activeTextPart = void 0;
           state.activeReasoningPart = void 0;
           await updateMessageMetadata(part.metadata);
@@ -1224,408 +1326,64 @@ function isObject(value) {
   return typeof value === "object" && value !== null;
 }
 
-// src/ui/transform-text-to-ui-message-stream.ts
-function transformTextToUiMessageStream({
-  stream
+// src/ui/should-resubmit-messages.ts
+function shouldResubmitMessages({
+  originalMaxToolInvocationStep,
+  originalMessageCount,
+  maxSteps,
+  messages
 }) {
-  return stream.pipeThrough(
-    new TransformStream({
-      start(controller) {
-        controller.enqueue({ type: "start" });
-        controller.enqueue({ type: "start-step" });
-      },
-      async transform(part, controller) {
-        controller.enqueue({ type: "text", text: part });
-      },
-      async flush(controller) {
-        controller.enqueue({ type: "finish-step" });
-        controller.enqueue({ type: "finish" });
-      }
-    })
+  const lastMessage = messages[messages.length - 1];
+  const lastMessageStepStartCount = lastMessage.parts.filter(
+    (part) => part.type === "step-start"
+  ).length;
+  return (
+    // check if the feature is enabled:
+    maxSteps > 1 && // ensure there is a last message:
+    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
+    (messages.length > originalMessageCount || lastMessageStepStartCount !== originalMaxToolInvocationStep) && // check that next step is possible:
+    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
+    lastMessageStepStartCount < maxSteps
   );
 }
+function isAssistantMessageWithCompletedToolCalls(message) {
+  if (message.role !== "assistant") {
+    return false;
+  }
+  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
+    return part.type === "step-start" ? index : lastIndex;
+  }, -1);
+  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
+  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
+}
 
-// src/ui/call-chat-api.ts
-var getOriginalFetch = () => fetch;
-async function fetchUIMessageStream({
-  api,
-  body,
-  credentials,
-  headers,
-  abortController,
-  fetch: fetch2 = getOriginalFetch(),
-  requestType = "generate"
-}) {
-  var _a17, _b, _c;
-  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
-    method: "GET",
-    headers: {
-      "Content-Type": "application/json",
-      ...headers
-    },
-    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
-    credentials
-  }) : await fetch2(api, {
-    method: "POST",
-    body: JSON.stringify(body),
-    headers: {
-      "Content-Type": "application/json",
-      ...headers
-    },
-    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
-    credentials
-  });
-  if (!response.ok) {
-    throw new Error(
-      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
+// src/ui/chat-store.ts
+var ChatStore = class {
+  constructor({
+    chats = {},
+    generateId: generateId3,
+    transport,
+    maxSteps = 1,
+    messageMetadataSchema,
+    dataPartSchemas,
+    createChat
+  }) {
+    this.createChat = createChat;
+    this.chats = new Map(
+      Object.entries(chats).map(([id, chat]) => [
+        id,
+        this.createChat({ messages: chat.messages })
+      ])
     );
+    this.maxSteps = maxSteps;
+    this.transport = transport;
+    this.subscribers = /* @__PURE__ */ new Set();
+    this.generateId = generateId3 != null ? generateId3 : generateIdFunc;
+    this.messageMetadataSchema = messageMetadataSchema;
+    this.dataPartSchemas = dataPartSchemas;
   }
-  if (!response.body) {
-    throw new Error("The response body is empty.");
-  }
-  return parseJsonEventStream({
-    stream: response.body,
-    schema: uiMessageStreamPartSchema
-  }).pipeThrough(
-    new TransformStream({
-      async transform(part, controller) {
-        if (!part.success) {
-          throw part.error;
-        }
-        controller.enqueue(part.value);
-      }
-    })
-  );
-}
-async function fetchTextStream({
-  api,
-  body,
-  credentials,
-  headers,
-  abortController,
-  fetch: fetch2 = getOriginalFetch(),
-  requestType = "generate"
-}) {
-  var _a17, _b, _c;
-  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
-    method: "GET",
-    headers: {
-      "Content-Type": "application/json",
-      ...headers
-    },
-    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
-    credentials
-  }) : await fetch2(api, {
-    method: "POST",
-    body: JSON.stringify(body),
-    headers: {
-      "Content-Type": "application/json",
-      ...headers
-    },
-    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
-    credentials
-  });
-  if (!response.ok) {
-    throw new Error(
-      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
-    );
-  }
-  if (!response.body) {
-    throw new Error("The response body is empty.");
-  }
-  return transformTextToUiMessageStream({
-    stream: response.body.pipeThrough(new TextDecoderStream())
-  });
-}
-async function consumeUIMessageStream({
-  stream,
-  onUpdate,
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  lastMessage,
-  messageMetadataSchema
-}) {
-  const state = createStreamingUIMessageState({
-    lastMessage: lastMessage ? structuredClone(lastMessage) : void 0,
-    newMessageId: generateId3()
-  });
-  const runUpdateMessageJob = async (job) => {
-    await job({
-      state,
-      write: () => {
-        onUpdate({ message: state.message });
-      }
-    });
-  };
-  await consumeStream({
-    stream: processUIMessageStream({
-      stream,
-      onToolCall,
-      messageMetadataSchema,
-      runUpdateMessageJob
-    }),
-    onError: (error) => {
-      throw error;
-    }
-  });
-  onFinish == null ? void 0 : onFinish({ message: state.message });
-}
-async function callChatApi({
-  api,
-  body,
-  streamProtocol = "ui-message",
-  credentials,
-  headers,
-  abortController,
-  onUpdate,
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  fetch: fetch2 = getOriginalFetch(),
-  lastMessage,
-  requestType = "generate",
-  messageMetadataSchema
-}) {
-  const stream = streamProtocol === "text" ? await fetchTextStream({
-    api,
-    body,
-    credentials,
-    headers,
-    abortController,
-    fetch: fetch2,
-    requestType
-  }) : await fetchUIMessageStream({
-    api,
-    body,
-    credentials,
-    headers,
-    abortController,
-    fetch: fetch2,
-    requestType
-  });
-  await consumeUIMessageStream({
-    stream,
-    onUpdate,
-    onFinish,
-    onToolCall,
-    generateId: generateId3,
-    lastMessage,
-    messageMetadataSchema
-  });
-}
-
-// src/ui/call-completion-api.ts
-import { parseJsonEventStream as parseJsonEventStream2 } from "@ai-sdk/provider-utils";
-
-// src/ui/process-text-stream.ts
-async function processTextStream({
-  stream,
-  onTextPart
-}) {
-  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
-  while (true) {
-    const { done, value } = await reader.read();
-    if (done) {
-      break;
-    }
-    await onTextPart(value);
-  }
-}
-
-// src/ui/call-completion-api.ts
-var getOriginalFetch2 = () => fetch;
-async function callCompletionApi({
-  api,
-  prompt,
-  credentials,
-  headers,
-  body,
-  streamProtocol = "data",
-  setCompletion,
-  setLoading,
-  setError,
-  setAbortController,
-  onFinish,
-  onError,
-  fetch: fetch2 = getOriginalFetch2()
-}) {
-  var _a17;
-  try {
-    setLoading(true);
-    setError(void 0);
-    const abortController = new AbortController();
-    setAbortController(abortController);
-    setCompletion("");
-    const response = await fetch2(api, {
-      method: "POST",
-      body: JSON.stringify({
-        prompt,
-        ...body
-      }),
-      credentials,
-      headers: {
-        "Content-Type": "application/json",
-        ...headers
-      },
-      signal: abortController.signal
-    }).catch((err) => {
-      throw err;
-    });
-    if (!response.ok) {
-      throw new Error(
-        (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
-      );
-    }
-    if (!response.body) {
-      throw new Error("The response body is empty.");
-    }
-    let result = "";
-    switch (streamProtocol) {
-      case "text": {
-        await processTextStream({
-          stream: response.body,
-          onTextPart: (chunk) => {
-            result += chunk;
-            setCompletion(result);
-          }
-        });
-        break;
-      }
-      case "data": {
-        await consumeStream({
-          stream: parseJsonEventStream2({
-            stream: response.body,
-            schema: uiMessageStreamPartSchema
-          }).pipeThrough(
-            new TransformStream({
-              async transform(part) {
-                if (!part.success) {
-                  throw part.error;
-                }
-                const streamPart = part.value;
-                if (streamPart.type === "text") {
-                  result += streamPart.text;
-                  setCompletion(result);
-                } else if (streamPart.type === "error") {
-                  throw new Error(streamPart.errorText);
-                }
-              }
-            })
-          ),
-          onError: (error) => {
-            throw error;
-          }
-        });
-        break;
-      }
-      default: {
-        const exhaustiveCheck = streamProtocol;
-        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
-      }
-    }
-    if (onFinish) {
-      onFinish(prompt, result);
-    }
-    setAbortController(null);
-    return result;
-  } catch (err) {
-    if (err.name === "AbortError") {
-      setAbortController(null);
-      return null;
-    }
-    if (err instanceof Error) {
-      if (onError) {
-        onError(err);
-      }
-    }
-    setError(err);
-  } finally {
-    setLoading(false);
-  }
-}
-
-// src/ui/chat-store.ts
-import {
-  generateId as generateIdFunc
-} from "@ai-sdk/provider-utils";
-
-// src/ui/should-resubmit-messages.ts
-function shouldResubmitMessages({
-  originalMaxToolInvocationStep,
-  originalMessageCount,
-  maxSteps,
-  messages
-}) {
-  var _a17;
-  const lastMessage = messages[messages.length - 1];
-  return (
-    // check if the feature is enabled:
-    maxSteps > 1 && // ensure there is a last message:
-    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
-    (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
-    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
-    ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps
-  );
-}
-function isAssistantMessageWithCompletedToolCalls(message) {
-  if (message.role !== "assistant") {
-    return false;
-  }
-  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
-    return part.type === "step-start" ? index : lastIndex;
-  }, -1);
-  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
-  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
-}
-
-// src/ui/update-tool-call-result.ts
-function updateToolCallResult({
-  messages,
-  toolCallId,
-  toolResult: result
-}) {
-  const lastMessage = messages[messages.length - 1];
-  const invocationPart = lastMessage.parts.find(
-    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
-  );
-  if (invocationPart == null) {
-    return;
-  }
-  invocationPart.toolInvocation = {
-    ...invocationPart.toolInvocation,
-    state: "result",
-    result
-  };
-}
-
-// src/ui/chat-store.ts
-var ChatStore = class {
-  constructor({
-    chats = {},
-    generateId: generateId3,
-    transport,
-    maxSteps = 1,
-    messageMetadataSchema,
-    dataPartSchemas,
-    createChat
-  }) {
-    this.createChat = createChat;
-    this.chats = new Map(
-      Object.entries(chats).map(([id, chat]) => [
-        id,
-        this.createChat({ messages: chat.messages })
-      ])
-    );
-    this.maxSteps = maxSteps;
-    this.transport = transport;
-    this.subscribers = /* @__PURE__ */ new Set();
-    this.generateId = generateId3 != null ? generateId3 : generateIdFunc;
-    this.messageMetadataSchema = messageMetadataSchema;
-    this.dataPartSchemas = dataPartSchemas;
-  }
-  hasChat(id) {
-    return this.chats.has(id);
+  hasChat(id) {
+    return this.chats.has(id);
   }
   addChat(id, messages) {
     this.chats.set(id, this.createChat({ messages }));
@@ -1637,14 +1395,14 @@ var ChatStore = class {
     return this.chats.size;
   }
   getStatus(id) {
-    return this.
+    return this.getChat(id).status;
   }
   setStatus({
     id,
    status,
    error
  }) {
-    const state = this.
+    const state = this.getChat(id);
    if (state.status === status)
      return;
    state.setStatus(status);
@@ -1652,13 +1410,13 @@ var ChatStore = class {
     this.emit({ type: "chat-status-changed", chatId: id, error });
   }
   getError(id) {
-    return this.
+    return this.getChat(id).error;
   }
   getMessages(id) {
-    return this.
+    return this.getChat(id).messages;
   }
   getLastMessage(id) {
-    const chat = this.
+    const chat = this.getChat(id);
     return chat.messages[chat.messages.length - 1];
   }
   subscribe(subscriber) {
@@ -1669,11 +1427,11 @@ var ChatStore = class {
     id,
     messages
   }) {
-    this.
+    this.getChat(id).setMessages(messages);
     this.emit({ type: "chat-messages-changed", chatId: id });
   }
   removeAssistantResponse(id) {
-    const chat = this.
+    const chat = this.getChat(id);
     const lastMessage = chat.messages[chat.messages.length - 1];
     if (lastMessage == null) {
       throw new Error("Cannot remove assistant response from empty chat");
@@ -1694,8 +1452,8 @@ var ChatStore = class {
     onFinish
   }) {
     var _a17;
-    const
-
+    const chat = this.getChat(chatId);
+    chat.pushMessage({ ...message, id: (_a17 = message.id) != null ? _a17 : this.generateId() });
     this.emit({
       type: "chat-messages-changed",
       chatId
@@ -1718,7 +1476,7 @@ var ChatStore = class {
     onToolCall,
     onFinish
   }) {
-    const chat = this.
+    const chat = this.getChat(chatId);
     if (chat.messages[chat.messages.length - 1].role === "assistant") {
       chat.popMessage();
       this.emit({
@@ -1762,7 +1520,7 @@ var ChatStore = class {
     toolCallId,
     result
   }) {
-    const chat = this.
+    const chat = this.getChat(chatId);
     chat.jobExecutor.run(async () => {
       updateToolCallResult({
         messages: chat.messages,
@@ -1787,7 +1545,7 @@ var ChatStore = class {
   }
   async stopStream({ chatId }) {
     var _a17;
-    const chat = this.
+    const chat = this.getChat(chatId);
     if (chat.status !== "streaming" && chat.status !== "submitted")
       return;
     if ((_a17 = chat.activeResponse) == null ? void 0 : _a17.abortController) {
@@ -1800,7 +1558,7 @@ var ChatStore = class {
       subscriber.onChatChanged(event);
     }
   }
-
+  getChat(id) {
     if (!this.hasChat(id)) {
       this.addChat(id, []);
     }
@@ -1815,17 +1573,18 @@ var ChatStore = class {
     onToolCall,
     onFinish
   }) {
-    const chat = this.
+    const chat = this.getChat(chatId);
     this.setStatus({ id: chatId, status: "submitted", error: void 0 });
     const messageCount = chat.messages.length;
-    const
-
-
+    const lastMessage = chat.messages[chat.messages.length - 1];
+    const maxStep = lastMessage.parts.filter(
+      (part) => part.type === "step-start"
+    ).length;
     try {
-      const
+      const lastMessage2 = chat.messages[chat.messages.length - 1];
       const activeResponse = {
         state: createStreamingUIMessageState({
-          lastMessage: chat.snapshot ? chat.snapshot(
+          lastMessage: chat.snapshot ? chat.snapshot(lastMessage2) : lastMessage2,
           newMessageId: this.generateId()
         }),
         abortController: new AbortController()
@@ -1907,106 +1666,24 @@ var ChatStore = class {
       }
     }
   };
-
-// src/ui/default-chat-transport.ts
-var DefaultChatTransport = class {
-  constructor({
-    api,
-    credentials,
-    headers,
-    body,
-    fetch: fetch2,
-    prepareRequestBody
-  }) {
-    this.api = api;
-    this.credentials = credentials;
-    this.headers = headers;
-    this.body = body;
-    this.fetch = fetch2;
-    this.prepareRequestBody = prepareRequestBody;
-  }
-  submitMessages({
-    chatId,
-    messages,
-    abortController,
-    body,
-    headers,
-    requestType
-  }) {
-    var _a17, _b;
-    return fetchUIMessageStream({
-      api: this.api,
-      headers: {
-        ...this.headers,
-        ...headers
-      },
-      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      })) != null ? _b : {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      },
-      credentials: this.credentials,
-      abortController: () => abortController,
-      fetch: this.fetch,
-      requestType
-    });
-  }
-};
-var TextStreamChatTransport = class {
-  constructor({
-    api,
-    credentials,
-    headers,
-    body,
-    fetch: fetch2,
-    prepareRequestBody
-  }) {
-    this.api = api;
-    this.credentials = credentials;
-    this.headers = headers;
-    this.body = body;
-    this.fetch = fetch2;
-    this.prepareRequestBody = prepareRequestBody;
-  }
-  submitMessages({
-    chatId,
-    messages,
-    abortController,
-    body,
-    headers,
-    requestType
-  }) {
-    var _a17, _b;
-    return fetchTextStream({
-      api: this.api,
-      headers: {
-        ...this.headers,
-        ...headers
-      },
-      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      })) != null ? _b : {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      },
-      credentials: this.credentials,
-      abortController: () => abortController,
-      fetch: this.fetch,
-      requestType
-    });
+function updateToolCallResult({
+  messages,
+  toolCallId,
+  toolResult: result
+}) {
+  const lastMessage = messages[messages.length - 1];
+  const invocationPart = lastMessage.parts.find(
+    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
+  );
+  if (invocationPart == null) {
+    return;
   }
-};
+  invocationPart.toolInvocation = {
+    ...invocationPart.toolInvocation,
+    state: "result",
+    result
+  };
+}
 
 // src/ui/convert-file-list-to-file-ui-parts.ts
 async function convertFileListToFileUIParts(files) {
@@ -2040,7 +1717,7 @@ async function convertFileListToFileUIParts(files) {
 
 // src/ui/convert-to-model-messages.ts
 function convertToModelMessages(messages, options) {
-  var _a17
+  var _a17;
   const tools = (_a17 = options == null ? void 0 : options.tools) != null ? _a17 : {};
   const modelMessages = [];
   for (const message of messages) {
@@ -2071,6 +1748,9 @@ function convertToModelMessages(messages, options) {
       case "assistant": {
         if (message.parts != null) {
           let processBlock2 = function() {
+            if (block.length === 0) {
+              return;
+            }
             const content = [];
             for (const part of block) {
               switch (part.type) {
@@ -2145,33 +1825,20 @@ function convertToModelMessages(messages, options) {
            });
          }
          block = [];
-          blockHasToolInvocations = false;
-          currentStep++;
        };
        var processBlock = processBlock2;
-        let currentStep = 0;
-        let blockHasToolInvocations = false;
        let block = [];
        for (const part of message.parts) {
          switch (part.type) {
-            case "text":
-
-              processBlock2();
-              }
-              block.push(part);
-              break;
-            }
+            case "text":
+            case "reasoning":
             case "file":
-            case "
+            case "tool-invocation": {
              block.push(part);
              break;
            }
-            case "
-
-              processBlock2();
-              }
-              block.push(part);
-              blockHasToolInvocations = true;
+            case "step-start": {
+              processBlock2();
              break;
            }
          }
@@ -2190,14 +1857,121 @@ function convertToModelMessages(messages, options) {
       }
     }
   }
-  return modelMessages;
-}
-var convertToCoreMessages = convertToModelMessages;
+  return modelMessages;
+}
+var convertToCoreMessages = convertToModelMessages;
+
+// src/ui/default-chat-store-options.ts
+import {
+  generateId as generateIdFunc2
+} from "@ai-sdk/provider-utils";
+
+// src/ui/default-chat-transport.ts
+import {
+  parseJsonEventStream as parseJsonEventStream2
+} from "@ai-sdk/provider-utils";
+var getOriginalFetch2 = () => fetch;
+async function fetchUIMessageStream({
+  api,
+  body,
+  credentials,
+  headers,
+  abortController,
+  fetch: fetch2 = getOriginalFetch2(),
+  requestType = "generate"
+}) {
+  var _a17, _b, _c;
+  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
+    credentials
+  }) : await fetch2(api, {
+    method: "POST",
+    body: JSON.stringify(body),
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
+    credentials
+  });
+  if (!response.ok) {
+    throw new Error(
+      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
+    );
+  }
+  if (!response.body) {
+    throw new Error("The response body is empty.");
+  }
+  return parseJsonEventStream2({
+    stream: response.body,
+    schema: uiMessageStreamPartSchema
+  }).pipeThrough(
+    new TransformStream({
+      async transform(part, controller) {
+        if (!part.success) {
+          throw part.error;
+        }
+        controller.enqueue(part.value);
+      }
+    })
+  );
+}
+var DefaultChatTransport = class {
+  constructor({
+    api,
+    credentials,
+    headers,
+    body,
+    fetch: fetch2,
+    prepareRequestBody
+  }) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequestBody = prepareRequestBody;
+  }
+  submitMessages({
+    chatId,
+    messages,
+    abortController,
+    body,
+    headers,
+    requestType
+  }) {
+    var _a17, _b;
+    return fetchUIMessageStream({
+      api: this.api,
+      headers: {
+        ...this.headers,
+        ...headers
+      },
+      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      })) != null ? _b : {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      },
+      credentials: this.credentials,
+      abortController: () => abortController,
+      fetch: this.fetch,
+      requestType
+    });
+  }
+};
 
 // src/ui/default-chat-store-options.ts
-import {
-  generateId as generateIdFunc2
-} from "@ai-sdk/provider-utils";
 function defaultChatStoreOptions({
   api = "/api/chat",
   fetch: fetch2,
@@ -2228,6 +2002,119 @@ function defaultChatStoreOptions({
   });
 }
 
+// src/ui/transform-text-to-ui-message-stream.ts
+function transformTextToUiMessageStream({
+  stream
+}) {
+  return stream.pipeThrough(
+    new TransformStream({
+      start(controller) {
+        controller.enqueue({ type: "start" });
+        controller.enqueue({ type: "start-step" });
+      },
+      async transform(part, controller) {
+        controller.enqueue({ type: "text", text: part });
+      },
+      async flush(controller) {
+        controller.enqueue({ type: "finish-step" });
+        controller.enqueue({ type: "finish" });
+      }
+    })
+  );
+}
+
+// src/ui/text-stream-chat-transport.ts
+var getOriginalFetch3 = () => fetch;
+async function fetchTextStream({
+  api,
+  body,
+  credentials,
+  headers,
+  abortController,
+  fetch: fetch2 = getOriginalFetch3(),
+  requestType = "generate"
+}) {
+  var _a17, _b, _c;
+  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
+    credentials
+  }) : await fetch2(api, {
+    method: "POST",
+    body: JSON.stringify(body),
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
+    credentials
+  });
+  if (!response.ok) {
+    throw new Error(
+      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
+    );
+  }
+  if (!response.body) {
+    throw new Error("The response body is empty.");
+  }
+  return transformTextToUiMessageStream({
+    stream: response.body.pipeThrough(new TextDecoderStream())
+  });
+}
+var TextStreamChatTransport = class {
+  constructor({
+    api,
+    credentials,
+    headers,
+    body,
+    fetch: fetch2,
+    prepareRequestBody
+  }) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequestBody = prepareRequestBody;
+  }
+  submitMessages({
+    chatId,
+    messages,
+    abortController,
+    body,
+    headers,
+    requestType
+  }) {
+    var _a17, _b;
+    return fetchTextStream({
+      api: this.api,
+      headers: {
+        ...this.headers,
+        ...headers
+      },
+      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      })) != null ? _b : {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      },
+      credentials: this.credentials,
+      abortController: () => abortController,
+      fetch: this.fetch,
+      requestType
+    });
+  }
+};
+
 // src/ui-message-stream/handle-ui-message-stream-finish.ts
 function handleUIMessageStreamFinish({
   newMessageId,
@@ -3855,6 +3742,19 @@ function prepareCallSettings({
   };
 }
 
+// core/prompt/resolve-language-model.ts
+import { gateway } from "@ai-sdk/gateway";
+var GLOBAL_DEFAULT_PROVIDER = Symbol(
+  "vercel.ai.global.defaultProvider"
+);
+function resolveLanguageModel(model) {
+  if (typeof model !== "string") {
+    return model;
+  }
+  const globalProvider = globalThis[GLOBAL_DEFAULT_PROVIDER];
+  return (globalProvider != null ? globalProvider : gateway).languageModel(model);
+}
+
 // core/prompt/standardize-prompt.ts
 import { InvalidPromptError as InvalidPromptError2 } from "@ai-sdk/provider";
 import { safeValidateTypes } from "@ai-sdk/provider-utils";
@@ -4046,6 +3946,23 @@ async function standardizePrompt(prompt) {
   };
 }
 
+// core/prompt/wrap-gateway-error.ts
+import {
+  GatewayAuthenticationError,
+  GatewayModelNotFoundError
+} from "@ai-sdk/gateway";
+import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
+function wrapGatewayError(error) {
+  if (GatewayAuthenticationError.isInstance(error) || GatewayModelNotFoundError.isInstance(error)) {
+    return new AISDKError18({
+      name: "GatewayError",
+      message: "Vercel AI Gateway access failed. If you want to use AI SDK providers directly, use the providers, e.g. @ai-sdk/openai, or register a different global default provider.",
+      cause: error
+    });
+  }
+  return error;
+}
+
 // core/telemetry/stringify-for-telemetry.ts
 function stringifyForTelemetry(prompt) {
   return JSON.stringify(
@@ -4462,12 +4379,6 @@ function validateObjectGenerationInput({
   }
 }
 
-// core/prompt/resolve-language-model.ts
-import { gateway } from "@ai-sdk/gateway";
-function resolveLanguageModel(model) {
-  return typeof model === "string" ? gateway.languageModel(model) : model;
-}
-
 // core/generate-object/generate-object.ts
 var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
 async function generateObject(options) {
@@ -4517,208 +4428,212 @@ async function generateObject(options) {
     settings: { ...callSettings, maxRetries }
   });
   const tracer = getTracer(telemetry);
-  return recordSpan({
-    name: "ai.generateObject",
-    attributes: selectTelemetryAttributes({
-      telemetry,
-      attributes: {
-        ...assembleOperationName({
-          operationId: "ai.generateObject",
-          telemetry
-        }),
-        ...baseTelemetryAttributes,
-        // specific settings that only make sense on the outer level:
-        "ai.prompt": {
-          input: () => JSON.stringify({ system, prompt, messages })
-        },
-        "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
-        "ai.schema.name": schemaName,
-        "ai.schema.description": schemaDescription,
-        "ai.settings.output": outputStrategy.type
-      }
-    }),
-    tracer,
-    fn: async (span) => {
-      var _a17;
-      let result;
-      let finishReason;
-      let usage;
-      let warnings;
-      let response;
-      let request;
-      let resultProviderMetadata;
-      const standardizedPrompt = await standardizePrompt({
-        system,
-        prompt,
-        messages
-      });
-      const promptMessages = await convertToLanguageModelPrompt({
-        prompt: standardizedPrompt,
-        supportedUrls: await model.supportedUrls
-      });
-      const generateResult = await retry(
-        () => recordSpan({
-          name: "ai.generateObject.doGenerate",
-          attributes: selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              ...assembleOperationName({
-                operationId: "ai.generateObject.doGenerate",
-                telemetry
-              }),
-              ...baseTelemetryAttributes,
-              "ai.prompt.messages": {
-                input: () => stringifyForTelemetry(promptMessages)
-              },
-              // standardized gen-ai llm span attributes:
-              "gen_ai.system": model.provider,
-              "gen_ai.request.model": model.modelId,
-              "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
-              "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
-              "gen_ai.request.presence_penalty": callSettings.presencePenalty,
-              "gen_ai.request.temperature": callSettings.temperature,
-              "gen_ai.request.top_k": callSettings.topK,
-              "gen_ai.request.top_p": callSettings.topP
-            }
+  try {
+    return await recordSpan({
+      name: "ai.generateObject",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.generateObject",
+            telemetry
           }),
-          tracer,
-          fn: async (span2) => {
-            var _a18, _b, _c, _d, _e, _f, _g, _h;
-            const result2 = await model.doGenerate({
-              responseFormat: {
-                type: "json",
-                schema: outputStrategy.jsonSchema,
-                name: schemaName,
-                description: schemaDescription
-              },
-              ...prepareCallSettings(settings),
-              prompt: promptMessages,
-              providerOptions,
-              abortSignal,
-              headers
-            });
-            const responseData = {
-              id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
-              timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
-              modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
-              headers: (_g = result2.response) == null ? void 0 : _g.headers,
-              body: (_h = result2.response) == null ? void 0 : _h.body
-            };
-            const text2 = extractContentText(result2.content);
-            if (text2 === void 0) {
-              throw new NoObjectGeneratedError({
-                message: "No object generated: the model did not return a response.",
-                response: responseData,
-                usage: result2.usage,
-                finishReason: result2.finishReason
+          ...baseTelemetryAttributes,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
+          },
+          "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+          "ai.schema.name": schemaName,
+          "ai.schema.description": schemaDescription,
+          "ai.settings.output": outputStrategy.type
+        }
+      }),
+      tracer,
+      fn: async (span) => {
+        var _a17;
+        let result;
+        let finishReason;
+        let usage;
+        let warnings;
+        let response;
+        let request;
+        let resultProviderMetadata;
+        const standardizedPrompt = await standardizePrompt({
+          system,
+          prompt,
+          messages
+        });
+        const promptMessages = await convertToLanguageModelPrompt({
+          prompt: standardizedPrompt,
+          supportedUrls: await model.supportedUrls
+        });
+        const generateResult = await retry(
+          () => recordSpan({
+            name: "ai.generateObject.doGenerate",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.generateObject.doGenerate",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.messages": {
+                  input: () => stringifyForTelemetry(promptMessages)
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": model.provider,
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+                "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+                "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+                "gen_ai.request.temperature": callSettings.temperature,
+                "gen_ai.request.top_k": callSettings.topK,
+                "gen_ai.request.top_p": callSettings.topP
+              }
+            }),
+            tracer,
+            fn: async (span2) => {
+              var _a18, _b, _c, _d, _e, _f, _g, _h;
+              const result2 = await model.doGenerate({
+                responseFormat: {
+                  type: "json",
+                  schema: outputStrategy.jsonSchema,
+                  name: schemaName,
+                  description: schemaDescription
+                },
+                ...prepareCallSettings(settings),
+                prompt: promptMessages,
+                providerOptions,
+                abortSignal,
+                headers
               });
+              const responseData = {
+                id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+                timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+                modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+                headers: (_g = result2.response) == null ? void 0 : _g.headers,
+                body: (_h = result2.response) == null ? void 0 : _h.body
+              };
+              const text2 = extractContentText(result2.content);
+              if (text2 === void 0) {
+                throw new NoObjectGeneratedError({
+                  message: "No object generated: the model did not return a response.",
+                  response: responseData,
+                  usage: result2.usage,
+                  finishReason: result2.finishReason
+                });
+              }
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.response.finishReason": result2.finishReason,
+                    "ai.response.object": { output: () => text2 },
+                    "ai.response.id": responseData.id,
+                    "ai.response.model": responseData.modelId,
+                    "ai.response.timestamp": responseData.timestamp.toISOString(),
+                    // TODO rename telemetry attributes to inputTokens and outputTokens
+                    "ai.usage.promptTokens": result2.usage.inputTokens,
+                    "ai.usage.completionTokens": result2.usage.outputTokens,
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.response.finish_reasons": [result2.finishReason],
+                    "gen_ai.response.id": responseData.id,
+                    "gen_ai.response.model": responseData.modelId,
+                    "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+                    "gen_ai.usage.output_tokens": result2.usage.outputTokens
+                  }
+                })
+              );
+              return { ...result2, objectText: text2, responseData };
             }
-            span2.setAttributes(
-              selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  "ai.response.finishReason": result2.finishReason,
-                  "ai.response.object": { output: () => text2 },
-                  "ai.response.id": responseData.id,
-                  "ai.response.model": responseData.modelId,
-                  "ai.response.timestamp": responseData.timestamp.toISOString(),
-                  // TODO rename telemetry attributes to inputTokens and outputTokens
-                  "ai.usage.promptTokens": result2.usage.inputTokens,
-                  "ai.usage.completionTokens": result2.usage.outputTokens,
-                  // standardized gen-ai llm span attributes:
-                  "gen_ai.response.finish_reasons": [result2.finishReason],
-                  "gen_ai.response.id": responseData.id,
-                  "gen_ai.response.model": responseData.modelId,
-                  "gen_ai.usage.input_tokens": result2.usage.inputTokens,
-                  "gen_ai.usage.output_tokens": result2.usage.outputTokens
-                }
-              })
-            );
-            return { ...result2, objectText: text2, responseData };
+          })
+        );
+        result = generateResult.objectText;
+        finishReason = generateResult.finishReason;
+        usage = generateResult.usage;
+        warnings = generateResult.warnings;
+        resultProviderMetadata = generateResult.providerMetadata;
+        request = (_a17 = generateResult.request) != null ? _a17 : {};
+        response = generateResult.responseData;
+        async function processResult(result2) {
+          const parseResult = await safeParseJSON2({ text: result2 });
+          if (!parseResult.success) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: could not parse the response.",
+              cause: parseResult.error,
+              text: result2,
+              response,
+              usage,
+              finishReason
+            });
           }
-        })
-      );
-      result = generateResult.objectText;
-      finishReason = generateResult.finishReason;
-      usage = generateResult.usage;
-      warnings = generateResult.warnings;
-      resultProviderMetadata = generateResult.providerMetadata;
-      request = (_a17 = generateResult.request) != null ? _a17 : {};
-      response = generateResult.responseData;
-      async function processResult(result2) {
-        const parseResult = await safeParseJSON2({ text: result2 });
-        if (!parseResult.success) {
-          throw new NoObjectGeneratedError({
-            message: "No object generated: could not parse the response.",
-            cause: parseResult.error,
-            text: result2,
-            response,
-            usage,
-            finishReason
-          });
-        }
-        const validationResult = await outputStrategy.validateFinalResult(
-          parseResult.value,
-          {
-            text: result2,
-            response,
-            usage
+          const validationResult = await outputStrategy.validateFinalResult(
+            parseResult.value,
+            {
+              text: result2,
+              response,
+              usage
+            }
+          );
+          if (!validationResult.success) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: response did not match schema.",
+              cause: validationResult.error,
+              text: result2,
+              response,
+              usage,
+              finishReason
+            });
           }
-        );
-        if (!validationResult.success) {
-          throw new NoObjectGeneratedError({
-            message: "No object generated: response did not match schema.",
-            cause: validationResult.error,
-            text: result2,
-            response,
-            usage,
-            finishReason
-          });
+          return validationResult.value;
         }
-        return validationResult.value;
-      }
-      let object2;
-      try {
-        object2 = await processResult(result);
-      } catch (error) {
-        if (repairText != null && NoObjectGeneratedError.isInstance(error) && (JSONParseError2.isInstance(error.cause) || TypeValidationError3.isInstance(error.cause))) {
-          const repairedText = await repairText({
-            text: result,
-            error: error.cause
-          });
-          if (repairedText === null) {
+        let object2;
+        try {
+          object2 = await processResult(result);
+        } catch (error) {
+          if (repairText != null && NoObjectGeneratedError.isInstance(error) && (JSONParseError2.isInstance(error.cause) || TypeValidationError3.isInstance(error.cause))) {
+            const repairedText = await repairText({
+              text: result,
+              error: error.cause
+            });
+            if (repairedText === null) {
+              throw error;
+            }
+            object2 = await processResult(repairedText);
+          } else {
             throw error;
           }
-          object2 = await processResult(repairedText);
-        } else {
-          throw error;
         }
+        span.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.response.finishReason": finishReason,
+              "ai.response.object": {
+                output: () => JSON.stringify(object2)
+              },
+              // TODO rename telemetry attributes to inputTokens and outputTokens
+              "ai.usage.promptTokens": usage.inputTokens,
+              "ai.usage.completionTokens": usage.outputTokens
+            }
+          })
+        );
+        return new DefaultGenerateObjectResult({
+          object: object2,
+          finishReason,
+          usage,
+          warnings,
+          request,
+          response,
+          providerMetadata: resultProviderMetadata
+        });
       }
-      span.setAttributes(
-        selectTelemetryAttributes({
-          telemetry,
-          attributes: {
-            "ai.response.finishReason": finishReason,
-            "ai.response.object": {
-              output: () => JSON.stringify(object2)
-            },
-            // TODO rename telemetry attributes to inputTokens and outputTokens
-            "ai.usage.promptTokens": usage.inputTokens,
-            "ai.usage.completionTokens": usage.outputTokens
-          }
-        })
-      );
-      return new DefaultGenerateObjectResult({
-        object: object2,
-        finishReason,
-        usage,
-        warnings,
-        request,
-        response,
-        providerMetadata: resultProviderMetadata
-      });
-    }
-  });
+    });
+  } catch (error) {
+    throw wrapGatewayError(error);
+  }
 }
 var DefaultGenerateObjectResult = class {
   constructor(options) {
@@ -4742,7 +4657,9 @@ var DefaultGenerateObjectResult = class {
 };
 
 // core/generate-object/stream-object.ts
-import {
+import {
+  createIdGenerator as createIdGenerator2
+} from "@ai-sdk/provider-utils";
 
 // src/util/create-resolvable-promise.ts
 function createResolvablePromise() {
@@ -4899,7 +4816,9 @@ function streamObject(options) {
4899 4816 | headers,
4900 4817 | experimental_telemetry: telemetry,
4901 4818 | providerOptions,
4902 | - onError
4819 | + onError = ({ error }) => {
4820 | + console.error(error);
4821 | + },
4903 4822 | onFinish,
4904 4823 | _internal: {
4905 4824 | generateId: generateId3 = originalGenerateId2,
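
Note: streamObject's onError option now defaults to ({ error }) => console.error(error) instead of being undefined, so stream failures are no longer silently dropped when no handler is supplied (and, per the next hunk, the error is passed through wrapGatewayError first). A sketch of overriding the default, where myLogger stands in for a hypothetical application logger:

    import { streamObject } from "ai";
    import { openai } from "@ai-sdk/openai";
    import { z } from "zod";

    const { partialObjectStream } = streamObject({
      model: openai("gpt-4o"),
      schema: z.object({ title: z.string() }),
      prompt: "Suggest a title for a post about package diffs.",
      // overrides the new console.error default:
      onError: ({ error }) => myLogger.error(error), // myLogger is hypothetical
    });

    for await (const partialObject of partialObjectStream) {
      console.log(partialObject);
    }
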
@@ -4992,7 +4911,7 @@ var DefaultStreamObjectResult = class {
4992 4911 | transform(chunk, controller) {
4993 4912 | controller.enqueue(chunk);
4994 4913 | if (chunk.type === "error") {
4995 | - onError
4914 | + onError({ error: wrapGatewayError(chunk.error) });
4996 4915 | }
4997 4916 | }
4998 4917 | });
@@ -5392,8 +5311,8 @@ var DefaultStreamObjectResult = class {
5392 5311 | };
5393 5312 |
5394 5313 | // src/error/no-speech-generated-error.ts
5395 | - import { AISDKError as
5396 | - var NoSpeechGeneratedError = class extends
5314 | + import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
5315 | + var NoSpeechGeneratedError = class extends AISDKError19 {
5397 5316 | constructor(options) {
5398 5317 | super({
5399 5318 | name: "AI_NoSpeechGeneratedError",
@@ -5833,239 +5752,243 @@ async function generateText({
5833 5752 | messages
5834 5753 | });
5835 5754 | const tracer = getTracer(telemetry);
5836 | -
5837 | -
5838 | -
5839 | -
5840 | -
5841 | -
5842 | -
5843 | -
5844 | -
5845 | -
5846 | -
5847 | -
5848 | -
5849 | -
5850 | -
5851 | -
5852 | -
5853 | - }
5854 | - }),
5855 | - tracer,
5856 | - fn: async (span) => {
5857 | - var _a17, _b, _c, _d, _e;
5858 | - const callSettings2 = prepareCallSettings(settings);
5859 | - let currentModelResponse;
5860 | - let currentToolCalls = [];
5861 | - let currentToolResults = [];
5862 | - const responseMessages = [];
5863 | - const steps = [];
5864 | - do {
5865 | - const stepInputMessages = [
5866 | - ...initialPrompt.messages,
5867 | - ...responseMessages
5868 | - ];
5869 | - const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
5870 | - model,
5871 | - steps,
5872 | - stepNumber: steps.length
5873 | - }));
5874 | - const promptMessages = await convertToLanguageModelPrompt({
5875 | - prompt: {
5876 | - system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
5877 | - messages: stepInputMessages
5878 | - },
5879 | - supportedUrls: await model.supportedUrls
5880 | - });
5881 | - const stepModel = resolveLanguageModel(
5882 | - (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
5883 | - );
5884 | - const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
5885 | - tools,
5886 | - toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
5887 | - activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
5888 | - });
5889 | - currentModelResponse = await retry(
5890 | - () => {
5891 | - var _a18;
5892 | - return recordSpan({
5893 | - name: "ai.generateText.doGenerate",
5894 | - attributes: selectTelemetryAttributes({
5895 | - telemetry,
5896 | - attributes: {
5897 | - ...assembleOperationName({
5898 | - operationId: "ai.generateText.doGenerate",
5899 | - telemetry
5900 | - }),
5901 | - ...baseTelemetryAttributes,
5902 | - // model:
5903 | - "ai.model.provider": stepModel.provider,
5904 | - "ai.model.id": stepModel.modelId,
5905 | - // prompt:
5906 | - "ai.prompt.messages": {
5907 | - input: () => stringifyForTelemetry(promptMessages)
5908 | - },
5909 | - "ai.prompt.tools": {
5910 | - // convert the language model level tools:
5911 | - input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
5912 | - },
5913 | - "ai.prompt.toolChoice": {
5914 | - input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
5915 | - },
5916 | - // standardized gen-ai llm span attributes:
5917 | - "gen_ai.system": stepModel.provider,
5918 | - "gen_ai.request.model": stepModel.modelId,
5919 | - "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
5920 | - "gen_ai.request.max_tokens": settings.maxOutputTokens,
5921 | - "gen_ai.request.presence_penalty": settings.presencePenalty,
5922 | - "gen_ai.request.stop_sequences": settings.stopSequences,
5923 | - "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
5924 | - "gen_ai.request.top_k": settings.topK,
5925 | - "gen_ai.request.top_p": settings.topP
5926 | - }
5927 | - }),
5928 | - tracer,
5929 | - fn: async (span2) => {
5930 | - var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
5931 | - const result = await stepModel.doGenerate({
5932 | - ...callSettings2,
5933 | - tools: stepTools,
5934 | - toolChoice: stepToolChoice,
5935 | - responseFormat: output == null ? void 0 : output.responseFormat,
5936 | - prompt: promptMessages,
5937 | - providerOptions,
5938 | - abortSignal,
5939 | - headers
5940 | - });
5941 | - const responseData = {
5942 | - id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
5943 | - timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
5944 | - modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
5945 | - headers: (_g = result.response) == null ? void 0 : _g.headers,
5946 | - body: (_h = result.response) == null ? void 0 : _h.body
5947 | - };
5948 | - span2.setAttributes(
5949 | - selectTelemetryAttributes({
5950 | - telemetry,
5951 | - attributes: {
5952 | - "ai.response.finishReason": result.finishReason,
5953 | - "ai.response.text": {
5954 | - output: () => extractContentText(result.content)
5955 | - },
5956 | - "ai.response.toolCalls": {
5957 | - output: () => {
5958 | - const toolCalls = asToolCalls(result.content);
5959 | - return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
5960 | - }
5961 | - },
5962 | - "ai.response.id": responseData.id,
5963 | - "ai.response.model": responseData.modelId,
5964 | - "ai.response.timestamp": responseData.timestamp.toISOString(),
5965 | - // TODO rename telemetry attributes to inputTokens and outputTokens
5966 | - "ai.usage.promptTokens": result.usage.inputTokens,
5967 | - "ai.usage.completionTokens": result.usage.outputTokens,
5968 | - // standardized gen-ai llm span attributes:
5969 | - "gen_ai.response.finish_reasons": [result.finishReason],
5970 | - "gen_ai.response.id": responseData.id,
5971 | - "gen_ai.response.model": responseData.modelId,
5972 | - "gen_ai.usage.input_tokens": result.usage.inputTokens,
5973 | - "gen_ai.usage.output_tokens": result.usage.outputTokens
5974 | - }
5975 | - })
5976 | - );
5977 | - return { ...result, response: responseData };
5978 | - }
5979 | - });
5755 | + try {
5756 | + return await recordSpan({
5757 | + name: "ai.generateText",
5758 | + attributes: selectTelemetryAttributes({
5759 | + telemetry,
5760 | + attributes: {
5761 | + ...assembleOperationName({
5762 | + operationId: "ai.generateText",
5763 | + telemetry
5764 | + }),
5765 | + ...baseTelemetryAttributes,
5766 | + // model:
5767 | + "ai.model.provider": model.provider,
5768 | + "ai.model.id": model.modelId,
5769 | + // specific settings that only make sense on the outer level:
5770 | + "ai.prompt": {
5771 | + input: () => JSON.stringify({ system, prompt, messages })
5980 5772 | }
5981 | -
5982 | -
5983 | -
5984 | -
5985 | -
5986 | -
5987 | -
5988 | -
5989 | -
5990 | -
5773 | + }
5774 | + }),
5775 | + tracer,
5776 | + fn: async (span) => {
5777 | + var _a17, _b, _c, _d, _e;
5778 | + const callSettings2 = prepareCallSettings(settings);
5779 | + let currentModelResponse;
5780 | + let currentToolCalls = [];
5781 | + let currentToolResults = [];
5782 | + const responseMessages = [];
5783 | + const steps = [];
5784 | + do {
5785 | + const stepInputMessages = [
5786 | + ...initialPrompt.messages,
5787 | + ...responseMessages
5788 | + ];
5789 | + const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
5790 | + model,
5791 | + steps,
5792 | + stepNumber: steps.length
5793 | + }));
5794 | + const promptMessages = await convertToLanguageModelPrompt({
5795 | + prompt: {
5796 | + system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
5991 5797 | messages: stepInputMessages
5798 | + },
5799 | + supportedUrls: await model.supportedUrls
5800 | + });
5801 | + const stepModel = resolveLanguageModel(
5802 | + (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
5803 | + );
5804 | + const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
5805 | + tools,
5806 | + toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
5807 | + activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
5808 | + });
5809 | + currentModelResponse = await retry(
5810 | + () => {
5811 | + var _a18;
5812 | + return recordSpan({
5813 | + name: "ai.generateText.doGenerate",
5814 | + attributes: selectTelemetryAttributes({
5815 | + telemetry,
5816 | + attributes: {
5817 | + ...assembleOperationName({
5818 | + operationId: "ai.generateText.doGenerate",
5819 | + telemetry
5820 | + }),
5821 | + ...baseTelemetryAttributes,
5822 | + // model:
5823 | + "ai.model.provider": stepModel.provider,
5824 | + "ai.model.id": stepModel.modelId,
5825 | + // prompt:
5826 | + "ai.prompt.messages": {
5827 | + input: () => stringifyForTelemetry(promptMessages)
5828 | + },
5829 | + "ai.prompt.tools": {
5830 | + // convert the language model level tools:
5831 | + input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
5832 | + },
5833 | + "ai.prompt.toolChoice": {
5834 | + input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
5835 | + },
5836 | + // standardized gen-ai llm span attributes:
5837 | + "gen_ai.system": stepModel.provider,
5838 | + "gen_ai.request.model": stepModel.modelId,
5839 | + "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
5840 | + "gen_ai.request.max_tokens": settings.maxOutputTokens,
5841 | + "gen_ai.request.presence_penalty": settings.presencePenalty,
5842 | + "gen_ai.request.stop_sequences": settings.stopSequences,
5843 | + "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
5844 | + "gen_ai.request.top_k": settings.topK,
5845 | + "gen_ai.request.top_p": settings.topP
5846 | + }
5847 | + }),
5848 | + tracer,
5849 | + fn: async (span2) => {
5850 | + var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
5851 | + const result = await stepModel.doGenerate({
5852 | + ...callSettings2,
5853 | + tools: stepTools,
5854 | + toolChoice: stepToolChoice,
5855 | + responseFormat: output == null ? void 0 : output.responseFormat,
5856 | + prompt: promptMessages,
5857 | + providerOptions,
5858 | + abortSignal,
5859 | + headers
5860 | + });
5861 | + const responseData = {
5862 | + id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
5863 | + timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
5864 | + modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
5865 | + headers: (_g = result.response) == null ? void 0 : _g.headers,
5866 | + body: (_h = result.response) == null ? void 0 : _h.body
5867 | + };
5868 | + span2.setAttributes(
5869 | + selectTelemetryAttributes({
5870 | + telemetry,
5871 | + attributes: {
5872 | + "ai.response.finishReason": result.finishReason,
5873 | + "ai.response.text": {
5874 | + output: () => extractContentText(result.content)
5875 | + },
5876 | + "ai.response.toolCalls": {
5877 | + output: () => {
5878 | + const toolCalls = asToolCalls(result.content);
5879 | + return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
5880 | + }
5881 | + },
5882 | + "ai.response.id": responseData.id,
5883 | + "ai.response.model": responseData.modelId,
5884 | + "ai.response.timestamp": responseData.timestamp.toISOString(),
5885 | + // TODO rename telemetry attributes to inputTokens and outputTokens
5886 | + "ai.usage.promptTokens": result.usage.inputTokens,
5887 | + "ai.usage.completionTokens": result.usage.outputTokens,
5888 | + // standardized gen-ai llm span attributes:
5889 | + "gen_ai.response.finish_reasons": [result.finishReason],
5890 | + "gen_ai.response.id": responseData.id,
5891 | + "gen_ai.response.model": responseData.modelId,
5892 | + "gen_ai.usage.input_tokens": result.usage.inputTokens,
5893 | + "gen_ai.usage.output_tokens": result.usage.outputTokens
5894 | + }
5895 | + })
5896 | + );
5897 | + return { ...result, response: responseData };
5898 | + }
5899 | + });
5900 | + }
5901 | + );
5902 | + currentToolCalls = await Promise.all(
5903 | + currentModelResponse.content.filter(
5904 | + (part) => part.type === "tool-call"
5905 | + ).map(
5906 | + (toolCall) => parseToolCall({
5907 | + toolCall,
5908 | + tools,
5909 | + repairToolCall,
5910 | + system,
5911 | + messages: stepInputMessages
5912 | + })
5913 | + )
5914 | + );
5915 | + currentToolResults = tools == null ? [] : await executeTools({
5916 | + toolCalls: currentToolCalls,
5917 | + tools,
5918 | + tracer,
5919 | + telemetry,
5920 | + messages: stepInputMessages,
5921 | + abortSignal
5922 | + });
5923 | + const stepContent = asContent({
5924 | + content: currentModelResponse.content,
5925 | + toolCalls: currentToolCalls,
5926 | + toolResults: currentToolResults
5927 | + });
5928 | + responseMessages.push(
5929 | + ...toResponseMessages({
5930 | + content: stepContent,
5931 | + tools: tools != null ? tools : {}
5992 5932 | })
5993 | - )
5994 | -
5995 | - currentToolResults = tools == null ? [] : await executeTools({
5996 | - toolCalls: currentToolCalls,
5997 | - tools,
5998 | - tracer,
5999 | - telemetry,
6000 | - messages: stepInputMessages,
6001 | - abortSignal
6002 | - });
6003 | - const stepContent = asContent({
6004 | - content: currentModelResponse.content,
6005 | - toolCalls: currentToolCalls,
6006 | - toolResults: currentToolResults
6007 | - });
6008 | - responseMessages.push(
6009 | - ...toResponseMessages({
5933 | + );
5934 | + const currentStepResult = new DefaultStepResult({
6010 5935 | content: stepContent,
6011 | -
5936 | + finishReason: currentModelResponse.finishReason,
5937 | + usage: currentModelResponse.usage,
5938 | + warnings: currentModelResponse.warnings,
5939 | + providerMetadata: currentModelResponse.providerMetadata,
5940 | + request: (_e = currentModelResponse.request) != null ? _e : {},
5941 | + response: {
5942 | + ...currentModelResponse.response,
5943 | + // deep clone msgs to avoid mutating past messages in multi-step:
5944 | + messages: structuredClone(responseMessages)
5945 | + }
5946 | + });
5947 | + steps.push(currentStepResult);
5948 | + await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
5949 | + } while (
5950 | + // there are tool calls:
5951 | + currentToolCalls.length > 0 && // all current tool calls have results:
5952 | + currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
5953 | + !await isStopConditionMet({ stopConditions, steps })
5954 | + );
5955 | + span.setAttributes(
5956 | + selectTelemetryAttributes({
5957 | + telemetry,
5958 | + attributes: {
5959 | + "ai.response.finishReason": currentModelResponse.finishReason,
5960 | + "ai.response.text": {
5961 | + output: () => extractContentText(currentModelResponse.content)
5962 | + },
5963 | + "ai.response.toolCalls": {
5964 | + output: () => {
5965 | + const toolCalls = asToolCalls(currentModelResponse.content);
5966 | + return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
5967 | + }
5968 | + },
5969 | + // TODO rename telemetry attributes to inputTokens and outputTokens
5970 | + "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
5971 | + "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
5972 | + }
6012 5973 | })
6013 5974 | );
6014 | - const
6015 | -
6016 | -
6017 | -
6018 | -
6019 | -
6020 | -
6021 | -
6022 | -
6023 | -
6024 | -
6025 | - }
5975 | + const lastStep = steps[steps.length - 1];
5976 | + return new DefaultGenerateTextResult({
5977 | + steps,
5978 | + resolvedOutput: await (output == null ? void 0 : output.parseOutput(
5979 | + { text: lastStep.text },
5980 | + {
5981 | + response: lastStep.response,
5982 | + usage: lastStep.usage,
5983 | + finishReason: lastStep.finishReason
5984 | + }
5985 | + ))
6026 5986 | });
6027 | -
6028 | -
6029 | -
6030 | -
6031 | -
6032 | - currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
6033 | - !await isStopConditionMet({ stopConditions, steps })
6034 | - );
6035 | - span.setAttributes(
6036 | - selectTelemetryAttributes({
6037 | - telemetry,
6038 | - attributes: {
6039 | - "ai.response.finishReason": currentModelResponse.finishReason,
6040 | - "ai.response.text": {
6041 | - output: () => extractContentText(currentModelResponse.content)
6042 | - },
6043 | - "ai.response.toolCalls": {
6044 | - output: () => {
6045 | - const toolCalls = asToolCalls(currentModelResponse.content);
6046 | - return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
6047 | - }
6048 | - },
6049 | - // TODO rename telemetry attributes to inputTokens and outputTokens
6050 | - "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
6051 | - "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
6052 | - }
6053 | - })
6054 | - );
6055 | - const lastStep = steps[steps.length - 1];
6056 | - return new DefaultGenerateTextResult({
6057 | - steps,
6058 | - resolvedOutput: await (output == null ? void 0 : output.parseOutput(
6059 | - { text: lastStep.text },
6060 | - {
6061 | - response: lastStep.response,
6062 | - usage: lastStep.usage,
6063 | - finishReason: lastStep.finishReason
6064 | - }
6065 | - ))
6066 | - });
6067 | - }
6068 | - });
5987 | + }
5988 | + });
5989 | + } catch (error) {
5990 | + throw wrapGatewayError(error);
5991 | + }
6069 5992 | }
6070 5993 | async function executeTools({
6071 5994 | toolCalls,
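
Note: the generateText rewrite above keeps the same multi-step loop (a prepareStep hook per step, tool-call parsing, executeTools, onStepFinish, stop conditions) but now runs it inside try/catch so any failure is rethrown through wrapGatewayError. A rough sketch of the options that loop consumes, assuming the hooks are exposed as prepareStep and onStepFinish, as the destructured names suggest:

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const result = await generateText({
      model: openai("gpt-4o"),
      prompt: "Summarize the plot of Hamlet in one sentence.",
      // consulted before each step; may override system, model, toolChoice, activeTools:
      prepareStep: async ({ model, steps, stepNumber }) =>
        stepNumber > 0 ? { system: "Be extremely concise." } : undefined,
      // called once per completed step with content, usage, finishReason, etc.:
      onStepFinish: (step) => {
        console.log(step.finishReason, step.usage);
      },
    });
    console.log(result.text);
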
@@ -6602,7 +6525,9 @@ function streamText({
6602 6525 | experimental_repairToolCall: repairToolCall,
6603 6526 | experimental_transform: transform,
6604 6527 | onChunk,
6605 | - onError
6528 | + onError = ({ error }) => {
6529 | + console.error(error);
6530 | + },
6606 6531 | onFinish,
6607 6532 | onStepFinish,
6608 6533 | _internal: {
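
Note: streamText gets the same change as streamObject above: when no onError handler is supplied, the new default logs the error with console.error instead of ignoring it. Overriding it looks like this (reportError is a hypothetical stand-in for your own handler):

    import { streamText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const result = streamText({
      model: openai("gpt-4o"),
      prompt: "Write a haiku about semantic versioning.",
      // without this override, stream errors are now console.error'ed by default:
      onError: ({ error }) => reportError(error), // reportError is hypothetical
    });

    for await (const textPart of result.textStream) {
      process.stdout.write(textPart);
    }
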
@@ -6741,7 +6666,7 @@ var DefaultStreamTextResult = class {
6741 6666 | await (onChunk == null ? void 0 : onChunk({ chunk: part }));
6742 6667 | }
6743 6668 | if (part.type === "error") {
6744 | - await
6669 | + await onError({ error: wrapGatewayError(part.error) });
6745 6670 | }
6746 6671 | if (part.type === "text") {
6747 6672 | const latestContent = recordedContent[recordedContent.length - 1];
@@ -7397,7 +7322,7 @@ var DefaultStreamTextResult = class {
7397 7322 | } = {}) {
7398 7323 | const lastMessage = originalMessages[originalMessages.length - 1];
7399 7324 | const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
7400 | - const messageId = isContinuation ? lastMessage.id : newMessageId
7325 | + const messageId = isContinuation ? lastMessage.id : newMessageId;
7401 7326 | const baseStream = this.fullStream.pipeThrough(
7402 7327 | new TransformStream({
7403 7328 | transform: async (part, controller) => {
@@ -7533,7 +7458,7 @@ var DefaultStreamTextResult = class {
7533 7458 | );
7534 7459 | return handleUIMessageStreamFinish({
7535 7460 | stream: baseStream,
7536 | - newMessageId: messageId,
7461 | + newMessageId: messageId != null ? messageId : this.generateId(),
7537 7462 | originalMessages,
7538 7463 | onFinish
7539 7464 | });
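
Note: together with the previous hunk, this changes when a fresh UI message id is minted: messageId stays undefined unless the stream continues an existing assistant message, and handleUIMessageStreamFinish falls back to this.generateId() only at finish time. A sketch of a route handler that exercises this path; toUIMessageStreamResponse and the exact message-conversion helper are assumptions based on the surrounding 5.0 API:

    import { convertToCoreMessages, streamText } from "ai";
    import { openai } from "@ai-sdk/openai";

    export async function POST(req: Request) {
      const { messages } = await req.json();
      const result = streamText({
        model: openai("gpt-4o"),
        messages: convertToCoreMessages(messages),
      });
      // if the last original message is an assistant message its id is reused;
      // otherwise the stream falls back to the result's own id generator:
      return result.toUIMessageStreamResponse({ originalMessages: messages });
    }
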
@@ -7879,7 +7804,7 @@ function customProvider({
7879 7804 | var experimental_customProvider = customProvider;
7880 7805 |
7881 7806 | // core/registry/no-such-provider-error.ts
7882 | - import { AISDKError as
7807 | + import { AISDKError as AISDKError20, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
7883 7808 | var name16 = "AI_NoSuchProviderError";
7884 7809 | var marker16 = `vercel.ai.error.${name16}`;
7885 7810 | var symbol16 = Symbol.for(marker16);
@@ -7898,7 +7823,7 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
7898 7823 | this.availableProviders = availableProviders;
7899 7824 | }
7900 7825 | static isInstance(error) {
7901 | - return
7826 | + return AISDKError20.hasMarker(error, marker16);
7902 7827 | }
7903 7828 | };
7904 7829 | _a16 = symbol16;
@@ -8555,8 +8480,8 @@ var MCPClient = class {
8555 8480 | };
8556 8481 |
8557 8482 | // src/error/no-transcript-generated-error.ts
8558 | - import { AISDKError as
8559 | - var NoTranscriptGeneratedError = class extends
8483 | + import { AISDKError as AISDKError21 } from "@ai-sdk/provider";
8484 | + var NoTranscriptGeneratedError = class extends AISDKError21 {
8560 8485 | constructor(options) {
8561 8486 | super({
8562 8487 | name: "AI_NoTranscriptGeneratedError",
@@ -8624,6 +8549,7 @@ export {
8624 8549 | DefaultChatTransport,
8625 8550 | DownloadError,
8626 8551 | EmptyResponseBodyError,
8552 | + GLOBAL_DEFAULT_PROVIDER,
8627 8553 | InvalidArgumentError,
8628 8554 | InvalidDataContentError,
8629 8555 | InvalidMessageRoleError,
@@ -8651,10 +8577,8 @@ export {
8651 8577 | ToolExecutionError,
8652 8578 | TypeValidationError,
8653 8579 | UnsupportedFunctionalityError,
8654 | - appendClientMessage,
8655 8580 | asSchema5 as asSchema,
8656 8581 | assistantModelMessageSchema,
8657 | - callChatApi,
8658 8582 | callCompletionApi,
8659 8583 | convertFileListToFileUIParts,
8660 8584 | convertToCoreMessages,
|
8681 8605 | generateImage as experimental_generateImage,
8682 8606 | generateSpeech as experimental_generateSpeech,
8683 8607 | transcribe as experimental_transcribe,
8684 | - extractMaxToolInvocationStep,
8685 8608 | extractReasoningMiddleware,
8686 8609 | generateId2 as generateId,
8687 8610 | generateObject,
@@ -8689,14 +8612,12 @@ export {
8689 8612 | getTextFromDataUrl,
8690 8613 | getToolInvocations,
8691 8614 | hasToolCall,
8692 | - isAssistantMessageWithCompletedToolCalls,
8693 8615 | isDeepEqualData,
8694 8616 | jsonSchema2 as jsonSchema,
8695 8617 | modelMessageSchema,
8696 8618 | parsePartialJson,
8697 8619 | pipeTextStreamToResponse,
8698 8620 | pipeUIMessageStreamToResponse,
8699 | - shouldResubmitMessages,
8700 8621 | simulateReadableStream,
8701 8622 | simulateStreamingMiddleware,
8702 8623 | smoothStream,
@@ -8706,7 +8627,6 @@ export {
8706 8627 | systemModelMessageSchema,
8707 8628 | tool,
8708 8629 | toolModelMessageSchema,
8709 | - updateToolCallResult,
8710 8630 | userModelMessageSchema,
8711 8631 | wrapLanguageModel
8712 8632 | };
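
Note: the export hunks above remove six chat-oriented helpers from the public surface (appendClientMessage, callChatApi, extractMaxToolInvocationStep, isAssistantMessageWithCompletedToolCalls, shouldResubmitMessages, updateToolCallResult) and add GLOBAL_DEFAULT_PROVIDER. A quick smoke test against an installed copy, purely illustrative:

    import * as ai from "ai";

    // removed in this release; both should now print "undefined":
    console.log(typeof (ai as any).callChatApi, typeof (ai as any).shouldResubmitMessages);
    // newly exported:
    console.log(ai.GLOBAL_DEFAULT_PROVIDER);
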