ai 5.0.0-alpha.7 → 5.0.0-alpha.9
This diff compares publicly released versions of the package as published to their public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +49 -0
- package/dist/index.d.mts +122 -276
- package/dist/index.d.ts +122 -276
- package/dist/index.js +866 -916
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +836 -883
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -2
- package/dist/internal/index.d.ts +3 -2
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/package.json +6 -6
package/dist/index.mjs
CHANGED
@@ -683,11 +683,43 @@ async function callCompletionApi({
   }
 }
 
-// src/ui/chat
+// src/ui/chat.ts
 import {
   generateId as generateIdFunc
 } from "@ai-sdk/provider-utils";
 
+// src/util/serial-job-executor.ts
+var SerialJobExecutor = class {
+  constructor() {
+    this.queue = [];
+    this.isProcessing = false;
+  }
+  async processQueue() {
+    if (this.isProcessing) {
+      return;
+    }
+    this.isProcessing = true;
+    while (this.queue.length > 0) {
+      await this.queue[0]();
+      this.queue.shift();
+    }
+    this.isProcessing = false;
+  }
+  async run(job) {
+    return new Promise((resolve, reject) => {
+      this.queue.push(async () => {
+        try {
+          await job();
+          resolve();
+        } catch (error) {
+          reject(error);
+        }
+      });
+      void this.processQueue();
+    });
+  }
+};
+
 // src/ui/process-ui-message-stream.ts
 import {
   validateTypes
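
The `SerialJobExecutor` added above is what the chat code later uses to serialize message updates: `run` enqueues a job and `processQueue` drains the queue one job at a time, so jobs never overlap even when `run` is called concurrently. A standalone TypeScript sketch of the same pattern (the class below is a re-statement for illustration, since the bundled executor is internal; `sleep` is a hypothetical helper):

```ts
// Standalone sketch of the serializer pattern used by SerialJobExecutor above.
class SerialExecutorSketch {
  private queue: Array<() => Promise<void>> = [];
  private isProcessing = false;

  private async processQueue(): Promise<void> {
    if (this.isProcessing) return;
    this.isProcessing = true;
    // keep the running job at queue[0] until it finishes, then shift it off
    while (this.queue.length > 0) {
      await this.queue[0]();
      this.queue.shift();
    }
    this.isProcessing = false;
  }

  run(job: () => Promise<void>): Promise<void> {
    return new Promise((resolve, reject) => {
      // the wrapper settles the caller's promise when the job settles
      this.queue.push(() => job().then(resolve, reject));
      void this.processQueue();
    });
  }
}

// Hypothetical usage: both jobs are submitted immediately, but the second
// only starts after the first has fully completed.
const sleep = (ms: number) => new Promise<void>((r) => setTimeout(r, ms));
const executor = new SerialExecutorSketch();
void executor.run(async () => { await sleep(100); console.log("job 1"); });
void executor.run(async () => { console.log("job 2"); }); // logs second
```

Keeping the finished job at `queue[0]` until it resolves means a concurrent `run` call sees a non-empty queue and leaves draining to the already-running loop.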
@@ -1347,6 +1379,9 @@ function shouldResubmitMessages({
   );
 }
 function isAssistantMessageWithCompletedToolCalls(message) {
+  if (!message) {
+    return false;
+  }
   if (message.role !== "assistant") {
     return false;
   }
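
The new `if (!message)` guard above matters because the rewritten chat class later in this diff passes `this.lastMessage`, which is `undefined` for an empty chat. A minimal TypeScript sketch of the guard's effect (simplified re-statement, not the bundled function):

```ts
// Minimal sketch: the guard turns an undefined message into `false`
// instead of throwing a TypeError on `message.role`.
type Message = { role: string } | undefined;

function isAssistantMessageSketch(message: Message): boolean {
  if (!message) {
    return false; // empty chat: nothing to resubmit
  }
  return message.role === "assistant";
}

console.log(isAssistantMessageSketch(undefined)); // false, no crash
```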
@@ -1357,242 +1392,298 @@ function isAssistantMessageWithCompletedToolCalls(message) {
   return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
 }
 
-// src/ui/chat-
-
+// src/ui/default-chat-transport.ts
+import {
+  parseJsonEventStream as parseJsonEventStream2
+} from "@ai-sdk/provider-utils";
+var getOriginalFetch2 = () => fetch;
+async function fetchUIMessageStream({
+  api,
+  body,
+  credentials,
+  headers,
+  abortController,
+  fetch: fetch2 = getOriginalFetch2(),
+  requestType = "generate"
+}) {
+  var _a17, _b, _c;
+  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
+    credentials
+  }) : await fetch2(api, {
+    method: "POST",
+    body: JSON.stringify(body),
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
+    credentials
+  });
+  if (!response.ok) {
+    throw new Error(
+      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
+    );
+  }
+  if (!response.body) {
+    throw new Error("The response body is empty.");
+  }
+  return parseJsonEventStream2({
+    stream: response.body,
+    schema: uiMessageStreamPartSchema
+  }).pipeThrough(
+    new TransformStream({
+      async transform(part, controller) {
+        if (!part.success) {
+          throw part.error;
+        }
+        controller.enqueue(part.value);
+      }
+    })
+  );
+}
+var DefaultChatTransport = class {
   constructor({
-
-
-
+    api = "/api/chat",
+    credentials,
+    headers,
+    body,
+    fetch: fetch2,
+    prepareRequestBody
+  } = {}) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequestBody = prepareRequestBody;
+  }
+  submitMessages({
+    chatId,
+    messages,
+    abortController,
+    body,
+    headers,
+    requestType
+  }) {
+    var _a17, _b;
+    return fetchUIMessageStream({
+      api: this.api,
+      headers: {
+        ...this.headers,
+        ...headers
+      },
+      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      })) != null ? _b : {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      },
+      credentials: this.credentials,
+      abortController: () => abortController,
+      fetch: this.fetch,
+      requestType
+    });
+  }
+};
+
+// src/ui/chat.ts
+var AbstractChat = class {
+  constructor({
+    generateId: generateId3 = generateIdFunc,
+    id = generateId3(),
+    transport = new DefaultChatTransport(),
     maxSteps = 1,
     messageMetadataSchema,
     dataPartSchemas,
-
+    state,
+    onError,
+    onToolCall,
+    onFinish
   }) {
-    this.
-    this.
-
-
-
-
-
+    this.subscribers = /* @__PURE__ */ new Set();
+    this.activeResponse = void 0;
+    this.jobExecutor = new SerialJobExecutor();
+    this.removeAssistantResponse = () => {
+      const lastMessage = this.state.messages[this.state.messages.length - 1];
+      if (lastMessage == null) {
+        throw new Error("Cannot remove assistant response from empty chat");
+      }
+      if (lastMessage.role !== "assistant") {
+        throw new Error("Last message is not an assistant message");
+      }
+      this.state.popMessage();
+      this.emit({ type: "messages-changed" });
+    };
+    /**
+     * Append a user message to the chat list. This triggers the API call to fetch
+     * the assistant's response.
+     */
+    this.append = async (message, { headers, body } = {}) => {
+      var _a17;
+      this.state.pushMessage({ ...message, id: (_a17 = message.id) != null ? _a17 : this.generateId() });
+      this.emit({ type: "messages-changed" });
+      await this.triggerRequest({
+        headers,
+        body,
+        requestType: "generate"
+      });
+    };
+    /**
+     * Reload the last AI chat response for the given chat history. If the last
+     * message isn't from the assistant, it will request the API to generate a
+     * new response.
+     */
+    this.reload = async ({
+      headers,
+      body
+    } = {}) => {
+      if (this.lastMessage === void 0) {
+        return;
+      }
+      if (this.lastMessage.role === "assistant") {
+        this.state.popMessage();
+        this.emit({ type: "messages-changed" });
+      }
+      await this.triggerRequest({
+        requestType: "generate",
+        headers,
+        body
+      });
+    };
+    /**
+     * Resume an ongoing chat generation stream. This does not resume an aborted generation.
+     */
+    this.experimental_resume = async ({
+      headers,
+      body
+    } = {}) => {
+      await this.triggerRequest({
+        requestType: "resume",
+        headers,
+        body
+      });
+    };
+    this.addToolResult = async ({
+      toolCallId,
+      result
+    }) => {
+      this.jobExecutor.run(async () => {
+        updateToolCallResult({
+          messages: this.state.messages,
+          toolCallId,
+          toolResult: result
+        });
+        this.messages = this.state.messages;
+        if (this.status === "submitted" || this.status === "streaming") {
+          return;
+        }
+        const lastMessage = this.lastMessage;
+        if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
+          this.triggerRequest({
+            requestType: "generate"
+          });
+        }
+      });
+    };
+    /**
+     * Abort the current request immediately, keep the generated tokens if any.
+     */
+    this.stop = async () => {
+      var _a17;
+      if (this.status !== "streaming" && this.status !== "submitted")
+        return;
+      if ((_a17 = this.activeResponse) == null ? void 0 : _a17.abortController) {
+        this.activeResponse.abortController.abort();
+        this.activeResponse.abortController = void 0;
+      }
+    };
+    this.id = id;
     this.maxSteps = maxSteps;
     this.transport = transport;
-    this.
-    this.generateId = generateId3 != null ? generateId3 : generateIdFunc;
+    this.generateId = generateId3;
     this.messageMetadataSchema = messageMetadataSchema;
     this.dataPartSchemas = dataPartSchemas;
+    this.state = state;
+    this.onError = onError;
+    this.onToolCall = onToolCall;
+    this.onFinish = onFinish;
   }
-
-
-
-
-
-
-
-
-
-
-    return this.chats.size;
-  }
-  getStatus(id) {
-    return this.getChatState(id).status;
+  /**
+   * Hook status:
+   *
+   * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
+   * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
+   * - `ready`: The full response has been received and processed; a new user message can be submitted.
+   * - `error`: An error occurred during the API request, preventing successful completion.
+   */
+  get status() {
+    return this.state.status;
   }
   setStatus({
-    id,
     status,
     error
   }) {
-
-    if (state.status === status)
+    if (this.status === status)
       return;
-    state.
-    state.
-    this.emit({ type: "
+    this.state.status = status;
+    this.state.error = error;
+    this.emit({ type: "status-changed" });
   }
-
-    return this.
+  get error() {
+    return this.state.error;
   }
-
-    return this.
+  get messages() {
+    return this.state.messages;
   }
-
-
-    return chat.messages[chat.messages.length - 1];
+  get lastMessage() {
+    return this.state.messages[this.state.messages.length - 1];
   }
   subscribe(subscriber) {
     this.subscribers.add(subscriber);
     return () => this.subscribers.delete(subscriber);
   }
-
-
-    messages
-  }) {
-    this.getChatState(id).setMessages(messages);
-    this.emit({ type: "chat-messages-changed", chatId: id });
-  }
-  removeAssistantResponse(id) {
-    const chat = this.getChatState(id);
-    const lastMessage = chat.messages[chat.messages.length - 1];
-    if (lastMessage == null) {
-      throw new Error("Cannot remove assistant response from empty chat");
-    }
-    if (lastMessage.role !== "assistant") {
-      throw new Error("Last message is not an assistant message");
-    }
-    chat.popMessage();
-    this.emit({ type: "chat-messages-changed", chatId: id });
-  }
-  async submitMessage({
-    chatId,
-    message,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    var _a17;
-    const state = this.getChatState(chatId);
-    state.pushMessage({ ...message, id: (_a17 = message.id) != null ? _a17 : this.generateId() });
-    this.emit({
-      type: "chat-messages-changed",
-      chatId
-    });
-    await this.triggerRequest({
-      chatId,
-      headers,
-      body,
-      requestType: "generate",
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resubmitLastUserMessage({
-    chatId,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    const chat = this.getChatState(chatId);
-    if (chat.messages[chat.messages.length - 1].role === "assistant") {
-      chat.popMessage();
-      this.emit({
-        type: "chat-messages-changed",
-        chatId
-      });
-    }
-    if (chat.messages.length === 0) {
-      return;
-    }
-    return this.triggerRequest({
-      chatId,
-      requestType: "generate",
-      headers,
-      body,
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resumeStream({
-    chatId,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    return this.triggerRequest({
-      chatId,
-      requestType: "resume",
-      headers,
-      body,
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async addToolResult({
-    chatId,
-    toolCallId,
-    result
-  }) {
-    const chat = this.getChatState(chatId);
-    chat.jobExecutor.run(async () => {
-      updateToolCallResult({
-        messages: chat.messages,
-        toolCallId,
-        toolResult: result
-      });
-      this.setMessages({
-        id: chatId,
-        messages: chat.messages
-      });
-      if (chat.status === "submitted" || chat.status === "streaming") {
-        return;
-      }
-      const lastMessage = chat.messages[chat.messages.length - 1];
-      if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
-        this.triggerRequest({
-          requestType: "generate",
-          chatId
-        });
-      }
-    });
-  }
-  async stopStream({ chatId }) {
-    var _a17;
-    const chat = this.getChatState(chatId);
-    if (chat.status !== "streaming" && chat.status !== "submitted")
-      return;
-    if ((_a17 = chat.activeResponse) == null ? void 0 : _a17.abortController) {
-      chat.activeResponse.abortController.abort();
-      chat.activeResponse.abortController = void 0;
-    }
+  set messages(messages) {
+    this.state.messages = messages;
+    this.emit({ type: "messages-changed" });
   }
   emit(event) {
     for (const subscriber of this.subscribers) {
-      subscriber.
-    }
-  }
-  getChatState(id) {
-    if (!this.hasChat(id)) {
-      this.addChat(id, []);
+      subscriber.onChange(event);
     }
-    return this.chats.get(id);
   }
   async triggerRequest({
-    chatId,
     requestType,
     headers,
-    body
-    onError,
-    onToolCall,
-    onFinish
+    body
   }) {
-
-    this.setStatus({
-    const messageCount =
-    const lastMessage =
-    const maxStep = lastMessage.parts.filter(
-      (part) => part.type === "step-start"
-    ).length;
+    var _a17, _b;
+    this.setStatus({ status: "submitted", error: void 0 });
+    const messageCount = this.state.messages.length;
+    const lastMessage = this.lastMessage;
+    const maxStep = (_a17 = lastMessage == null ? void 0 : lastMessage.parts.filter((part) => part.type === "step-start").length) != null ? _a17 : 0;
     try {
-      const lastMessage2 = chat.messages[chat.messages.length - 1];
       const activeResponse = {
         state: createStreamingUIMessageState({
-          lastMessage:
+          lastMessage: this.state.snapshot(lastMessage),
           newMessageId: this.generateId()
         }),
         abortController: new AbortController()
       };
-
+      this.activeResponse = activeResponse;
      const stream = await this.transport.submitMessages({
-        chatId,
-        messages:
+        chatId: this.id,
+        messages: this.state.messages,
        body,
        headers,
        abortController: activeResponse.abortController,
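
In the rewritten `DefaultChatTransport` above every constructor option is optional (`} = {}`), with `api` defaulting to `"/api/chat"`, and `prepareRequestBody` may replace the default `{ chatId, messages, ...body }` payload entirely. A hedged configuration sketch, assuming the class is part of the package's public exports as it is in this bundle (header and body values are illustrative placeholders):

```ts
// Sketch only: option names come from the DefaultChatTransport constructor
// in this diff; concrete values are illustrative placeholders.
import { DefaultChatTransport } from "ai";

const transport = new DefaultChatTransport({
  api: "/api/chat", // the default shown above
  headers: { "x-team": "demo" }, // merged with per-request headers
  body: { sessionId: "abc" }, // spread into every request body
  // Returning a value here replaces the default
  // { chatId, messages, ...body } request payload entirely:
  prepareRequestBody: ({ chatId, messages }) => ({
    chatId,
    lastMessage: messages[messages.length - 1]
  })
});
```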
@@ -1600,23 +1691,23 @@ var ChatStore = class {
       });
       const runUpdateMessageJob = (job) => (
         // serialize the job execution to avoid race conditions:
-
+        this.jobExecutor.run(
           () => job({
             state: activeResponse.state,
             write: () => {
-
-
+              var _a18;
+              this.setStatus({ status: "streaming" });
+              const replaceLastMessage = activeResponse.state.message.id === ((_a18 = this.lastMessage) == null ? void 0 : _a18.id);
               if (replaceLastMessage) {
-
-
+                this.state.replaceMessage(
+                  this.state.messages.length - 1,
                   activeResponse.state.message
                 );
               } else {
-
+                this.state.pushMessage(activeResponse.state.message);
               }
               this.emit({
-                type: "
-                chatId
+                type: "messages-changed"
               });
             }
           })
@@ -1625,7 +1716,7 @@ var ChatStore = class {
       await consumeStream({
         stream: processUIMessageStream({
           stream,
-          onToolCall,
+          onToolCall: this.onToolCall,
           messageMetadataSchema: this.messageMetadataSchema,
           dataPartSchemas: this.dataPartSchemas,
           runUpdateMessageJob
@@ -1634,32 +1725,29 @@ var ChatStore = class {
           throw error;
         }
       });
-      onFinish == null ? void 0 :
-      this.setStatus({
+      (_b = this.onFinish) == null ? void 0 : _b.call(this, { message: activeResponse.state.message });
+      this.setStatus({ status: "ready" });
     } catch (err) {
+      console.error(err);
       if (err.name === "AbortError") {
-        this.setStatus({
+        this.setStatus({ status: "ready" });
         return null;
       }
-      if (onError && err instanceof Error) {
-        onError(err);
+      if (this.onError && err instanceof Error) {
+        this.onError(err);
       }
-      this.setStatus({
+      this.setStatus({ status: "error", error: err });
     } finally {
-
+      this.activeResponse = void 0;
     }
     if (shouldResubmitMessages({
       originalMaxToolInvocationStep: maxStep,
       originalMessageCount: messageCount,
       maxSteps: this.maxSteps,
-      messages:
+      messages: this.state.messages
     })) {
       await this.triggerRequest({
-        chatId,
         requestType,
-        onError,
-        onToolCall,
-        onFinish,
         headers,
         body
       });
@@ -1800,207 +1888,66 @@ function convertToModelMessages(messages, options) {
                 role: "tool",
                 content: stepInvocations.map(
                   (toolInvocation) => {
-                    if (!("result" in toolInvocation)) {
-                      throw new MessageConversionError({
-                        originalMessage: message,
-                        message: "ToolInvocation must have a result: " + JSON.stringify(toolInvocation)
-                      });
-                    }
-                    const { toolCallId, toolName, result } = toolInvocation;
-                    const tool2 = tools[toolName];
-                    return (tool2 == null ? void 0 : tool2.experimental_toToolResultContent) != null ? {
-                      type: "tool-result",
-                      toolCallId,
-                      toolName,
-                      result: tool2.experimental_toToolResultContent(result),
-                      experimental_content: tool2.experimental_toToolResultContent(result)
-                    } : {
-                      type: "tool-result",
-                      toolCallId,
-                      toolName,
-                      result
-                    };
-                  }
-                )
-              });
-            }
-            block = [];
-          };
-          var processBlock = processBlock2;
-          let block = [];
-          for (const part of message.parts) {
-            switch (part.type) {
-              case "text":
-              case "reasoning":
-              case "file":
-              case "tool-invocation": {
-                block.push(part);
-                break;
-              }
-              case "step-start": {
-                processBlock2();
-                break;
-              }
-            }
-          }
-          processBlock2();
-          break;
-        }
-        break;
-      }
-      default: {
-        const _exhaustiveCheck = message.role;
-        throw new MessageConversionError({
-          originalMessage: message,
-          message: `Unsupported role: ${_exhaustiveCheck}`
-        });
-      }
-    }
-  }
-  return modelMessages;
-}
-var convertToCoreMessages = convertToModelMessages;
-
-// src/ui/default-chat-store-options.ts
-import {
-  generateId as generateIdFunc2
-} from "@ai-sdk/provider-utils";
-
-// src/ui/default-chat-transport.ts
-import {
-  parseJsonEventStream as parseJsonEventStream2
-} from "@ai-sdk/provider-utils";
-var getOriginalFetch2 = () => fetch;
-async function fetchUIMessageStream({
-  api,
-  body,
-  credentials,
-  headers,
-  abortController,
-  fetch: fetch2 = getOriginalFetch2(),
-  requestType = "generate"
-}) {
-  var _a17, _b, _c;
-  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
-    method: "GET",
-    headers: {
-      "Content-Type": "application/json",
-      ...headers
-    },
-    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
-    credentials
-  }) : await fetch2(api, {
-    method: "POST",
-    body: JSON.stringify(body),
-    headers: {
-      "Content-Type": "application/json",
-      ...headers
-    },
-    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
-    credentials
-  });
-  if (!response.ok) {
-    throw new Error(
-      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
-    );
-  }
-  if (!response.body) {
-    throw new Error("The response body is empty.");
-  }
-  return parseJsonEventStream2({
-    stream: response.body,
-    schema: uiMessageStreamPartSchema
-  }).pipeThrough(
-    new TransformStream({
-      async transform(part, controller) {
-        if (!part.success) {
-          throw part.error;
+                    if (!("result" in toolInvocation)) {
+                      throw new MessageConversionError({
+                        originalMessage: message,
+                        message: "ToolInvocation must have a result: " + JSON.stringify(toolInvocation)
+                      });
+                    }
+                    const { toolCallId, toolName, result } = toolInvocation;
+                    const tool2 = tools[toolName];
+                    return (tool2 == null ? void 0 : tool2.experimental_toToolResultContent) != null ? {
+                      type: "tool-result",
+                      toolCallId,
+                      toolName,
+                      result: tool2.experimental_toToolResultContent(result),
+                      experimental_content: tool2.experimental_toToolResultContent(result)
+                    } : {
+                      type: "tool-result",
+                      toolCallId,
+                      toolName,
+                      result
+                    };
+                  }
+                )
+              });
+            }
+            block = [];
+          };
+          var processBlock = processBlock2;
+          let block = [];
+          for (const part of message.parts) {
+            switch (part.type) {
+              case "text":
+              case "reasoning":
+              case "file":
+              case "tool-invocation": {
+                block.push(part);
+                break;
+              }
+              case "step-start": {
+                processBlock2();
+                break;
+              }
+            }
+          }
+          processBlock2();
+          break;
         }
-
+        break;
       }
-
-
-
-
-
-
-
-
-      body,
-      fetch: fetch2,
-      prepareRequestBody
-    }) {
-      this.api = api;
-      this.credentials = credentials;
-      this.headers = headers;
-      this.body = body;
-      this.fetch = fetch2;
-      this.prepareRequestBody = prepareRequestBody;
-    }
-    submitMessages({
-      chatId,
-      messages,
-      abortController,
-      body,
-      headers,
-      requestType
-    }) {
-      var _a17, _b;
-      return fetchUIMessageStream({
-        api: this.api,
-        headers: {
-          ...this.headers,
-          ...headers
-        },
-        body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
-          chatId,
-          messages,
-          ...this.body,
-          ...body
-        })) != null ? _b : {
-          chatId,
-          messages,
-          ...this.body,
-          ...body
-        },
-        credentials: this.credentials,
-        abortController: () => abortController,
-        fetch: this.fetch,
-        requestType
-      });
+      default: {
+        const _exhaustiveCheck = message.role;
+        throw new MessageConversionError({
+          originalMessage: message,
+          message: `Unsupported role: ${_exhaustiveCheck}`
+        });
+      }
     }
-
-
-  // src/ui/default-chat-store-options.ts
-  function defaultChatStoreOptions({
-    api = "/api/chat",
-    fetch: fetch2,
-    credentials,
-    headers,
-    body,
-    prepareRequestBody,
-    generateId: generateId3 = generateIdFunc2,
-    messageMetadataSchema,
-    maxSteps = 1,
-    dataPartSchemas,
-    chats
-  }) {
-    return () => ({
-      transport: new DefaultChatTransport({
-        api,
-        fetch: fetch2,
-        credentials,
-        headers,
-        body,
-        prepareRequestBody
-      }),
-      generateId: generateId3,
-      messageMetadataSchema,
-      dataPartSchemas,
-      maxSteps,
-      chats
-    });
+  return modelMessages;
 }
+var convertToCoreMessages = convertToModelMessages;
 
 // src/ui/transform-text-to-ui-message-stream.ts
 function transformTextToUiMessageStream({
@@ -2371,38 +2318,6 @@ function isDeepEqualData(obj1, obj2) {
   return true;
 }
 
-// src/util/serial-job-executor.ts
-var SerialJobExecutor = class {
-  constructor() {
-    this.queue = [];
-    this.isProcessing = false;
-  }
-  async processQueue() {
-    if (this.isProcessing) {
-      return;
-    }
-    this.isProcessing = true;
-    while (this.queue.length > 0) {
-      await this.queue[0]();
-      this.queue.shift();
-    }
-    this.isProcessing = false;
-  }
-  async run(job) {
-    return new Promise((resolve, reject) => {
-      this.queue.push(async () => {
-        try {
-          await job();
-          resolve();
-        } catch (error) {
-          reject(error);
-        }
-      });
-      void this.processQueue();
-    });
-  }
-};
-
 // src/util/simulate-readable-stream.ts
 import { delay as delayFunction } from "@ai-sdk/provider-utils";
 function simulateReadableStream({
@@ -3742,6 +3657,19 @@ function prepareCallSettings({
   };
 }
 
+// core/prompt/resolve-language-model.ts
+import { gateway } from "@ai-sdk/gateway";
+var GLOBAL_DEFAULT_PROVIDER = Symbol(
+  "vercel.ai.global.defaultProvider"
+);
+function resolveLanguageModel(model) {
+  if (typeof model !== "string") {
+    return model;
+  }
+  const globalProvider = globalThis[GLOBAL_DEFAULT_PROVIDER];
+  return (globalProvider != null ? globalProvider : gateway).languageModel(model);
+}
+
 // core/prompt/standardize-prompt.ts
 import { InvalidPromptError as InvalidPromptError2 } from "@ai-sdk/provider";
 import { safeValidateTypes } from "@ai-sdk/provider-utils";
|
   };
 }
 
+// core/prompt/wrap-gateway-error.ts
+import {
+  GatewayAuthenticationError,
+  GatewayModelNotFoundError
+} from "@ai-sdk/gateway";
+import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
+function wrapGatewayError(error) {
+  if (GatewayAuthenticationError.isInstance(error) || GatewayModelNotFoundError.isInstance(error)) {
+    return new AISDKError18({
+      name: "GatewayError",
+      message: "Vercel AI Gateway access failed. If you want to use AI SDK providers directly, use the providers, e.g. @ai-sdk/openai, or register a different global default provider.",
+      cause: error
+    });
+  }
+  return error;
+}
+
 // core/telemetry/stringify-for-telemetry.ts
 function stringifyForTelemetry(prompt) {
   return JSON.stringify(
|
   }
 }
 
-// core/prompt/resolve-language-model.ts
-import { gateway } from "@ai-sdk/gateway";
-function resolveLanguageModel(model) {
-  return typeof model === "string" ? gateway.languageModel(model) : model;
-}
-
 // core/generate-object/generate-object.ts
 var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
 async function generateObject(options) {
@@ -4404,208 +4343,212 @@ async function generateObject(options) {
     settings: { ...callSettings, maxRetries }
   });
   const tracer = getTracer(telemetry);
-
-
-
-
-
-
-
-
-
-          ...baseTelemetryAttributes,
-          // specific settings that only make sense on the outer level:
-          "ai.prompt": {
-            input: () => JSON.stringify({ system, prompt, messages })
-          },
-          "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
-          "ai.schema.name": schemaName,
-          "ai.schema.description": schemaDescription,
-          "ai.settings.output": outputStrategy.type
-        }
-      }),
-      tracer,
-      fn: async (span) => {
-        var _a17;
-        let result;
-        let finishReason;
-        let usage;
-        let warnings;
-        let response;
-        let request;
-        let resultProviderMetadata;
-        const standardizedPrompt = await standardizePrompt({
-          system,
-          prompt,
-          messages
-        });
-        const promptMessages = await convertToLanguageModelPrompt({
-          prompt: standardizedPrompt,
-          supportedUrls: await model.supportedUrls
-        });
-        const generateResult = await retry(
-          () => recordSpan({
-            name: "ai.generateObject.doGenerate",
-            attributes: selectTelemetryAttributes({
-              telemetry,
-              attributes: {
-                ...assembleOperationName({
-                  operationId: "ai.generateObject.doGenerate",
-                  telemetry
-                }),
-                ...baseTelemetryAttributes,
-                "ai.prompt.messages": {
-                  input: () => stringifyForTelemetry(promptMessages)
-                },
-                // standardized gen-ai llm span attributes:
-                "gen_ai.system": model.provider,
-                "gen_ai.request.model": model.modelId,
-                "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
-                "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
-                "gen_ai.request.presence_penalty": callSettings.presencePenalty,
-                "gen_ai.request.temperature": callSettings.temperature,
-                "gen_ai.request.top_k": callSettings.topK,
-                "gen_ai.request.top_p": callSettings.topP
-              }
+  try {
+    return await recordSpan({
+      name: "ai.generateObject",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.generateObject",
+            telemetry
           }),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+          ...baseTelemetryAttributes,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
+          },
+          "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+          "ai.schema.name": schemaName,
+          "ai.schema.description": schemaDescription,
+          "ai.settings.output": outputStrategy.type
+        }
+      }),
+      tracer,
+      fn: async (span) => {
+        var _a17;
+        let result;
+        let finishReason;
+        let usage;
+        let warnings;
+        let response;
+        let request;
+        let resultProviderMetadata;
+        const standardizedPrompt = await standardizePrompt({
+          system,
+          prompt,
+          messages
+        });
+        const promptMessages = await convertToLanguageModelPrompt({
+          prompt: standardizedPrompt,
+          supportedUrls: await model.supportedUrls
+        });
+        const generateResult = await retry(
+          () => recordSpan({
+            name: "ai.generateObject.doGenerate",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.generateObject.doGenerate",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.messages": {
+                  input: () => stringifyForTelemetry(promptMessages)
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": model.provider,
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+                "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+                "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+                "gen_ai.request.temperature": callSettings.temperature,
+                "gen_ai.request.top_k": callSettings.topK,
+                "gen_ai.request.top_p": callSettings.topP
+              }
+            }),
+            tracer,
+            fn: async (span2) => {
+              var _a18, _b, _c, _d, _e, _f, _g, _h;
+              const result2 = await model.doGenerate({
+                responseFormat: {
+                  type: "json",
+                  schema: outputStrategy.jsonSchema,
+                  name: schemaName,
+                  description: schemaDescription
+                },
+                ...prepareCallSettings(settings),
+                prompt: promptMessages,
+                providerOptions,
+                abortSignal,
+                headers
               });
+              const responseData = {
+                id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+                timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+                modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+                headers: (_g = result2.response) == null ? void 0 : _g.headers,
+                body: (_h = result2.response) == null ? void 0 : _h.body
+              };
+              const text2 = extractContentText(result2.content);
+              if (text2 === void 0) {
+                throw new NoObjectGeneratedError({
+                  message: "No object generated: the model did not return a response.",
+                  response: responseData,
+                  usage: result2.usage,
+                  finishReason: result2.finishReason
+                });
+              }
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.response.finishReason": result2.finishReason,
+                    "ai.response.object": { output: () => text2 },
+                    "ai.response.id": responseData.id,
+                    "ai.response.model": responseData.modelId,
+                    "ai.response.timestamp": responseData.timestamp.toISOString(),
+                    // TODO rename telemetry attributes to inputTokens and outputTokens
+                    "ai.usage.promptTokens": result2.usage.inputTokens,
+                    "ai.usage.completionTokens": result2.usage.outputTokens,
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.response.finish_reasons": [result2.finishReason],
+                    "gen_ai.response.id": responseData.id,
+                    "gen_ai.response.model": responseData.modelId,
+                    "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+                    "gen_ai.usage.output_tokens": result2.usage.outputTokens
+                  }
+                })
+              );
+              return { ...result2, objectText: text2, responseData };
             }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        );
-        return { ...result2, objectText: text2, responseData };
+          })
+        );
+        result = generateResult.objectText;
+        finishReason = generateResult.finishReason;
+        usage = generateResult.usage;
+        warnings = generateResult.warnings;
+        resultProviderMetadata = generateResult.providerMetadata;
+        request = (_a17 = generateResult.request) != null ? _a17 : {};
+        response = generateResult.responseData;
+        async function processResult(result2) {
+          const parseResult = await safeParseJSON2({ text: result2 });
+          if (!parseResult.success) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: could not parse the response.",
+              cause: parseResult.error,
+              text: result2,
+              response,
+              usage,
+              finishReason
+            });
           }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-          usage,
-          finishReason
-        });
-      }
-      const validationResult = await outputStrategy.validateFinalResult(
-        parseResult.value,
-        {
-          text: result2,
-          response,
-          usage
+          const validationResult = await outputStrategy.validateFinalResult(
+            parseResult.value,
+            {
+              text: result2,
+              response,
+              usage
+            }
+          );
+          if (!validationResult.success) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: response did not match schema.",
+              cause: validationResult.error,
+              text: result2,
+              response,
+              usage,
+              finishReason
+            });
           }
-
-        if (!validationResult.success) {
-          throw new NoObjectGeneratedError({
-            message: "No object generated: response did not match schema.",
-            cause: validationResult.error,
-            text: result2,
-            response,
-            usage,
-            finishReason
-          });
+          return validationResult.value;
         }
-
-
-
-
-
-
-
-
-
-
-
-
+        let object2;
+        try {
+          object2 = await processResult(result);
+        } catch (error) {
+          if (repairText != null && NoObjectGeneratedError.isInstance(error) && (JSONParseError2.isInstance(error.cause) || TypeValidationError3.isInstance(error.cause))) {
+            const repairedText = await repairText({
+              text: result,
+              error: error.cause
+            });
+            if (repairedText === null) {
+              throw error;
+            }
+            object2 = await processResult(repairedText);
+          } else {
             throw error;
           }
-          object2 = await processResult(repairedText);
-        } else {
-          throw error;
         }
+        span.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.response.finishReason": finishReason,
+              "ai.response.object": {
+                output: () => JSON.stringify(object2)
+              },
+              // TODO rename telemetry attributes to inputTokens and outputTokens
+              "ai.usage.promptTokens": usage.inputTokens,
+              "ai.usage.completionTokens": usage.outputTokens
+            }
+          })
+        );
+        return new DefaultGenerateObjectResult({
+          object: object2,
+          finishReason,
+          usage,
+          warnings,
+          request,
+          response,
+          providerMetadata: resultProviderMetadata
+        });
       }
-
-
-
-
-          "ai.response.finishReason": finishReason,
-          "ai.response.object": {
-            output: () => JSON.stringify(object2)
-          },
-          // TODO rename telemetry attributes to inputTokens and outputTokens
-          "ai.usage.promptTokens": usage.inputTokens,
-          "ai.usage.completionTokens": usage.outputTokens
-        }
-      })
-    );
-    return new DefaultGenerateObjectResult({
-      object: object2,
-      finishReason,
-      usage,
-      warnings,
-      request,
-      response,
-      providerMetadata: resultProviderMetadata
-    });
-  }
-  });
+    });
+  } catch (error) {
+    throw wrapGatewayError(error);
+  }
 }
 var DefaultGenerateObjectResult = class {
   constructor(options) {
|
 };
 
 // core/generate-object/stream-object.ts
-import {
+import {
+  createIdGenerator as createIdGenerator2
+} from "@ai-sdk/provider-utils";
 
 // src/util/create-resolvable-promise.ts
 function createResolvablePromise() {
|
   headers,
   experimental_telemetry: telemetry,
   providerOptions,
-  onError
+  onError = ({ error }) => {
+    console.error(error);
+  },
   onFinish,
   _internal: {
     generateId: generateId3 = originalGenerateId2,
|
         transform(chunk, controller) {
           controller.enqueue(chunk);
           if (chunk.type === "error") {
-            onError
+            onError({ error: wrapGatewayError(chunk.error) });
           }
         }
       });
|
 };
 
 // src/error/no-speech-generated-error.ts
-import { AISDKError as
-var NoSpeechGeneratedError = class extends
+import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
+var NoSpeechGeneratedError = class extends AISDKError19 {
   constructor(options) {
     super({
       name: "AI_NoSpeechGeneratedError",
|
     messages
   });
   const tracer = getTracer(telemetry);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      }
-    }),
-    tracer,
-    fn: async (span) => {
-      var _a17, _b, _c, _d, _e;
-      const callSettings2 = prepareCallSettings(settings);
-      let currentModelResponse;
-      let currentToolCalls = [];
-      let currentToolResults = [];
-      const responseMessages = [];
-      const steps = [];
-      do {
-        const stepInputMessages = [
-          ...initialPrompt.messages,
-          ...responseMessages
-        ];
-        const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
-          model,
-          steps,
-          stepNumber: steps.length
-        }));
-        const promptMessages = await convertToLanguageModelPrompt({
-          prompt: {
-            system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
-            messages: stepInputMessages
-          },
-          supportedUrls: await model.supportedUrls
-        });
-        const stepModel = resolveLanguageModel(
-          (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
-        );
-        const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
-          tools,
-          toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
-          activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
-        });
-        currentModelResponse = await retry(
-          () => {
-            var _a18;
-            return recordSpan({
-              name: "ai.generateText.doGenerate",
-              attributes: selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  ...assembleOperationName({
-                    operationId: "ai.generateText.doGenerate",
-                    telemetry
-                  }),
-                  ...baseTelemetryAttributes,
-                  // model:
-                  "ai.model.provider": stepModel.provider,
-                  "ai.model.id": stepModel.modelId,
-                  // prompt:
-                  "ai.prompt.messages": {
-                    input: () => stringifyForTelemetry(promptMessages)
-                  },
-                  "ai.prompt.tools": {
-                    // convert the language model level tools:
-                    input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
-                  },
-                  "ai.prompt.toolChoice": {
-                    input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
-                  },
-                  // standardized gen-ai llm span attributes:
-                  "gen_ai.system": stepModel.provider,
-                  "gen_ai.request.model": stepModel.modelId,
-                  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-                  "gen_ai.request.max_tokens": settings.maxOutputTokens,
-                  "gen_ai.request.presence_penalty": settings.presencePenalty,
-                  "gen_ai.request.stop_sequences": settings.stopSequences,
-                  "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
-                  "gen_ai.request.top_k": settings.topK,
-                  "gen_ai.request.top_p": settings.topP
-                }
-              }),
-              tracer,
-              fn: async (span2) => {
-                var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
-                const result = await stepModel.doGenerate({
-                  ...callSettings2,
-                  tools: stepTools,
-                  toolChoice: stepToolChoice,
-                  responseFormat: output == null ? void 0 : output.responseFormat,
-                  prompt: promptMessages,
-                  providerOptions,
-                  abortSignal,
-                  headers
-                });
-                const responseData = {
-                  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
-                  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-                  modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
-                  headers: (_g = result.response) == null ? void 0 : _g.headers,
-                  body: (_h = result.response) == null ? void 0 : _h.body
-                };
-                span2.setAttributes(
-                  selectTelemetryAttributes({
-                    telemetry,
-                    attributes: {
-                      "ai.response.finishReason": result.finishReason,
-                      "ai.response.text": {
-                        output: () => extractContentText(result.content)
-                      },
-                      "ai.response.toolCalls": {
-                        output: () => {
-                          const toolCalls = asToolCalls(result.content);
-                          return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-                        }
-                      },
-                      "ai.response.id": responseData.id,
-                      "ai.response.model": responseData.modelId,
-                      "ai.response.timestamp": responseData.timestamp.toISOString(),
-                      // TODO rename telemetry attributes to inputTokens and outputTokens
-                      "ai.usage.promptTokens": result.usage.inputTokens,
-                      "ai.usage.completionTokens": result.usage.outputTokens,
-                      // standardized gen-ai llm span attributes:
-                      "gen_ai.response.finish_reasons": [result.finishReason],
-                      "gen_ai.response.id": responseData.id,
-                      "gen_ai.response.model": responseData.modelId,
-                      "gen_ai.usage.input_tokens": result.usage.inputTokens,
-                      "gen_ai.usage.output_tokens": result.usage.outputTokens
-                    }
-                  })
-                );
-                return { ...result, response: responseData };
-              }
-            });
+  try {
+    return await recordSpan({
+      name: "ai.generateText",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.generateText",
+            telemetry
+          }),
+          ...baseTelemetryAttributes,
+          // model:
+          "ai.model.provider": model.provider,
+          "ai.model.id": model.modelId,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
          }
-
-
-
-
-
-
-
-
-
-
+        }
+      }),
+      tracer,
+      fn: async (span) => {
+        var _a17, _b, _c, _d, _e;
+        const callSettings2 = prepareCallSettings(settings);
+        let currentModelResponse;
+        let currentToolCalls = [];
+        let currentToolResults = [];
+        const responseMessages = [];
+        const steps = [];
+        do {
+          const stepInputMessages = [
+            ...initialPrompt.messages,
+            ...responseMessages
+          ];
+          const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+            model,
+            steps,
+            stepNumber: steps.length
+          }));
+          const promptMessages = await convertToLanguageModelPrompt({
+            prompt: {
+              system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
              messages: stepInputMessages
+            },
+            supportedUrls: await model.supportedUrls
+          });
+          const stepModel = resolveLanguageModel(
+            (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
+          );
+          const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+            tools,
+            toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
+            activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
+          });
+          currentModelResponse = await retry(
+            () => {
+              var _a18;
+              return recordSpan({
+                name: "ai.generateText.doGenerate",
+                attributes: selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    ...assembleOperationName({
+                      operationId: "ai.generateText.doGenerate",
+                      telemetry
+                    }),
+                    ...baseTelemetryAttributes,
+                    // model:
+                    "ai.model.provider": stepModel.provider,
+                    "ai.model.id": stepModel.modelId,
+                    // prompt:
+                    "ai.prompt.messages": {
+                      input: () => stringifyForTelemetry(promptMessages)
+                    },
+                    "ai.prompt.tools": {
+                      // convert the language model level tools:
+                      input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
+                    },
+                    "ai.prompt.toolChoice": {
+                      input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
+                    },
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.system": stepModel.provider,
+                    "gen_ai.request.model": stepModel.modelId,
+                    "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+                    "gen_ai.request.max_tokens": settings.maxOutputTokens,
+                    "gen_ai.request.presence_penalty": settings.presencePenalty,
+                    "gen_ai.request.stop_sequences": settings.stopSequences,
+                    "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
+                    "gen_ai.request.top_k": settings.topK,
+                    "gen_ai.request.top_p": settings.topP
+                  }
+                }),
+                tracer,
+                fn: async (span2) => {
+                  var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
+                  const result = await stepModel.doGenerate({
+                    ...callSettings2,
+                    tools: stepTools,
+                    toolChoice: stepToolChoice,
+                    responseFormat: output == null ? void 0 : output.responseFormat,
+                    prompt: promptMessages,
+                    providerOptions,
+                    abortSignal,
+                    headers
+                  });
+                  const responseData = {
+                    id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
+                    timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+                    modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
+                    headers: (_g = result.response) == null ? void 0 : _g.headers,
+                    body: (_h = result.response) == null ? void 0 : _h.body
+                  };
+                  span2.setAttributes(
+                    selectTelemetryAttributes({
+                      telemetry,
+                      attributes: {
+                        "ai.response.finishReason": result.finishReason,
+                        "ai.response.text": {
+                          output: () => extractContentText(result.content)
+                        },
+                        "ai.response.toolCalls": {
+                          output: () => {
+                            const toolCalls = asToolCalls(result.content);
+                            return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+                          }
+                        },
+                        "ai.response.id": responseData.id,
+                        "ai.response.model": responseData.modelId,
+                        "ai.response.timestamp": responseData.timestamp.toISOString(),
+                        // TODO rename telemetry attributes to inputTokens and outputTokens
+                        "ai.usage.promptTokens": result.usage.inputTokens,
+                        "ai.usage.completionTokens": result.usage.outputTokens,
+                        // standardized gen-ai llm span attributes:
+                        "gen_ai.response.finish_reasons": [result.finishReason],
+                        "gen_ai.response.id": responseData.id,
+                        "gen_ai.response.model": responseData.modelId,
+                        "gen_ai.usage.input_tokens": result.usage.inputTokens,
+                        "gen_ai.usage.output_tokens": result.usage.outputTokens
+                      }
+                    })
+                  );
+                  return { ...result, response: responseData };
+                }
+              });
+            }
+          );
+          currentToolCalls = await Promise.all(
+            currentModelResponse.content.filter(
+              (part) => part.type === "tool-call"
+            ).map(
+              (toolCall) => parseToolCall({
+                toolCall,
+                tools,
+                repairToolCall,
+                system,
+                messages: stepInputMessages
+              })
+            )
+          );
+          currentToolResults = tools == null ? [] : await executeTools({
+            toolCalls: currentToolCalls,
+            tools,
+            tracer,
+            telemetry,
+            messages: stepInputMessages,
+            abortSignal
+          });
+          const stepContent = asContent({
+            content: currentModelResponse.content,
+            toolCalls: currentToolCalls,
+            toolResults: currentToolResults
+          });
+          responseMessages.push(
+            ...toResponseMessages({
+              content: stepContent,
+              tools: tools != null ? tools : {}
             })
-          )
-
-          currentToolResults = tools == null ? [] : await executeTools({
-            toolCalls: currentToolCalls,
-            tools,
|
-
tracer,
|
5886
|
-
telemetry,
|
5887
|
-
messages: stepInputMessages,
|
5888
|
-
abortSignal
|
5889
|
-
});
|
5890
|
-
const stepContent = asContent({
|
5891
|
-
content: currentModelResponse.content,
|
5892
|
-
toolCalls: currentToolCalls,
|
5893
|
-
toolResults: currentToolResults
|
5894
|
-
});
|
5895
|
-
responseMessages.push(
|
5896
|
-
...toResponseMessages({
|
5848
|
+
);
|
5849
|
+
const currentStepResult = new DefaultStepResult({
|
5897
5850
|
content: stepContent,
|
5898
|
-
|
5851
|
+
finishReason: currentModelResponse.finishReason,
|
5852
|
+
usage: currentModelResponse.usage,
|
5853
|
+
warnings: currentModelResponse.warnings,
|
5854
|
+
providerMetadata: currentModelResponse.providerMetadata,
|
5855
|
+
request: (_e = currentModelResponse.request) != null ? _e : {},
|
5856
|
+
response: {
|
5857
|
+
...currentModelResponse.response,
|
5858
|
+
// deep clone msgs to avoid mutating past messages in multi-step:
|
5859
|
+
messages: structuredClone(responseMessages)
|
5860
|
+
}
|
5861
|
+
});
|
5862
|
+
steps.push(currentStepResult);
|
5863
|
+
await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
|
5864
|
+
} while (
|
5865
|
+
// there are tool calls:
|
5866
|
+
currentToolCalls.length > 0 && // all current tool calls have results:
|
5867
|
+
currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
|
5868
|
+
!await isStopConditionMet({ stopConditions, steps })
|
5869
|
+
);
|
5870
|
+
span.setAttributes(
|
5871
|
+
selectTelemetryAttributes({
|
5872
|
+
telemetry,
|
5873
|
+
attributes: {
|
5874
|
+
"ai.response.finishReason": currentModelResponse.finishReason,
|
5875
|
+
"ai.response.text": {
|
5876
|
+
output: () => extractContentText(currentModelResponse.content)
|
5877
|
+
},
|
5878
|
+
"ai.response.toolCalls": {
|
5879
|
+
output: () => {
|
5880
|
+
const toolCalls = asToolCalls(currentModelResponse.content);
|
5881
|
+
return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
|
5882
|
+
}
|
5883
|
+
},
|
5884
|
+
// TODO rename telemetry attributes to inputTokens and outputTokens
|
5885
|
+
"ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
|
5886
|
+
"ai.usage.completionTokens": currentModelResponse.usage.outputTokens
|
5887
|
+
}
|
5899
5888
|
})
|
5900
5889
|
);
|
5901
|
-
const
|
5902
|
-
|
5903
|
-
|
5904
|
-
|
5905
|
-
|
5906
|
-
|
5907
|
-
|
5908
|
-
|
5909
|
-
|
5910
|
-
|
5911
|
-
|
5912
|
-
}
|
5890
|
+
const lastStep = steps[steps.length - 1];
|
5891
|
+
return new DefaultGenerateTextResult({
|
5892
|
+
steps,
|
5893
|
+
resolvedOutput: await (output == null ? void 0 : output.parseOutput(
|
5894
|
+
{ text: lastStep.text },
|
5895
|
+
{
|
5896
|
+
response: lastStep.response,
|
5897
|
+
usage: lastStep.usage,
|
5898
|
+
finishReason: lastStep.finishReason
|
5899
|
+
}
|
5900
|
+
))
|
5913
5901
|
});
|
5914
|
-
|
5915
|
-
|
5916
|
-
|
5917
|
-
|
5918
|
-
|
5919
|
-
currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
|
5920
|
-
!await isStopConditionMet({ stopConditions, steps })
|
5921
|
-
);
|
5922
|
-
span.setAttributes(
|
5923
|
-
selectTelemetryAttributes({
|
5924
|
-
telemetry,
|
5925
|
-
attributes: {
|
5926
|
-
"ai.response.finishReason": currentModelResponse.finishReason,
|
5927
|
-
"ai.response.text": {
|
5928
|
-
output: () => extractContentText(currentModelResponse.content)
|
5929
|
-
},
|
5930
|
-
"ai.response.toolCalls": {
|
5931
|
-
output: () => {
|
5932
|
-
const toolCalls = asToolCalls(currentModelResponse.content);
|
5933
|
-
return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
|
5934
|
-
}
|
5935
|
-
},
|
5936
|
-
// TODO rename telemetry attributes to inputTokens and outputTokens
|
5937
|
-
"ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
|
5938
|
-
"ai.usage.completionTokens": currentModelResponse.usage.outputTokens
|
5939
|
-
}
|
5940
|
-
})
|
5941
|
-
);
|
5942
|
-
const lastStep = steps[steps.length - 1];
|
5943
|
-
return new DefaultGenerateTextResult({
|
5944
|
-
steps,
|
5945
|
-
resolvedOutput: await (output == null ? void 0 : output.parseOutput(
|
5946
|
-
{ text: lastStep.text },
|
5947
|
-
{
|
5948
|
-
response: lastStep.response,
|
5949
|
-
usage: lastStep.usage,
|
5950
|
-
finishReason: lastStep.finishReason
|
5951
|
-
}
|
5952
|
-
))
|
5953
|
-
});
|
5954
|
-
}
|
5955
|
-
});
|
5902
|
+
}
|
5903
|
+
});
|
5904
|
+
} catch (error) {
|
5905
|
+
throw wrapGatewayError(error);
|
5906
|
+
}
|
5956
5907
|
}
|
5957
5908
|
async function executeTools({
|
5958
5909
|
toolCalls,
|
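The heart of this hunk is the reworked multi-step loop in `generateText`: before each step an optional `prepareStep` callback receives `{ model, steps, stepNumber }` and may override the system prompt, model, tool choice, or active tools for that step (each field falls back individually); the loop keeps stepping while every tool call produced a result and no stop condition fires, and failures are rethrown through `wrapGatewayError`. A minimal usage sketch, assuming the callback is exposed on the public `generateText` options under the same `prepareStep` name seen in the destructuring above (the public option name is not confirmed by this diff):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const { steps, text } = await generateText({
  model: openai('gpt-4o'), // model id is illustrative
  // tool definitions omitted; the loop only iterates when tools are called
  prepareStep: async ({ stepNumber }) => {
    // Tighten the system prompt on the first step only; returning undefined
    // keeps the top-level settings for every field.
    return stepNumber === 0
      ? { system: 'Plan the tool calls before answering.' }
      : undefined;
  },
  prompt: 'Look up the weather in Berlin, then summarize it.',
});
```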
@@ -6489,7 +6440,9 @@ function streamText({
  experimental_repairToolCall: repairToolCall,
  experimental_transform: transform,
  onChunk,
-  onError
+  onError = ({ error }) => {
+    console.error(error);
+  },
  onFinish,
  onStepFinish,
  _internal: {
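Behavioral note on this hunk: `streamText`'s `onError` is no longer optional-and-silent; when the caller passes nothing, stream errors are now logged via `console.error`. A minimal sketch of routing errors elsewhere instead, assuming the standard v5 call shape (only the default handler is confirmed by this diff):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

// Hypothetical sink; replace with your APM / logger of choice.
const reportToMonitoring = (error: unknown) => { /* send to your APM */ };

const result = streamText({
  model: openai('gpt-4o'), // model id is illustrative
  prompt: 'Hello!',
  // The new default is ({ error }) => console.error(error);
  // pass your own handler to override it (or a no-op to silence errors):
  onError: ({ error }) => {
    reportToMonitoring(error);
  },
});
```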
@@ -6628,7 +6581,7 @@ var DefaultStreamTextResult = class {
          await (onChunk == null ? void 0 : onChunk({ chunk: part }));
        }
        if (part.type === "error") {
-          await …
+          await onError({ error: wrapGatewayError(part.error) });
        }
        if (part.type === "text") {
          const latestContent = recordedContent[recordedContent.length - 1];
@@ -7766,7 +7719,7 @@ function customProvider({
var experimental_customProvider = customProvider;

// core/registry/no-such-provider-error.ts
-import { AISDKError as …
+import { AISDKError as AISDKError20, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
var name16 = "AI_NoSuchProviderError";
var marker16 = `vercel.ai.error.${name16}`;
var symbol16 = Symbol.for(marker16);
@@ -7785,7 +7738,7 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
    this.availableProviders = availableProviders;
  }
  static isInstance(error) {
-    return …
+    return AISDKError20.hasMarker(error, marker16);
  }
};
_a16 = symbol16;
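For context on the `isInstance` change above: the check goes through the SDK's marker-symbol scheme (`AISDKError.hasMarker`) rather than `instanceof`, so it keeps working when two copies of the package end up in one bundle. A distilled sketch of the pattern, simplified from the shapes visible in this hunk (not the SDK's exact implementation):

```ts
// Marker-based error identity, as used by AI_NoSuchProviderError above.
const errorName = 'AI_NoSuchProviderError';
const marker = `vercel.ai.error.${errorName}`;
const symbol = Symbol.for(marker); // Symbol.for: same symbol in every bundle copy

class NoSuchProviderError extends Error {
  constructor(message: string) {
    super(message);
    this.name = errorName;
    // Brand the instance with the shared symbol:
    Object.defineProperty(this, symbol, { value: true });
  }

  static isInstance(error: unknown): error is NoSuchProviderError {
    // hasMarker amounts to checking for the branded symbol property:
    return error instanceof Error && symbol in error;
  }
}
```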
@@ -8442,8 +8395,8 @@ var MCPClient = class {
};

// src/error/no-transcript-generated-error.ts
-import { AISDKError as …
-var NoTranscriptGeneratedError = class extends …
+import { AISDKError as AISDKError21 } from "@ai-sdk/provider";
+var NoTranscriptGeneratedError = class extends AISDKError21 {
  constructor(options) {
    super({
      name: "AI_NoTranscriptGeneratedError",
@@ -8507,10 +8460,11 @@ var DefaultTranscriptionResult = class {
export {
  AISDKError16 as AISDKError,
  APICallError,
-… (removed export not captured in this extract)
+  AbstractChat,
  DefaultChatTransport,
  DownloadError,
  EmptyResponseBodyError,
+  GLOBAL_DEFAULT_PROVIDER,
  InvalidArgumentError,
  InvalidDataContentError,
  InvalidMessageRoleError,
@@ -8556,7 +8510,6 @@ export {
  createUIMessageStream,
  createUIMessageStreamResponse,
  customProvider,
-  defaultChatStoreOptions,
  defaultSettingsMiddleware,
  embed,
  embedMany,