ai 5.0.0-alpha.7 → 5.0.0-alpha.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +49 -0
- package/dist/index.d.mts +122 -276
- package/dist/index.d.ts +122 -276
- package/dist/index.js +866 -916
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +836 -883
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -2
- package/dist/internal/index.d.ts +3 -2
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/package.json +6 -6
package/dist/index.js
CHANGED
@@ -22,10 +22,11 @@ var src_exports = {};
 __export(src_exports, {
   AISDKError: () => import_provider16.AISDKError,
   APICallError: () => import_provider16.APICallError,
-
+  AbstractChat: () => AbstractChat,
   DefaultChatTransport: () => DefaultChatTransport,
   DownloadError: () => DownloadError,
   EmptyResponseBodyError: () => import_provider16.EmptyResponseBodyError,
+  GLOBAL_DEFAULT_PROVIDER: () => GLOBAL_DEFAULT_PROVIDER,
   InvalidArgumentError: () => InvalidArgumentError,
   InvalidDataContentError: () => InvalidDataContentError,
   InvalidMessageRoleError: () => InvalidMessageRoleError,
@@ -53,7 +54,7 @@ __export(src_exports, {
   ToolExecutionError: () => ToolExecutionError,
   TypeValidationError: () => import_provider16.TypeValidationError,
   UnsupportedFunctionalityError: () => import_provider16.UnsupportedFunctionalityError,
-  asSchema: () =>
+  asSchema: () => import_provider_utils25.asSchema,
   assistantModelMessageSchema: () => assistantModelMessageSchema,
   callCompletionApi: () => callCompletionApi,
   convertFileListToFileUIParts: () => convertFileListToFileUIParts,
@@ -65,13 +66,12 @@ __export(src_exports, {
   coreToolMessageSchema: () => coreToolMessageSchema,
   coreUserMessageSchema: () => coreUserMessageSchema,
   cosineSimilarity: () => cosineSimilarity,
-  createIdGenerator: () =>
+  createIdGenerator: () => import_provider_utils25.createIdGenerator,
   createProviderRegistry: () => createProviderRegistry,
   createTextStreamResponse: () => createTextStreamResponse,
   createUIMessageStream: () => createUIMessageStream,
   createUIMessageStreamResponse: () => createUIMessageStreamResponse,
   customProvider: () => customProvider,
-  defaultChatStoreOptions: () => defaultChatStoreOptions,
   defaultSettingsMiddleware: () => defaultSettingsMiddleware,
   embed: () => embed,
   embedMany: () => embedMany,
@@ -82,14 +82,14 @@ __export(src_exports, {
   experimental_generateSpeech: () => generateSpeech,
   experimental_transcribe: () => transcribe,
   extractReasoningMiddleware: () => extractReasoningMiddleware,
-  generateId: () =>
+  generateId: () => import_provider_utils25.generateId,
   generateObject: () => generateObject,
   generateText: () => generateText,
   getTextFromDataUrl: () => getTextFromDataUrl,
   getToolInvocations: () => getToolInvocations,
   hasToolCall: () => hasToolCall,
   isDeepEqualData: () => isDeepEqualData,
-  jsonSchema: () =>
+  jsonSchema: () => import_provider_utils25.jsonSchema,
   modelMessageSchema: () => modelMessageSchema,
   parsePartialJson: () => parsePartialJson,
   pipeTextStreamToResponse: () => pipeTextStreamToResponse,
@@ -107,7 +107,7 @@ __export(src_exports, {
   wrapLanguageModel: () => wrapLanguageModel
 });
 module.exports = __toCommonJS(src_exports);
-var
+var import_provider_utils25 = require("@ai-sdk/provider-utils");
 
 // src/error/index.ts
 var import_provider16 = require("@ai-sdk/provider");
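
Note: the schema and id helpers (`asSchema`, `createIdGenerator`, `generateId`, `jsonSchema`) are now direct re-exports of the `@ai-sdk/provider-utils` implementations through the hoisted `import_provider_utils25` binding, and the export map gains `AbstractChat` and `GLOBAL_DEFAULT_PROVIDER` while dropping `defaultChatStoreOptions`. A minimal consumer-side sketch in TypeScript (the schema shape is illustrative only):

    import { generateId, jsonSchema } from "ai";

    // generateId now delegates to the provider-utils implementation.
    const id: string = generateId();

    // jsonSchema wraps a raw JSON Schema object for use as an object/tool schema.
    const profileSchema = jsonSchema<{ name: string }>({
      type: "object",
      properties: { name: { type: "string" } },
      required: ["name"],
    });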
@@ -768,8 +768,40 @@ async function callCompletionApi({
   }
 }
 
-// src/ui/chat
-var
+// src/ui/chat.ts
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
+
+// src/util/serial-job-executor.ts
+var SerialJobExecutor = class {
+  constructor() {
+    this.queue = [];
+    this.isProcessing = false;
+  }
+  async processQueue() {
+    if (this.isProcessing) {
+      return;
+    }
+    this.isProcessing = true;
+    while (this.queue.length > 0) {
+      await this.queue[0]();
+      this.queue.shift();
+    }
+    this.isProcessing = false;
+  }
+  async run(job) {
+    return new Promise((resolve, reject) => {
+      this.queue.push(async () => {
+        try {
+          await job();
+          resolve();
+        } catch (error) {
+          reject(error);
+        }
+      });
+      void this.processQueue();
+    });
+  }
+};
 
 // src/ui/process-ui-message-stream.ts
 var import_provider_utils3 = require("@ai-sdk/provider-utils");
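
Note: `SerialJobExecutor` (moved here from later in the bundle) runs queued async jobs strictly one at a time: `run` enqueues a job and resolves when that job finishes, while `processQueue` drains the queue without overlapping executions. A standalone sketch of the semantics (the class is bundled internals, not a public export):

    // Two jobs submitted back-to-back never run concurrently.
    const executor = new SerialJobExecutor();

    void executor.run(async () => {
      await new Promise((resolve) => setTimeout(resolve, 100));
      console.log("job 1 finished");
    });
    void executor.run(async () => {
      console.log("job 2 starts only after job 1 finished");
    });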
@@ -1428,6 +1460,9 @@ function shouldResubmitMessages({
   );
 }
 function isAssistantMessageWithCompletedToolCalls(message) {
+  if (!message) {
+    return false;
+  }
   if (message.role !== "assistant") {
     return false;
   }
@@ -1438,242 +1473,296 @@ function isAssistantMessageWithCompletedToolCalls(message) {
   return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
 }
 
-// src/ui/chat-
-var
+// src/ui/default-chat-transport.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var getOriginalFetch2 = () => fetch;
+async function fetchUIMessageStream({
+  api,
+  body,
+  credentials,
+  headers,
+  abortController,
+  fetch: fetch2 = getOriginalFetch2(),
+  requestType = "generate"
+}) {
+  var _a17, _b, _c;
+  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
+    credentials
+  }) : await fetch2(api, {
+    method: "POST",
+    body: JSON.stringify(body),
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
+    credentials
+  });
+  if (!response.ok) {
+    throw new Error(
+      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
+    );
+  }
+  if (!response.body) {
+    throw new Error("The response body is empty.");
+  }
+  return (0, import_provider_utils4.parseJsonEventStream)({
+    stream: response.body,
+    schema: uiMessageStreamPartSchema
+  }).pipeThrough(
+    new TransformStream({
+      async transform(part, controller) {
+        if (!part.success) {
+          throw part.error;
+        }
+        controller.enqueue(part.value);
+      }
+    })
+  );
+}
+var DefaultChatTransport = class {
   constructor({
-
-
-
+    api = "/api/chat",
+    credentials,
+    headers,
+    body,
+    fetch: fetch2,
+    prepareRequestBody
+  } = {}) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequestBody = prepareRequestBody;
+  }
+  submitMessages({
+    chatId,
+    messages,
+    abortController,
+    body,
+    headers,
+    requestType
+  }) {
+    var _a17, _b;
+    return fetchUIMessageStream({
+      api: this.api,
+      headers: {
+        ...this.headers,
+        ...headers
+      },
+      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      })) != null ? _b : {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      },
+      credentials: this.credentials,
+      abortController: () => abortController,
+      fetch: this.fetch,
+      requestType
+    });
+  }
+};
+
+// src/ui/chat.ts
+var AbstractChat = class {
+  constructor({
+    generateId: generateId3 = import_provider_utils5.generateId,
+    id = generateId3(),
+    transport = new DefaultChatTransport(),
     maxSteps = 1,
     messageMetadataSchema,
     dataPartSchemas,
-
+    state,
+    onError,
+    onToolCall,
+    onFinish
   }) {
-    this.
-    this.
-
-
-
-
-
+    this.subscribers = /* @__PURE__ */ new Set();
+    this.activeResponse = void 0;
+    this.jobExecutor = new SerialJobExecutor();
+    this.removeAssistantResponse = () => {
+      const lastMessage = this.state.messages[this.state.messages.length - 1];
+      if (lastMessage == null) {
+        throw new Error("Cannot remove assistant response from empty chat");
+      }
+      if (lastMessage.role !== "assistant") {
+        throw new Error("Last message is not an assistant message");
+      }
+      this.state.popMessage();
+      this.emit({ type: "messages-changed" });
+    };
+    /**
+     * Append a user message to the chat list. This triggers the API call to fetch
+     * the assistant's response.
+     */
+    this.append = async (message, { headers, body } = {}) => {
+      var _a17;
+      this.state.pushMessage({ ...message, id: (_a17 = message.id) != null ? _a17 : this.generateId() });
+      this.emit({ type: "messages-changed" });
+      await this.triggerRequest({
+        headers,
+        body,
+        requestType: "generate"
+      });
+    };
+    /**
+     * Reload the last AI chat response for the given chat history. If the last
+     * message isn't from the assistant, it will request the API to generate a
+     * new response.
+     */
+    this.reload = async ({
+      headers,
+      body
+    } = {}) => {
+      if (this.lastMessage === void 0) {
+        return;
+      }
+      if (this.lastMessage.role === "assistant") {
+        this.state.popMessage();
+        this.emit({ type: "messages-changed" });
+      }
+      await this.triggerRequest({
+        requestType: "generate",
+        headers,
+        body
+      });
+    };
+    /**
+     * Resume an ongoing chat generation stream. This does not resume an aborted generation.
+     */
+    this.experimental_resume = async ({
+      headers,
+      body
+    } = {}) => {
+      await this.triggerRequest({
+        requestType: "resume",
+        headers,
+        body
+      });
+    };
+    this.addToolResult = async ({
+      toolCallId,
+      result
+    }) => {
+      this.jobExecutor.run(async () => {
+        updateToolCallResult({
+          messages: this.state.messages,
+          toolCallId,
+          toolResult: result
+        });
+        this.messages = this.state.messages;
+        if (this.status === "submitted" || this.status === "streaming") {
+          return;
+        }
+        const lastMessage = this.lastMessage;
+        if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
+          this.triggerRequest({
+            requestType: "generate"
+          });
+        }
+      });
+    };
+    /**
+     * Abort the current request immediately, keep the generated tokens if any.
+     */
+    this.stop = async () => {
+      var _a17;
+      if (this.status !== "streaming" && this.status !== "submitted")
+        return;
+      if ((_a17 = this.activeResponse) == null ? void 0 : _a17.abortController) {
+        this.activeResponse.abortController.abort();
+        this.activeResponse.abortController = void 0;
+      }
+    };
+    this.id = id;
     this.maxSteps = maxSteps;
     this.transport = transport;
-    this.
-    this.generateId = generateId3 != null ? generateId3 : import_provider_utils4.generateId;
+    this.generateId = generateId3;
     this.messageMetadataSchema = messageMetadataSchema;
     this.dataPartSchemas = dataPartSchemas;
+    this.state = state;
+    this.onError = onError;
+    this.onToolCall = onToolCall;
+    this.onFinish = onFinish;
   }
-
-
-
-
-
-
-
-
-
-
-    return this.chats.size;
-  }
-  getStatus(id) {
-    return this.getChatState(id).status;
+  /**
+   * Hook status:
+   *
+   * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
+   * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
+   * - `ready`: The full response has been received and processed; a new user message can be submitted.
+   * - `error`: An error occurred during the API request, preventing successful completion.
+   */
+  get status() {
+    return this.state.status;
   }
   setStatus({
-    id,
     status,
     error
   }) {
-
-    if (state.status === status)
+    if (this.status === status)
       return;
-    state.
-    state.
-    this.emit({ type: "
+    this.state.status = status;
+    this.state.error = error;
+    this.emit({ type: "status-changed" });
   }
-
-    return this.
+  get error() {
+    return this.state.error;
   }
-
-    return this.
+  get messages() {
+    return this.state.messages;
   }
-
-
-    return chat.messages[chat.messages.length - 1];
+  get lastMessage() {
+    return this.state.messages[this.state.messages.length - 1];
   }
   subscribe(subscriber) {
     this.subscribers.add(subscriber);
     return () => this.subscribers.delete(subscriber);
   }
-
-
-    messages
-  }) {
-    this.getChatState(id).setMessages(messages);
-    this.emit({ type: "chat-messages-changed", chatId: id });
-  }
-  removeAssistantResponse(id) {
-    const chat = this.getChatState(id);
-    const lastMessage = chat.messages[chat.messages.length - 1];
-    if (lastMessage == null) {
-      throw new Error("Cannot remove assistant response from empty chat");
-    }
-    if (lastMessage.role !== "assistant") {
-      throw new Error("Last message is not an assistant message");
-    }
-    chat.popMessage();
-    this.emit({ type: "chat-messages-changed", chatId: id });
-  }
-  async submitMessage({
-    chatId,
-    message,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    var _a17;
-    const state = this.getChatState(chatId);
-    state.pushMessage({ ...message, id: (_a17 = message.id) != null ? _a17 : this.generateId() });
-    this.emit({
-      type: "chat-messages-changed",
-      chatId
-    });
-    await this.triggerRequest({
-      chatId,
-      headers,
-      body,
-      requestType: "generate",
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resubmitLastUserMessage({
-    chatId,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    const chat = this.getChatState(chatId);
-    if (chat.messages[chat.messages.length - 1].role === "assistant") {
-      chat.popMessage();
-      this.emit({
-        type: "chat-messages-changed",
-        chatId
-      });
-    }
-    if (chat.messages.length === 0) {
-      return;
-    }
-    return this.triggerRequest({
-      chatId,
-      requestType: "generate",
-      headers,
-      body,
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resumeStream({
-    chatId,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    return this.triggerRequest({
-      chatId,
-      requestType: "resume",
-      headers,
-      body,
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async addToolResult({
-    chatId,
-    toolCallId,
-    result
-  }) {
-    const chat = this.getChatState(chatId);
-    chat.jobExecutor.run(async () => {
-      updateToolCallResult({
-        messages: chat.messages,
-        toolCallId,
-        toolResult: result
-      });
-      this.setMessages({
-        id: chatId,
-        messages: chat.messages
-      });
-      if (chat.status === "submitted" || chat.status === "streaming") {
-        return;
-      }
-      const lastMessage = chat.messages[chat.messages.length - 1];
-      if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
-        this.triggerRequest({
-          requestType: "generate",
-          chatId
-        });
-      }
-    });
-  }
-  async stopStream({ chatId }) {
-    var _a17;
-    const chat = this.getChatState(chatId);
-    if (chat.status !== "streaming" && chat.status !== "submitted")
-      return;
-    if ((_a17 = chat.activeResponse) == null ? void 0 : _a17.abortController) {
-      chat.activeResponse.abortController.abort();
-      chat.activeResponse.abortController = void 0;
-    }
+  set messages(messages) {
+    this.state.messages = messages;
+    this.emit({ type: "messages-changed" });
   }
   emit(event) {
     for (const subscriber of this.subscribers) {
-      subscriber.
-    }
-  }
-  getChatState(id) {
-    if (!this.hasChat(id)) {
-      this.addChat(id, []);
+      subscriber.onChange(event);
     }
-    return this.chats.get(id);
   }
   async triggerRequest({
-    chatId,
     requestType,
     headers,
-    body
-    onError,
-    onToolCall,
-    onFinish
+    body
  }) {
-
-    this.setStatus({
-    const messageCount =
-    const lastMessage =
-    const maxStep = lastMessage.parts.filter(
-      (part) => part.type === "step-start"
-    ).length;
+    var _a17, _b;
+    this.setStatus({ status: "submitted", error: void 0 });
+    const messageCount = this.state.messages.length;
+    const lastMessage = this.lastMessage;
+    const maxStep = (_a17 = lastMessage == null ? void 0 : lastMessage.parts.filter((part) => part.type === "step-start").length) != null ? _a17 : 0;
     try {
-      const lastMessage2 = chat.messages[chat.messages.length - 1];
       const activeResponse = {
         state: createStreamingUIMessageState({
-          lastMessage:
+          lastMessage: this.state.snapshot(lastMessage),
           newMessageId: this.generateId()
         }),
         abortController: new AbortController()
       };
-
+      this.activeResponse = activeResponse;
       const stream = await this.transport.submitMessages({
-        chatId,
-        messages:
+        chatId: this.id,
+        messages: this.state.messages,
         body,
         headers,
         abortController: activeResponse.abortController,
@@ -1681,23 +1770,23 @@ var ChatStore = class {
       });
       const runUpdateMessageJob = (job) => (
        // serialize the job execution to avoid race conditions:
-
+        this.jobExecutor.run(
          () => job({
            state: activeResponse.state,
            write: () => {
-
-
+              var _a18;
+              this.setStatus({ status: "streaming" });
+              const replaceLastMessage = activeResponse.state.message.id === ((_a18 = this.lastMessage) == null ? void 0 : _a18.id);
              if (replaceLastMessage) {
-
-
+                this.state.replaceMessage(
+                  this.state.messages.length - 1,
                  activeResponse.state.message
                );
              } else {
-
+                this.state.pushMessage(activeResponse.state.message);
              }
              this.emit({
-                type: "
-                chatId
+                type: "messages-changed"
              });
            }
          })
@@ -1706,7 +1795,7 @@ var ChatStore = class {
       await consumeStream({
         stream: processUIMessageStream({
           stream,
-          onToolCall,
+          onToolCall: this.onToolCall,
           messageMetadataSchema: this.messageMetadataSchema,
           dataPartSchemas: this.dataPartSchemas,
           runUpdateMessageJob
@@ -1715,32 +1804,29 @@ var ChatStore = class {
           throw error;
         }
       });
-      onFinish == null ? void 0 :
-      this.setStatus({
+      (_b = this.onFinish) == null ? void 0 : _b.call(this, { message: activeResponse.state.message });
+      this.setStatus({ status: "ready" });
     } catch (err) {
+      console.error(err);
       if (err.name === "AbortError") {
-        this.setStatus({
+        this.setStatus({ status: "ready" });
         return null;
       }
-      if (onError && err instanceof Error) {
-        onError(err);
+      if (this.onError && err instanceof Error) {
+        this.onError(err);
       }
-      this.setStatus({
+      this.setStatus({ status: "error", error: err });
     } finally {
-
+      this.activeResponse = void 0;
     }
     if (shouldResubmitMessages({
       originalMaxToolInvocationStep: maxStep,
       originalMessageCount: messageCount,
       maxSteps: this.maxSteps,
-      messages:
+      messages: this.state.messages
     })) {
       await this.triggerRequest({
-        chatId,
         requestType,
-        onError,
-        onToolCall,
-        onFinish,
         headers,
         body
       });
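
Note: the hunks above replace the multi-chat `ChatStore` (keyed by `chatId`, emitting `chat-messages-changed`) with a per-instance `AbstractChat` that owns a single id, transport, and `SerialJobExecutor`, reads messages and status from a pluggable `state` object, and takes `onError`/`onToolCall`/`onFinish` as constructor options instead of per-call arguments. `DefaultChatTransport` also gains defaults: `api` falls back to `"/api/chat"` and the whole options object is optional. A hedged configuration sketch (endpoint and header values are placeholders):

    const transport = new DefaultChatTransport({
      api: "/api/chat",
      headers: { Authorization: "Bearer <token>" }, // placeholder auth header
      // prepareRequestBody may fully replace the POST body; returning
      // null/undefined falls back to { chatId, messages, ...body }.
      prepareRequestBody: ({ chatId, messages }) => ({
        chatId,
        messages,
        requestTag: "example", // hypothetical extra field
      }),
    });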
@@ -1930,154 +2016,17 @@ function convertToModelMessages(messages, options) {
         break;
       }
       default: {
-        const _exhaustiveCheck = message.role;
-        throw new MessageConversionError({
-          originalMessage: message,
-          message: `Unsupported role: ${_exhaustiveCheck}`
-        });
-      }
-    }
-  }
-  return modelMessages;
-}
-var convertToCoreMessages = convertToModelMessages;
-
-// src/ui/default-chat-store-options.ts
-var import_provider_utils6 = require("@ai-sdk/provider-utils");
-
-// src/ui/default-chat-transport.ts
-var import_provider_utils5 = require("@ai-sdk/provider-utils");
-var getOriginalFetch2 = () => fetch;
-async function fetchUIMessageStream({
-  api,
-  body,
-  credentials,
-  headers,
-  abortController,
-  fetch: fetch2 = getOriginalFetch2(),
-  requestType = "generate"
-}) {
-  var _a17, _b, _c;
-  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
-    method: "GET",
-    headers: {
-      "Content-Type": "application/json",
-      ...headers
-    },
-    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
-    credentials
-  }) : await fetch2(api, {
-    method: "POST",
-    body: JSON.stringify(body),
-    headers: {
-      "Content-Type": "application/json",
-      ...headers
-    },
-    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
-    credentials
-  });
-  if (!response.ok) {
-    throw new Error(
-      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
-    );
-  }
-  if (!response.body) {
-    throw new Error("The response body is empty.");
-  }
-  return (0, import_provider_utils5.parseJsonEventStream)({
-    stream: response.body,
-    schema: uiMessageStreamPartSchema
-  }).pipeThrough(
-    new TransformStream({
-      async transform(part, controller) {
-        if (!part.success) {
-          throw part.error;
-        }
-        controller.enqueue(part.value);
+        const _exhaustiveCheck = message.role;
+        throw new MessageConversionError({
+          originalMessage: message,
+          message: `Unsupported role: ${_exhaustiveCheck}`
+        });
       }
-    }
-  );
-}
-var DefaultChatTransport = class {
-  constructor({
-    api,
-    credentials,
-    headers,
-    body,
-    fetch: fetch2,
-    prepareRequestBody
-  }) {
-    this.api = api;
-    this.credentials = credentials;
-    this.headers = headers;
-    this.body = body;
-    this.fetch = fetch2;
-    this.prepareRequestBody = prepareRequestBody;
-  }
-  submitMessages({
-    chatId,
-    messages,
-    abortController,
-    body,
-    headers,
-    requestType
-  }) {
-    var _a17, _b;
-    return fetchUIMessageStream({
-      api: this.api,
-      headers: {
-        ...this.headers,
-        ...headers
-      },
-      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      })) != null ? _b : {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      },
-      credentials: this.credentials,
-      abortController: () => abortController,
-      fetch: this.fetch,
-      requestType
-    });
+    }
   }
-
-
-// src/ui/default-chat-store-options.ts
-function defaultChatStoreOptions({
-  api = "/api/chat",
-  fetch: fetch2,
-  credentials,
-  headers,
-  body,
-  prepareRequestBody,
-  generateId: generateId3 = import_provider_utils6.generateId,
-  messageMetadataSchema,
-  maxSteps = 1,
-  dataPartSchemas,
-  chats
-}) {
-  return () => ({
-    transport: new DefaultChatTransport({
-      api,
-      fetch: fetch2,
-      credentials,
-      headers,
-      body,
-      prepareRequestBody
-    }),
-    generateId: generateId3,
-    messageMetadataSchema,
-    dataPartSchemas,
-    maxSteps,
-    chats
-  });
+  return modelMessages;
 }
+var convertToCoreMessages = convertToModelMessages;
 
 // src/ui/transform-text-to-ui-message-stream.ts
 function transformTextToUiMessageStream({
@@ -2448,40 +2397,8 @@ function isDeepEqualData(obj1, obj2) {
   return true;
 }
 
-// src/util/serial-job-executor.ts
-var SerialJobExecutor = class {
-  constructor() {
-    this.queue = [];
-    this.isProcessing = false;
-  }
-  async processQueue() {
-    if (this.isProcessing) {
-      return;
-    }
-    this.isProcessing = true;
-    while (this.queue.length > 0) {
-      await this.queue[0]();
-      this.queue.shift();
-    }
-    this.isProcessing = false;
-  }
-  async run(job) {
-    return new Promise((resolve, reject) => {
-      this.queue.push(async () => {
-        try {
-          await job();
-          resolve();
-        } catch (error) {
-          reject(error);
-        }
-      });
-      void this.processQueue();
-    });
-  }
-};
-
 // src/util/simulate-readable-stream.ts
-var
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
 function simulateReadableStream({
   chunks,
   initialDelayInMs = 0,
@@ -2489,7 +2406,7 @@ function simulateReadableStream({
   _internal
 }) {
   var _a17;
-  const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 :
+  const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils6.delay;
   let index = 0;
   return new ReadableStream({
     async pull(controller) {
@@ -2505,7 +2422,7 @@ function simulateReadableStream({
 
 // src/util/retry-with-exponential-backoff.ts
 var import_provider17 = require("@ai-sdk/provider");
-var
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
 var retryWithExponentialBackoff = ({
   maxRetries = 2,
   initialDelayInMs = 2e3,
|
|
2523
2440
|
try {
|
2524
2441
|
return await f();
|
2525
2442
|
} catch (error) {
|
2526
|
-
if ((0,
|
2443
|
+
if ((0, import_provider_utils7.isAbortError)(error)) {
|
2527
2444
|
throw error;
|
2528
2445
|
}
|
2529
2446
|
if (maxRetries === 0) {
|
2530
2447
|
throw error;
|
2531
2448
|
}
|
2532
|
-
const errorMessage = (0,
|
2449
|
+
const errorMessage = (0, import_provider_utils7.getErrorMessage)(error);
|
2533
2450
|
const newErrors = [...errors, error];
|
2534
2451
|
const tryNumber = newErrors.length;
|
2535
2452
|
if (tryNumber > maxRetries) {
|
@@ -2540,7 +2457,7 @@ async function _retryWithExponentialBackoff(f, {
|
|
2540
2457
|
});
|
2541
2458
|
}
|
2542
2459
|
if (error instanceof Error && import_provider17.APICallError.isInstance(error) && error.isRetryable === true && tryNumber <= maxRetries) {
|
2543
|
-
await (0,
|
2460
|
+
await (0, import_provider_utils7.delay)(delayInMs);
|
2544
2461
|
return _retryWithExponentialBackoff(
|
2545
2462
|
f,
|
2546
2463
|
{ maxRetries, delayInMs: backoffFactor * delayInMs, backoffFactor },
|
@@ -3099,7 +3016,7 @@ var DefaultEmbedManyResult = class {
 };
 
 // src/util/detect-media-type.ts
-var
+var import_provider_utils8 = require("@ai-sdk/provider-utils");
 var imageMediaTypeSignatures = [
   {
     mediaType: "image/gif",
@@ -3206,7 +3123,7 @@ var audioMediaTypeSignatures = [
   }
 ];
 var stripID3 = (data) => {
-  const bytes = typeof data === "string" ? (0,
+  const bytes = typeof data === "string" ? (0, import_provider_utils8.convertBase64ToUint8Array)(data) : data;
   const id3Size = (bytes[6] & 127) << 21 | (bytes[7] & 127) << 14 | (bytes[8] & 127) << 7 | bytes[9] & 127;
   return bytes.slice(id3Size + 10);
 };
@@ -3232,7 +3149,7 @@ function detectMediaType({
 }
 
 // core/generate-text/generated-file.ts
-var
+var import_provider_utils9 = require("@ai-sdk/provider-utils");
 var DefaultGeneratedFile = class {
   constructor({
     data,
@@ -3246,14 +3163,14 @@ var DefaultGeneratedFile = class {
   // lazy conversion with caching to avoid unnecessary conversion overhead:
   get base64() {
     if (this.base64Data == null) {
-      this.base64Data = (0,
+      this.base64Data = (0, import_provider_utils9.convertUint8ArrayToBase64)(this.uint8ArrayData);
     }
     return this.base64Data;
   }
   // lazy conversion with caching to avoid unnecessary conversion overhead:
   get uint8Array() {
     if (this.uint8ArrayData == null) {
-      this.uint8ArrayData = (0,
+      this.uint8ArrayData = (0, import_provider_utils9.convertBase64ToUint8Array)(this.base64Data);
     }
     return this.uint8ArrayData;
   }
@@ -3368,8 +3285,8 @@ async function invokeModelMaxImagesPerCall(model) {
 }
 
 // core/generate-object/generate-object.ts
-var
-var
+var import_provider22 = require("@ai-sdk/provider");
+var import_provider_utils14 = require("@ai-sdk/provider-utils");
 
 // core/generate-text/extract-content-text.ts
 function extractContentText(content) {
@@ -3383,7 +3300,7 @@ function extractContentText(content) {
 }
 
 // core/prompt/convert-to-language-model-prompt.ts
-var
+var import_provider_utils11 = require("@ai-sdk/provider-utils");
 
 // src/util/download.ts
 async function download({ url }) {
@@ -3412,7 +3329,7 @@ async function download({ url }) {
 
 // core/prompt/data-content.ts
 var import_provider18 = require("@ai-sdk/provider");
-var
+var import_provider_utils10 = require("@ai-sdk/provider-utils");
 var import_zod2 = require("zod");
 
 // core/prompt/split-data-url.ts
@@ -3477,9 +3394,9 @@ function convertDataContentToBase64String(content) {
     return content;
   }
   if (content instanceof ArrayBuffer) {
-    return (0,
+    return (0, import_provider_utils10.convertUint8ArrayToBase64)(new Uint8Array(content));
   }
-  return (0,
+  return (0, import_provider_utils10.convertUint8ArrayToBase64)(content);
 }
 function convertDataContentToUint8Array(content) {
   if (content instanceof Uint8Array) {
@@ -3487,7 +3404,7 @@ function convertDataContentToUint8Array(content) {
   }
   if (typeof content === "string") {
     try {
-      return (0,
+      return (0, import_provider_utils10.convertBase64ToUint8Array)(content);
     } catch (error) {
       throw new InvalidDataContentError({
         message: "Invalid data content. Content string is not a base64-encoded media.",
@@ -3638,7 +3555,7 @@ async function downloadAssets(messages, downloadImplementation, supportedUrls) {
     }
     return { mediaType, data };
   }).filter(
-    (part) => part.data instanceof URL && part.mediaType != null && !(0,
+    (part) => part.data instanceof URL && part.mediaType != null && !(0, import_provider_utils11.isUrlSupported)({
       url: part.data.toString(),
       mediaType: part.mediaType,
      supportedUrls
@@ -3807,9 +3724,22 @@ function prepareCallSettings({
   };
 }
 
+// core/prompt/resolve-language-model.ts
+var import_gateway = require("@ai-sdk/gateway");
+var GLOBAL_DEFAULT_PROVIDER = Symbol(
+  "vercel.ai.global.defaultProvider"
+);
+function resolveLanguageModel(model) {
+  if (typeof model !== "string") {
+    return model;
+  }
+  const globalProvider = globalThis[GLOBAL_DEFAULT_PROVIDER];
+  return (globalProvider != null ? globalProvider : import_gateway.gateway).languageModel(model);
+}
+
 // core/prompt/standardize-prompt.ts
 var import_provider19 = require("@ai-sdk/provider");
-var
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
 var import_zod8 = require("zod");
 
 // core/prompt/message.ts
|
@@ -3981,7 +3911,7 @@ async function standardizePrompt(prompt) {
|
|
3981
3911
|
message: "messages must not be empty"
|
3982
3912
|
});
|
3983
3913
|
}
|
3984
|
-
const validationResult = await (0,
|
3914
|
+
const validationResult = await (0, import_provider_utils12.safeValidateTypes)({
|
3985
3915
|
value: messages,
|
3986
3916
|
schema: import_zod8.z.array(modelMessageSchema)
|
3987
3917
|
});
|
@@ -3998,6 +3928,20 @@ async function standardizePrompt(prompt) {
   };
 }
 
+// core/prompt/wrap-gateway-error.ts
+var import_gateway2 = require("@ai-sdk/gateway");
+var import_provider20 = require("@ai-sdk/provider");
+function wrapGatewayError(error) {
+  if (import_gateway2.GatewayAuthenticationError.isInstance(error) || import_gateway2.GatewayModelNotFoundError.isInstance(error)) {
+    return new import_provider20.AISDKError({
+      name: "GatewayError",
+      message: "Vercel AI Gateway access failed. If you want to use AI SDK providers directly, use the providers, e.g. @ai-sdk/openai, or register a different global default provider.",
+      cause: error
+    });
+  }
+  return error;
+}
+
 // core/telemetry/stringify-for-telemetry.ts
 function stringifyForTelemetry(prompt) {
   return JSON.stringify(
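
Note: `wrapGatewayError` converts gateway authentication and model-not-found failures into a generic `AISDKError` named `"GatewayError"`, keeping the original error as `cause`; everything else passes through unchanged. A sketch of handling it at the call site (this error-inspection style is an assumption, not a documented pattern):

    import { AISDKError } from "ai";

    try {
      // ... a generateObject/streamObject call that used a string model id
    } catch (error) {
      if (AISDKError.isInstance(error) && error.name === "GatewayError") {
        // Gateway access failed: authenticate with the gateway, use a
        // provider package directly, or register a global default provider.
        console.error(error.cause);
      } else {
        throw error;
      }
    }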
@@ -4014,8 +3958,8 @@ function stringifyForTelemetry(prompt) {
 }
 
 // core/generate-object/output-strategy.ts
-var
-var
+var import_provider21 = require("@ai-sdk/provider");
+var import_provider_utils13 = require("@ai-sdk/provider-utils");
 
 // src/util/async-iterable-stream.ts
 function createAsyncIterableStream(source) {
@@ -4052,7 +3996,7 @@ var noSchemaOutputStrategy = {
     } : { success: true, value };
   },
   createElementStream() {
-    throw new
+    throw new import_provider21.UnsupportedFunctionalityError({
       functionality: "element streams in no-schema mode"
     });
   }
@@ -4071,10 +4015,10 @@ var objectOutputStrategy = (schema) => ({
     };
   },
   async validateFinalResult(value) {
-    return (0,
+    return (0, import_provider_utils13.safeValidateTypes)({ value, schema });
   },
   createElementStream() {
-    throw new
+    throw new import_provider21.UnsupportedFunctionalityError({
       functionality: "element streams in object mode"
     });
   }
@@ -4102,10 +4046,10 @@ var arrayOutputStrategy = (schema) => {
       isFinalDelta
     }) {
       var _a17;
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || !(0, import_provider21.isJSONArray)(value.elements)) {
         return {
           success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
             value,
             cause: "value must be an object that contains an array of elements"
           })
@@ -4115,7 +4059,7 @@ var arrayOutputStrategy = (schema) => {
       const resultArray = [];
       for (let i = 0; i < inputArray.length; i++) {
         const element = inputArray[i];
-        const result = await (0,
+        const result = await (0, import_provider_utils13.safeValidateTypes)({ value: element, schema });
         if (i === inputArray.length - 1 && !isFinalDelta) {
           continue;
         }
@@ -4145,10 +4089,10 @@ var arrayOutputStrategy = (schema) => {
       };
     },
     async validateFinalResult(value) {
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || !(0, import_provider21.isJSONArray)(value.elements)) {
         return {
           success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
             value,
             cause: "value must be an object that contains an array of elements"
           })
@@ -4156,7 +4100,7 @@ var arrayOutputStrategy = (schema) => {
       }
       const inputArray = value.elements;
       for (const element of inputArray) {
-        const result = await (0,
+        const result = await (0, import_provider_utils13.safeValidateTypes)({ value: element, schema });
         if (!result.success) {
           return result;
         }
@@ -4211,10 +4155,10 @@ var enumOutputStrategy = (enumValues) => {
       additionalProperties: false
     },
     async validateFinalResult(value) {
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || typeof value.result !== "string") {
        return {
          success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
            value,
            cause: 'value must be an object that contains a string in the "result" property.'
          })
@@ -4223,17 +4167,17 @@ var enumOutputStrategy = (enumValues) => {
      const result = value.result;
      return enumValues.includes(result) ? { success: true, value: result } : {
        success: false,
-        error: new
+        error: new import_provider21.TypeValidationError({
          value,
          cause: "value must be a string in the enum"
        })
      };
    },
    async validatePartialResult({ value, textDelta }) {
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || typeof value.result !== "string") {
        return {
          success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
            value,
            cause: 'value must be an object that contains a string in the "result" property.'
          })
@@ -4246,7 +4190,7 @@ var enumOutputStrategy = (enumValues) => {
      if (value.result.length === 0 || possibleEnumValues.length === 0) {
        return {
          success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
            value,
            cause: "value must be a string in the enum"
          })
@@ -4261,7 +4205,7 @@ var enumOutputStrategy = (enumValues) => {
      };
    },
    createElementStream() {
-      throw new
+      throw new import_provider21.UnsupportedFunctionalityError({
        functionality: "element streams in enum mode"
      });
    }
@@ -4274,9 +4218,9 @@ function getOutputStrategy({
 }) {
   switch (output) {
     case "object":
-      return objectOutputStrategy((0,
+      return objectOutputStrategy((0, import_provider_utils13.asSchema)(schema));
     case "array":
-      return arrayOutputStrategy((0,
+      return arrayOutputStrategy((0, import_provider_utils13.asSchema)(schema));
     case "enum":
       return enumOutputStrategy(enumValues);
     case "no-schema":
@@ -4406,14 +4350,8 @@ function validateObjectGenerationInput({
   }
 }
 
-// core/prompt/resolve-language-model.ts
-var import_gateway = require("@ai-sdk/gateway");
-function resolveLanguageModel(model) {
-  return typeof model === "string" ? import_gateway.gateway.languageModel(model) : model;
-}
-
 // core/generate-object/generate-object.ts
-var originalGenerateId = (0,
+var originalGenerateId = (0, import_provider_utils14.createIdGenerator)({ prefix: "aiobj", size: 24 });
 async function generateObject(options) {
   const {
     model: modelArg,
@@ -4461,208 +4399,212 @@ async function generateObject(options) {
|
|
4461
4399
|
settings: { ...callSettings, maxRetries }
|
4462
4400
|
});
|
4463
4401
|
const tracer = getTracer(telemetry);
|
4464
|
-
|
4465
|
-
|
4466
|
-
|
4467
|
-
|
4468
|
-
|
4469
|
-
|
4470
|
-
|
4471
|
-
|
4472
|
-
|
4473
|
-
...baseTelemetryAttributes,
|
4474
|
-
// specific settings that only make sense on the outer level:
|
4475
|
-
"ai.prompt": {
|
4476
|
-
input: () => JSON.stringify({ system, prompt, messages })
|
4477
|
-
},
|
4478
|
-
"ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
|
4479
|
-
"ai.schema.name": schemaName,
|
4480
|
-
"ai.schema.description": schemaDescription,
|
4481
|
-
"ai.settings.output": outputStrategy.type
|
4482
|
-
}
|
4483
|
-
}),
|
4484
|
-
tracer,
|
4485
|
-
fn: async (span) => {
|
4486
|
-
var _a17;
|
4487
|
-
let result;
|
4488
|
-
let finishReason;
|
4489
|
-
let usage;
|
4490
|
-
let warnings;
|
4491
|
-
let response;
|
4492
|
-
let request;
|
4493
|
-
let resultProviderMetadata;
|
4494
|
-
const standardizedPrompt = await standardizePrompt({
|
4495
|
-
system,
|
4496
|
-
prompt,
|
4497
|
-
messages
|
4498
|
-
});
|
4499
|
-
const promptMessages = await convertToLanguageModelPrompt({
|
4500
|
-
prompt: standardizedPrompt,
|
4501
|
-
supportedUrls: await model.supportedUrls
|
4502
|
-
});
|
4503
|
-
const generateResult = await retry(
|
4504
|
-
() => recordSpan({
|
4505
|
-
name: "ai.generateObject.doGenerate",
|
4506
|
-
attributes: selectTelemetryAttributes({
|
4507
|
-
telemetry,
|
4508
|
-
attributes: {
|
4509
|
-
...assembleOperationName({
|
4510
|
-
operationId: "ai.generateObject.doGenerate",
|
4511
|
-
telemetry
|
4512
|
-
}),
|
4513
|
-
...baseTelemetryAttributes,
|
4514
|
-
"ai.prompt.messages": {
|
4515
|
-
input: () => stringifyForTelemetry(promptMessages)
|
4516
|
-
},
|
4517
|
-
// standardized gen-ai llm span attributes:
|
4518
|
-
"gen_ai.system": model.provider,
|
4519
|
-
"gen_ai.request.model": model.modelId,
|
4520
|
-
"gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
|
4521
|
-
"gen_ai.request.max_tokens": callSettings.maxOutputTokens,
|
4522
|
-
"gen_ai.request.presence_penalty": callSettings.presencePenalty,
|
4523
|
-
"gen_ai.request.temperature": callSettings.temperature,
|
4524
|
-
"gen_ai.request.top_k": callSettings.topK,
|
4525
|
-
"gen_ai.request.top_p": callSettings.topP
|
4526
|
-
}
|
4402
|
+
try {
|
4403
|
+
return await recordSpan({
|
4404
|
+
name: "ai.generateObject",
|
4405
|
+
attributes: selectTelemetryAttributes({
|
4406
|
+
telemetry,
|
4407
|
+
attributes: {
|
4408
|
+
...assembleOperationName({
|
4409
|
+
operationId: "ai.generateObject",
|
4410
|
+
telemetry
|
4527
4411
|
}),
|
4528
|
-
|
4529
|
-
|
4530
|
-
|
4531
|
-
|
4532
|
-
|
4533
|
-
|
4534
|
-
|
4535
|
-
|
4536
|
-
|
4537
|
-
|
4538
|
-
|
4539
|
-
|
4540
|
-
|
4541
|
-
|
4542
|
-
|
4543
|
-
|
4544
|
-
|
4545
|
-
|
4546
|
-
|
4547
|
-
|
4548
|
-
|
4549
|
-
|
4550
|
-
|
4551
|
-
|
4552
|
-
|
4553
|
-
|
4554
|
-
|
4555
|
-
|
4556
|
-
|
4557
|
-
|
4412
|
+
...baseTelemetryAttributes,
|
4413
|
+
// specific settings that only make sense on the outer level:
|
4414
|
+
"ai.prompt": {
|
4415
|
+
input: () => JSON.stringify({ system, prompt, messages })
|
4416
|
+
},
|
4417
|
+
"ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
|
4418
|
+
"ai.schema.name": schemaName,
|
4419
|
+
"ai.schema.description": schemaDescription,
|
4420
|
+
"ai.settings.output": outputStrategy.type
|
4421
|
+
}
|
4422
|
+
}),
|
4423
|
+
tracer,
|
4424
|
+
fn: async (span) => {
|
4425
|
+
var _a17;
|
4426
|
+
let result;
|
4427
|
+
let finishReason;
|
4428
|
+
let usage;
|
4429
|
+
let warnings;
|
4430
|
+
let response;
|
4431
|
+
let request;
|
4432
|
+
let resultProviderMetadata;
|
4433
|
+
const standardizedPrompt = await standardizePrompt({
|
4434
|
+
system,
|
4435
|
+
prompt,
|
4436
|
+
messages
|
4437
|
+
});
|
4438
|
+
const promptMessages = await convertToLanguageModelPrompt({
|
4439
|
+
prompt: standardizedPrompt,
|
4440
|
+
supportedUrls: await model.supportedUrls
|
4441
|
+
});
|
4442
|
+
const generateResult = await retry(
|
4443
|
+
() => recordSpan({
|
4444
|
+
name: "ai.generateObject.doGenerate",
|
4445
|
+
attributes: selectTelemetryAttributes({
|
4446
|
+
telemetry,
|
4447
|
+
attributes: {
|
4448
|
+
...assembleOperationName({
|
4449
|
+
operationId: "ai.generateObject.doGenerate",
|
4450
|
+
telemetry
|
4451
|
+
}),
|
4452
|
+
...baseTelemetryAttributes,
|
4453
|
+
"ai.prompt.messages": {
|
4454
|
+
input: () => stringifyForTelemetry(promptMessages)
|
4455
|
+
},
|
4456
|
+
// standardized gen-ai llm span attributes:
|
4457
|
+
"gen_ai.system": model.provider,
|
4458
|
+
"gen_ai.request.model": model.modelId,
|
4459
|
+
"gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
|
4460
|
+
"gen_ai.request.max_tokens": callSettings.maxOutputTokens,
|
4461
|
+
"gen_ai.request.presence_penalty": callSettings.presencePenalty,
|
4462
|
+
"gen_ai.request.temperature": callSettings.temperature,
|
4463
|
+
"gen_ai.request.top_k": callSettings.topK,
|
4464
|
+
"gen_ai.request.top_p": callSettings.topP
|
4465
|
+
}
|
4466
|
+
}),
|
4467
|
+
tracer,
|
4468
|
+
fn: async (span2) => {
|
4469
|
+
var _a18, _b, _c, _d, _e, _f, _g, _h;
|
4470
|
+
const result2 = await model.doGenerate({
|
4471
|
+
responseFormat: {
|
4472
|
+
type: "json",
|
4473
|
+
schema: outputStrategy.jsonSchema,
|
4474
|
+
name: schemaName,
|
4475
|
+
description: schemaDescription
|
4476
|
+
},
|
4477
|
+
...prepareCallSettings(settings),
|
4478
|
+
prompt: promptMessages,
|
4479
|
+
providerOptions,
|
4480
|
+
abortSignal,
|
4481
|
+
headers
|
4558
4482
|
});
|
4483
|
+
const responseData = {
|
4484
|
+
id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
|
4485
|
+
timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
|
4486
|
+
modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
|
4487
|
+
headers: (_g = result2.response) == null ? void 0 : _g.headers,
|
4488
|
+
body: (_h = result2.response) == null ? void 0 : _h.body
|
4489
|
+
};
|
4490
|
+
const text2 = extractContentText(result2.content);
|
4491
|
+
if (text2 === void 0) {
|
4492
|
+
throw new NoObjectGeneratedError({
|
4493
|
+
message: "No object generated: the model did not return a response.",
|
4494
|
+
response: responseData,
|
4495
|
+
usage: result2.usage,
|
4496
|
+
finishReason: result2.finishReason
|
4497
|
+
});
|
4498
|
+
}
|
4499
|
+
span2.setAttributes(
|
4500
|
+
selectTelemetryAttributes({
|
4501
|
+
telemetry,
|
4502
|
+
attributes: {
|
4503
|
+
"ai.response.finishReason": result2.finishReason,
|
4504
|
+
"ai.response.object": { output: () => text2 },
|
4505 | + "ai.response.id": responseData.id,
4506 | + "ai.response.model": responseData.modelId,
4507 | + "ai.response.timestamp": responseData.timestamp.toISOString(),
4508 | + // TODO rename telemetry attributes to inputTokens and outputTokens
4509 | + "ai.usage.promptTokens": result2.usage.inputTokens,
4510 | + "ai.usage.completionTokens": result2.usage.outputTokens,
4511 | + // standardized gen-ai llm span attributes:
4512 | + "gen_ai.response.finish_reasons": [result2.finishReason],
4513 | + "gen_ai.response.id": responseData.id,
4514 | + "gen_ai.response.model": responseData.modelId,
4515 | + "gen_ai.usage.input_tokens": result2.usage.inputTokens,
4516 | + "gen_ai.usage.output_tokens": result2.usage.outputTokens
4517 | + }
4518 | + })
4519 | + );
4520 | + return { ...result2, objectText: text2, responseData };
4559 4521 | }
4560 | -
4561 | -
4562 | -
4563 | -
4564 | -
4565 | -
4566 | -
4567 | -
4568 | -
4569 | -
4570 | -
4571 | -
4572 | -
4573 | -
4574 | -
4575 | -
4576 | -
4577 | -
4578 | -
4579 | -
4580 | - );
4581 | - return { ...result2, objectText: text2, responseData };
4522 | + })
4523 | + );
4524 | + result = generateResult.objectText;
4525 | + finishReason = generateResult.finishReason;
4526 | + usage = generateResult.usage;
4527 | + warnings = generateResult.warnings;
4528 | + resultProviderMetadata = generateResult.providerMetadata;
4529 | + request = (_a17 = generateResult.request) != null ? _a17 : {};
4530 | + response = generateResult.responseData;
4531 | + async function processResult(result2) {
4532 | + const parseResult = await (0, import_provider_utils14.safeParseJSON)({ text: result2 });
4533 | + if (!parseResult.success) {
4534 | + throw new NoObjectGeneratedError({
4535 | + message: "No object generated: could not parse the response.",
4536 | + cause: parseResult.error,
4537 | + text: result2,
4538 | + response,
4539 | + usage,
4540 | + finishReason
4541 | + });
4582 4542 | }
4583 | -
4584 | -
4585 | -
4586 | -
4587 | -
4588 | -
4589 | -
4590 | -
4591 | -
4592 | -
4593 | -
4594 | -
4595 | -
4596 | -
4597 | -
4598 | -
4599 | -
4600 | - usage,
4601 | - finishReason
4602 | - });
4603 | - }
4604 | - const validationResult = await outputStrategy.validateFinalResult(
4605 | - parseResult.value,
4606 | - {
4607 | - text: result2,
4608 | - response,
4609 | - usage
4543 | + const validationResult = await outputStrategy.validateFinalResult(
4544 | + parseResult.value,
4545 | + {
4546 | + text: result2,
4547 | + response,
4548 | + usage
4549 | + }
4550 | + );
4551 | + if (!validationResult.success) {
4552 | + throw new NoObjectGeneratedError({
4553 | + message: "No object generated: response did not match schema.",
4554 | + cause: validationResult.error,
4555 | + text: result2,
4556 | + response,
4557 | + usage,
4558 | + finishReason
4559 | + });
4610 4560 | }
4611 | -
4612 | - if (!validationResult.success) {
4613 | - throw new NoObjectGeneratedError({
4614 | - message: "No object generated: response did not match schema.",
4615 | - cause: validationResult.error,
4616 | - text: result2,
4617 | - response,
4618 | - usage,
4619 | - finishReason
4620 | - });
4561 | + return validationResult.value;
4621 4562 | }
4622 | -
4623 | -
4624 | -
4625 | -
4626 | -
4627 | -
4628 | -
4629 | -
4630 | -
4631 | -
4632 | -
4633 | -
4563 | + let object2;
4564 | + try {
4565 | + object2 = await processResult(result);
4566 | + } catch (error) {
4567 | + if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider22.JSONParseError.isInstance(error.cause) || import_provider22.TypeValidationError.isInstance(error.cause))) {
4568 | + const repairedText = await repairText({
4569 | + text: result,
4570 | + error: error.cause
4571 | + });
4572 | + if (repairedText === null) {
4573 | + throw error;
4574 | + }
4575 | + object2 = await processResult(repairedText);
4576 | + } else {
4634 4577 | throw error;
4635 4578 | }
4636 | - object2 = await processResult(repairedText);
4637 | - } else {
4638 | - throw error;
4639 4579 | }
4580 | + span.setAttributes(
4581 | + selectTelemetryAttributes({
4582 | + telemetry,
4583 | + attributes: {
4584 | + "ai.response.finishReason": finishReason,
4585 | + "ai.response.object": {
4586 | + output: () => JSON.stringify(object2)
4587 | + },
4588 | + // TODO rename telemetry attributes to inputTokens and outputTokens
4589 | + "ai.usage.promptTokens": usage.inputTokens,
4590 | + "ai.usage.completionTokens": usage.outputTokens
4591 | + }
4592 | + })
4593 | + );
4594 | + return new DefaultGenerateObjectResult({
4595 | + object: object2,
4596 | + finishReason,
4597 | + usage,
4598 | + warnings,
4599 | + request,
4600 | + response,
4601 | + providerMetadata: resultProviderMetadata
4602 | + });
4640 4603 | }
4641 | -
4642 | -
4643 | -
4644 | -
4645 | - "ai.response.finishReason": finishReason,
4646 | - "ai.response.object": {
4647 | - output: () => JSON.stringify(object2)
4648 | - },
4649 | - // TODO rename telemetry attributes to inputTokens and outputTokens
4650 | - "ai.usage.promptTokens": usage.inputTokens,
4651 | - "ai.usage.completionTokens": usage.outputTokens
4652 | - }
4653 | - })
4654 | - );
4655 | - return new DefaultGenerateObjectResult({
4656 | - object: object2,
4657 | - finishReason,
4658 | - usage,
4659 | - warnings,
4660 | - request,
4661 | - response,
4662 | - providerMetadata: resultProviderMetadata
4663 | - });
4664 | - }
4665 | - });
4604 | + });
4605 | + } catch (error) {
4606 | + throw wrapGatewayError(error);
4607 | + }
4666 4608 | }
4667 4609 | var DefaultGenerateObjectResult = class {
4668 4610 | constructor(options) {
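Note: the rewritten generateObject body above funnels parse and validation failures through processResult and, when a repairText callback is configured, retries once with the repaired text before rethrowing. A minimal sketch of that hook in TypeScript, assuming it is exposed on the public generateObject options as experimental_repairText (the compiled code only shows the internal name repairText); the model instance and prompt are placeholders, not part of this diff:

    import { generateObject } from 'ai';
    import { z } from 'zod';

    const { object } = await generateObject({
      model, // placeholder LanguageModel instance
      schema: z.object({ city: z.string(), population: z.number() }),
      prompt: 'Describe the largest city in France as JSON.',
      // invoked on JSONParseError / TypeValidationError; returning null rethrows:
      experimental_repairText: async ({ text, error }) => {
        return text.replace(/```\s*$/, ''); // e.g. strip a trailing code fence
      },
    });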
@@ -4686,7 +4628,7 @@ var DefaultGenerateObjectResult = class {
4686 4628 | };
4687 4629 |
4688 4630 | // core/generate-object/stream-object.ts
4689 | - var
4631 | + var import_provider_utils15 = require("@ai-sdk/provider-utils");
4690 4632 |
4691 4633 | // src/util/create-resolvable-promise.ts
4692 4634 | function createResolvablePromise() {
@@ -4830,7 +4772,7 @@ function now() {
4830 4772 | }
4831 4773 |
4832 4774 | // core/generate-object/stream-object.ts
4833 | - var originalGenerateId2 = (0,
4775 | + var originalGenerateId2 = (0, import_provider_utils15.createIdGenerator)({ prefix: "aiobj", size: 24 });
4834 4776 | function streamObject(options) {
4835 4777 | const {
4836 4778 | model,
@@ -4843,7 +4785,9 @@ function streamObject(options) {
4843 4785 | headers,
4844 4786 | experimental_telemetry: telemetry,
4845 4787 | providerOptions,
4846 | - onError
4788 | + onError = ({ error }) => {
4789 | + console.error(error);
4790 | + },
4847 4791 | onFinish,
4848 4792 | _internal: {
4849 4793 | generateId: generateId3 = originalGenerateId2,
@@ -4936,7 +4880,7 @@ var DefaultStreamObjectResult = class {
4936 4880 | transform(chunk, controller) {
4937 4881 | controller.enqueue(chunk);
4938 4882 | if (chunk.type === "error") {
4939 | - onError
4883 | + onError({ error: wrapGatewayError(chunk.error) });
4940 4884 | }
4941 4885 | }
4942 4886 | });
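Note: the hunks above give streamObject a default onError callback that logs via console.error, and route stream error chunks through wrapGatewayError before invoking it. A sketch of overriding that default; the model and logger are placeholders, not part of this diff:

    import { streamObject } from 'ai';
    import { z } from 'zod';

    const { partialObjectStream } = streamObject({
      model, // placeholder LanguageModel instance
      schema: z.object({ title: z.string() }),
      prompt: 'Suggest a title.',
      onError: ({ error }) => {
        myLogger.warn('streamObject error', error); // myLogger is hypothetical
      },
    });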
@@ -5336,8 +5280,8 @@ var DefaultStreamObjectResult = class {
5336 5280 | };
5337 5281 |
5338 5282 | // src/error/no-speech-generated-error.ts
5339 | - var
5340 | - var NoSpeechGeneratedError = class extends
5283 | + var import_provider23 = require("@ai-sdk/provider");
5284 | + var NoSpeechGeneratedError = class extends import_provider23.AISDKError {
5341 5285 | constructor(options) {
5342 5286 | super({
5343 5287 | name: "AI_NoSpeechGeneratedError",
@@ -5426,7 +5370,7 @@ var DefaultSpeechResult = class {
5426 5370 | };
5427 5371 |
5428 5372 | // core/generate-text/generate-text.ts
5429 | - var
5373 | + var import_provider_utils18 = require("@ai-sdk/provider-utils");
5430 5374 |
5431 5375 | // src/util/as-array.ts
5432 5376 | function asArray(value) {
@@ -5434,7 +5378,7 @@ function asArray(value) {
5434 5378 | }
5435 5379 |
5436 5380 | // core/prompt/prepare-tools-and-tool-choice.ts
5437 | - var
5381 | + var import_provider_utils16 = require("@ai-sdk/provider-utils");
5438 5382 |
5439 5383 | // src/util/is-non-empty-object.ts
5440 5384 | function isNonEmptyObject(object2) {
@@ -5466,7 +5410,7 @@ function prepareToolsAndToolChoice({
5466 5410 | type: "function",
5467 5411 | name: name17,
5468 5412 | description: tool2.description,
5469 | - parameters: (0,
5413 | + parameters: (0, import_provider_utils16.asSchema)(tool2.parameters).jsonSchema
5470 5414 | };
5471 5415 | case "provider-defined":
5472 5416 | return {
@@ -5536,7 +5480,7 @@ function asContent({
5536 5480 | }
5537 5481 |
5538 5482 | // core/generate-text/parse-tool-call.ts
5539 | - var
5483 | + var import_provider_utils17 = require("@ai-sdk/provider-utils");
5540 5484 | async function parseToolCall({
5541 5485 | toolCall,
5542 5486 | tools,
@@ -5560,7 +5504,7 @@ async function parseToolCall({
5560 5504 | tools,
5561 5505 | parameterSchema: ({ toolName }) => {
5562 5506 | const { parameters } = tools[toolName];
5563 | - return (0,
5507 | + return (0, import_provider_utils17.asSchema)(parameters).jsonSchema;
5564 5508 | },
5565 5509 | system,
5566 5510 | messages,
@@ -5590,8 +5534,8 @@ async function doParseToolCall({
5590 5534 | availableTools: Object.keys(tools)
5591 5535 | });
5592 5536 | }
5593 | - const schema = (0,
5594 | - const parseResult = toolCall.args.trim() === "" ? await (0,
5537 | + const schema = (0, import_provider_utils17.asSchema)(tool2.parameters);
5538 | + const parseResult = toolCall.args.trim() === "" ? await (0, import_provider_utils17.safeValidateTypes)({ value: {}, schema }) : await (0, import_provider_utils17.safeParseJSON)({ text: toolCall.args, schema });
5595 5539 | if (parseResult.success === false) {
5596 5540 | throw new InvalidToolArgumentsError({
5597 5541 | toolName,
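Note: the parse-tool-call changes above run asSchema over tool.parameters and, when the model emits an empty argument string, validate {} against the schema instead of failing JSON parsing. A minimal tool definition that exercises this path, assuming the public tool helper and zod parameters as in released builds (the execute result is placeholder data):

    import { tool } from 'ai';
    import { z } from 'zod';

    const weatherTool = tool({
      description: 'Get the weather for a city',
      // the default lets an empty-args tool call still validate:
      parameters: z.object({ city: z.string().default('Paris') }),
      execute: async ({ city }) => ({ city, tempC: 21 }), // placeholder data
    });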
@@ -5727,7 +5671,7 @@ function toResponseMessages({
5727 5671 | }
5728 5672 |
5729 5673 | // core/generate-text/generate-text.ts
5730 | - var originalGenerateId3 = (0,
5674 | + var originalGenerateId3 = (0, import_provider_utils18.createIdGenerator)({
5731 5675 | prefix: "aitxt",
5732 5676 | size: 24
5733 5677 | });
@@ -5773,239 +5717,243 @@ async function generateText({
5773 5717 | messages
5774 5718 | });
5775 5719 | const tracer = getTracer(telemetry);
5776 | -
5777 | -
5778 | -
5779 | -
5780 | -
5781 | -
5782 | -
5783 | -
5784 | -
5785 | -
5786 | -
5787 | -
5788 | -
5789 | -
5790 | -
5791 | -
5792 | -
5793 | - }
5794 | - }),
5795 | - tracer,
5796 | - fn: async (span) => {
5797 | - var _a17, _b, _c, _d, _e;
5798 | - const callSettings2 = prepareCallSettings(settings);
5799 | - let currentModelResponse;
5800 | - let currentToolCalls = [];
5801 | - let currentToolResults = [];
5802 | - const responseMessages = [];
5803 | - const steps = [];
5804 | - do {
5805 | - const stepInputMessages = [
5806 | - ...initialPrompt.messages,
5807 | - ...responseMessages
5808 | - ];
5809 | - const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
5810 | - model,
5811 | - steps,
5812 | - stepNumber: steps.length
5813 | - }));
5814 | - const promptMessages = await convertToLanguageModelPrompt({
5815 | - prompt: {
5816 | - system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
5817 | - messages: stepInputMessages
5818 | - },
5819 | - supportedUrls: await model.supportedUrls
5820 | - });
5821 | - const stepModel = resolveLanguageModel(
5822 | - (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
5823 | - );
5824 | - const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
5825 | - tools,
5826 | - toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
5827 | - activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
5828 | - });
5829 | - currentModelResponse = await retry(
5830 | - () => {
5831 | - var _a18;
5832 | - return recordSpan({
5833 | - name: "ai.generateText.doGenerate",
5834 | - attributes: selectTelemetryAttributes({
5835 | - telemetry,
5836 | - attributes: {
5837 | - ...assembleOperationName({
5838 | - operationId: "ai.generateText.doGenerate",
5839 | - telemetry
5840 | - }),
5841 | - ...baseTelemetryAttributes,
5842 | - // model:
5843 | - "ai.model.provider": stepModel.provider,
5844 | - "ai.model.id": stepModel.modelId,
5845 | - // prompt:
5846 | - "ai.prompt.messages": {
5847 | - input: () => stringifyForTelemetry(promptMessages)
5848 | - },
5849 | - "ai.prompt.tools": {
5850 | - // convert the language model level tools:
5851 | - input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
5852 | - },
5853 | - "ai.prompt.toolChoice": {
5854 | - input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
5855 | - },
5856 | - // standardized gen-ai llm span attributes:
5857 | - "gen_ai.system": stepModel.provider,
5858 | - "gen_ai.request.model": stepModel.modelId,
5859 | - "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
5860 | - "gen_ai.request.max_tokens": settings.maxOutputTokens,
5861 | - "gen_ai.request.presence_penalty": settings.presencePenalty,
5862 | - "gen_ai.request.stop_sequences": settings.stopSequences,
5863 | - "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
5864 | - "gen_ai.request.top_k": settings.topK,
5865 | - "gen_ai.request.top_p": settings.topP
5866 | - }
5867 | - }),
5868 | - tracer,
5869 | - fn: async (span2) => {
5870 | - var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
5871 | - const result = await stepModel.doGenerate({
5872 | - ...callSettings2,
5873 | - tools: stepTools,
5874 | - toolChoice: stepToolChoice,
5875 | - responseFormat: output == null ? void 0 : output.responseFormat,
5876 | - prompt: promptMessages,
5877 | - providerOptions,
5878 | - abortSignal,
5879 | - headers
5880 | - });
5881 | - const responseData = {
5882 | - id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
5883 | - timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
5884 | - modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
5885 | - headers: (_g = result.response) == null ? void 0 : _g.headers,
5886 | - body: (_h = result.response) == null ? void 0 : _h.body
5887 | - };
5888 | - span2.setAttributes(
5889 | - selectTelemetryAttributes({
5890 | - telemetry,
5891 | - attributes: {
5892 | - "ai.response.finishReason": result.finishReason,
5893 | - "ai.response.text": {
5894 | - output: () => extractContentText(result.content)
5895 | - },
5896 | - "ai.response.toolCalls": {
5897 | - output: () => {
5898 | - const toolCalls = asToolCalls(result.content);
5899 | - return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
5900 | - }
5901 | - },
5902 | - "ai.response.id": responseData.id,
5903 | - "ai.response.model": responseData.modelId,
5904 | - "ai.response.timestamp": responseData.timestamp.toISOString(),
5905 | - // TODO rename telemetry attributes to inputTokens and outputTokens
5906 | - "ai.usage.promptTokens": result.usage.inputTokens,
5907 | - "ai.usage.completionTokens": result.usage.outputTokens,
5908 | - // standardized gen-ai llm span attributes:
5909 | - "gen_ai.response.finish_reasons": [result.finishReason],
5910 | - "gen_ai.response.id": responseData.id,
5911 | - "gen_ai.response.model": responseData.modelId,
5912 | - "gen_ai.usage.input_tokens": result.usage.inputTokens,
5913 | - "gen_ai.usage.output_tokens": result.usage.outputTokens
5914 | - }
5915 | - })
5916 | - );
5917 | - return { ...result, response: responseData };
5918 | - }
5919 | - });
5720 | + try {
5721 | + return await recordSpan({
5722 | + name: "ai.generateText",
5723 | + attributes: selectTelemetryAttributes({
5724 | + telemetry,
5725 | + attributes: {
5726 | + ...assembleOperationName({
5727 | + operationId: "ai.generateText",
5728 | + telemetry
5729 | + }),
5730 | + ...baseTelemetryAttributes,
5731 | + // model:
5732 | + "ai.model.provider": model.provider,
5733 | + "ai.model.id": model.modelId,
5734 | + // specific settings that only make sense on the outer level:
5735 | + "ai.prompt": {
5736 | + input: () => JSON.stringify({ system, prompt, messages })
5920 5737 | }
5921 | -
5922 | -
5923 | -
5924 | -
5925 | -
5926 | -
5927 | -
5928 | -
5929 | -
5738 | + }
5739 | + }),
5740 | + tracer,
5741 | + fn: async (span) => {
5742 | + var _a17, _b, _c, _d, _e;
5743 | + const callSettings2 = prepareCallSettings(settings);
5744 | + let currentModelResponse;
5745 | + let currentToolCalls = [];
5746 | + let currentToolResults = [];
5747 | + const responseMessages = [];
5748 | + const steps = [];
5749 | + do {
5750 | + const stepInputMessages = [
5751 | + ...initialPrompt.messages,
5752 | + ...responseMessages
5753 | + ];
5754 | + const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
5755 | + model,
5756 | + steps,
5757 | + stepNumber: steps.length
5758 | + }));
5759 | + const promptMessages = await convertToLanguageModelPrompt({
5760 | + prompt: {
5761 | + system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
5931 5762 | messages: stepInputMessages
5763 | + },
5764 | + supportedUrls: await model.supportedUrls
5765 | + });
5766 | + const stepModel = resolveLanguageModel(
5767 | + (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
5768 | + );
5769 | + const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
5770 | + tools,
5771 | + toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
5772 | + activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
5773 | + });
5774 | + currentModelResponse = await retry(
5775 | + () => {
5776 | + var _a18;
5777 | + return recordSpan({
5778 | + name: "ai.generateText.doGenerate",
5779 | + attributes: selectTelemetryAttributes({
5780 | + telemetry,
5781 | + attributes: {
5782 | + ...assembleOperationName({
5783 | + operationId: "ai.generateText.doGenerate",
5784 | + telemetry
5785 | + }),
5786 | + ...baseTelemetryAttributes,
5787 | + // model:
5788 | + "ai.model.provider": stepModel.provider,
5789 | + "ai.model.id": stepModel.modelId,
5790 | + // prompt:
5791 | + "ai.prompt.messages": {
5792 | + input: () => stringifyForTelemetry(promptMessages)
5793 | + },
5794 | + "ai.prompt.tools": {
5795 | + // convert the language model level tools:
5796 | + input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
5797 | + },
5798 | + "ai.prompt.toolChoice": {
5799 | + input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
5800 | + },
5801 | + // standardized gen-ai llm span attributes:
5802 | + "gen_ai.system": stepModel.provider,
5803 | + "gen_ai.request.model": stepModel.modelId,
5804 | + "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
5805 | + "gen_ai.request.max_tokens": settings.maxOutputTokens,
5806 | + "gen_ai.request.presence_penalty": settings.presencePenalty,
5807 | + "gen_ai.request.stop_sequences": settings.stopSequences,
5808 | + "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
5809 | + "gen_ai.request.top_k": settings.topK,
5810 | + "gen_ai.request.top_p": settings.topP
5811 | + }
5812 | + }),
5813 | + tracer,
5814 | + fn: async (span2) => {
5815 | + var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
5816 | + const result = await stepModel.doGenerate({
5817 | + ...callSettings2,
5818 | + tools: stepTools,
5819 | + toolChoice: stepToolChoice,
5820 | + responseFormat: output == null ? void 0 : output.responseFormat,
5821 | + prompt: promptMessages,
5822 | + providerOptions,
5823 | + abortSignal,
5824 | + headers
5825 | + });
5826 | + const responseData = {
5827 | + id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
5828 | + timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
5829 | + modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
5830 | + headers: (_g = result.response) == null ? void 0 : _g.headers,
5831 | + body: (_h = result.response) == null ? void 0 : _h.body
5832 | + };
5833 | + span2.setAttributes(
5834 | + selectTelemetryAttributes({
5835 | + telemetry,
5836 | + attributes: {
5837 | + "ai.response.finishReason": result.finishReason,
5838 | + "ai.response.text": {
5839 | + output: () => extractContentText(result.content)
5840 | + },
5841 | + "ai.response.toolCalls": {
5842 | + output: () => {
5843 | + const toolCalls = asToolCalls(result.content);
5844 | + return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
5845 | + }
5846 | + },
5847 | + "ai.response.id": responseData.id,
5848 | + "ai.response.model": responseData.modelId,
5849 | + "ai.response.timestamp": responseData.timestamp.toISOString(),
5850 | + // TODO rename telemetry attributes to inputTokens and outputTokens
5851 | + "ai.usage.promptTokens": result.usage.inputTokens,
5852 | + "ai.usage.completionTokens": result.usage.outputTokens,
5853 | + // standardized gen-ai llm span attributes:
5854 | + "gen_ai.response.finish_reasons": [result.finishReason],
5855 | + "gen_ai.response.id": responseData.id,
5856 | + "gen_ai.response.model": responseData.modelId,
5857 | + "gen_ai.usage.input_tokens": result.usage.inputTokens,
5858 | + "gen_ai.usage.output_tokens": result.usage.outputTokens
5859 | + }
5860 | + })
5861 | + );
5862 | + return { ...result, response: responseData };
5863 | + }
5864 | + });
5865 | + }
5866 | + );
5867 | + currentToolCalls = await Promise.all(
5868 | + currentModelResponse.content.filter(
5869 | + (part) => part.type === "tool-call"
5870 | + ).map(
5871 | + (toolCall) => parseToolCall({
5872 | + toolCall,
5873 | + tools,
5874 | + repairToolCall,
5875 | + system,
5876 | + messages: stepInputMessages
5877 | + })
5878 | + )
5879 | + );
5880 | + currentToolResults = tools == null ? [] : await executeTools({
5881 | + toolCalls: currentToolCalls,
5882 | + tools,
5883 | + tracer,
5884 | + telemetry,
5885 | + messages: stepInputMessages,
5886 | + abortSignal
5887 | + });
5888 | + const stepContent = asContent({
5889 | + content: currentModelResponse.content,
5890 | + toolCalls: currentToolCalls,
5891 | + toolResults: currentToolResults
5892 | + });
5893 | + responseMessages.push(
5894 | + ...toResponseMessages({
5895 | + content: stepContent,
5896 | + tools: tools != null ? tools : {}
5932 5897 | })
5933 | - )
5934 | -
5935 | - currentToolResults = tools == null ? [] : await executeTools({
5936 | - toolCalls: currentToolCalls,
5937 | - tools,
5938 | - tracer,
5939 | - telemetry,
5940 | - messages: stepInputMessages,
5941 | - abortSignal
5942 | - });
5943 | - const stepContent = asContent({
5944 | - content: currentModelResponse.content,
5945 | - toolCalls: currentToolCalls,
5946 | - toolResults: currentToolResults
5947 | - });
5948 | - responseMessages.push(
5949 | - ...toResponseMessages({
5898 | + );
5899 | + const currentStepResult = new DefaultStepResult({
5950 5900 | content: stepContent,
5951 | -
5901 | + finishReason: currentModelResponse.finishReason,
5902 | + usage: currentModelResponse.usage,
5903 | + warnings: currentModelResponse.warnings,
5904 | + providerMetadata: currentModelResponse.providerMetadata,
5905 | + request: (_e = currentModelResponse.request) != null ? _e : {},
5906 | + response: {
5907 | + ...currentModelResponse.response,
5908 | + // deep clone msgs to avoid mutating past messages in multi-step:
5909 | + messages: structuredClone(responseMessages)
5910 | + }
5911 | + });
5912 | + steps.push(currentStepResult);
5913 | + await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
5914 | + } while (
5915 | + // there are tool calls:
5916 | + currentToolCalls.length > 0 && // all current tool calls have results:
5917 | + currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
5918 | + !await isStopConditionMet({ stopConditions, steps })
5919 | + );
5920 | + span.setAttributes(
5921 | + selectTelemetryAttributes({
5922 | + telemetry,
5923 | + attributes: {
5924 | + "ai.response.finishReason": currentModelResponse.finishReason,
5925 | + "ai.response.text": {
5926 | + output: () => extractContentText(currentModelResponse.content)
5927 | + },
5928 | + "ai.response.toolCalls": {
5929 | + output: () => {
5930 | + const toolCalls = asToolCalls(currentModelResponse.content);
5931 | + return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
5932 | + }
5933 | + },
5934 | + // TODO rename telemetry attributes to inputTokens and outputTokens
5935 | + "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
5936 | + "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
5937 | + }
5952 5938 | })
5953 5939 | );
5954 | - const
5955 | -
5956 | -
5957 | -
5958 | -
5959 | -
5960 | -
5961 | -
5962 | -
5963 | -
5964 | -
5965 | - }
5940 | + const lastStep = steps[steps.length - 1];
5941 | + return new DefaultGenerateTextResult({
5942 | + steps,
5943 | + resolvedOutput: await (output == null ? void 0 : output.parseOutput(
5944 | + { text: lastStep.text },
5945 | + {
5946 | + response: lastStep.response,
5947 | + usage: lastStep.usage,
5948 | + finishReason: lastStep.finishReason
5949 | + }
5950 | + ))
5966 5951 | });
5967 | -
5968 | -
5969 | -
5970 | -
5971 | -
5972 | - currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
5973 | - !await isStopConditionMet({ stopConditions, steps })
5974 | - );
5975 | - span.setAttributes(
5976 | - selectTelemetryAttributes({
5977 | - telemetry,
5978 | - attributes: {
5979 | - "ai.response.finishReason": currentModelResponse.finishReason,
5980 | - "ai.response.text": {
5981 | - output: () => extractContentText(currentModelResponse.content)
5982 | - },
5983 | - "ai.response.toolCalls": {
5984 | - output: () => {
5985 | - const toolCalls = asToolCalls(currentModelResponse.content);
5986 | - return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
5987 | - }
5988 | - },
5989 | - // TODO rename telemetry attributes to inputTokens and outputTokens
5990 | - "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
5991 | - "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
5992 | - }
5993 | - })
5994 | - );
5995 | - const lastStep = steps[steps.length - 1];
5996 | - return new DefaultGenerateTextResult({
5997 | - steps,
5998 | - resolvedOutput: await (output == null ? void 0 : output.parseOutput(
5999 | - { text: lastStep.text },
6000 | - {
6001 | - response: lastStep.response,
6002 | - usage: lastStep.usage,
6003 | - finishReason: lastStep.finishReason
6004 | - }
6005 | - ))
6006 | - });
6007 | - }
6008 | - });
5952 | + }
5953 | + });
5954 | + } catch (error) {
5955 | + throw wrapGatewayError(error);
5956 | + }
6009 5957 | }
6010 5958 | async function executeTools({
6011 5959 | toolCalls,
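Note: the rewritten generateText body above wraps the whole call in try/catch with wrapGatewayError and drives multi-step tool calling: prepareStep may swap the model, tool choice, active tools, or system prompt per step, and onStepFinish receives each completed step result. A sketch under the assumption that these option names are exposed publicly exactly as destructured in the compiled code; model and weatherTool are placeholders:

    import { generateText } from 'ai';

    const { text, steps } = await generateText({
      model, // placeholder LanguageModel instance
      tools: { weather: weatherTool }, // from the tool sketch above
      prompt: 'What is the weather in Paris?',
      prepareStep: async ({ stepNumber }) => {
        // after the first step, disallow further tool calls:
        return stepNumber > 0 ? { toolChoice: 'none' } : undefined;
      },
      onStepFinish: (step) => {
        console.log(step.finishReason, step.usage);
      },
    });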
@@ -6174,7 +6122,7 @@ __export(output_exports, {
6174 6122 | object: () => object,
6175 6123 | text: () => text
6176 6124 | });
6177 | - var
6125 | + var import_provider_utils19 = require("@ai-sdk/provider-utils");
6178 6126 | var text = () => ({
6179 6127 | type: "text",
6180 6128 | responseFormat: { type: "text" },
@@ -6188,7 +6136,7 @@ var text = () => ({
6188 6136 | var object = ({
6189 6137 | schema: inputSchema
6190 6138 | }) => {
6191 | - const schema = (0,
6139 | + const schema = (0, import_provider_utils19.asSchema)(inputSchema);
6192 6140 | return {
6193 6141 | type: "object",
6194 6142 | responseFormat: {
@@ -6214,7 +6162,7 @@ var object = ({
6214 6162 | }
6215 6163 | },
6216 6164 | async parseOutput({ text: text2 }, context) {
6217 | - const parseResult = await (0,
6165 | + const parseResult = await (0, import_provider_utils19.safeParseJSON)({ text: text2 });
6218 6166 | if (!parseResult.success) {
6219 6167 | throw new NoObjectGeneratedError({
6220 6168 | message: "No object generated: could not parse the response.",
@@ -6225,7 +6173,7 @@ var object = ({
6225 6173 | finishReason: context.finishReason
6226 6174 | });
6227 6175 | }
6228 | - const validationResult = await (0,
6176 | + const validationResult = await (0, import_provider_utils19.safeValidateTypes)({
6229 6177 | value: parseResult.value,
6230 6178 | schema
6231 6179 | });
@@ -6245,8 +6193,8 @@ var object = ({
6245 6193 | };
6246 6194 |
6247 6195 | // core/generate-text/smooth-stream.ts
6248 | - var
6249 | - var
6196 | + var import_provider_utils20 = require("@ai-sdk/provider-utils");
6197 | + var import_provider24 = require("@ai-sdk/provider");
6250 6198 | var CHUNKING_REGEXPS = {
6251 6199 | word: /\S+\s+/m,
6252 6200 | line: /\n+/m
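Note: these output strategies back generateText's structured-output mode: Output.object wraps a schema into a responseFormat plus a parseOutput step that reuses safeParseJSON/safeValidateTypes. A sketch, assuming the option and result field are still exposed as experimental_output (as in other published builds); the model is a placeholder:

    import { generateText, Output } from 'ai';
    import { z } from 'zod';

    const result = await generateText({
      model, // placeholder LanguageModel instance
      prompt: 'Extract the city from: "I moved to Berlin in 2020."',
      experimental_output: Output.object({
        schema: z.object({ city: z.string() }),
      }),
    });
    // parsed and schema-validated object (NoObjectGeneratedError on failure):
    console.log(result.experimental_output);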
@@ -6254,7 +6202,7 @@ var CHUNKING_REGEXPS = {
6254 6202 | function smoothStream({
6255 6203 | delayInMs = 10,
6256 6204 | chunking = "word",
6257 | - _internal: { delay: delay2 =
6205 | + _internal: { delay: delay2 = import_provider_utils20.delay } = {}
6258 6206 | } = {}) {
6259 6207 | let detectChunk;
6260 6208 | if (typeof chunking === "function") {
@@ -6276,7 +6224,7 @@ function smoothStream({
6276 6224 | } else {
6277 6225 | const chunkingRegex = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
6278 6226 | if (chunkingRegex == null) {
6279 | - throw new
6227 | + throw new import_provider24.InvalidArgumentError({
6280 6228 | argument: "chunking",
6281 6229 | message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
6282 6230 | });
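Note: smoothStream now takes its default delay and InvalidArgumentError from the @ai-sdk packages; chunking still accepts "word", "line", a RegExp, or a function (anything else throws, as above). A usage sketch with a placeholder model:

    import { streamText, smoothStream } from 'ai';

    const result = streamText({
      model, // placeholder LanguageModel instance
      prompt: 'Tell me a story.',
      experimental_transform: smoothStream({ delayInMs: 25, chunking: 'line' }),
    });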
@@ -6314,10 +6262,10 @@ function smoothStream({
6314 6262 | }
6315 6263 |
6316 6264 | // core/generate-text/stream-text.ts
6317 | - var
6265 | + var import_provider_utils22 = require("@ai-sdk/provider-utils");
6318 6266 |
6319 6267 | // core/generate-text/run-tools-transformation.ts
6320 | - var
6268 | + var import_provider_utils21 = require("@ai-sdk/provider-utils");
6321 6269 | function runToolsTransformation({
6322 6270 | tools,
6323 6271 | generatorStream,
@@ -6403,7 +6351,7 @@ function runToolsTransformation({
6403 6351 | controller.enqueue(toolCall);
6404 6352 | const tool2 = tools[toolCall.toolName];
6405 6353 | if (tool2.execute != null) {
6406 | - const toolExecutionId = (0,
6354 | + const toolExecutionId = (0, import_provider_utils21.generateId)();
6407 6355 | outstandingToolResults.add(toolExecutionId);
6408 6356 | recordSpan({
6409 6357 | name: "ai.toolCall",
@@ -6512,7 +6460,7 @@ function runToolsTransformation({
6512 6460 | }
6513 6461 |
6514 6462 | // core/generate-text/stream-text.ts
6515 | - var originalGenerateId4 = (0,
6463 | + var originalGenerateId4 = (0, import_provider_utils22.createIdGenerator)({
6516 6464 | prefix: "aitxt",
6517 6465 | size: 24
6518 6466 | });
@@ -6538,7 +6486,9 @@ function streamText({
6538 6486 | experimental_repairToolCall: repairToolCall,
6539 6487 | experimental_transform: transform,
6540 6488 | onChunk,
6541 | - onError
6489 | + onError = ({ error }) => {
6490 | + console.error(error);
6491 | + },
6542 6492 | onFinish,
6543 6493 | onStepFinish,
6544 6494 | _internal: {
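Note: as with streamObject above, streamText now defaults onError to a console.error logger, and stream error parts are passed through wrapGatewayError before reaching the callback (next hunk). Overriding the default, with a placeholder model and a hypothetical error sink:

    import { streamText } from 'ai';

    const result = streamText({
      model, // placeholder LanguageModel instance
      prompt: 'Hello',
      onError: ({ error }) => {
        reportToMonitoring(error); // hypothetical error sink
      },
    });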
@@ -6677,7 +6627,7 @@ var DefaultStreamTextResult = class {
6677 6627 | await (onChunk == null ? void 0 : onChunk({ chunk: part }));
6678 6628 | }
6679 6629 | if (part.type === "error") {
6680 | - await
6630 | + await onError({ error: wrapGatewayError(part.error) });
6681 6631 | }
6682 6632 | if (part.type === "text") {
6683 6633 | const latestContent = recordedContent[recordedContent.length - 1];
@@ -7773,7 +7723,7 @@ var doWrap = ({
7773 7723 | };
7774 7724 |
7775 7725 | // core/registry/custom-provider.ts
7776 | - var
7726 | + var import_provider25 = require("@ai-sdk/provider");
7777 7727 | function customProvider({
7778 7728 | languageModels,
7779 7729 | textEmbeddingModels,
@@ -7788,7 +7738,7 @@ function customProvider({
7788 7738 | if (fallbackProvider) {
7789 7739 | return fallbackProvider.languageModel(modelId);
7790 7740 | }
7791 | - throw new
7741 | + throw new import_provider25.NoSuchModelError({ modelId, modelType: "languageModel" });
7792 7742 | },
7793 7743 | textEmbeddingModel(modelId) {
7794 7744 | if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -7797,7 +7747,7 @@ function customProvider({
7797 7747 | if (fallbackProvider) {
7798 7748 | return fallbackProvider.textEmbeddingModel(modelId);
7799 7749 | }
7800 | - throw new
7750 | + throw new import_provider25.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
7801 7751 | },
7802 7752 | imageModel(modelId) {
7803 7753 | if (imageModels != null && modelId in imageModels) {
@@ -7806,19 +7756,19 @@ function customProvider({
7806 7756 | if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
7807 7757 | return fallbackProvider.imageModel(modelId);
7808 7758 | }
7809 | - throw new
7759 | + throw new import_provider25.NoSuchModelError({ modelId, modelType: "imageModel" });
7810 7760 | }
7811 7761 | };
7812 7762 | }
7813 7763 | var experimental_customProvider = customProvider;
7814 7764 |
7815 7765 | // core/registry/no-such-provider-error.ts
7816 | - var
7766 | + var import_provider26 = require("@ai-sdk/provider");
7817 7767 | var name16 = "AI_NoSuchProviderError";
7818 7768 | var marker16 = `vercel.ai.error.${name16}`;
7819 7769 | var symbol16 = Symbol.for(marker16);
7820 7770 | var _a16;
7821 | - var NoSuchProviderError = class extends
7771 | + var NoSuchProviderError = class extends import_provider26.NoSuchModelError {
7822 7772 | constructor({
7823 7773 | modelId,
7824 7774 | modelType,
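Note: customProvider resolves model ids from its maps, falls back to fallbackProvider, and otherwise throws the NoSuchModelError now imported from @ai-sdk/provider. A sketch; the @ai-sdk/openai import and model ids are illustrative assumptions, not part of this diff:

    import { customProvider } from 'ai';
    import { openai } from '@ai-sdk/openai'; // assumed provider package

    export const myProvider = customProvider({
      languageModels: {
        fast: openai('gpt-4o-mini'), // alias -> concrete model (assumed id)
      },
      fallbackProvider: openai, // unknown ids resolve here instead of throwing
    });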
@@ -7832,13 +7782,13 @@ var NoSuchProviderError = class extends import_provider25.NoSuchModelError {
7832 7782 | this.availableProviders = availableProviders;
7833 7783 | }
7834 7784 | static isInstance(error) {
7835 | - return
7785 | + return import_provider26.AISDKError.hasMarker(error, marker16);
7836 7786 | }
7837 7787 | };
7838 7788 | _a16 = symbol16;
7839 7789 |
7840 7790 | // core/registry/provider-registry.ts
7841 | - var
7791 | + var import_provider27 = require("@ai-sdk/provider");
7842 7792 | function createProviderRegistry(providers, {
7843 7793 | separator = ":"
7844 7794 | } = {}) {
@@ -7877,7 +7827,7 @@ var DefaultProviderRegistry = class {
7877 7827 | splitId(id, modelType) {
7878 7828 | const index = id.indexOf(this.separator);
7879 7829 | if (index === -1) {
7880 | - throw new
7830 | + throw new import_provider27.NoSuchModelError({
7881 7831 | modelId: id,
7882 7832 | modelType,
7883 7833 | message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
@@ -7890,7 +7840,7 @@ var DefaultProviderRegistry = class {
7890 7840 | const [providerId, modelId] = this.splitId(id, "languageModel");
7891 7841 | const model = (_b = (_a17 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a17, modelId);
7892 7842 | if (model == null) {
7893 | - throw new
7843 | + throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "languageModel" });
7894 7844 | }
7895 7845 | return model;
7896 7846 | }
@@ -7900,7 +7850,7 @@ var DefaultProviderRegistry = class {
7900 7850 | const provider = this.getProvider(providerId);
7901 7851 | const model = (_a17 = provider.textEmbeddingModel) == null ? void 0 : _a17.call(provider, modelId);
7902 7852 | if (model == null) {
7903 | - throw new
7853 | + throw new import_provider27.NoSuchModelError({
7904 7854 | modelId: id,
7905 7855 | modelType: "textEmbeddingModel"
7906 7856 | });
@@ -7913,14 +7863,14 @@ var DefaultProviderRegistry = class {
7913 7863 | const provider = this.getProvider(providerId);
7914 7864 | const model = (_a17 = provider.imageModel) == null ? void 0 : _a17.call(provider, modelId);
7915 7865 | if (model == null) {
7916 | - throw new
7866 | + throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "imageModel" });
7917 7867 | }
7918 7868 | return model;
7919 7869 | }
7920 7870 | };
7921 7871 |
7922 7872 | // core/tool/mcp/mcp-client.ts
7923 | - var
7873 | + var import_provider_utils24 = require("@ai-sdk/provider-utils");
7924 7874 |
7925 7875 | // core/tool/tool.ts
7926 7876 | function tool(tool2) {
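Note: the registry hunks above show createProviderRegistry joining provider id and model id with a configurable separator (":" by default) and throwing NoSuchModelError for malformed or unknown ids. A sketch with assumed provider packages:

    import { createProviderRegistry } from 'ai';
    import { openai } from '@ai-sdk/openai';       // assumed
    import { anthropic } from '@ai-sdk/anthropic'; // assumed

    const registry = createProviderRegistry({ openai, anthropic });
    // ids take the form "providerId:modelId"; bad ids throw NoSuchModelError:
    const model = registry.languageModel('openai:gpt-4o');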
@@ -7928,7 +7878,7 @@ function tool(tool2) {
7928 7878 | }
7929 7879 |
7930 7880 | // core/tool/mcp/mcp-sse-transport.ts
7931 | - var
7881 | + var import_provider_utils23 = require("@ai-sdk/provider-utils");
7932 7882 |
7933 7883 | // core/tool/mcp/json-rpc-message.ts
7934 7884 | var import_zod10 = require("zod");
@@ -8099,7 +8049,7 @@ var SseMCPTransport = class {
8099 8049 | (_b = this.onerror) == null ? void 0 : _b.call(this, error);
8100 8050 | return reject(error);
8101 8051 | }
8102 | - const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0,
8052 | + const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils23.createEventSourceParserStream)());
8103 8053 | const reader = stream.getReader();
8104 8054 | const processEvents = async () => {
8105 8055 | var _a18, _b2, _c2;
@@ -8423,7 +8373,7 @@ var MCPClient = class {
8423 8373 | if (schemas !== "automatic" && !(name17 in schemas)) {
8424 8374 | continue;
8425 8375 | }
8426 | - const parameters = schemas === "automatic" ? (0,
8376 | + const parameters = schemas === "automatic" ? (0, import_provider_utils24.jsonSchema)({
8427 8377 | ...inputSchema,
8428 8378 | properties: (_a17 = inputSchema.properties) != null ? _a17 : {},
8429 8379 | additionalProperties: false
@@ -8487,8 +8437,8 @@ var MCPClient = class {
8487 8437 | };
8488 8438 |
8489 8439 | // src/error/no-transcript-generated-error.ts
8490 | - var
8491 | - var NoTranscriptGeneratedError = class extends
8440 | + var import_provider28 = require("@ai-sdk/provider");
8441 | + var NoTranscriptGeneratedError = class extends import_provider28.AISDKError {
8492 8442 | constructor(options) {
8493 8443 | super({
8494 8444 | name: "AI_NoTranscriptGeneratedError",
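Note: the MCP client hunk above shows schemas: "automatic" converting each server tool's inputSchema via jsonSchema with additionalProperties: false. A connection sketch, assuming the client is still exposed as experimental_createMCPClient with an SSE transport config; the URL is a placeholder:

    import { experimental_createMCPClient } from 'ai';

    const mcpClient = await experimental_createMCPClient({
      transport: { type: 'sse', url: 'https://example.com/mcp' }, // placeholder
    });
    const tools = await mcpClient.tools({ schemas: 'automatic' });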
@@ -8553,10 +8503,11 @@ var DefaultTranscriptionResult = class {
8553 8503 | 0 && (module.exports = {
8554 8504 | AISDKError,
8555 8505 | APICallError,
8556 | -
8506 | + AbstractChat,
8557 8507 | DefaultChatTransport,
8558 8508 | DownloadError,
8559 8509 | EmptyResponseBodyError,
8510 | + GLOBAL_DEFAULT_PROVIDER,
8560 8511 | InvalidArgumentError,
8561 8512 | InvalidDataContentError,
8562 8513 | InvalidMessageRoleError,
@@ -8602,7 +8553,6 @@ var DefaultTranscriptionResult = class {
8602 8553 | createUIMessageStream,
8603 8554 | createUIMessageStreamResponse,
8604 8555 | customProvider,
8605 | - defaultChatStoreOptions,
8606 8556 | defaultSettingsMiddleware,
8607 8557 | embed,
8608 8558 | embedMany,