ai 5.0.0-alpha.3 → 5.0.0-alpha.5

This diff shows the changes between two publicly released versions of this package, as published to their public registry. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -962,12 +962,12 @@ function getToolInvocations(message) {
  // src/ui/process-ui-message-stream.ts
  function createStreamingUIMessageState({
  lastMessage,
- newMessageId = "no-id"
+ newMessageId = ""
  } = {}) {
  var _a17;
  const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
  const step = isContinuation ? 1 + ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) : 0;
- const message = isContinuation ? structuredClone(lastMessage) : {
+ const message = isContinuation ? lastMessage : {
  id: newMessageId,
  metadata: {},
  role: "assistant",
@@ -1347,7 +1347,7 @@ async function consumeUIMessageStream({
  messageMetadataSchema
  }) {
  const state = createStreamingUIMessageState({
- lastMessage,
+ lastMessage: lastMessage ? structuredClone(lastMessage) : void 0,
  newMessageId: generateId3()
  });
  const runUpdateMessageJob = async (job) => {
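The defensive `structuredClone` moves out of `createStreamingUIMessageState` and into its callers: the state helper now continues the last assistant message in place, so any caller that needs isolation must copy first, as `consumeUIMessageStream` does above. A minimal sketch of the caller-side convention (with `generateId` standing in for whatever id generator is in scope):

```js
// Clone before handing the last message to the streaming state; the state
// object mutates its message in place while streaming.
const state = createStreamingUIMessageState({
  lastMessage: lastMessage ? structuredClone(lastMessage) : undefined,
  newMessageId: generateId(),
});
```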
@@ -1550,54 +1550,22 @@ import {
  generateId as generateIdFunc
  } from "@ai-sdk/provider-utils";

- // src/util/serial-job-executor.ts
- var SerialJobExecutor = class {
- constructor() {
- this.queue = [];
- this.isProcessing = false;
- }
- async processQueue() {
- if (this.isProcessing) {
- return;
- }
- this.isProcessing = true;
- while (this.queue.length > 0) {
- await this.queue[0]();
- this.queue.shift();
- }
- this.isProcessing = false;
- }
- async run(job) {
- return new Promise((resolve, reject) => {
- this.queue.push(async () => {
- try {
- await job();
- resolve();
- } catch (error) {
- reject(error);
- }
- });
- void this.processQueue();
- });
- }
- };
-
  // src/ui/should-resubmit-messages.ts
  function shouldResubmitMessages({
  originalMaxToolInvocationStep,
  originalMessageCount,
- maxSteps: maxSteps2,
+ maxSteps,
  messages
  }) {
  var _a17;
  const lastMessage = messages[messages.length - 1];
  return (
  // check if the feature is enabled:
- maxSteps2 > 1 && // ensure there is a last message:
+ maxSteps > 1 && // ensure there is a last message:
  lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
  (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
  isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
- ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps2
+ ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps
  );
  }
  function isAssistantMessageWithCompletedToolCalls(message) {
@@ -1637,23 +1605,19 @@ var ChatStore = class {
  chats = {},
  generateId: generateId3,
  transport,
- maxSteps: maxSteps2 = 1,
+ maxSteps = 1,
  messageMetadataSchema,
- dataPartSchemas
+ dataPartSchemas,
+ createChat
  }) {
+ this.createChat = createChat;
  this.chats = new Map(
- Object.entries(chats).map(([id, state]) => [
+ Object.entries(chats).map(([id, chat]) => [
  id,
- {
- messages: [...state.messages],
- status: "ready",
- activeResponse: void 0,
- error: void 0,
- jobExecutor: new SerialJobExecutor()
- }
+ this.createChat({ messages: chat.messages })
  ])
  );
- this.maxSteps = maxSteps2;
+ this.maxSteps = maxSteps;
  this.transport = transport;
  this.subscribers = /* @__PURE__ */ new Set();
  this.generateId = generateId3 != null ? generateId3 : generateIdFunc;
@@ -1664,11 +1628,7 @@ var ChatStore = class {
  return this.chats.has(id);
  }
  addChat(id, messages) {
- this.chats.set(id, {
- messages,
- status: "ready",
- jobExecutor: new SerialJobExecutor()
- });
+ this.chats.set(id, this.createChat({ messages }));
  }
  getChats() {
  return Array.from(this.chats.entries());
@@ -1677,28 +1637,28 @@ var ChatStore = class {
  return this.chats.size;
  }
  getStatus(id) {
- return this.getChat(id).status;
+ return this.getChatState(id).status;
  }
  setStatus({
  id,
  status,
  error
  }) {
- const chat = this.getChat(id);
- if (chat.status === status)
+ const state = this.getChatState(id);
+ if (state.status === status)
  return;
- chat.status = status;
- chat.error = error;
+ state.setStatus(status);
+ state.setError(error);
  this.emit({ type: "chat-status-changed", chatId: id, error });
  }
  getError(id) {
- return this.getChat(id).error;
+ return this.getChatState(id).error;
  }
  getMessages(id) {
- return this.getChat(id).messages;
+ return this.getChatState(id).messages;
  }
  getLastMessage(id) {
- const chat = this.getChat(id);
+ const chat = this.getChatState(id);
  return chat.messages[chat.messages.length - 1];
  }
  subscribe(subscriber) {
@@ -1709,11 +1669,11 @@ var ChatStore = class {
  id,
  messages
  }) {
- this.getChat(id).messages = [...messages];
+ this.getChatState(id).setMessages(messages);
  this.emit({ type: "chat-messages-changed", chatId: id });
  }
  removeAssistantResponse(id) {
- const chat = this.getChat(id);
+ const chat = this.getChatState(id);
  const lastMessage = chat.messages[chat.messages.length - 1];
  if (lastMessage == null) {
  throw new Error("Cannot remove assistant response from empty chat");
@@ -1721,7 +1681,8 @@ var ChatStore = class {
  if (lastMessage.role !== "assistant") {
  throw new Error("Last message is not an assistant message");
  }
- this.setMessages({ id, messages: chat.messages.slice(0, -1) });
+ chat.popMessage();
+ this.emit({ type: "chat-messages-changed", chatId: id });
  }
  async submitMessage({
  chatId,
@@ -1733,14 +1694,14 @@ var ChatStore = class {
  onFinish
  }) {
  var _a17;
- const chat = this.getChat(chatId);
- const currentMessages = chat.messages;
+ const state = this.getChatState(chatId);
+ state.pushMessage({ ...message, id: (_a17 = message.id) != null ? _a17 : this.generateId() });
+ this.emit({
+ type: "chat-messages-changed",
+ chatId
+ });
  await this.triggerRequest({
  chatId,
- messages: currentMessages.concat({
- ...message,
- id: (_a17 = message.id) != null ? _a17 : this.generateId()
- }),
  headers,
  body,
  requestType: "generate",
@@ -1757,15 +1718,20 @@ var ChatStore = class {
  onToolCall,
  onFinish
  }) {
- const messages = this.getChat(chatId).messages;
- const messagesToSubmit = messages[messages.length - 1].role === "assistant" ? messages.slice(0, -1) : messages;
- if (messagesToSubmit.length === 0) {
+ const chat = this.getChatState(chatId);
+ if (chat.messages[chat.messages.length - 1].role === "assistant") {
+ chat.popMessage();
+ this.emit({
+ type: "chat-messages-changed",
+ chatId
+ });
+ }
+ if (chat.messages.length === 0) {
  return;
  }
  return this.triggerRequest({
  chatId,
  requestType: "generate",
- messages: messagesToSubmit,
  headers,
  body,
  onError,
@@ -1781,11 +1747,8 @@ var ChatStore = class {
  onToolCall,
  onFinish
  }) {
- const chat = this.getChat(chatId);
- const currentMessages = chat.messages;
  return this.triggerRequest({
  chatId,
- messages: currentMessages,
  requestType: "resume",
  headers,
  body,
@@ -1799,22 +1762,23 @@ var ChatStore = class {
  toolCallId,
  result
  }) {
- const chat = this.getChat(chatId);
+ const chat = this.getChatState(chatId);
  chat.jobExecutor.run(async () => {
- const currentMessages = chat.messages;
  updateToolCallResult({
- messages: currentMessages,
+ messages: chat.messages,
  toolCallId,
  toolResult: result
  });
- this.setMessages({ id: chatId, messages: currentMessages });
+ this.setMessages({
+ id: chatId,
+ messages: chat.messages
+ });
  if (chat.status === "submitted" || chat.status === "streaming") {
  return;
  }
- const lastMessage = currentMessages[currentMessages.length - 1];
+ const lastMessage = chat.messages[chat.messages.length - 1];
  if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
- await this.triggerRequest({
- messages: currentMessages,
+ this.triggerRequest({
  requestType: "generate",
  chatId
  });
@@ -1823,7 +1787,7 @@ var ChatStore = class {
  }
  async stopStream({ chatId }) {
  var _a17;
- const chat = this.getChat(chatId);
+ const chat = this.getChatState(chatId);
  if (chat.status !== "streaming" && chat.status !== "submitted")
  return;
  if ((_a17 = chat.activeResponse) == null ? void 0 : _a17.abortController) {
@@ -1836,15 +1800,14 @@ var ChatStore = class {
  subscriber.onChatChanged(event);
  }
  }
- getChat(id) {
+ getChatState(id) {
  if (!this.hasChat(id)) {
- throw new Error(`chat '${id}' not found`);
+ this.addChat(id, []);
  }
  return this.chats.get(id);
  }
  async triggerRequest({
  chatId,
- messages: chatMessages,
  requestType,
  headers,
  body,
@@ -1852,26 +1815,25 @@ var ChatStore = class {
  onToolCall,
  onFinish
  }) {
- const self = this;
- const chat = this.getChat(chatId);
- this.setMessages({ id: chatId, messages: chatMessages });
+ const chat = this.getChatState(chatId);
  this.setStatus({ id: chatId, status: "submitted", error: void 0 });
- const messageCount = chatMessages.length;
+ const messageCount = chat.messages.length;
  const maxStep = extractMaxToolInvocationStep(
- getToolInvocations(chatMessages[chatMessages.length - 1])
+ getToolInvocations(chat.messages[chat.messages.length - 1])
  );
  try {
+ const lastMessage = chat.messages[chat.messages.length - 1];
  const activeResponse = {
  state: createStreamingUIMessageState({
- lastMessage: chatMessages[chatMessages.length - 1],
- newMessageId: self.generateId()
+ lastMessage: chat.snapshot ? chat.snapshot(lastMessage) : lastMessage,
+ newMessageId: this.generateId()
  }),
  abortController: new AbortController()
  };
- chat.activeResponse = activeResponse;
- const stream = await self.transport.submitMessages({
+ chat.setActiveResponse(activeResponse);
+ const stream = await this.transport.submitMessages({
  chatId,
- messages: chatMessages,
+ messages: chat.messages,
  body,
  headers,
  abortController: activeResponse.abortController,
@@ -1883,15 +1845,19 @@ var ChatStore = class {
  () => job({
  state: activeResponse.state,
  write: () => {
- self.setStatus({ id: chatId, status: "streaming" });
- const replaceLastMessage = activeResponse.state.message.id === chatMessages[chatMessages.length - 1].id;
- const newMessages = [
- ...replaceLastMessage ? chatMessages.slice(0, chatMessages.length - 1) : chatMessages,
- activeResponse.state.message
- ];
- self.setMessages({
- id: chatId,
- messages: newMessages
+ this.setStatus({ id: chatId, status: "streaming" });
+ const replaceLastMessage = activeResponse.state.message.id === chat.messages[chat.messages.length - 1].id;
+ if (replaceLastMessage) {
+ chat.replaceMessage(
+ chat.messages.length - 1,
+ activeResponse.state.message
+ );
+ } else {
+ chat.pushMessage(activeResponse.state.message);
+ }
+ this.emit({
+ type: "chat-messages-changed",
+ chatId
  });
  }
  })
@@ -1901,8 +1867,8 @@ var ChatStore = class {
  stream: processUIMessageStream({
  stream,
  onToolCall,
- messageMetadataSchema: self.messageMetadataSchema,
- dataPartSchemas: self.dataPartSchemas,
+ messageMetadataSchema: this.messageMetadataSchema,
+ dataPartSchemas: this.dataPartSchemas,
  runUpdateMessageJob
  }),
  onError: (error) => {
@@ -1921,24 +1887,22 @@ var ChatStore = class {
  }
  this.setStatus({ id: chatId, status: "error", error: err });
  } finally {
- chat.activeResponse = void 0;
+ chat.setActiveResponse(void 0);
  }
- const currentMessages = self.getMessages(chatId);
  if (shouldResubmitMessages({
  originalMaxToolInvocationStep: maxStep,
  originalMessageCount: messageCount,
- maxSteps: self.maxSteps,
- messages: currentMessages
+ maxSteps: this.maxSteps,
+ messages: chat.messages
  })) {
- await self.triggerRequest({
+ await this.triggerRequest({
  chatId,
  requestType,
  onError,
  onToolCall,
  onFinish,
  headers,
- body,
- messages: currentMessages
+ body
  });
  }
  }
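Taken together, the `ChatStore` hunks above replace the inline `{ messages, status, jobExecutor }` records with state objects produced by a caller-supplied `createChat` factory, accessed through `getChatState`. The call sites imply roughly the shape below; this is a sketch inferred from this diff, not a published interface, and the optional `snapshot` hook (consulted when building `activeResponse`) is presumably how reactive framework bindings take copies:

```js
// Hypothetical chat-state factory, with members inferred from the ChatStore
// call sites in this diff.
function createChat({ messages }) {
  return {
    messages: [...messages],
    status: "ready",
    error: undefined,
    activeResponse: undefined,
    jobExecutor: new SerialJobExecutor(),
    setStatus(status) { this.status = status; },
    setError(error) { this.error = error; },
    setMessages(next) { this.messages = [...next]; },
    pushMessage(message) { this.messages.push(message); },
    popMessage() { this.messages.pop(); },
    replaceMessage(index, message) { this.messages[index] = message; },
    setActiveResponse(response) { this.activeResponse = response; },
    snapshot: undefined, // optional: (value) => deep copy for reactive stores
  };
}
```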
@@ -2230,24 +2194,24 @@ function convertToModelMessages(messages, options) {
  }
  var convertToCoreMessages = convertToModelMessages;

- // src/ui/default-chat-store.ts
+ // src/ui/default-chat-store-options.ts
  import {
  generateId as generateIdFunc2
  } from "@ai-sdk/provider-utils";
- function defaultChatStore({
- api,
+ function defaultChatStoreOptions({
+ api = "/api/chat",
  fetch: fetch2,
  credentials,
  headers,
  body,
  prepareRequestBody,
  generateId: generateId3 = generateIdFunc2,
- dataPartSchemas,
  messageMetadataSchema,
- maxSteps: maxSteps2 = 1,
+ maxSteps = 1,
+ dataPartSchemas,
  chats
  }) {
- return new ChatStore({
+ return () => ({
  transport: new DefaultChatTransport({
  api,
  fetch: fetch2,
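Where `defaultChatStore` built and returned a `ChatStore` itself, `defaultChatStoreOptions` returns a function that produces the store's options (note the `return new ChatStore({` → `return () => ({` change), and `api` now defaults to `"/api/chat"`. A hedged usage sketch; the code that actually instantiates the store, and supplies the new `createChat` factory, lives outside this file:

```js
// alpha.3: const store = defaultChatStore({ api: "/api/chat", maxSteps: 3 });
// alpha.5: you get back an options factory instead.
const makeOptions = defaultChatStoreOptions({ maxSteps: 3 }); // api defaults to "/api/chat"
const store = new ChatStore({ ...makeOptions(), createChat }); // createChat as sketched earlier
```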
@@ -2259,16 +2223,62 @@ function defaultChatStore({
  generateId: generateId3,
  messageMetadataSchema,
  dataPartSchemas,
- maxSteps: maxSteps2,
+ maxSteps,
  chats
  });
  }

+ // src/ui-message-stream/handle-ui-message-stream-finish.ts
+ function handleUIMessageStreamFinish({
+ newMessageId,
+ originalMessages = [],
+ onFinish,
+ stream
+ }) {
+ if (onFinish == null) {
+ return stream;
+ }
+ const lastMessage = originalMessages[originalMessages.length - 1];
+ const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
+ const messageId = isContinuation ? lastMessage.id : newMessageId;
+ const state = createStreamingUIMessageState({
+ lastMessage: structuredClone(lastMessage),
+ newMessageId: messageId
+ });
+ const runUpdateMessageJob = async (job) => {
+ await job({ state, write: () => {
+ } });
+ };
+ return processUIMessageStream({
+ stream,
+ runUpdateMessageJob
+ }).pipeThrough(
+ new TransformStream({
+ transform(chunk, controller) {
+ controller.enqueue(chunk);
+ },
+ flush() {
+ const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
+ onFinish({
+ isContinuation: isContinuation2,
+ responseMessage: state.message,
+ messages: [
+ ...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
+ state.message
+ ]
+ });
+ }
+ })
+ );
+ }
+
  // src/ui-message-stream/create-ui-message-stream.ts
  function createUIMessageStream({
  execute,
- onError = () => "An error occurred."
+ onError = () => "An error occurred.",
  // mask error messages for safety by default
+ originalMessages,
+ onFinish
  }) {
  let controller;
  const ongoingStreamPromises = [];
@@ -2285,25 +2295,27 @@ function createUIMessageStream({
  }
  try {
  const result = execute({
- write(part) {
- safeEnqueue(part);
- },
- merge(streamArg) {
- ongoingStreamPromises.push(
- (async () => {
- const reader = streamArg.getReader();
- while (true) {
- const { done, value } = await reader.read();
- if (done)
- break;
- safeEnqueue(value);
- }
- })().catch((error) => {
- safeEnqueue({ type: "error", errorText: onError(error) });
- })
- );
- },
- onError
+ writer: {
+ write(part) {
+ safeEnqueue(part);
+ },
+ merge(streamArg) {
+ ongoingStreamPromises.push(
+ (async () => {
+ const reader = streamArg.getReader();
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done)
+ break;
+ safeEnqueue(value);
+ }
+ })().catch((error) => {
+ safeEnqueue({ type: "error", errorText: onError(error) });
+ })
+ );
+ },
+ onError
+ }
  });
  if (result) {
  ongoingStreamPromises.push(
@@ -2327,7 +2339,12 @@ function createUIMessageStream({
  } catch (error) {
  }
  });
- return stream;
+ return handleUIMessageStreamFinish({
+ stream,
+ newMessageId: "",
+ originalMessages,
+ onFinish
+ });
  }

  // src/ui-message-stream/ui-message-stream-headers.ts
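`execute` now receives its helpers grouped under a `writer` key instead of spread at the top level, and `createUIMessageStream` gains `originalMessages`/`onFinish`, which it forwards to the new `handleUIMessageStreamFinish` wrapper so the fully assembled response message can be observed when the stream ends. A hedged before/after sketch (the part shape and the `anotherUIMessageStream`/`previousMessages`/`saveChat` names are illustrative):

```js
// alpha.3: execute: ({ write, merge }) => { write(part); }
// alpha.5: helpers live on `writer`, and onFinish sees the final message.
const stream = createUIMessageStream({
  execute: ({ writer }) => {
    writer.write({ type: "text", text: "hello" }); // illustrative part
    writer.merge(anotherUIMessageStream);          // illustrative stream
  },
  originalMessages: previousMessages,
  onFinish: ({ messages, responseMessage, isContinuation }) => {
    saveChat(messages); // hypothetical persistence helper
  },
});
```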
@@ -2392,6 +2409,32 @@ function pipeUIMessageStreamToResponse({
  });
  }

+ // src/util/cosine-similarity.ts
+ function cosineSimilarity(vector1, vector2) {
+ if (vector1.length !== vector2.length) {
+ throw new InvalidArgumentError({
+ parameter: "vector1,vector2",
+ value: { vector1Length: vector1.length, vector2Length: vector2.length },
+ message: `Vectors must have the same length`
+ });
+ }
+ const n = vector1.length;
+ if (n === 0) {
+ return 0;
+ }
+ let magnitudeSquared1 = 0;
+ let magnitudeSquared2 = 0;
+ let dotProduct = 0;
+ for (let i = 0; i < n; i++) {
+ const value1 = vector1[i];
+ const value2 = vector2[i];
+ magnitudeSquared1 += value1 * value1;
+ magnitudeSquared2 += value2 * value2;
+ dotProduct += value1 * value2;
+ }
+ return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
+ }
+
  // src/util/data-url.ts
  function getTextFromDataUrl(dataUrl) {
  const [header, base64Content] = dataUrl.split(",");
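`cosineSimilarity` is unchanged; it only moves up in the bundle (the next hunk deletes it from its old position). For reference, it computes dotProduct / (‖v1‖ · ‖v2‖) and returns 0 for empty or zero-magnitude vectors rather than dividing by zero:

```js
cosineSimilarity([1, 2, 3], [2, 4, 6]); // 1 (parallel vectors)
cosineSimilarity([1, 0], [0, 1]);       // 0 (orthogonal vectors)
cosineSimilarity([], []);               // 0 (guarded empty case)
```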
@@ -2441,31 +2484,37 @@ function isDeepEqualData(obj1, obj2) {
  return true;
  }

- // src/util/cosine-similarity.ts
- function cosineSimilarity(vector1, vector2) {
- if (vector1.length !== vector2.length) {
- throw new InvalidArgumentError({
- parameter: "vector1,vector2",
- value: { vector1Length: vector1.length, vector2Length: vector2.length },
- message: `Vectors must have the same length`
- });
+ // src/util/serial-job-executor.ts
+ var SerialJobExecutor = class {
+ constructor() {
+ this.queue = [];
+ this.isProcessing = false;
  }
- const n = vector1.length;
- if (n === 0) {
- return 0;
+ async processQueue() {
+ if (this.isProcessing) {
+ return;
+ }
+ this.isProcessing = true;
+ while (this.queue.length > 0) {
+ await this.queue[0]();
+ this.queue.shift();
+ }
+ this.isProcessing = false;
  }
- let magnitudeSquared1 = 0;
- let magnitudeSquared2 = 0;
- let dotProduct = 0;
- for (let i = 0; i < n; i++) {
- const value1 = vector1[i];
- const value2 = vector2[i];
- magnitudeSquared1 += value1 * value1;
- magnitudeSquared2 += value2 * value2;
- dotProduct += value1 * value2;
+ async run(job) {
+ return new Promise((resolve, reject) => {
+ this.queue.push(async () => {
+ try {
+ await job();
+ resolve();
+ } catch (error) {
+ reject(error);
+ }
+ });
+ void this.processQueue();
+ });
  }
- return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
- }
+ };

  // src/util/simulate-readable-stream.ts
  import { delay as delayFunction } from "@ai-sdk/provider-utils";
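`SerialJobExecutor` is likewise unchanged but relocated, and the export hunk at the bottom of this diff adds it to the package's public exports. It runs async jobs strictly one at a time in FIFO order; a small usage sketch (`persist` is a hypothetical job):

```js
import { SerialJobExecutor } from "ai";

const executor = new SerialJobExecutor();
// Both jobs are queued immediately, but the second one only starts after
// the first has fully settled; run() rejects if the job throws.
await Promise.all([
  executor.run(async () => persist("a")),
  executor.run(async () => persist("b")),
]);
```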
@@ -3471,6 +3520,15 @@ function convertToLanguageModelV2DataContent(content) {
  }
  return { data: content, mediaType: void 0 };
  }
+ function convertDataContentToBase64String(content) {
+ if (typeof content === "string") {
+ return content;
+ }
+ if (content instanceof ArrayBuffer) {
+ return convertUint8ArrayToBase642(new Uint8Array(content));
+ }
+ return convertUint8ArrayToBase642(content);
+ }
  function convertDataContentToUint8Array(content) {
  if (content instanceof Uint8Array) {
  return content;
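The new `convertDataContentToBase64String` normalizes the three data-content encodings (base64 string, `ArrayBuffer`, `Uint8Array`) to a base64 string; strings are assumed to already be base64 and pass through untouched. It exists to support the telemetry change below:

```js
convertDataContentToBase64String("SGk=");                    // "SGk=" (passed through)
convertDataContentToBase64String(new Uint8Array([72, 105])); // "SGk=" ("Hi" as base64)
```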
@@ -3988,6 +4046,21 @@ async function standardizePrompt(prompt) {
  };
  }

+ // core/telemetry/stringify-for-telemetry.ts
+ function stringifyForTelemetry(prompt) {
+ return JSON.stringify(
+ prompt.map((message) => ({
+ ...message,
+ content: typeof message.content === "string" ? message.content : message.content.map(
+ (part) => part.type === "file" ? {
+ ...part,
+ data: part.data instanceof Uint8Array ? convertDataContentToBase64String(part.data) : part.data
+ } : part
+ )
+ }))
+ );
+ }
+
  // core/generate-object/output-strategy.ts
  import {
  isJSONArray,
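`stringifyForTelemetry` replaces the bare `JSON.stringify(promptMessages)` calls in the telemetry attributes below. The motivation is file parts: `JSON.stringify` turns a `Uint8Array` into an index-keyed object (`{"0":72,"1":105,...}`), so binary file data is converted to base64 first. Roughly (part shape abbreviated):

```js
const prompt = [{
  role: "user",
  content: [{ type: "file", data: new Uint8Array([72, 105]) }],
}];

JSON.stringify(prompt);        // ..."data":{"0":72,"1":105}... (noisy, blows up in size)
stringifyForTelemetry(prompt); // ..."data":"SGk="...           (compact base64)
```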
@@ -4488,7 +4561,7 @@ async function generateObject(options) {
  }),
  ...baseTelemetryAttributes,
  "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
+ input: () => stringifyForTelemetry(promptMessages)
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.system": model.provider,
@@ -4990,7 +5063,7 @@ var DefaultStreamObjectResult = class {
  }),
  ...baseTelemetryAttributes,
  "ai.prompt.messages": {
- input: () => JSON.stringify(callOptions.prompt)
+ input: () => stringifyForTelemetry(callOptions.prompt)
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.system": model.provider,
@@ -5403,6 +5476,11 @@ var DefaultSpeechResult = class {
  // core/generate-text/generate-text.ts
  import { createIdGenerator as createIdGenerator3 } from "@ai-sdk/provider-utils";

+ // src/util/as-array.ts
+ function asArray(value) {
+ return value === void 0 ? [] : Array.isArray(value) ? value : [value];
+ }
+
  // core/prompt/prepare-tools-and-tool-choice.ts
  import { asSchema as asSchema2 } from "@ai-sdk/provider-utils";

@@ -5624,8 +5702,8 @@ var DefaultStepResult = class {
  };

  // core/generate-text/stop-condition.ts
- function maxSteps(maxSteps2) {
- return ({ steps }) => steps.length >= maxSteps2;
+ function stepCountIs(stepCount) {
+ return ({ steps }) => steps.length === stepCount;
  }
  function hasToolCall(toolName) {
  return ({ steps }) => {
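`maxSteps(n)` is renamed to `stepCountIs(n)`, and the comparison tightens from `steps.length >= n` to `steps.length === n`; since the step count grows by one per iteration, both fire at the same point when used as a stop condition. A minimal sketch (`s1`…`s3` stand in for step results):

```js
const stop = stepCountIs(3);
stop({ steps: [s1, s2] });     // false: keep going
stop({ steps: [s1, s2, s3] }); // true: stop after the third step
```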
@@ -5635,6 +5713,12 @@ function hasToolCall(toolName) {
  )) != null ? _c : false;
  };
  }
+ async function isStopConditionMet({
+ stopConditions,
+ steps
+ }) {
+ return (await Promise.all(stopConditions.map((condition) => condition({ steps })))).some((result) => result);
+ }

  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
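The new `isStopConditionMet` evaluates a set of conditions in parallel with `Promise.all` and ORs the results, which is what lets the public `stopWhen` option accept either a single condition or an array (see the `asArray(stopWhen)` normalization below). Sketch:

```js
// Stop when either fires: five steps completed, or the model called a
// (hypothetical) "finalAnswer" tool.
await isStopConditionMet({
  stopConditions: [stepCountIs(5), hasToolCall("finalAnswer")],
  steps: recordedSteps,
});
```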
@@ -5709,12 +5793,14 @@ async function generateText({
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
- continueUntil = maxSteps(1),
+ stopWhen = stepCountIs(1),
  experimental_output: output,
  experimental_telemetry: telemetry,
  providerOptions,
- experimental_activeTools: activeTools,
- experimental_prepareStep: prepareStep,
+ experimental_activeTools,
+ activeTools = experimental_activeTools,
+ experimental_prepareStep,
+ prepareStep = experimental_prepareStep,
  experimental_repairToolCall: repairToolCall,
  _internal: {
  generateId: generateId3 = originalGenerateId3,
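Three `generateText` options get stable names: `continueUntil` becomes `stopWhen` (no fallback spelling for `continueUntil` survives in this hunk), while `experimental_activeTools` and `experimental_prepareStep` remain accepted as fallbacks for the new `activeTools` and `prepareStep`. A hedged call-site sketch (`model` and `tools` assumed to be defined):

```js
const result = await generateText({
  model,
  tools,
  prompt: "…",
  stopWhen: stepCountIs(5),        // was: continueUntil: maxSteps(5)
  activeTools: ["search"],         // was: experimental_activeTools
  prepareStep: ({ stepNumber }) => // was: experimental_prepareStep
    stepNumber === 0 ? { toolChoice: "required" } : {},
});
```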
@@ -5723,6 +5809,7 @@ async function generateText({
  onStepFinish,
  ...settings
  }) {
+ const stopConditions = asArray(stopWhen);
  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
  const callSettings = prepareCallSettings(settings);
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
@@ -5786,7 +5873,7 @@ async function generateText({
  const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
  tools,
  toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
- activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
+ activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _c : activeTools
  });
  currentModelResponse = await retry(
  () => {
@@ -5806,7 +5893,7 @@ async function generateText({
  "ai.model.id": stepModel.modelId,
  // prompt:
  "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
+ input: () => stringifyForTelemetry(promptMessages)
  },
  "ai.prompt.tools": {
  // convert the language model level tools:
@@ -5931,8 +6018,8 @@ async function generateText({
  } while (
  // there are tool calls:
  currentToolCalls.length > 0 && // all current tool calls have results:
- currentToolResults.length === currentToolCalls.length && // continue until the stop condition is met:
- !await continueUntil({ steps })
+ currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
+ !await isStopConditionMet({ stopConditions, steps })
  );
  span.setAttributes(
  selectTelemetryAttributes({
@@ -6282,11 +6369,6 @@ function smoothStream({
  // core/generate-text/stream-text.ts
  import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";

- // src/util/as-array.ts
- function asArray(value) {
- return value === void 0 ? [] : Array.isArray(value) ? value : [value];
- }
-
  // core/generate-text/run-tools-transformation.ts
  import { generateId } from "@ai-sdk/provider-utils";
  function runToolsTransformation({
@@ -6497,13 +6579,15 @@ function streamText({
  maxRetries,
  abortSignal,
  headers,
- continueUntil = maxSteps(1),
+ stopWhen = stepCountIs(1),
  experimental_output: output,
  experimental_telemetry: telemetry,
+ prepareStep,
  providerOptions,
  experimental_toolCallStreaming = false,
  toolCallStreaming = experimental_toolCallStreaming,
- experimental_activeTools: activeTools,
+ experimental_activeTools,
+ activeTools = experimental_activeTools,
  experimental_repairToolCall: repairToolCall,
  experimental_transform: transform,
  onChunk,
@@ -6533,9 +6617,10 @@ function streamText({
  transforms: asArray(transform),
  activeTools,
  repairToolCall,
- continueUntil,
+ stopConditions: asArray(stopWhen),
  output,
  providerOptions,
+ prepareStep,
  onChunk,
  onError,
  onFinish,
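`streamText` picks up the same renames and, new here, accepts `prepareStep` and threads it through to the streaming loop (previously only `generateText` honored it). `stopWhen` is normalized with `asArray` at this boundary, so a single condition or an array both work:

```js
const result = streamText({
  model,   // assumed model instance
  tools,
  prompt: "…",
  stopWhen: [stepCountIs(8), hasToolCall("finalAnswer")], // single value or array
  prepareStep: ({ model, steps, stepNumber }) => ({}),    // now honored per step
});
```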
@@ -6610,9 +6695,10 @@ var DefaultStreamTextResult = class {
  transforms,
  activeTools,
  repairToolCall,
- continueUntil,
+ stopConditions,
  output,
  providerOptions,
+ prepareStep,
  now: now2,
  currentDate,
  generateId: generateId3,
@@ -6831,6 +6917,7 @@ var DefaultStreamTextResult = class {
  responseMessages,
  usage
  }) {
+ var _a17, _b, _c;
  stepFinish = new DelayedPromise();
  const initialPrompt = await standardizePrompt({
  system,
@@ -6841,6 +6928,11 @@ var DefaultStreamTextResult = class {
  ...initialPrompt.messages,
  ...responseMessages
  ];
+ const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+ model,
+ steps: recordedSteps,
+ stepNumber: recordedSteps.length
+ }));
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
  system: initialPrompt.system,
@@ -6848,9 +6940,12 @@ var DefaultStreamTextResult = class {
  },
  supportedUrls: await model.supportedUrls
  });
- const toolsAndToolChoice = {
- ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
- };
+ const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
+ const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+ tools,
+ toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
+ activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _c : activeTools
+ });
  const {
  result: { stream: stream2, response, request },
  doStreamSpan,
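Inside `DefaultStreamTextResult`, `prepareStep` is now awaited before each step and may override the model, tool choice, and active tools for that step only; any field it leaves undefined falls back to the call-level setting (the null-coalescing chains above). A sketch of a per-step override, with `cheapModel` purely illustrative:

```js
const prepareStep = async ({ model, steps, stepNumber }) => {
  if (stepNumber === 0) {
    // Route the first step to a cheaper model with a restricted tool set.
    return { model: cheapModel, activeTools: ["search"] };
  }
  return { toolChoice: "none" }; // later steps: answer without tools
};
```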
@@ -6866,24 +6961,23 @@ var DefaultStreamTextResult = class {
  telemetry
  }),
  ...baseTelemetryAttributes,
+ // model:
+ "ai.model.provider": stepModel.provider,
+ "ai.model.id": stepModel.modelId,
+ // prompt:
  "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
+ input: () => stringifyForTelemetry(promptMessages)
  },
  "ai.prompt.tools": {
  // convert the language model level tools:
- input: () => {
- var _a17;
- return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
- (tool2) => JSON.stringify(tool2)
- );
- }
+ input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
  },
  "ai.prompt.toolChoice": {
- input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+ input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
  },
  // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
+ "gen_ai.system": stepModel.provider,
+ "gen_ai.request.model": stepModel.modelId,
  "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
  "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
  "gen_ai.request.presence_penalty": callSettings.presencePenalty,
@@ -6900,9 +6994,10 @@ var DefaultStreamTextResult = class {
  startTimestampMs: now2(),
  // get before the call
  doStreamSpan: doStreamSpan2,
- result: await model.doStream({
+ result: await stepModel.doStream({
  ...callSettings,
- ...toolsAndToolChoice,
+ tools: stepTools,
+ toolChoice: stepToolChoice,
  responseFormat: output == null ? void 0 : output.responseFormat,
  prompt: promptMessages,
  providerOptions,
@@ -6955,7 +7050,7 @@ var DefaultStreamTextResult = class {
  streamWithToolResults.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a17, _b, _c, _d;
+ var _a18, _b2, _c2, _d;
  if (chunk.type === "stream-start") {
  warnings = chunk.warnings;
  return;
@@ -7018,9 +7113,9 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
- timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
- modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
+ id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
+ timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
+ modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
  };
  break;
  }
@@ -7109,7 +7204,11 @@ var DefaultStreamTextResult = class {
  const combinedUsage = addLanguageModelUsage(usage, stepUsage);
  await stepFinish.promise;
  if (stepToolCalls.length > 0 && // all current tool calls have results:
- stepToolResults.length === stepToolCalls.length && !await continueUntil({ steps: recordedSteps })) {
+ stepToolResults.length === stepToolCalls.length && // continue until a stop condition is met:
+ !await isStopConditionMet({
+ stopConditions,
+ steps: recordedSteps
+ })) {
  responseMessages.push(
  ...toResponseMessages({
  content: stepContent,
@@ -7278,14 +7377,14 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning = false,
  sendSources = false,
- experimental_sendStart = true,
- experimental_sendFinish = true,
+ sendStart = true,
+ sendFinish = true,
  onError = () => "An error occurred."
  // mask error messages for safety by default
  } = {}) {
  const lastMessage = originalMessages[originalMessages.length - 1];
  const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
- const messageId = isContinuation ? lastMessage.id : newMessageId;
+ const messageId = isContinuation ? lastMessage.id : newMessageId != null ? newMessageId : this.generateId();
  const baseStream = this.fullStream.pipeThrough(
  new TransformStream({
  transform: async (part, controller) => {
@@ -7391,7 +7490,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "start": {
- if (experimental_sendStart) {
+ if (sendStart) {
  const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "start",
@@ -7402,7 +7501,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "finish": {
- if (experimental_sendFinish) {
+ if (sendFinish) {
  const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "finish",
@@ -7419,38 +7518,12 @@ var DefaultStreamTextResult = class {
  }
  })
  );
- if (onFinish == null) {
- return baseStream;
- }
- const state = createStreamingUIMessageState({
- lastMessage,
- newMessageId: messageId != null ? messageId : this.generateId()
- });
- const runUpdateMessageJob = async (job) => {
- await job({ state, write: () => {
- } });
- };
- return processUIMessageStream({
+ return handleUIMessageStreamFinish({
  stream: baseStream,
- runUpdateMessageJob
- }).pipeThrough(
- new TransformStream({
- transform(chunk, controller) {
- controller.enqueue(chunk);
- },
- flush() {
- const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
- onFinish({
- isContinuation: isContinuation2,
- responseMessage: state.message,
- messages: [
- ...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
- state.message
- ]
- });
- }
- })
- );
+ newMessageId: messageId,
+ originalMessages,
+ onFinish
+ });
  }
  pipeUIMessageStreamToResponse(response, {
  newMessageId,
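The UI-message-stream options drop their `experimental_` prefixes (`sendStart`, `sendFinish`), `newMessageId` now falls back to `this.generateId()`, and the inline `onFinish` machinery collapses into the shared `handleUIMessageStreamFinish` helper introduced earlier in this diff. A call-site sketch (only the option lists appear in this diff; `toUIMessageStreamResponse` is the corresponding public method on the result):

```js
// E.g. suppress the "start" frame when this response continues an earlier
// streamed message, so the client does not open a second message.
result.toUIMessageStreamResponse({
  sendStart: false, // was: experimental_sendStart
  sendFinish: true, // was: experimental_sendFinish
});
```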
@@ -7459,8 +7532,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
- experimental_sendFinish,
- experimental_sendStart,
+ sendFinish,
+ sendStart,
  onError,
  ...init
  } = {}) {
@@ -7473,8 +7546,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
- experimental_sendFinish,
- experimental_sendStart,
+ sendFinish,
+ sendStart,
  onError
  }),
  ...init
@@ -7494,8 +7567,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
- experimental_sendFinish,
- experimental_sendStart,
+ sendFinish,
+ sendStart,
  onError,
  ...init
  } = {}) {
@@ -7507,8 +7580,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
- experimental_sendFinish,
- experimental_sendStart,
+ sendFinish,
+ sendStart,
  onError
  }),
  ...init
@@ -8555,6 +8628,7 @@ export {
  NoSuchToolError,
  output_exports as Output,
  RetryError,
+ SerialJobExecutor,
  TextStreamChatTransport,
  ToolCallRepairError,
  ToolExecutionError,
@@ -8580,7 +8654,7 @@ export {
  createUIMessageStream,
  createUIMessageStreamResponse,
  customProvider,
- defaultChatStore,
+ defaultChatStoreOptions,
  defaultSettingsMiddleware,
  embed,
  embedMany,
@@ -8601,7 +8675,6 @@ export {
  isAssistantMessageWithCompletedToolCalls,
  isDeepEqualData,
  jsonSchema2 as jsonSchema,
- maxSteps,
  modelMessageSchema,
  parsePartialJson,
  pipeTextStreamToResponse,
@@ -8610,6 +8683,7 @@ export {
  simulateReadableStream,
  simulateStreamingMiddleware,
  smoothStream,
+ stepCountIs,
  streamObject,
  streamText,
  systemModelMessageSchema,
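Net change to the public surface, as an import sketch: `SerialJobExecutor` and `stepCountIs` are newly exported, `defaultChatStoreOptions` replaces `defaultChatStore`, and `maxSteps` is removed:

```js
// alpha.5
import { SerialJobExecutor, defaultChatStoreOptions, stepCountIs } from "ai";

// alpha.3 (no longer exported)
// import { defaultChatStore, maxSteps } from "ai";
```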