ai 5.0.0-alpha.3 → 5.0.0-alpha.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -47,6 +47,7 @@ __export(src_exports, {
  NoSuchToolError: () => NoSuchToolError,
  Output: () => output_exports,
  RetryError: () => RetryError,
+ SerialJobExecutor: () => SerialJobExecutor,
  TextStreamChatTransport: () => TextStreamChatTransport,
  ToolCallRepairError: () => ToolCallRepairError,
  ToolExecutionError: () => ToolExecutionError,
@@ -72,7 +73,7 @@ __export(src_exports, {
  createUIMessageStream: () => createUIMessageStream,
  createUIMessageStreamResponse: () => createUIMessageStreamResponse,
  customProvider: () => customProvider,
- defaultChatStore: () => defaultChatStore,
+ defaultChatStoreOptions: () => defaultChatStoreOptions,
  defaultSettingsMiddleware: () => defaultSettingsMiddleware,
  embed: () => embed,
  embedMany: () => embedMany,
@@ -93,7 +94,6 @@ __export(src_exports, {
  isAssistantMessageWithCompletedToolCalls: () => isAssistantMessageWithCompletedToolCalls,
  isDeepEqualData: () => isDeepEqualData,
  jsonSchema: () => import_provider_utils26.jsonSchema,
- maxSteps: () => maxSteps,
  modelMessageSchema: () => modelMessageSchema,
  parsePartialJson: () => parsePartialJson,
  pipeTextStreamToResponse: () => pipeTextStreamToResponse,
@@ -102,6 +102,7 @@ __export(src_exports, {
  simulateReadableStream: () => simulateReadableStream,
  simulateStreamingMiddleware: () => simulateStreamingMiddleware,
  smoothStream: () => smoothStream,
+ stepCountIs: () => stepCountIs,
  streamObject: () => streamObject,
  streamText: () => streamText,
  systemModelMessageSchema: () => systemModelMessageSchema,
@@ -1048,12 +1049,12 @@ function getToolInvocations(message) {
  // src/ui/process-ui-message-stream.ts
  function createStreamingUIMessageState({
  lastMessage,
- newMessageId = "no-id"
+ newMessageId = ""
  } = {}) {
  var _a17;
  const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
  const step = isContinuation ? 1 + ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) : 0;
- const message = isContinuation ? structuredClone(lastMessage) : {
+ const message = isContinuation ? lastMessage : {
  id: newMessageId,
  metadata: {},
  role: "assistant",
@@ -1433,7 +1434,7 @@ async function consumeUIMessageStream({
  messageMetadataSchema
  }) {
  const state = createStreamingUIMessageState({
- lastMessage,
+ lastMessage: lastMessage ? structuredClone(lastMessage) : void 0,
  newMessageId: generateId3()
  });
  const runUpdateMessageJob = async (job) => {
@@ -1634,54 +1635,22 @@ async function callCompletionApi({
  // src/ui/chat-store.ts
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
 
- // src/util/serial-job-executor.ts
- var SerialJobExecutor = class {
- constructor() {
- this.queue = [];
- this.isProcessing = false;
- }
- async processQueue() {
- if (this.isProcessing) {
- return;
- }
- this.isProcessing = true;
- while (this.queue.length > 0) {
- await this.queue[0]();
- this.queue.shift();
- }
- this.isProcessing = false;
- }
- async run(job) {
- return new Promise((resolve, reject) => {
- this.queue.push(async () => {
- try {
- await job();
- resolve();
- } catch (error) {
- reject(error);
- }
- });
- void this.processQueue();
- });
- }
- };
-
  // src/ui/should-resubmit-messages.ts
  function shouldResubmitMessages({
  originalMaxToolInvocationStep,
  originalMessageCount,
- maxSteps: maxSteps2,
+ maxSteps,
  messages
  }) {
  var _a17;
  const lastMessage = messages[messages.length - 1];
  return (
  // check if the feature is enabled:
- maxSteps2 > 1 && // ensure there is a last message:
+ maxSteps > 1 && // ensure there is a last message:
  lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
  (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
  isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
- ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps2
+ ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps
  );
  }
  function isAssistantMessageWithCompletedToolCalls(message) {
@@ -1721,23 +1690,19 @@ var ChatStore = class {
  chats = {},
  generateId: generateId3,
  transport,
- maxSteps: maxSteps2 = 1,
+ maxSteps = 1,
  messageMetadataSchema,
- dataPartSchemas
+ dataPartSchemas,
+ createChat
  }) {
+ this.createChat = createChat;
  this.chats = new Map(
- Object.entries(chats).map(([id, state]) => [
+ Object.entries(chats).map(([id, chat]) => [
  id,
- {
- messages: [...state.messages],
- status: "ready",
- activeResponse: void 0,
- error: void 0,
- jobExecutor: new SerialJobExecutor()
- }
+ this.createChat({ messages: chat.messages })
  ])
  );
- this.maxSteps = maxSteps2;
+ this.maxSteps = maxSteps;
  this.transport = transport;
  this.subscribers = /* @__PURE__ */ new Set();
  this.generateId = generateId3 != null ? generateId3 : import_provider_utils5.generateId;
@@ -1748,11 +1713,7 @@ var ChatStore = class {
  return this.chats.has(id);
  }
  addChat(id, messages) {
- this.chats.set(id, {
- messages,
- status: "ready",
- jobExecutor: new SerialJobExecutor()
- });
+ this.chats.set(id, this.createChat({ messages }));
  }
  getChats() {
  return Array.from(this.chats.entries());
@@ -1761,28 +1722,28 @@ var ChatStore = class {
  return this.chats.size;
  }
  getStatus(id) {
- return this.getChat(id).status;
+ return this.getChatState(id).status;
  }
  setStatus({
  id,
  status,
  error
  }) {
- const chat = this.getChat(id);
- if (chat.status === status)
+ const state = this.getChatState(id);
+ if (state.status === status)
  return;
- chat.status = status;
- chat.error = error;
+ state.setStatus(status);
+ state.setError(error);
  this.emit({ type: "chat-status-changed", chatId: id, error });
  }
  getError(id) {
- return this.getChat(id).error;
+ return this.getChatState(id).error;
  }
  getMessages(id) {
- return this.getChat(id).messages;
+ return this.getChatState(id).messages;
  }
  getLastMessage(id) {
- const chat = this.getChat(id);
+ const chat = this.getChatState(id);
  return chat.messages[chat.messages.length - 1];
  }
  subscribe(subscriber) {
@@ -1793,11 +1754,11 @@ var ChatStore = class {
  id,
  messages
  }) {
- this.getChat(id).messages = [...messages];
+ this.getChatState(id).setMessages(messages);
  this.emit({ type: "chat-messages-changed", chatId: id });
  }
  removeAssistantResponse(id) {
- const chat = this.getChat(id);
+ const chat = this.getChatState(id);
  const lastMessage = chat.messages[chat.messages.length - 1];
  if (lastMessage == null) {
  throw new Error("Cannot remove assistant response from empty chat");
@@ -1805,7 +1766,8 @@ var ChatStore = class {
  if (lastMessage.role !== "assistant") {
  throw new Error("Last message is not an assistant message");
  }
- this.setMessages({ id, messages: chat.messages.slice(0, -1) });
+ chat.popMessage();
+ this.emit({ type: "chat-messages-changed", chatId: id });
  }
  async submitMessage({
  chatId,
@@ -1817,14 +1779,14 @@ var ChatStore = class {
  onFinish
  }) {
  var _a17;
- const chat = this.getChat(chatId);
- const currentMessages = chat.messages;
+ const state = this.getChatState(chatId);
+ state.pushMessage({ ...message, id: (_a17 = message.id) != null ? _a17 : this.generateId() });
+ this.emit({
+ type: "chat-messages-changed",
+ chatId
+ });
  await this.triggerRequest({
  chatId,
- messages: currentMessages.concat({
- ...message,
- id: (_a17 = message.id) != null ? _a17 : this.generateId()
- }),
  headers,
  body,
  requestType: "generate",
@@ -1841,15 +1803,20 @@ var ChatStore = class {
  onToolCall,
  onFinish
  }) {
- const messages = this.getChat(chatId).messages;
- const messagesToSubmit = messages[messages.length - 1].role === "assistant" ? messages.slice(0, -1) : messages;
- if (messagesToSubmit.length === 0) {
+ const chat = this.getChatState(chatId);
+ if (chat.messages[chat.messages.length - 1].role === "assistant") {
+ chat.popMessage();
+ this.emit({
+ type: "chat-messages-changed",
+ chatId
+ });
+ }
+ if (chat.messages.length === 0) {
  return;
  }
  return this.triggerRequest({
  chatId,
  requestType: "generate",
- messages: messagesToSubmit,
  headers,
  body,
  onError,
@@ -1865,11 +1832,8 @@ var ChatStore = class {
  onToolCall,
  onFinish
  }) {
- const chat = this.getChat(chatId);
- const currentMessages = chat.messages;
  return this.triggerRequest({
  chatId,
- messages: currentMessages,
  requestType: "resume",
  headers,
  body,
@@ -1883,22 +1847,23 @@ var ChatStore = class {
  toolCallId,
  result
  }) {
- const chat = this.getChat(chatId);
+ const chat = this.getChatState(chatId);
  chat.jobExecutor.run(async () => {
- const currentMessages = chat.messages;
  updateToolCallResult({
- messages: currentMessages,
+ messages: chat.messages,
  toolCallId,
  toolResult: result
  });
- this.setMessages({ id: chatId, messages: currentMessages });
+ this.setMessages({
+ id: chatId,
+ messages: chat.messages
+ });
  if (chat.status === "submitted" || chat.status === "streaming") {
  return;
  }
- const lastMessage = currentMessages[currentMessages.length - 1];
+ const lastMessage = chat.messages[chat.messages.length - 1];
  if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
- await this.triggerRequest({
- messages: currentMessages,
+ this.triggerRequest({
  requestType: "generate",
  chatId
  });
@@ -1907,7 +1872,7 @@ var ChatStore = class {
  }
  async stopStream({ chatId }) {
  var _a17;
- const chat = this.getChat(chatId);
+ const chat = this.getChatState(chatId);
  if (chat.status !== "streaming" && chat.status !== "submitted")
  return;
  if ((_a17 = chat.activeResponse) == null ? void 0 : _a17.abortController) {
@@ -1920,15 +1885,14 @@ var ChatStore = class {
  subscriber.onChatChanged(event);
  }
  }
- getChat(id) {
+ getChatState(id) {
  if (!this.hasChat(id)) {
- throw new Error(`chat '${id}' not found`);
+ this.addChat(id, []);
  }
  return this.chats.get(id);
  }
  async triggerRequest({
  chatId,
- messages: chatMessages,
  requestType,
  headers,
  body,
@@ -1936,26 +1900,25 @@ var ChatStore = class {
  onToolCall,
  onFinish
  }) {
- const self = this;
- const chat = this.getChat(chatId);
- this.setMessages({ id: chatId, messages: chatMessages });
+ const chat = this.getChatState(chatId);
  this.setStatus({ id: chatId, status: "submitted", error: void 0 });
- const messageCount = chatMessages.length;
+ const messageCount = chat.messages.length;
  const maxStep = extractMaxToolInvocationStep(
- getToolInvocations(chatMessages[chatMessages.length - 1])
+ getToolInvocations(chat.messages[chat.messages.length - 1])
  );
  try {
+ const lastMessage = chat.messages[chat.messages.length - 1];
  const activeResponse = {
  state: createStreamingUIMessageState({
- lastMessage: chatMessages[chatMessages.length - 1],
- newMessageId: self.generateId()
+ lastMessage: chat.snapshot ? chat.snapshot(lastMessage) : lastMessage,
+ newMessageId: this.generateId()
  }),
  abortController: new AbortController()
  };
- chat.activeResponse = activeResponse;
- const stream = await self.transport.submitMessages({
+ chat.setActiveResponse(activeResponse);
+ const stream = await this.transport.submitMessages({
  chatId,
- messages: chatMessages,
+ messages: chat.messages,
  body,
  headers,
  abortController: activeResponse.abortController,
@@ -1967,15 +1930,19 @@ var ChatStore = class {
  () => job({
  state: activeResponse.state,
  write: () => {
- self.setStatus({ id: chatId, status: "streaming" });
- const replaceLastMessage = activeResponse.state.message.id === chatMessages[chatMessages.length - 1].id;
- const newMessages = [
- ...replaceLastMessage ? chatMessages.slice(0, chatMessages.length - 1) : chatMessages,
- activeResponse.state.message
- ];
- self.setMessages({
- id: chatId,
- messages: newMessages
+ this.setStatus({ id: chatId, status: "streaming" });
+ const replaceLastMessage = activeResponse.state.message.id === chat.messages[chat.messages.length - 1].id;
+ if (replaceLastMessage) {
+ chat.replaceMessage(
+ chat.messages.length - 1,
+ activeResponse.state.message
+ );
+ } else {
+ chat.pushMessage(activeResponse.state.message);
+ }
+ this.emit({
+ type: "chat-messages-changed",
+ chatId
  });
  }
  })
@@ -1985,8 +1952,8 @@ var ChatStore = class {
  stream: processUIMessageStream({
  stream,
  onToolCall,
- messageMetadataSchema: self.messageMetadataSchema,
- dataPartSchemas: self.dataPartSchemas,
+ messageMetadataSchema: this.messageMetadataSchema,
+ dataPartSchemas: this.dataPartSchemas,
  runUpdateMessageJob
  }),
  onError: (error) => {
@@ -2005,24 +1972,22 @@ var ChatStore = class {
  }
  this.setStatus({ id: chatId, status: "error", error: err });
  } finally {
- chat.activeResponse = void 0;
+ chat.setActiveResponse(void 0);
  }
- const currentMessages = self.getMessages(chatId);
  if (shouldResubmitMessages({
  originalMaxToolInvocationStep: maxStep,
  originalMessageCount: messageCount,
- maxSteps: self.maxSteps,
- messages: currentMessages
+ maxSteps: this.maxSteps,
+ messages: chat.messages
  })) {
- await self.triggerRequest({
+ await this.triggerRequest({
  chatId,
  requestType,
  onError,
  onToolCall,
  onFinish,
  headers,
- body,
- messages: currentMessages
+ body
  });
  }
  }
@@ -2314,22 +2279,22 @@ function convertToModelMessages(messages, options) {
  }
  var convertToCoreMessages = convertToModelMessages;
 
- // src/ui/default-chat-store.ts
+ // src/ui/default-chat-store-options.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
- function defaultChatStore({
- api,
+ function defaultChatStoreOptions({
+ api = "/api/chat",
  fetch: fetch2,
  credentials,
  headers,
  body,
  prepareRequestBody,
  generateId: generateId3 = import_provider_utils6.generateId,
- dataPartSchemas,
  messageMetadataSchema,
- maxSteps: maxSteps2 = 1,
+ maxSteps = 1,
+ dataPartSchemas,
  chats
  }) {
- return new ChatStore({
+ return () => ({
  transport: new DefaultChatTransport({
  api,
  fetch: fetch2,
@@ -2341,16 +2306,62 @@ function defaultChatStore({
  generateId: generateId3,
  messageMetadataSchema,
  dataPartSchemas,
- maxSteps: maxSteps2,
+ maxSteps,
  chats
  });
  }
 
+ // src/ui-message-stream/handle-ui-message-stream-finish.ts
+ function handleUIMessageStreamFinish({
+ newMessageId,
+ originalMessages = [],
+ onFinish,
+ stream
+ }) {
+ if (onFinish == null) {
+ return stream;
+ }
+ const lastMessage = originalMessages[originalMessages.length - 1];
+ const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
+ const messageId = isContinuation ? lastMessage.id : newMessageId;
+ const state = createStreamingUIMessageState({
+ lastMessage: structuredClone(lastMessage),
+ newMessageId: messageId
+ });
+ const runUpdateMessageJob = async (job) => {
+ await job({ state, write: () => {
+ } });
+ };
+ return processUIMessageStream({
+ stream,
+ runUpdateMessageJob
+ }).pipeThrough(
+ new TransformStream({
+ transform(chunk, controller) {
+ controller.enqueue(chunk);
+ },
+ flush() {
+ const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
+ onFinish({
+ isContinuation: isContinuation2,
+ responseMessage: state.message,
+ messages: [
+ ...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
+ state.message
+ ]
+ });
+ }
+ })
+ );
+ }
+
  // src/ui-message-stream/create-ui-message-stream.ts
  function createUIMessageStream({
  execute,
- onError = () => "An error occurred."
+ onError = () => "An error occurred.",
  // mask error messages for safety by default
+ originalMessages,
+ onFinish
  }) {
  let controller;
  const ongoingStreamPromises = [];
@@ -2367,25 +2378,27 @@ function createUIMessageStream({
  }
  try {
  const result = execute({
- write(part) {
- safeEnqueue(part);
- },
- merge(streamArg) {
- ongoingStreamPromises.push(
- (async () => {
- const reader = streamArg.getReader();
- while (true) {
- const { done, value } = await reader.read();
- if (done)
- break;
- safeEnqueue(value);
- }
- })().catch((error) => {
- safeEnqueue({ type: "error", errorText: onError(error) });
- })
- );
- },
- onError
+ writer: {
+ write(part) {
+ safeEnqueue(part);
+ },
+ merge(streamArg) {
+ ongoingStreamPromises.push(
+ (async () => {
+ const reader = streamArg.getReader();
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done)
+ break;
+ safeEnqueue(value);
+ }
+ })().catch((error) => {
+ safeEnqueue({ type: "error", errorText: onError(error) });
+ })
+ );
+ },
+ onError
+ }
  });
  if (result) {
  ongoingStreamPromises.push(
@@ -2409,7 +2422,12 @@ function createUIMessageStream({
  } catch (error) {
  }
  });
- return stream;
+ return handleUIMessageStreamFinish({
+ stream,
+ newMessageId: "",
+ originalMessages,
+ onFinish
+ });
  }
 
  // src/ui-message-stream/ui-message-stream-headers.ts
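A minimal usage sketch of the reworked createUIMessageStream API shown above: execute now receives a single { writer } argument instead of top-level write/merge callbacks, and the returned stream passes through handleUIMessageStreamFinish so onFinish fires with the assembled messages. This is not part of the diff; the CommonJS build is assumed, and the text part shape below is an illustrative assumption:

    const { createUIMessageStream } = require("ai");

    const stream = createUIMessageStream({
      execute({ writer }) {
        writer.write({ type: "text", text: "Hello" }); // part shape is an assumption
        // writer.merge(otherUIMessageStream);         // forward parts from another stream
      },
      originalMessages: [], // optional; lets onFinish report continuations
      onFinish({ messages, responseMessage, isContinuation }) {
        // called on flush with the final message list (see handleUIMessageStreamFinish)
      },
    });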
@@ -2474,6 +2492,32 @@ function pipeUIMessageStreamToResponse({
  });
  }
 
+ // src/util/cosine-similarity.ts
+ function cosineSimilarity(vector1, vector2) {
+ if (vector1.length !== vector2.length) {
+ throw new InvalidArgumentError({
+ parameter: "vector1,vector2",
+ value: { vector1Length: vector1.length, vector2Length: vector2.length },
+ message: `Vectors must have the same length`
+ });
+ }
+ const n = vector1.length;
+ if (n === 0) {
+ return 0;
+ }
+ let magnitudeSquared1 = 0;
+ let magnitudeSquared2 = 0;
+ let dotProduct = 0;
+ for (let i = 0; i < n; i++) {
+ const value1 = vector1[i];
+ const value2 = vector2[i];
+ magnitudeSquared1 += value1 * value1;
+ magnitudeSquared2 += value2 * value2;
+ dotProduct += value1 * value2;
+ }
+ return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
+ }
+
  // src/util/data-url.ts
  function getTextFromDataUrl(dataUrl) {
  const [header, base64Content] = dataUrl.split(",");
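A quick sketch of the cosineSimilarity helper, which this release moves ahead of the data-url utilities; every result below follows directly from the implementation above:

    const { cosineSimilarity } = require("ai");

    cosineSimilarity([1, 2], [2, 4]); // 1 (same direction)
    cosineSimilarity([1, 0], [0, 1]); // 0 (orthogonal)
    cosineSimilarity([], []);         // 0 (guarded n === 0 case)
    cosineSimilarity([1], [1, 2]);    // throws InvalidArgumentError (length mismatch)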
@@ -2523,31 +2567,37 @@ function isDeepEqualData(obj1, obj2) {
  return true;
  }
 
- // src/util/cosine-similarity.ts
- function cosineSimilarity(vector1, vector2) {
- if (vector1.length !== vector2.length) {
- throw new InvalidArgumentError({
- parameter: "vector1,vector2",
- value: { vector1Length: vector1.length, vector2Length: vector2.length },
- message: `Vectors must have the same length`
- });
+ // src/util/serial-job-executor.ts
+ var SerialJobExecutor = class {
+ constructor() {
+ this.queue = [];
+ this.isProcessing = false;
  }
- const n = vector1.length;
- if (n === 0) {
- return 0;
+ async processQueue() {
+ if (this.isProcessing) {
+ return;
+ }
+ this.isProcessing = true;
+ while (this.queue.length > 0) {
+ await this.queue[0]();
+ this.queue.shift();
+ }
+ this.isProcessing = false;
  }
- let magnitudeSquared1 = 0;
- let magnitudeSquared2 = 0;
- let dotProduct = 0;
- for (let i = 0; i < n; i++) {
- const value1 = vector1[i];
- const value2 = vector2[i];
- magnitudeSquared1 += value1 * value1;
- magnitudeSquared2 += value2 * value2;
- dotProduct += value1 * value2;
+ async run(job) {
+ return new Promise((resolve, reject) => {
+ this.queue.push(async () => {
+ try {
+ await job();
+ resolve();
+ } catch (error) {
+ reject(error);
+ }
+ });
+ void this.processQueue();
+ });
  }
- return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
- }
+ };
 
  // src/util/simulate-readable-stream.ts
  var import_provider_utils7 = require("@ai-sdk/provider-utils");
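SerialJobExecutor moves here and becomes a public export (see the __export changes at the top of the diff). A minimal sketch of its contract, derived from the implementation above: jobs run strictly one at a time in submission order, and run() settles with the job's outcome:

    const { SerialJobExecutor } = require("ai");

    async function demo() {
      const executor = new SerialJobExecutor();
      const order = [];
      await Promise.all([
        executor.run(async () => { order.push("first"); }),
        executor.run(async () => { order.push("second"); }), // queued behind the first
      ]);
      console.log(order); // ["first", "second"]
    }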
@@ -3541,6 +3591,15 @@ function convertToLanguageModelV2DataContent(content) {
  }
  return { data: content, mediaType: void 0 };
  }
+ function convertDataContentToBase64String(content) {
+ if (typeof content === "string") {
+ return content;
+ }
+ if (content instanceof ArrayBuffer) {
+ return (0, import_provider_utils11.convertUint8ArrayToBase64)(new Uint8Array(content));
+ }
+ return (0, import_provider_utils11.convertUint8ArrayToBase64)(content);
+ }
  function convertDataContentToUint8Array(content) {
  if (content instanceof Uint8Array) {
  return content;
@@ -4058,6 +4117,21 @@ async function standardizePrompt(prompt) {
  };
  }
 
+ // core/telemetry/stringify-for-telemetry.ts
+ function stringifyForTelemetry(prompt) {
+ return JSON.stringify(
+ prompt.map((message) => ({
+ ...message,
+ content: typeof message.content === "string" ? message.content : message.content.map(
+ (part) => part.type === "file" ? {
+ ...part,
+ data: part.data instanceof Uint8Array ? convertDataContentToBase64String(part.data) : part.data
+ } : part
+ )
+ }))
+ );
+ }
+
  // core/generate-object/output-strategy.ts
  var import_provider20 = require("@ai-sdk/provider");
  var import_provider_utils14 = require("@ai-sdk/provider-utils");
@@ -4550,7 +4624,7 @@ async function generateObject(options) {
  }),
  ...baseTelemetryAttributes,
  "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
+ input: () => stringifyForTelemetry(promptMessages)
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.system": model.provider,
@@ -5052,7 +5126,7 @@ var DefaultStreamObjectResult = class {
  }),
  ...baseTelemetryAttributes,
  "ai.prompt.messages": {
- input: () => JSON.stringify(callOptions.prompt)
+ input: () => stringifyForTelemetry(callOptions.prompt)
  },
  // standardized gen-ai llm span attributes:
  "gen_ai.system": model.provider,
@@ -5465,6 +5539,11 @@ var DefaultSpeechResult = class {
  // core/generate-text/generate-text.ts
  var import_provider_utils19 = require("@ai-sdk/provider-utils");
 
+ // src/util/as-array.ts
+ function asArray(value) {
+ return value === void 0 ? [] : Array.isArray(value) ? value : [value];
+ }
+
  // core/prompt/prepare-tools-and-tool-choice.ts
  var import_provider_utils17 = require("@ai-sdk/provider-utils");
 
@@ -5682,8 +5761,8 @@ var DefaultStepResult = class {
  };
 
  // core/generate-text/stop-condition.ts
- function maxSteps(maxSteps2) {
- return ({ steps }) => steps.length >= maxSteps2;
+ function stepCountIs(stepCount) {
+ return ({ steps }) => steps.length === stepCount;
  }
  function hasToolCall(toolName) {
  return ({ steps }) => {
@@ -5693,6 +5772,12 @@ function hasToolCall(toolName) {
  )) != null ? _c : false;
  };
  }
+ async function isStopConditionMet({
+ stopConditions,
+ steps
+ }) {
+ return (await Promise.all(stopConditions.map((condition) => condition({ steps })))).some((result) => result);
+ }
 
  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
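The maxSteps(n) stop condition is replaced by stepCountIs(n) (note the comparison also tightens from >= to ===), and the new isStopConditionMet helper lets generateText and streamText accept either a single condition or an array via stopWhen, stopping once any condition matches. A minimal sketch, assuming a provider model and a hypothetical finalAnswer tool:

    const { generateText, stepCountIs, hasToolCall } = require("ai");

    async function run(model, tools) {
      const { text, steps } = await generateText({
        model, // assumed: a language model instance from a provider package
        tools, // assumed: tool definitions; "finalAnswer" below is hypothetical
        prompt: "...",
        stopWhen: [stepCountIs(5), hasToolCall("finalAnswer")],
      });
      return { text, stepCount: steps.length };
    }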
@@ -5767,12 +5852,14 @@ async function generateText({
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
- continueUntil = maxSteps(1),
+ stopWhen = stepCountIs(1),
  experimental_output: output,
  experimental_telemetry: telemetry,
  providerOptions,
- experimental_activeTools: activeTools,
- experimental_prepareStep: prepareStep,
+ experimental_activeTools,
+ activeTools = experimental_activeTools,
+ experimental_prepareStep,
+ prepareStep = experimental_prepareStep,
  experimental_repairToolCall: repairToolCall,
  _internal: {
  generateId: generateId3 = originalGenerateId3,
@@ -5781,6 +5868,7 @@ async function generateText({
  onStepFinish,
  ...settings
  }) {
+ const stopConditions = asArray(stopWhen);
  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
  const callSettings = prepareCallSettings(settings);
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
@@ -5844,7 +5932,7 @@ async function generateText({
  const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
  tools,
  toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
- activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
+ activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _c : activeTools
  });
  currentModelResponse = await retry(
  () => {
@@ -5864,7 +5952,7 @@ async function generateText({
  "ai.model.id": stepModel.modelId,
  // prompt:
  "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
+ input: () => stringifyForTelemetry(promptMessages)
  },
  "ai.prompt.tools": {
  // convert the language model level tools:
@@ -5989,8 +6077,8 @@ async function generateText({
  } while (
  // there are tool calls:
  currentToolCalls.length > 0 && // all current tool calls have results:
- currentToolResults.length === currentToolCalls.length && // continue until the stop condition is met:
- !await continueUntil({ steps })
+ currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
+ !await isStopConditionMet({ stopConditions, steps })
  );
  span.setAttributes(
  selectTelemetryAttributes({
@@ -6336,11 +6424,6 @@ function smoothStream({
  // core/generate-text/stream-text.ts
  var import_provider_utils23 = require("@ai-sdk/provider-utils");
 
- // src/util/as-array.ts
- function asArray(value) {
- return value === void 0 ? [] : Array.isArray(value) ? value : [value];
- }
-
  // core/generate-text/run-tools-transformation.ts
  var import_provider_utils22 = require("@ai-sdk/provider-utils");
  function runToolsTransformation({
@@ -6551,13 +6634,15 @@ function streamText({
  maxRetries,
  abortSignal,
  headers,
- continueUntil = maxSteps(1),
+ stopWhen = stepCountIs(1),
  experimental_output: output,
  experimental_telemetry: telemetry,
+ prepareStep,
  providerOptions,
  experimental_toolCallStreaming = false,
  toolCallStreaming = experimental_toolCallStreaming,
- experimental_activeTools: activeTools,
+ experimental_activeTools,
+ activeTools = experimental_activeTools,
  experimental_repairToolCall: repairToolCall,
  experimental_transform: transform,
  onChunk,
@@ -6587,9 +6672,10 @@ function streamText({
  transforms: asArray(transform),
  activeTools,
  repairToolCall,
- continueUntil,
+ stopConditions: asArray(stopWhen),
  output,
  providerOptions,
+ prepareStep,
  onChunk,
  onError,
  onFinish,
@@ -6664,9 +6750,10 @@ var DefaultStreamTextResult = class {
  transforms,
  activeTools,
  repairToolCall,
- continueUntil,
+ stopConditions,
  output,
  providerOptions,
+ prepareStep,
  now: now2,
  currentDate,
  generateId: generateId3,
@@ -6885,6 +6972,7 @@ var DefaultStreamTextResult = class {
  responseMessages,
  usage
  }) {
+ var _a17, _b, _c;
  stepFinish = new DelayedPromise();
  const initialPrompt = await standardizePrompt({
  system,
@@ -6895,6 +6983,11 @@ var DefaultStreamTextResult = class {
  ...initialPrompt.messages,
  ...responseMessages
  ];
+ const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+ model,
+ steps: recordedSteps,
+ stepNumber: recordedSteps.length
+ }));
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
  system: initialPrompt.system,
@@ -6902,9 +6995,12 @@ var DefaultStreamTextResult = class {
  },
  supportedUrls: await model.supportedUrls
  });
- const toolsAndToolChoice = {
- ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
- };
+ const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
+ const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+ tools,
+ toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
+ activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _c : activeTools
+ });
  const {
  result: { stream: stream2, response, request },
  doStreamSpan,
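streamText gains per-step preparation: prepareStep, which generateText previously exposed as experimental_prepareStep, is invoked before each step with { model, steps, stepNumber } and may override the model, toolChoice, or activeTools for that step. A minimal sketch with an assumed fallback model:

    const { streamText, stepCountIs } = require("ai");

    function startStream(model, fallbackModel, tools) {
      return streamText({
        model,
        tools,
        prompt: "...",
        stopWhen: stepCountIs(3),
        // returning undefined keeps the defaults for that step
        prepareStep: ({ stepNumber }) =>
          stepNumber > 0 ? { model: fallbackModel } : void 0,
      });
    }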
@@ -6920,24 +7016,23 @@ var DefaultStreamTextResult = class {
  telemetry
  }),
  ...baseTelemetryAttributes,
+ // model:
+ "ai.model.provider": stepModel.provider,
+ "ai.model.id": stepModel.modelId,
+ // prompt:
  "ai.prompt.messages": {
- input: () => JSON.stringify(promptMessages)
+ input: () => stringifyForTelemetry(promptMessages)
  },
  "ai.prompt.tools": {
  // convert the language model level tools:
- input: () => {
- var _a17;
- return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
- (tool2) => JSON.stringify(tool2)
- );
- }
+ input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
  },
  "ai.prompt.toolChoice": {
- input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+ input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
  },
  // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
+ "gen_ai.system": stepModel.provider,
+ "gen_ai.request.model": stepModel.modelId,
  "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
  "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
  "gen_ai.request.presence_penalty": callSettings.presencePenalty,
@@ -6954,9 +7049,10 @@ var DefaultStreamTextResult = class {
  startTimestampMs: now2(),
  // get before the call
  doStreamSpan: doStreamSpan2,
- result: await model.doStream({
+ result: await stepModel.doStream({
  ...callSettings,
- ...toolsAndToolChoice,
+ tools: stepTools,
+ toolChoice: stepToolChoice,
  responseFormat: output == null ? void 0 : output.responseFormat,
  prompt: promptMessages,
  providerOptions,
@@ -7009,7 +7105,7 @@ var DefaultStreamTextResult = class {
  streamWithToolResults.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a17, _b, _c, _d;
+ var _a18, _b2, _c2, _d;
  if (chunk.type === "stream-start") {
  warnings = chunk.warnings;
  return;
@@ -7072,9 +7168,9 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
- timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
- modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
+ id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
+ timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
+ modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
  };
  break;
  }
@@ -7163,7 +7259,11 @@ var DefaultStreamTextResult = class {
  const combinedUsage = addLanguageModelUsage(usage, stepUsage);
  await stepFinish.promise;
  if (stepToolCalls.length > 0 && // all current tool calls have results:
- stepToolResults.length === stepToolCalls.length && !await continueUntil({ steps: recordedSteps })) {
+ stepToolResults.length === stepToolCalls.length && // continue until a stop condition is met:
+ !await isStopConditionMet({
+ stopConditions,
+ steps: recordedSteps
+ })) {
  responseMessages.push(
  ...toResponseMessages({
  content: stepContent,
@@ -7332,14 +7432,14 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning = false,
  sendSources = false,
- experimental_sendStart = true,
- experimental_sendFinish = true,
+ sendStart = true,
+ sendFinish = true,
  onError = () => "An error occurred."
  // mask error messages for safety by default
  } = {}) {
  const lastMessage = originalMessages[originalMessages.length - 1];
  const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
- const messageId = isContinuation ? lastMessage.id : newMessageId;
+ const messageId = isContinuation ? lastMessage.id : newMessageId != null ? newMessageId : this.generateId();
  const baseStream = this.fullStream.pipeThrough(
  new TransformStream({
  transform: async (part, controller) => {
@@ -7445,7 +7545,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "start": {
- if (experimental_sendStart) {
+ if (sendStart) {
  const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "start",
@@ -7456,7 +7556,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "finish": {
- if (experimental_sendFinish) {
+ if (sendFinish) {
  const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
  controller.enqueue({
  type: "finish",
@@ -7473,38 +7573,12 @@ var DefaultStreamTextResult = class {
  }
  })
  );
- if (onFinish == null) {
- return baseStream;
- }
- const state = createStreamingUIMessageState({
- lastMessage,
- newMessageId: messageId != null ? messageId : this.generateId()
- });
- const runUpdateMessageJob = async (job) => {
- await job({ state, write: () => {
- } });
- };
- return processUIMessageStream({
+ return handleUIMessageStreamFinish({
  stream: baseStream,
- runUpdateMessageJob
- }).pipeThrough(
- new TransformStream({
- transform(chunk, controller) {
- controller.enqueue(chunk);
- },
- flush() {
- const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
- onFinish({
- isContinuation: isContinuation2,
- responseMessage: state.message,
- messages: [
- ...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
- state.message
- ]
- });
- }
- })
- );
+ newMessageId: messageId,
+ originalMessages,
+ onFinish
+ });
  }
  pipeUIMessageStreamToResponse(response, {
  newMessageId,
@@ -7513,8 +7587,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
- experimental_sendFinish,
- experimental_sendStart,
+ sendFinish,
+ sendStart,
  onError,
  ...init
  } = {}) {
@@ -7527,8 +7601,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
- experimental_sendFinish,
- experimental_sendStart,
+ sendFinish,
+ sendStart,
  onError
  }),
  ...init
@@ -7548,8 +7622,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
- experimental_sendFinish,
- experimental_sendStart,
+ sendFinish,
+ sendStart,
  onError,
  ...init
  } = {}) {
@@ -7561,8 +7635,8 @@ var DefaultStreamTextResult = class {
  messageMetadata,
  sendReasoning,
  sendSources,
- experimental_sendFinish,
- experimental_sendStart,
+ sendFinish,
+ sendStart,
  onError
  }),
  ...init
@@ -8610,6 +8684,7 @@ var DefaultTranscriptionResult = class {
  NoSuchToolError,
  Output,
  RetryError,
+ SerialJobExecutor,
  TextStreamChatTransport,
  ToolCallRepairError,
  ToolExecutionError,
@@ -8635,7 +8710,7 @@ var DefaultTranscriptionResult = class {
  createUIMessageStream,
  createUIMessageStreamResponse,
  customProvider,
- defaultChatStore,
+ defaultChatStoreOptions,
  defaultSettingsMiddleware,
  embed,
  embedMany,
@@ -8656,7 +8731,6 @@ var DefaultTranscriptionResult = class {
  isAssistantMessageWithCompletedToolCalls,
  isDeepEqualData,
  jsonSchema,
- maxSteps,
  modelMessageSchema,
  parsePartialJson,
  pipeTextStreamToResponse,
@@ -8665,6 +8739,7 @@ var DefaultTranscriptionResult = class {
  simulateReadableStream,
  simulateStreamingMiddleware,
  smoothStream,
+ stepCountIs,
  streamObject,
  streamText,
  systemModelMessageSchema,