@mastra/ai-sdk 1.0.0-beta.6 → 1.0.0-beta.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -3,6 +3,9 @@
  var server = require('@mastra/core/server');
  var ai = require('ai');
  var stream = require('@mastra/core/stream');
+ var agent = require('@mastra/core/agent');
+ var di = require('@mastra/core/di');
+ var processors = require('@mastra/core/processors');

  // src/chat-route.ts

@@ -22,6 +25,8 @@ var isMastraTextStreamChunk = (chunk) => {
  "source",
  "tool-input-start",
  "tool-input-delta",
+ "tool-call-approval",
+ "tool-call-suspended",
  "tool-call",
  "tool-result",
  "tool-error",
@@ -32,6 +37,8 @@ var isMastraTextStreamChunk = (chunk) => {
  "finish",
  "abort",
  "tool-input-end",
+ "object",
+ "tripwire",
  "raw"
  ].includes(chunk.type);
  };
@@ -57,6 +64,12 @@ var isWorkflowExecutionDataChunkType = (chunk) => {
  };

  // src/helpers.ts
+ function toAISDKFinishReason(reason) {
+ if (reason === "tripwire" || reason === "retry") {
+ return "other";
+ }
+ return reason;
+ }
  function convertMastraChunkToAISDKv5({
  chunk,
  mode = "stream"
@@ -81,7 +94,7 @@ function convertMastraChunkToAISDKv5({
  case "finish": {
  return {
  type: "finish",
- finishReason: chunk.payload.stepResult.reason,
+ finishReason: toAISDKFinishReason(chunk.payload.stepResult.reason),
  totalUsage: chunk.payload.output.usage
  };
  }
@@ -220,7 +233,7 @@ function convertMastraChunkToAISDKv5({
  ...rest2
  },
  usage: chunk.payload.output.usage,
- finishReason: chunk.payload.stepResult.reason,
+ finishReason: toAISDKFinishReason(chunk.payload.stepResult.reason),
  providerMetadata
  };
  }
@@ -281,7 +294,10 @@ function convertMastraChunkToAISDKv5({
  return {
  type: "data-tripwire",
  data: {
- tripwireReason: chunk.payload.tripwireReason
+ reason: chunk.payload.reason,
+ retry: chunk.payload.retry,
+ metadata: chunk.payload.metadata,
+ processorId: chunk.payload.processorId
  }
  };
  default:
@@ -1134,6 +1150,26 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  };
  }
  default: {
+ if (isAgentExecutionDataChunkType(payload)) {
+ if (!("data" in payload.payload)) {
+ throw new Error(
+ `UI Messages require a data property when using data- prefixed chunks
+ ${JSON.stringify(payload)}`
+ );
+ }
+ const { type, data } = payload.payload;
+ return { type, data };
+ }
+ if (isWorkflowExecutionDataChunkType(payload)) {
+ if (!("data" in payload.payload)) {
+ throw new Error(
+ `UI Messages require a data property when using data- prefixed chunks
+ ${JSON.stringify(payload)}`
+ );
+ }
+ const { type, data } = payload.payload;
+ return { type, data };
+ }
  if (payload.type.startsWith("agent-execution-event-")) {
  const stepId = payload.payload.runId;
  const current = bufferedNetworks.get(payload.runId);
@@ -1148,6 +1184,15 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  const { request, response, ...data } = result.data;
  step.task = data;
  }
+ bufferedNetworks.set(payload.runId, current);
+ return {
+ type: isNested ? "data-tool-network" : "data-network",
+ id: payload.runId,
+ data: {
+ ...current,
+ status: "running"
+ }
+ };
  }
  if (payload.type.startsWith("workflow-execution-event-")) {
  const stepId = payload.payload.runId;
@@ -1166,6 +1211,15 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  step.task.id = data.name;
  }
  }
+ bufferedNetworks.set(payload.runId, current);
+ return {
+ type: isNested ? "data-tool-network" : "data-network",
+ id: payload.runId,
+ data: {
+ ...current,
+ status: "running"
+ }
+ };
  }
  if (isDataChunkType(payload)) {
  if (!("data" in payload)) {
@@ -1177,26 +1231,6 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  const { type, data } = payload;
  return { type, data };
  }
- if (isAgentExecutionDataChunkType(payload)) {
- if (!("data" in payload.payload)) {
- throw new Error(
- `UI Messages require a data property when using data- prefixed chunks
- ${JSON.stringify(payload)}`
- );
- }
- const { type, data } = payload.payload;
- return { type, data };
- }
- if (isWorkflowExecutionDataChunkType(payload)) {
- if (!("data" in payload.payload)) {
- throw new Error(
- `UI Messages require a data property when using data- prefixed chunks
- ${JSON.stringify(payload)}`
- );
- }
- const { type, data } = payload.payload;
- return { type, data };
- }
  return null;
  }
  }
@@ -1653,6 +1687,455 @@ function networkRoute({
  }
  });
  }
+ function withMastra(model, options = {}) {
+ const { memory, inputProcessors = [], outputProcessors = [] } = options;
+ const allInputProcessors = [...inputProcessors];
+ const allOutputProcessors = [...outputProcessors];
+ if (memory) {
+ const { storage, lastMessages, semanticRecall, workingMemory } = memory;
+ const isWorkingMemoryEnabled = typeof workingMemory === "object" && workingMemory.enabled !== false;
+ if (isWorkingMemoryEnabled && typeof workingMemory === "object") {
+ let template;
+ if (workingMemory.template) {
+ template = {
+ format: "markdown",
+ content: workingMemory.template
+ };
+ }
+ const workingMemoryProcessor = new processors.WorkingMemory({
+ storage,
+ template,
+ scope: workingMemory.scope,
+ useVNext: "version" in workingMemory && workingMemory.version === "vnext"
+ });
+ allInputProcessors.push(workingMemoryProcessor);
+ }
+ if (lastMessages !== false && lastMessages !== void 0) {
+ const messageHistory = new processors.MessageHistory({
+ storage,
+ lastMessages: typeof lastMessages === "number" ? lastMessages : void 0
+ });
+ allInputProcessors.push(messageHistory);
+ allOutputProcessors.push(messageHistory);
+ }
+ if (semanticRecall) {
+ const { vector, embedder, indexName, ...semanticConfig } = semanticRecall;
+ const semanticRecallProcessor = new processors.SemanticRecall({
+ storage,
+ vector,
+ embedder,
+ indexName: indexName || "memory_messages",
+ ...semanticConfig
+ });
+ allInputProcessors.push(semanticRecallProcessor);
+ allOutputProcessors.push(semanticRecallProcessor);
+ }
+ }
+ return ai.wrapLanguageModel({
+ model,
+ middleware: createProcessorMiddleware({
+ inputProcessors: allInputProcessors,
+ outputProcessors: allOutputProcessors,
+ memory: memory ? {
+ threadId: memory.threadId,
+ resourceId: memory.resourceId
+ } : void 0
+ })
+ });
+ }
+ function createProcessorMiddleware(options) {
+ const { inputProcessors = [], outputProcessors = [], memory } = options;
+ const requestContext = new di.RequestContext();
+ if (memory) {
+ requestContext.set("MastraMemory", {
+ thread: memory.threadId ? { id: memory.threadId } : void 0,
+ resourceId: memory.resourceId,
+ memoryConfig: memory.config
+ });
+ }
+ return {
+ middlewareVersion: "v2",
+ /**
+ * Transform params runs input processors (processInput)
+ */
+ async transformParams({ params }) {
+ const messageList = new agent.MessageList({
+ threadId: memory?.threadId,
+ resourceId: memory?.resourceId
+ });
+ for (const msg of params.prompt) {
+ if (msg.role === "system") {
+ messageList.addSystem(msg.content);
+ } else {
+ messageList.add(msg, "input");
+ }
+ }
+ for (const processor of inputProcessors) {
+ if (processor.processInput) {
+ try {
+ await processor.processInput({
+ messages: messageList.get.input.db(),
+ systemMessages: messageList.getAllSystemMessages(),
+ messageList,
+ requestContext,
+ abort: (reason) => {
+ throw new agent.TripWire(reason || "Aborted by processor");
+ }
+ });
+ } catch (error) {
+ if (error instanceof agent.TripWire) {
+ return {
+ ...params,
+ providerOptions: {
+ ...params.providerOptions,
+ mastraProcessors: {
+ tripwire: true,
+ reason: error.message
+ }
+ }
+ };
+ }
+ throw error;
+ }
+ }
+ }
+ const newPrompt = messageList.get.all.aiV5.prompt().map(agent.MessageList.aiV5ModelMessageToV2PromptMessage);
+ return {
+ ...params,
+ prompt: newPrompt
+ };
+ },
+ /**
+ * Wrap generate for non-streaming output processing
+ */
+ async wrapGenerate({ doGenerate, params }) {
+ const processorState = params.providerOptions?.mastraProcessors;
+ if (processorState?.tripwire) {
+ const reason = processorState.reason || "Blocked by processor";
+ return {
+ content: [{ type: "text", text: reason }],
+ finishReason: "stop",
+ usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+ warnings: [{ type: "other", message: `Tripwire: ${reason}` }]
+ };
+ }
+ const result = await doGenerate();
+ if (!outputProcessors.length) return result;
+ const messageList = new agent.MessageList({
+ threadId: memory?.threadId,
+ resourceId: memory?.resourceId
+ });
+ for (const msg of params.prompt) {
+ if (msg.role === "system") {
+ messageList.addSystem(msg.content);
+ } else {
+ messageList.add(msg, "input");
+ }
+ }
+ const textContent = result.content.filter((c) => c.type === "text").map((c) => c.text).join("");
+ const responseMessage = {
+ id: crypto.randomUUID(),
+ role: "assistant",
+ content: {
+ format: 2,
+ parts: [{ type: "text", text: textContent }]
+ },
+ createdAt: /* @__PURE__ */ new Date(),
+ ...memory?.threadId && { threadId: memory.threadId },
+ ...memory?.resourceId && { resourceId: memory.resourceId }
+ };
+ messageList.add(responseMessage, "response");
+ for (const processor of outputProcessors) {
+ if (processor.processOutputResult) {
+ try {
+ await processor.processOutputResult({
+ messages: messageList.get.all.db(),
+ messageList,
+ requestContext,
+ abort: (reason) => {
+ throw new agent.TripWire(reason || "Aborted by processor");
+ }
+ });
+ } catch (error) {
+ if (error instanceof agent.TripWire) {
+ return {
+ content: [{ type: "text", text: error.message }],
+ finishReason: "stop",
+ usage: result.usage,
+ warnings: [{ type: "other", message: `Output blocked: ${error.message}` }]
+ };
+ }
+ throw error;
+ }
+ }
+ }
+ const processedText = messageList.get.response.db().map((m) => extractTextFromMastraMessage(m)).join("");
+ return {
+ ...result,
+ content: [{ type: "text", text: processedText }]
+ };
+ },
+ /**
+ * Wrap stream for streaming output processing
+ */
+ async wrapStream({ doStream, params }) {
+ const processorState = params.providerOptions?.mastraProcessors;
+ if (processorState?.tripwire) {
+ const reason = processorState.reason || "Blocked by processor";
+ return {
+ stream: createBlockedStream(reason)
+ };
+ }
+ const { stream: stream$1, ...rest } = await doStream();
+ if (!outputProcessors.length) return { stream: stream$1, ...rest };
+ const processorStates = /* @__PURE__ */ new Map();
+ const runId = crypto.randomUUID();
+ const transformedStream = stream$1.pipeThrough(
+ new TransformStream({
+ async transform(chunk, controller) {
+ let mastraChunk = stream.convertFullStreamChunkToMastra(
+ chunk,
+ { runId }
+ );
+ if (!mastraChunk) {
+ controller.enqueue(chunk);
+ return;
+ }
+ for (const processor of outputProcessors) {
+ if (processor.processOutputStream && mastraChunk) {
+ let state = processorStates.get(processor.id);
+ if (!state) {
+ state = { streamParts: [], customState: {} };
+ processorStates.set(processor.id, state);
+ }
+ state.streamParts.push(mastraChunk);
+ try {
+ const result = await processor.processOutputStream({
+ part: mastraChunk,
+ streamParts: state.streamParts,
+ state: state.customState,
+ requestContext,
+ abort: (reason) => {
+ throw new agent.TripWire(reason || "Aborted by processor");
+ }
+ });
+ if (result === null || result === void 0) {
+ mastraChunk = void 0;
+ } else {
+ mastraChunk = result;
+ }
+ } catch (error) {
+ if (error instanceof agent.TripWire) {
+ controller.enqueue({
+ type: "error",
+ error: new Error(error.message)
+ });
+ controller.terminate();
+ return;
+ }
+ throw error;
+ }
+ }
+ }
+ if (mastraChunk) {
+ const aiChunk = convertMastraChunkToAISDKStreamPart(mastraChunk);
+ if (aiChunk) {
+ controller.enqueue(aiChunk);
+ }
+ }
+ }
+ })
+ );
+ return { stream: transformedStream, ...rest };
+ }
+ };
+ }
+ function createBlockedStream(reason) {
+ return new ReadableStream({
+ start(controller) {
+ const id = crypto.randomUUID();
+ controller.enqueue({
+ type: "text-start",
+ id
+ });
+ controller.enqueue({
+ type: "text-delta",
+ id,
+ delta: reason
+ });
+ controller.enqueue({
+ type: "text-end",
+ id
+ });
+ controller.enqueue({
+ type: "finish",
+ finishReason: "stop",
+ usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
+ });
+ controller.close();
+ }
+ });
+ }
+ function extractTextFromMastraMessage(msg) {
+ const content = msg.content;
+ if (typeof content === "string") {
+ return content;
+ }
+ if (content?.parts) {
+ return content.parts.filter((p) => p.type === "text" && "text" in p).map((p) => p.text).join("");
+ }
+ return "";
+ }
+ function convertMastraChunkToAISDKStreamPart(chunk) {
+ switch (chunk.type) {
+ // Text streaming
+ case "text-start":
+ return {
+ type: "text-start",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "text-delta":
+ return {
+ type: "text-delta",
+ id: chunk.payload.id || crypto.randomUUID(),
+ delta: chunk.payload.text,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "text-end":
+ return {
+ type: "text-end",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Reasoning streaming
+ case "reasoning-start":
+ return {
+ type: "reasoning-start",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "reasoning-delta":
+ return {
+ type: "reasoning-delta",
+ id: chunk.payload.id || crypto.randomUUID(),
+ delta: chunk.payload.text,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "reasoning-end":
+ return {
+ type: "reasoning-end",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Tool call (complete)
+ case "tool-call":
+ return {
+ type: "tool-call",
+ toolCallId: chunk.payload.toolCallId,
+ toolName: chunk.payload.toolName,
+ input: JSON.stringify(chunk.payload.args),
+ providerExecuted: chunk.payload.providerExecuted,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Tool call input streaming
+ case "tool-call-input-streaming-start":
+ return {
+ type: "tool-input-start",
+ id: chunk.payload.toolCallId,
+ toolName: chunk.payload.toolName,
+ providerExecuted: chunk.payload.providerExecuted,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "tool-call-delta":
+ return {
+ type: "tool-input-delta",
+ id: chunk.payload.toolCallId,
+ delta: chunk.payload.argsTextDelta,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "tool-call-input-streaming-end":
+ return {
+ type: "tool-input-end",
+ id: chunk.payload.toolCallId,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Tool result
+ case "tool-result":
+ return {
+ type: "tool-result",
+ toolCallId: chunk.payload.toolCallId,
+ toolName: chunk.payload.toolName,
+ result: { type: "json", value: chunk.payload.result },
+ isError: chunk.payload.isError,
+ providerExecuted: chunk.payload.providerExecuted,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Source (citations)
+ case "source":
+ if (chunk.payload.sourceType === "url") {
+ return {
+ type: "source",
+ sourceType: "url",
+ id: chunk.payload.id,
+ url: chunk.payload.url,
+ title: chunk.payload.title,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ } else {
+ return {
+ type: "source",
+ sourceType: "document",
+ id: chunk.payload.id,
+ mediaType: chunk.payload.mimeType,
+ title: chunk.payload.title,
+ filename: chunk.payload.filename,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ }
+ // File output
+ case "file":
+ return {
+ type: "file",
+ data: chunk.payload.data || chunk.payload.base64,
+ mediaType: chunk.payload.mimeType
+ };
+ // Response metadata
+ case "response-metadata":
+ return {
+ type: "response-metadata",
+ ...chunk.payload
+ };
+ // Raw provider data
+ case "raw":
+ return {
+ type: "raw",
+ rawValue: chunk.payload
+ };
+ // Finish
+ case "finish": {
+ const usage = chunk.payload.output?.usage;
+ return {
+ type: "finish",
+ finishReason: toAISDKFinishReason(chunk.payload.stepResult?.reason || "stop"),
+ usage: usage ? {
+ inputTokens: usage.inputTokens || 0,
+ outputTokens: usage.outputTokens || 0,
+ totalTokens: usage.totalTokens || 0
+ } : { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+ providerMetadata: chunk.payload.metadata?.providerMetadata
+ };
+ }
+ // Error
+ case "error":
+ return {
+ type: "error",
+ error: chunk.payload.error || chunk.payload
+ };
+ default:
+ return null;
+ }
+ }

  // src/to-ai-sdk-format.ts
  function toAISdkFormat() {
@@ -1668,6 +2151,7 @@ exports.handleWorkflowStream = handleWorkflowStream;
  exports.networkRoute = networkRoute;
  exports.toAISdkFormat = toAISdkFormat;
  exports.toAISdkStream = toAISdkV5Stream;
+ exports.withMastra = withMastra;
  exports.workflowRoute = workflowRoute;
  //# sourceMappingURL=index.cjs.map
  //# sourceMappingURL=index.cjs.map
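
Usage sketch for the new withMastra export. This is a minimal illustration, not taken from the package: it assumes an AI SDK v5 model from @ai-sdk/openai and a LibSQL storage adapter from @mastra/libsql, and the thread/resource ids, model choice, and storage URL are hypothetical. The memory option names mirror the fields destructured inside withMastra in the diff above.

// Hypothetical consumer code (TypeScript), not part of the published diff.
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";
import { withMastra } from "@mastra/ai-sdk";
// Assumed storage adapter; any Mastra storage implementation should fit here.
import { LibSQLStore } from "@mastra/libsql";

const storage = new LibSQLStore({ url: "file:memory.db" });

// withMastra wraps the model via ai.wrapLanguageModel with processor middleware;
// lastMessages enables the MessageHistory processor, workingMemory the WorkingMemory processor.
const model = withMastra(openai("gpt-4o-mini"), {
  memory: {
    storage,
    threadId: "thread-123",
    resourceId: "user-456",
    lastMessages: 10,
    workingMemory: { enabled: true, scope: "thread" },
  },
});

async function main() {
  const result = streamText({ model, prompt: "Hello!" });
  for await (const text of result.textStream) {
    process.stdout.write(text);
  }
}

main();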