@mastra/ai-sdk 1.0.0-beta.6 → 1.0.0-beta.7

package/CHANGELOG.md CHANGED
@@ -1,5 +1,43 @@
  # @mastra/ai-sdk

+ ## 1.0.0-beta.7
+
+ ### Patch Changes
+
+ - Return a `NetworkDataPart` on each `agent-execution-event` and `workflow-execution-event` in network streams ([#10982](https://github.com/mastra-ai/mastra/pull/10982))
+
+ - Fixed `tool-call-suspended` chunks being dropped from `workflow-step-output` when using the AI SDK. Previously, when an agent inside a workflow step called a tool whose execution was suspended, the `tool-call-suspended` chunk never reached the frontend even though `tool-input-available` chunks were received correctly. ([#10987](https://github.com/mastra-ai/mastra/pull/10987))
+
+   The issue occurred because `tool-call-suspended` was not included in the `isMastraTextStreamChunk` list, so it was filtered out in `transformWorkflow`. Now `tool-call-suspended`, `tool-call-approval`, `object`, and `tripwire` chunks are included in the text stream chunk list and are transformed and passed through correctly.
+
+   Fixes #10978
+
+ - Adds `withMastra()` for wrapping AI SDK models with Mastra processors and memory. ([#10911](https://github.com/mastra-ai/mastra/pull/10911))
+
+   ```typescript
+   import { openai } from '@ai-sdk/openai';
+   import { generateText } from 'ai';
+   import { withMastra } from '@mastra/ai-sdk';
+
+   const model = withMastra(openai('gpt-4o'), {
+     inputProcessors: [myGuardProcessor],
+     outputProcessors: [myLoggingProcessor],
+     memory: {
+       storage,
+       threadId: 'thread-123',
+       resourceId: 'user-123',
+       lastMessages: 10,
+     },
+   });
+
+   const { text } = await generateText({ model, prompt: 'Hello!' });
+   ```
+
+   Works with `generateText`, `streamText`, `generateObject`, and `streamObject`.
+
+ - Updated dependencies [[`72df8ae`](https://github.com/mastra-ai/mastra/commit/72df8ae595584cdd7747d5c39ffaca45e4507227), [`9198899`](https://github.com/mastra-ai/mastra/commit/91988995c427b185c33714b7f3be955367911324), [`653e65a`](https://github.com/mastra-ai/mastra/commit/653e65ae1f9502c2958a32f47a5a2df11e612a92), [`c6fd6fe`](https://github.com/mastra-ai/mastra/commit/c6fd6fedd09e9cf8004b03a80925f5e94826ad7e), [`0bed332`](https://github.com/mastra-ai/mastra/commit/0bed332843f627202c6520eaf671771313cd20f3)]:
+   - @mastra/core@1.0.0-beta.9
+
  ## 1.0.0-beta.6

  ### Patch Changes
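
The changelog states that `withMastra()` also works with `streamText`. A minimal streaming sketch under the same assumptions as the `generateText` example above (`myGuardProcessor`, `myLoggingProcessor`, and `storage` are the same placeholders):

```typescript
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
import { withMastra } from '@mastra/ai-sdk';

// `myGuardProcessor`, `myLoggingProcessor`, and `storage` are the
// placeholders from the changelog example above.
const model = withMastra(openai('gpt-4o'), {
  inputProcessors: [myGuardProcessor],
  outputProcessors: [myLoggingProcessor],
  memory: {
    storage,
    threadId: 'thread-123',
    resourceId: 'user-123',
    lastMessages: 10,
  },
});

// streamText returns synchronously; chunks flow through the wrapped
// model's middleware, so output processors run on each chunk.
const result = streamText({ model, prompt: 'Hello!' });
for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```

Per the `wrapStream` middleware visible in the dist diff below, output processors see every stream chunk, and a tripwire raised by an input processor replaces the stream with a short blocked-reason stream.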
package/dist/index.cjs CHANGED
@@ -3,6 +3,9 @@
  var server = require('@mastra/core/server');
  var ai = require('ai');
  var stream = require('@mastra/core/stream');
+ var agent = require('@mastra/core/agent');
+ var di = require('@mastra/core/di');
+ var processors = require('@mastra/core/processors');

  // src/chat-route.ts

@@ -22,6 +25,8 @@ var isMastraTextStreamChunk = (chunk) => {
      "source",
      "tool-input-start",
      "tool-input-delta",
+     "tool-call-approval",
+     "tool-call-suspended",
      "tool-call",
      "tool-result",
      "tool-error",
@@ -32,6 +37,8 @@ var isMastraTextStreamChunk = (chunk) => {
      "finish",
      "abort",
      "tool-input-end",
+     "object",
+     "tripwire",
      "raw"
    ].includes(chunk.type);
  };
@@ -1134,6 +1141,26 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
        };
      }
      default: {
+       if (isAgentExecutionDataChunkType(payload)) {
+         if (!("data" in payload.payload)) {
+           throw new Error(
+             `UI Messages require a data property when using data- prefixed chunks
+             ${JSON.stringify(payload)}`
+           );
+         }
+         const { type, data } = payload.payload;
+         return { type, data };
+       }
+       if (isWorkflowExecutionDataChunkType(payload)) {
+         if (!("data" in payload.payload)) {
+           throw new Error(
+             `UI Messages require a data property when using data- prefixed chunks
+             ${JSON.stringify(payload)}`
+           );
+         }
+         const { type, data } = payload.payload;
+         return { type, data };
+       }
        if (payload.type.startsWith("agent-execution-event-")) {
          const stepId = payload.payload.runId;
          const current = bufferedNetworks.get(payload.runId);
@@ -1148,6 +1175,15 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
            const { request, response, ...data } = result.data;
            step.task = data;
          }
+         bufferedNetworks.set(payload.runId, current);
+         return {
+           type: isNested ? "data-tool-network" : "data-network",
+           id: payload.runId,
+           data: {
+             ...current,
+             status: "running"
+           }
+         };
        }
        if (payload.type.startsWith("workflow-execution-event-")) {
          const stepId = payload.payload.runId;
@@ -1166,6 +1202,15 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
              step.task.id = data.name;
            }
          }
+         bufferedNetworks.set(payload.runId, current);
+         return {
+           type: isNested ? "data-tool-network" : "data-network",
+           id: payload.runId,
+           data: {
+             ...current,
+             status: "running"
+           }
+         };
        }
        if (isDataChunkType(payload)) {
          if (!("data" in payload)) {
@@ -1177,26 +1222,6 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
          const { type, data } = payload;
          return { type, data };
        }
-       if (isAgentExecutionDataChunkType(payload)) {
-         if (!("data" in payload.payload)) {
-           throw new Error(
-             `UI Messages require a data property when using data- prefixed chunks
-             ${JSON.stringify(payload)}`
-           );
-         }
-         const { type, data } = payload.payload;
-         return { type, data };
-       }
-       if (isWorkflowExecutionDataChunkType(payload)) {
-         if (!("data" in payload.payload)) {
-           throw new Error(
-             `UI Messages require a data property when using data- prefixed chunks
-             ${JSON.stringify(payload)}`
-           );
-         }
-         const { type, data } = payload.payload;
-         return { type, data };
-       }
        return null;
      }
    }
@@ -1653,6 +1678,455 @@ function networkRoute({
      }
    });
  }
+ function withMastra(model, options = {}) {
+   const { memory, inputProcessors = [], outputProcessors = [] } = options;
+   const allInputProcessors = [...inputProcessors];
+   const allOutputProcessors = [...outputProcessors];
+   if (memory) {
+     const { storage, lastMessages, semanticRecall, workingMemory } = memory;
+     const isWorkingMemoryEnabled = typeof workingMemory === "object" && workingMemory.enabled !== false;
+     if (isWorkingMemoryEnabled && typeof workingMemory === "object") {
+       let template;
+       if (workingMemory.template) {
+         template = {
+           format: "markdown",
+           content: workingMemory.template
+         };
+       }
+       const workingMemoryProcessor = new processors.WorkingMemory({
+         storage,
+         template,
+         scope: workingMemory.scope,
+         useVNext: "version" in workingMemory && workingMemory.version === "vnext"
+       });
+       allInputProcessors.push(workingMemoryProcessor);
+     }
+     if (lastMessages !== false && lastMessages !== void 0) {
+       const messageHistory = new processors.MessageHistory({
+         storage,
+         lastMessages: typeof lastMessages === "number" ? lastMessages : void 0
+       });
+       allInputProcessors.push(messageHistory);
+       allOutputProcessors.push(messageHistory);
+     }
+     if (semanticRecall) {
+       const { vector, embedder, indexName, ...semanticConfig } = semanticRecall;
+       const semanticRecallProcessor = new processors.SemanticRecall({
+         storage,
+         vector,
+         embedder,
+         indexName: indexName || "memory_messages",
+         ...semanticConfig
+       });
+       allInputProcessors.push(semanticRecallProcessor);
+       allOutputProcessors.push(semanticRecallProcessor);
+     }
+   }
+   return ai.wrapLanguageModel({
+     model,
+     middleware: createProcessorMiddleware({
+       inputProcessors: allInputProcessors,
+       outputProcessors: allOutputProcessors,
+       memory: memory ? {
+         threadId: memory.threadId,
+         resourceId: memory.resourceId
+       } : void 0
+     })
+   });
+ }
+ function createProcessorMiddleware(options) {
+   const { inputProcessors = [], outputProcessors = [], memory } = options;
+   const requestContext = new di.RequestContext();
+   if (memory) {
+     requestContext.set("MastraMemory", {
+       thread: memory.threadId ? { id: memory.threadId } : void 0,
+       resourceId: memory.resourceId,
+       memoryConfig: memory.config
+     });
+   }
+   return {
+     middlewareVersion: "v2",
+     /**
+      * Transform params runs input processors (processInput)
+      */
+     async transformParams({ params }) {
+       const messageList = new agent.MessageList({
+         threadId: memory?.threadId,
+         resourceId: memory?.resourceId
+       });
+       for (const msg of params.prompt) {
+         if (msg.role === "system") {
+           messageList.addSystem(msg.content);
+         } else {
+           messageList.add(msg, "input");
+         }
+       }
+       for (const processor of inputProcessors) {
+         if (processor.processInput) {
+           try {
+             await processor.processInput({
+               messages: messageList.get.input.db(),
+               systemMessages: messageList.getAllSystemMessages(),
+               messageList,
+               requestContext,
+               abort: (reason) => {
+                 throw new agent.TripWire(reason || "Aborted by processor");
+               }
+             });
+           } catch (error) {
+             if (error instanceof agent.TripWire) {
+               return {
+                 ...params,
+                 providerOptions: {
+                   ...params.providerOptions,
+                   mastraProcessors: {
+                     tripwire: true,
+                     reason: error.message
+                   }
+                 }
+               };
+             }
+             throw error;
+           }
+         }
+       }
+       const newPrompt = messageList.get.all.aiV5.prompt().map(agent.MessageList.aiV5ModelMessageToV2PromptMessage);
+       return {
+         ...params,
+         prompt: newPrompt
+       };
+     },
+     /**
+      * Wrap generate for non-streaming output processing
+      */
+     async wrapGenerate({ doGenerate, params }) {
+       const processorState = params.providerOptions?.mastraProcessors;
+       if (processorState?.tripwire) {
+         const reason = processorState.reason || "Blocked by processor";
+         return {
+           content: [{ type: "text", text: reason }],
+           finishReason: "stop",
+           usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+           warnings: [{ type: "other", message: `Tripwire: ${reason}` }]
+         };
+       }
+       const result = await doGenerate();
+       if (!outputProcessors.length) return result;
+       const messageList = new agent.MessageList({
+         threadId: memory?.threadId,
+         resourceId: memory?.resourceId
+       });
+       for (const msg of params.prompt) {
+         if (msg.role === "system") {
+           messageList.addSystem(msg.content);
+         } else {
+           messageList.add(msg, "input");
+         }
+       }
+       const textContent = result.content.filter((c) => c.type === "text").map((c) => c.text).join("");
+       const responseMessage = {
+         id: crypto.randomUUID(),
+         role: "assistant",
+         content: {
+           format: 2,
+           parts: [{ type: "text", text: textContent }]
+         },
+         createdAt: /* @__PURE__ */ new Date(),
+         ...memory?.threadId && { threadId: memory.threadId },
+         ...memory?.resourceId && { resourceId: memory.resourceId }
+       };
+       messageList.add(responseMessage, "response");
+       for (const processor of outputProcessors) {
+         if (processor.processOutputResult) {
+           try {
+             await processor.processOutputResult({
+               messages: messageList.get.all.db(),
+               messageList,
+               requestContext,
+               abort: (reason) => {
+                 throw new agent.TripWire(reason || "Aborted by processor");
+               }
+             });
+           } catch (error) {
+             if (error instanceof agent.TripWire) {
+               return {
+                 content: [{ type: "text", text: error.message }],
+                 finishReason: "stop",
+                 usage: result.usage,
+                 warnings: [{ type: "other", message: `Output blocked: ${error.message}` }]
+               };
+             }
+             throw error;
+           }
+         }
+       }
+       const processedText = messageList.get.response.db().map((m) => extractTextFromMastraMessage(m)).join("");
+       return {
+         ...result,
+         content: [{ type: "text", text: processedText }]
+       };
+     },
+     /**
+      * Wrap stream for streaming output processing
+      */
+     async wrapStream({ doStream, params }) {
+       const processorState = params.providerOptions?.mastraProcessors;
+       if (processorState?.tripwire) {
+         const reason = processorState.reason || "Blocked by processor";
+         return {
+           stream: createBlockedStream(reason)
+         };
+       }
+       const { stream: stream$1, ...rest } = await doStream();
+       if (!outputProcessors.length) return { stream: stream$1, ...rest };
+       const processorStates = /* @__PURE__ */ new Map();
+       const runId = crypto.randomUUID();
+       const transformedStream = stream$1.pipeThrough(
+         new TransformStream({
+           async transform(chunk, controller) {
+             let mastraChunk = stream.convertFullStreamChunkToMastra(
+               chunk,
+               { runId }
+             );
+             if (!mastraChunk) {
+               controller.enqueue(chunk);
+               return;
+             }
+             for (const processor of outputProcessors) {
+               if (processor.processOutputStream && mastraChunk) {
+                 let state = processorStates.get(processor.id);
+                 if (!state) {
+                   state = { streamParts: [], customState: {} };
+                   processorStates.set(processor.id, state);
+                 }
+                 state.streamParts.push(mastraChunk);
+                 try {
+                   const result = await processor.processOutputStream({
+                     part: mastraChunk,
+                     streamParts: state.streamParts,
+                     state: state.customState,
+                     requestContext,
+                     abort: (reason) => {
+                       throw new agent.TripWire(reason || "Aborted by processor");
+                     }
+                   });
+                   if (result === null || result === void 0) {
+                     mastraChunk = void 0;
+                   } else {
+                     mastraChunk = result;
+                   }
+                 } catch (error) {
+                   if (error instanceof agent.TripWire) {
+                     controller.enqueue({
+                       type: "error",
+                       error: new Error(error.message)
+                     });
+                     controller.terminate();
+                     return;
+                   }
+                   throw error;
+                 }
+               }
+             }
+             if (mastraChunk) {
+               const aiChunk = convertMastraChunkToAISDKStreamPart(mastraChunk);
+               if (aiChunk) {
+                 controller.enqueue(aiChunk);
+               }
+             }
+           }
+         })
+       );
+       return { stream: transformedStream, ...rest };
+     }
+   };
+ }
+ function createBlockedStream(reason) {
+   return new ReadableStream({
+     start(controller) {
+       const id = crypto.randomUUID();
+       controller.enqueue({
+         type: "text-start",
+         id
+       });
+       controller.enqueue({
+         type: "text-delta",
+         id,
+         delta: reason
+       });
+       controller.enqueue({
+         type: "text-end",
+         id
+       });
+       controller.enqueue({
+         type: "finish",
+         finishReason: "stop",
+         usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
+       });
+       controller.close();
+     }
+   });
+ }
+ function extractTextFromMastraMessage(msg) {
+   const content = msg.content;
+   if (typeof content === "string") {
+     return content;
+   }
+   if (content?.parts) {
+     return content.parts.filter((p) => p.type === "text" && "text" in p).map((p) => p.text).join("");
+   }
+   return "";
+ }
+ function convertMastraChunkToAISDKStreamPart(chunk) {
+   switch (chunk.type) {
+     // Text streaming
+     case "text-start":
+       return {
+         type: "text-start",
+         id: chunk.payload.id || crypto.randomUUID(),
+         providerMetadata: chunk.payload.providerMetadata
+       };
+     case "text-delta":
+       return {
+         type: "text-delta",
+         id: chunk.payload.id || crypto.randomUUID(),
+         delta: chunk.payload.text,
+         providerMetadata: chunk.payload.providerMetadata
+       };
+     case "text-end":
+       return {
+         type: "text-end",
+         id: chunk.payload.id || crypto.randomUUID(),
+         providerMetadata: chunk.payload.providerMetadata
+       };
+     // Reasoning streaming
+     case "reasoning-start":
+       return {
+         type: "reasoning-start",
+         id: chunk.payload.id || crypto.randomUUID(),
+         providerMetadata: chunk.payload.providerMetadata
+       };
+     case "reasoning-delta":
+       return {
+         type: "reasoning-delta",
+         id: chunk.payload.id || crypto.randomUUID(),
+         delta: chunk.payload.text,
+         providerMetadata: chunk.payload.providerMetadata
+       };
+     case "reasoning-end":
+       return {
+         type: "reasoning-end",
+         id: chunk.payload.id || crypto.randomUUID(),
+         providerMetadata: chunk.payload.providerMetadata
+       };
+     // Tool call (complete)
+     case "tool-call":
+       return {
+         type: "tool-call",
+         toolCallId: chunk.payload.toolCallId,
+         toolName: chunk.payload.toolName,
+         input: JSON.stringify(chunk.payload.args),
+         providerExecuted: chunk.payload.providerExecuted,
+         providerMetadata: chunk.payload.providerMetadata
+       };
+     // Tool call input streaming
+     case "tool-call-input-streaming-start":
+       return {
+         type: "tool-input-start",
+         id: chunk.payload.toolCallId,
+         toolName: chunk.payload.toolName,
+         providerExecuted: chunk.payload.providerExecuted,
+         providerMetadata: chunk.payload.providerMetadata
+       };
+     case "tool-call-delta":
+       return {
+         type: "tool-input-delta",
+         id: chunk.payload.toolCallId,
+         delta: chunk.payload.argsTextDelta,
+         providerMetadata: chunk.payload.providerMetadata
+       };
+     case "tool-call-input-streaming-end":
+       return {
+         type: "tool-input-end",
+         id: chunk.payload.toolCallId,
+         providerMetadata: chunk.payload.providerMetadata
+       };
+     // Tool result
+     case "tool-result":
+       return {
+         type: "tool-result",
+         toolCallId: chunk.payload.toolCallId,
+         toolName: chunk.payload.toolName,
+         result: { type: "json", value: chunk.payload.result },
+         isError: chunk.payload.isError,
+         providerExecuted: chunk.payload.providerExecuted,
+         providerMetadata: chunk.payload.providerMetadata
+       };
+     // Source (citations)
+     case "source":
+       if (chunk.payload.sourceType === "url") {
+         return {
+           type: "source",
+           sourceType: "url",
+           id: chunk.payload.id,
+           url: chunk.payload.url,
+           title: chunk.payload.title,
+           providerMetadata: chunk.payload.providerMetadata
+         };
+       } else {
+         return {
+           type: "source",
+           sourceType: "document",
+           id: chunk.payload.id,
+           mediaType: chunk.payload.mimeType,
+           title: chunk.payload.title,
+           filename: chunk.payload.filename,
+           providerMetadata: chunk.payload.providerMetadata
+         };
+       }
+     // File output
+     case "file":
+       return {
+         type: "file",
+         data: chunk.payload.data || chunk.payload.base64,
+         mediaType: chunk.payload.mimeType
+       };
+     // Response metadata
+     case "response-metadata":
+       return {
+         type: "response-metadata",
+         ...chunk.payload
+       };
+     // Raw provider data
+     case "raw":
+       return {
+         type: "raw",
+         rawValue: chunk.payload
+       };
+     // Finish
+     case "finish": {
+       const usage = chunk.payload.output?.usage;
+       return {
+         type: "finish",
+         finishReason: chunk.payload.stepResult?.reason || "stop",
+         usage: usage ? {
+           inputTokens: usage.inputTokens || 0,
+           outputTokens: usage.outputTokens || 0,
+           totalTokens: usage.totalTokens || 0
+         } : { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+         providerMetadata: chunk.payload.metadata?.providerMetadata
+       };
+     }
+     // Error
+     case "error":
+       return {
+         type: "error",
+         error: chunk.payload.error || chunk.payload
+       };
+     default:
+       return null;
+   }
+ }

  // src/to-ai-sdk-format.ts
  function toAISdkFormat() {
@@ -1668,6 +2142,7 @@ exports.handleWorkflowStream = handleWorkflowStream;
  exports.networkRoute = networkRoute;
  exports.toAISdkFormat = toAISdkFormat;
  exports.toAISdkStream = toAISdkV5Stream;
+ exports.withMastra = withMastra;
  exports.workflowRoute = workflowRoute;
  //# sourceMappingURL=index.cjs.map
  //# sourceMappingURL=index.cjs.map
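
For context on the processor contract used throughout `createProcessorMiddleware` above: a processor is an object with an `id` and optional `processInput`, `processOutputResult`, and `processOutputStream` hooks. A hypothetical minimal sketch, inferred only from the call sites in this diff (the authoritative `Processor` interface ships in `@mastra/core/processors`):

```typescript
// Hypothetical sketch inferred from the middleware call sites above,
// not the authoritative @mastra/core Processor interface.
const myLoggingProcessor = {
  id: 'logging-processor',
  async processOutputStream({ part }) {
    // `part` is a Mastra stream chunk (e.g. text-delta, tool-call, finish).
    console.log('chunk:', part.type);
    // Return the part to pass it through; returning null/undefined drops it.
    // Calling abort(reason) throws a TripWire, which the middleware turns
    // into an error chunk and terminates the stream.
    return part;
  },
};
```

`wrapGenerate` applies the analogous `processOutputResult` hook to the complete response instead of per-chunk.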