@mastra/ai-sdk 1.0.0-beta.6 → 1.0.0-beta.7

package/dist/index.d.ts CHANGED
@@ -8,5 +8,7 @@ export type { NetworkRouteOptions, NetworkStreamHandlerParams, NetworkStreamHand
  export type { NetworkDataPart } from './transformers.js';
  export type { AgentDataPart } from './transformers.js';
  export { toAISdkV5Stream as toAISdkStream } from './convert-streams.js';
+ export { withMastra } from './middleware.js';
+ export type { WithMastraOptions, WithMastraMemoryOptions, WithMastraSemanticRecallOptions } from './middleware.js';
  export { toAISdkFormat } from './to-ai-sdk-format.js';
  //# sourceMappingURL=index.d.ts.map
package/dist/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,SAAS,EAAE,gBAAgB,EAAE,MAAM,cAAc,CAAC;AAC3D,YAAY,EAAE,gBAAgB,EAAE,uBAAuB,EAAE,wBAAwB,EAAE,MAAM,cAAc,CAAC;AACxG,OAAO,EAAE,aAAa,EAAE,oBAAoB,EAAE,MAAM,kBAAkB,CAAC;AACvE,YAAY,EAAE,oBAAoB,EAAE,2BAA2B,EAAE,4BAA4B,EAAE,MAAM,kBAAkB,CAAC;AACxH,YAAY,EAAE,gBAAgB,EAAE,MAAM,gBAAgB,CAAC;AACvD,OAAO,EAAE,YAAY,EAAE,mBAAmB,EAAE,MAAM,iBAAiB,CAAC;AACpE,YAAY,EAAE,mBAAmB,EAAE,0BAA0B,EAAE,2BAA2B,EAAE,MAAM,iBAAiB,CAAC;AACpH,YAAY,EAAE,eAAe,EAAE,MAAM,gBAAgB,CAAC;AACtD,YAAY,EAAE,aAAa,EAAE,MAAM,gBAAgB,CAAC;AAEpD,OAAO,EAAE,eAAe,IAAI,aAAa,EAAE,MAAM,mBAAmB,CAAC;AAGrE,OAAO,EAAE,aAAa,EAAE,MAAM,oBAAoB,CAAC"}
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,SAAS,EAAE,gBAAgB,EAAE,MAAM,cAAc,CAAC;AAC3D,YAAY,EAAE,gBAAgB,EAAE,uBAAuB,EAAE,wBAAwB,EAAE,MAAM,cAAc,CAAC;AACxG,OAAO,EAAE,aAAa,EAAE,oBAAoB,EAAE,MAAM,kBAAkB,CAAC;AACvE,YAAY,EAAE,oBAAoB,EAAE,2BAA2B,EAAE,4BAA4B,EAAE,MAAM,kBAAkB,CAAC;AACxH,YAAY,EAAE,gBAAgB,EAAE,MAAM,gBAAgB,CAAC;AACvD,OAAO,EAAE,YAAY,EAAE,mBAAmB,EAAE,MAAM,iBAAiB,CAAC;AACpE,YAAY,EAAE,mBAAmB,EAAE,0BAA0B,EAAE,2BAA2B,EAAE,MAAM,iBAAiB,CAAC;AACpH,YAAY,EAAE,eAAe,EAAE,MAAM,gBAAgB,CAAC;AACtD,YAAY,EAAE,aAAa,EAAE,MAAM,gBAAgB,CAAC;AAEpD,OAAO,EAAE,eAAe,IAAI,aAAa,EAAE,MAAM,mBAAmB,CAAC;AAGrE,OAAO,EAAE,UAAU,EAAE,MAAM,cAAc,CAAC;AAC1C,YAAY,EAAE,iBAAiB,EAAE,uBAAuB,EAAE,+BAA+B,EAAE,MAAM,cAAc,CAAC;AAGhH,OAAO,EAAE,aAAa,EAAE,MAAM,oBAAoB,CAAC"}
package/dist/index.js CHANGED
@@ -1,6 +1,9 @@
  import { registerApiRoute } from '@mastra/core/server';
- import { createUIMessageStream, createUIMessageStreamResponse } from 'ai';
- import { DefaultGeneratedFile, DefaultGeneratedFileWithType } from '@mastra/core/stream';
+ import { createUIMessageStream, createUIMessageStreamResponse, wrapLanguageModel } from 'ai';
+ import { convertFullStreamChunkToMastra, DefaultGeneratedFile, DefaultGeneratedFileWithType } from '@mastra/core/stream';
+ import { TripWire, MessageList } from '@mastra/core/agent';
+ import { RequestContext } from '@mastra/core/di';
+ import { WorkingMemory, MessageHistory, SemanticRecall } from '@mastra/core/processors';

  // src/chat-route.ts

@@ -20,6 +23,8 @@ var isMastraTextStreamChunk = (chunk) => {
  "source",
  "tool-input-start",
  "tool-input-delta",
+ "tool-call-approval",
+ "tool-call-suspended",
  "tool-call",
  "tool-result",
  "tool-error",
@@ -30,6 +35,8 @@ var isMastraTextStreamChunk = (chunk) => {
  "finish",
  "abort",
  "tool-input-end",
+ "object",
+ "tripwire",
  "raw"
  ].includes(chunk.type);
  };
@@ -1132,6 +1139,26 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  };
  }
  default: {
+ if (isAgentExecutionDataChunkType(payload)) {
+ if (!("data" in payload.payload)) {
+ throw new Error(
+ `UI Messages require a data property when using data- prefixed chunks
+ ${JSON.stringify(payload)}`
+ );
+ }
+ const { type, data } = payload.payload;
+ return { type, data };
+ }
+ if (isWorkflowExecutionDataChunkType(payload)) {
+ if (!("data" in payload.payload)) {
+ throw new Error(
+ `UI Messages require a data property when using data- prefixed chunks
+ ${JSON.stringify(payload)}`
+ );
+ }
+ const { type, data } = payload.payload;
+ return { type, data };
+ }
  if (payload.type.startsWith("agent-execution-event-")) {
  const stepId = payload.payload.runId;
  const current = bufferedNetworks.get(payload.runId);
@@ -1146,6 +1173,15 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  const { request, response, ...data } = result.data;
  step.task = data;
  }
+ bufferedNetworks.set(payload.runId, current);
+ return {
+ type: isNested ? "data-tool-network" : "data-network",
+ id: payload.runId,
+ data: {
+ ...current,
+ status: "running"
+ }
+ };
  }
  if (payload.type.startsWith("workflow-execution-event-")) {
  const stepId = payload.payload.runId;
@@ -1164,6 +1200,15 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  step.task.id = data.name;
  }
  }
+ bufferedNetworks.set(payload.runId, current);
+ return {
+ type: isNested ? "data-tool-network" : "data-network",
+ id: payload.runId,
+ data: {
+ ...current,
+ status: "running"
+ }
+ };
  }
  if (isDataChunkType(payload)) {
  if (!("data" in payload)) {
@@ -1175,26 +1220,6 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  const { type, data } = payload;
  return { type, data };
  }
- if (isAgentExecutionDataChunkType(payload)) {
- if (!("data" in payload.payload)) {
- throw new Error(
- `UI Messages require a data property when using data- prefixed chunks
- ${JSON.stringify(payload)}`
- );
- }
- const { type, data } = payload.payload;
- return { type, data };
- }
- if (isWorkflowExecutionDataChunkType(payload)) {
- if (!("data" in payload.payload)) {
- throw new Error(
- `UI Messages require a data property when using data- prefixed chunks
- ${JSON.stringify(payload)}`
- );
- }
- const { type, data } = payload.payload;
- return { type, data };
- }
  return null;
  }
  }
@@ -1651,6 +1676,455 @@ function networkRoute({
  }
  });
  }
+ function withMastra(model, options = {}) {
+ const { memory, inputProcessors = [], outputProcessors = [] } = options;
+ const allInputProcessors = [...inputProcessors];
+ const allOutputProcessors = [...outputProcessors];
+ if (memory) {
+ const { storage, lastMessages, semanticRecall, workingMemory } = memory;
+ const isWorkingMemoryEnabled = typeof workingMemory === "object" && workingMemory.enabled !== false;
+ if (isWorkingMemoryEnabled && typeof workingMemory === "object") {
+ let template;
+ if (workingMemory.template) {
+ template = {
+ format: "markdown",
+ content: workingMemory.template
+ };
+ }
+ const workingMemoryProcessor = new WorkingMemory({
+ storage,
+ template,
+ scope: workingMemory.scope,
+ useVNext: "version" in workingMemory && workingMemory.version === "vnext"
+ });
+ allInputProcessors.push(workingMemoryProcessor);
+ }
+ if (lastMessages !== false && lastMessages !== void 0) {
+ const messageHistory = new MessageHistory({
+ storage,
+ lastMessages: typeof lastMessages === "number" ? lastMessages : void 0
+ });
+ allInputProcessors.push(messageHistory);
+ allOutputProcessors.push(messageHistory);
+ }
+ if (semanticRecall) {
+ const { vector, embedder, indexName, ...semanticConfig } = semanticRecall;
+ const semanticRecallProcessor = new SemanticRecall({
+ storage,
+ vector,
+ embedder,
+ indexName: indexName || "memory_messages",
+ ...semanticConfig
+ });
+ allInputProcessors.push(semanticRecallProcessor);
+ allOutputProcessors.push(semanticRecallProcessor);
+ }
+ }
+ return wrapLanguageModel({
+ model,
+ middleware: createProcessorMiddleware({
+ inputProcessors: allInputProcessors,
+ outputProcessors: allOutputProcessors,
+ memory: memory ? {
+ threadId: memory.threadId,
+ resourceId: memory.resourceId
+ } : void 0
+ })
+ });
+ }
+ function createProcessorMiddleware(options) {
+ const { inputProcessors = [], outputProcessors = [], memory } = options;
+ const requestContext = new RequestContext();
+ if (memory) {
+ requestContext.set("MastraMemory", {
+ thread: memory.threadId ? { id: memory.threadId } : void 0,
+ resourceId: memory.resourceId,
+ memoryConfig: memory.config
+ });
+ }
+ return {
+ middlewareVersion: "v2",
+ /**
+ * Transform params runs input processors (processInput)
+ */
+ async transformParams({ params }) {
+ const messageList = new MessageList({
+ threadId: memory?.threadId,
+ resourceId: memory?.resourceId
+ });
+ for (const msg of params.prompt) {
+ if (msg.role === "system") {
+ messageList.addSystem(msg.content);
+ } else {
+ messageList.add(msg, "input");
+ }
+ }
+ for (const processor of inputProcessors) {
+ if (processor.processInput) {
+ try {
+ await processor.processInput({
+ messages: messageList.get.input.db(),
+ systemMessages: messageList.getAllSystemMessages(),
+ messageList,
+ requestContext,
+ abort: (reason) => {
+ throw new TripWire(reason || "Aborted by processor");
+ }
+ });
+ } catch (error) {
+ if (error instanceof TripWire) {
+ return {
+ ...params,
+ providerOptions: {
+ ...params.providerOptions,
+ mastraProcessors: {
+ tripwire: true,
+ reason: error.message
+ }
+ }
+ };
+ }
+ throw error;
+ }
+ }
+ }
+ const newPrompt = messageList.get.all.aiV5.prompt().map(MessageList.aiV5ModelMessageToV2PromptMessage);
+ return {
+ ...params,
+ prompt: newPrompt
+ };
+ },
+ /**
+ * Wrap generate for non-streaming output processing
+ */
+ async wrapGenerate({ doGenerate, params }) {
+ const processorState = params.providerOptions?.mastraProcessors;
+ if (processorState?.tripwire) {
+ const reason = processorState.reason || "Blocked by processor";
+ return {
+ content: [{ type: "text", text: reason }],
+ finishReason: "stop",
+ usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+ warnings: [{ type: "other", message: `Tripwire: ${reason}` }]
+ };
+ }
+ const result = await doGenerate();
+ if (!outputProcessors.length) return result;
+ const messageList = new MessageList({
+ threadId: memory?.threadId,
+ resourceId: memory?.resourceId
+ });
+ for (const msg of params.prompt) {
+ if (msg.role === "system") {
+ messageList.addSystem(msg.content);
+ } else {
+ messageList.add(msg, "input");
+ }
+ }
+ const textContent = result.content.filter((c) => c.type === "text").map((c) => c.text).join("");
+ const responseMessage = {
+ id: crypto.randomUUID(),
+ role: "assistant",
+ content: {
+ format: 2,
+ parts: [{ type: "text", text: textContent }]
+ },
+ createdAt: /* @__PURE__ */ new Date(),
+ ...memory?.threadId && { threadId: memory.threadId },
+ ...memory?.resourceId && { resourceId: memory.resourceId }
+ };
+ messageList.add(responseMessage, "response");
+ for (const processor of outputProcessors) {
+ if (processor.processOutputResult) {
+ try {
+ await processor.processOutputResult({
+ messages: messageList.get.all.db(),
+ messageList,
+ requestContext,
+ abort: (reason) => {
+ throw new TripWire(reason || "Aborted by processor");
+ }
+ });
+ } catch (error) {
+ if (error instanceof TripWire) {
+ return {
+ content: [{ type: "text", text: error.message }],
+ finishReason: "stop",
+ usage: result.usage,
+ warnings: [{ type: "other", message: `Output blocked: ${error.message}` }]
+ };
+ }
+ throw error;
+ }
+ }
+ }
+ const processedText = messageList.get.response.db().map((m) => extractTextFromMastraMessage(m)).join("");
+ return {
+ ...result,
+ content: [{ type: "text", text: processedText }]
+ };
+ },
+ /**
+ * Wrap stream for streaming output processing
+ */
+ async wrapStream({ doStream, params }) {
+ const processorState = params.providerOptions?.mastraProcessors;
+ if (processorState?.tripwire) {
+ const reason = processorState.reason || "Blocked by processor";
+ return {
+ stream: createBlockedStream(reason)
+ };
+ }
+ const { stream, ...rest } = await doStream();
+ if (!outputProcessors.length) return { stream, ...rest };
+ const processorStates = /* @__PURE__ */ new Map();
+ const runId = crypto.randomUUID();
+ const transformedStream = stream.pipeThrough(
+ new TransformStream({
+ async transform(chunk, controller) {
+ let mastraChunk = convertFullStreamChunkToMastra(
+ chunk,
+ { runId }
+ );
+ if (!mastraChunk) {
+ controller.enqueue(chunk);
+ return;
+ }
+ for (const processor of outputProcessors) {
+ if (processor.processOutputStream && mastraChunk) {
+ let state = processorStates.get(processor.id);
+ if (!state) {
+ state = { streamParts: [], customState: {} };
+ processorStates.set(processor.id, state);
+ }
+ state.streamParts.push(mastraChunk);
+ try {
+ const result = await processor.processOutputStream({
+ part: mastraChunk,
+ streamParts: state.streamParts,
+ state: state.customState,
+ requestContext,
+ abort: (reason) => {
+ throw new TripWire(reason || "Aborted by processor");
+ }
+ });
+ if (result === null || result === void 0) {
+ mastraChunk = void 0;
+ } else {
+ mastraChunk = result;
+ }
+ } catch (error) {
+ if (error instanceof TripWire) {
+ controller.enqueue({
+ type: "error",
+ error: new Error(error.message)
+ });
+ controller.terminate();
+ return;
+ }
+ throw error;
+ }
+ }
+ }
+ if (mastraChunk) {
+ const aiChunk = convertMastraChunkToAISDKStreamPart(mastraChunk);
+ if (aiChunk) {
+ controller.enqueue(aiChunk);
+ }
+ }
+ }
+ })
+ );
+ return { stream: transformedStream, ...rest };
+ }
+ };
+ }
+ function createBlockedStream(reason) {
+ return new ReadableStream({
+ start(controller) {
+ const id = crypto.randomUUID();
+ controller.enqueue({
+ type: "text-start",
+ id
+ });
+ controller.enqueue({
+ type: "text-delta",
+ id,
+ delta: reason
+ });
+ controller.enqueue({
+ type: "text-end",
+ id
+ });
+ controller.enqueue({
+ type: "finish",
+ finishReason: "stop",
+ usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
+ });
+ controller.close();
+ }
+ });
+ }
+ function extractTextFromMastraMessage(msg) {
+ const content = msg.content;
+ if (typeof content === "string") {
+ return content;
+ }
+ if (content?.parts) {
+ return content.parts.filter((p) => p.type === "text" && "text" in p).map((p) => p.text).join("");
+ }
+ return "";
+ }
+ function convertMastraChunkToAISDKStreamPart(chunk) {
+ switch (chunk.type) {
+ // Text streaming
+ case "text-start":
+ return {
+ type: "text-start",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "text-delta":
+ return {
+ type: "text-delta",
+ id: chunk.payload.id || crypto.randomUUID(),
+ delta: chunk.payload.text,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "text-end":
+ return {
+ type: "text-end",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Reasoning streaming
+ case "reasoning-start":
+ return {
+ type: "reasoning-start",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "reasoning-delta":
+ return {
+ type: "reasoning-delta",
+ id: chunk.payload.id || crypto.randomUUID(),
+ delta: chunk.payload.text,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "reasoning-end":
+ return {
+ type: "reasoning-end",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Tool call (complete)
+ case "tool-call":
+ return {
+ type: "tool-call",
+ toolCallId: chunk.payload.toolCallId,
+ toolName: chunk.payload.toolName,
+ input: JSON.stringify(chunk.payload.args),
+ providerExecuted: chunk.payload.providerExecuted,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Tool call input streaming
+ case "tool-call-input-streaming-start":
+ return {
+ type: "tool-input-start",
+ id: chunk.payload.toolCallId,
+ toolName: chunk.payload.toolName,
+ providerExecuted: chunk.payload.providerExecuted,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "tool-call-delta":
+ return {
+ type: "tool-input-delta",
+ id: chunk.payload.toolCallId,
+ delta: chunk.payload.argsTextDelta,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "tool-call-input-streaming-end":
+ return {
+ type: "tool-input-end",
+ id: chunk.payload.toolCallId,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Tool result
+ case "tool-result":
+ return {
+ type: "tool-result",
+ toolCallId: chunk.payload.toolCallId,
+ toolName: chunk.payload.toolName,
+ result: { type: "json", value: chunk.payload.result },
+ isError: chunk.payload.isError,
+ providerExecuted: chunk.payload.providerExecuted,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Source (citations)
+ case "source":
+ if (chunk.payload.sourceType === "url") {
+ return {
+ type: "source",
+ sourceType: "url",
+ id: chunk.payload.id,
+ url: chunk.payload.url,
+ title: chunk.payload.title,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ } else {
+ return {
+ type: "source",
+ sourceType: "document",
+ id: chunk.payload.id,
+ mediaType: chunk.payload.mimeType,
+ title: chunk.payload.title,
+ filename: chunk.payload.filename,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ }
+ // File output
+ case "file":
+ return {
+ type: "file",
+ data: chunk.payload.data || chunk.payload.base64,
+ mediaType: chunk.payload.mimeType
+ };
+ // Response metadata
+ case "response-metadata":
+ return {
+ type: "response-metadata",
+ ...chunk.payload
+ };
+ // Raw provider data
+ case "raw":
+ return {
+ type: "raw",
+ rawValue: chunk.payload
+ };
+ // Finish
+ case "finish": {
+ const usage = chunk.payload.output?.usage;
+ return {
+ type: "finish",
+ finishReason: chunk.payload.stepResult?.reason || "stop",
+ usage: usage ? {
+ inputTokens: usage.inputTokens || 0,
+ outputTokens: usage.outputTokens || 0,
+ totalTokens: usage.totalTokens || 0
+ } : { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+ providerMetadata: chunk.payload.metadata?.providerMetadata
+ };
+ }
+ // Error
+ case "error":
+ return {
+ type: "error",
+ error: chunk.payload.error || chunk.payload
+ };
+ default:
+ return null;
+ }
+ }

  // src/to-ai-sdk-format.ts
  function toAISdkFormat() {
@@ -1659,6 +2133,6 @@ function toAISdkFormat() {
  );
  }

- export { chatRoute, handleChatStream, handleNetworkStream, handleWorkflowStream, networkRoute, toAISdkFormat, toAISdkV5Stream as toAISdkStream, workflowRoute };
+ export { chatRoute, handleChatStream, handleNetworkStream, handleWorkflowStream, networkRoute, toAISdkFormat, toAISdkV5Stream as toAISdkStream, withMastra, workflowRoute };
  //# sourceMappingURL=index.js.map
  //# sourceMappingURL=index.js.map
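For context on the middleware hunk above: withMastra wraps an AI SDK language model with wrapLanguageModel and a processor middleware, so Mastra memory processors (WorkingMemory, MessageHistory, SemanticRecall) run on every generate/stream call, and tripwired requests are short-circuited into a blocked text stream. A minimal usage sketch, assuming an OpenAI model from @ai-sdk/openai and a LibSQL storage adapter as placeholders (neither is part of this diff):

import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';
import { withMastra } from '@mastra/ai-sdk';
import { LibSQLStore } from '@mastra/libsql'; // assumed storage adapter; any Mastra storage should work

const storage = new LibSQLStore({ url: 'file:./memory.db' });

// Option names mirror what withMastra destructures in the diff above.
const model = withMastra(openai('gpt-4o'), {
  memory: {
    storage,
    threadId: 'thread-123',            // scopes history/recall to a conversation thread
    resourceId: 'user-456',
    lastMessages: 10,                  // enables the MessageHistory processor
    workingMemory: { enabled: true },  // enables the WorkingMemory processor
  },
});

// The wrapped model behaves like any other AI SDK language model.
const result = streamText({ model, prompt: 'Hello!' });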