@mastra/ai-sdk 0.0.0-fix-backport-setserver-20251201144151 → 0.0.0-fix-request-context-as-query-key-20251209093005

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -3,6 +3,9 @@
  var server = require('@mastra/core/server');
  var ai = require('ai');
  var stream = require('@mastra/core/stream');
+ var agent = require('@mastra/core/agent');
+ var di = require('@mastra/core/di');
+ var processors = require('@mastra/core/processors');

  // src/chat-route.ts

@@ -22,6 +25,8 @@ var isMastraTextStreamChunk = (chunk) => {
  "source",
  "tool-input-start",
  "tool-input-delta",
+ "tool-call-approval",
+ "tool-call-suspended",
  "tool-call",
  "tool-result",
  "tool-error",
@@ -32,6 +37,8 @@ var isMastraTextStreamChunk = (chunk) => {
  "finish",
  "abort",
  "tool-input-end",
+ "object",
+ "tripwire",
  "raw"
  ].includes(chunk.type);
  };
@@ -1134,6 +1141,26 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  };
  }
  default: {
+ if (isAgentExecutionDataChunkType(payload)) {
+ if (!("data" in payload.payload)) {
+ throw new Error(
+ `UI Messages require a data property when using data- prefixed chunks
+ ${JSON.stringify(payload)}`
+ );
+ }
+ const { type, data } = payload.payload;
+ return { type, data };
+ }
+ if (isWorkflowExecutionDataChunkType(payload)) {
+ if (!("data" in payload.payload)) {
+ throw new Error(
+ `UI Messages require a data property when using data- prefixed chunks
+ ${JSON.stringify(payload)}`
+ );
+ }
+ const { type, data } = payload.payload;
+ return { type, data };
+ }
  if (payload.type.startsWith("agent-execution-event-")) {
  const stepId = payload.payload.runId;
  const current = bufferedNetworks.get(payload.runId);
@@ -1148,6 +1175,15 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  const { request, response, ...data } = result.data;
  step.task = data;
  }
+ bufferedNetworks.set(payload.runId, current);
+ return {
+ type: isNested ? "data-tool-network" : "data-network",
+ id: payload.runId,
+ data: {
+ ...current,
+ status: "running"
+ }
+ };
  }
  if (payload.type.startsWith("workflow-execution-event-")) {
  const stepId = payload.payload.runId;
@@ -1166,6 +1202,15 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  step.task.id = data.name;
  }
  }
+ bufferedNetworks.set(payload.runId, current);
+ return {
+ type: isNested ? "data-tool-network" : "data-network",
+ id: payload.runId,
+ data: {
+ ...current,
+ status: "running"
+ }
+ };
  }
  if (isDataChunkType(payload)) {
  if (!("data" in payload)) {
@@ -1177,40 +1222,20 @@ function transformNetwork(payload, bufferedNetworks, isNested) {
  const { type, data } = payload;
  return { type, data };
  }
- if (isAgentExecutionDataChunkType(payload)) {
- if (!("data" in payload.payload)) {
- throw new Error(
- `UI Messages require a data property when using data- prefixed chunks
- ${JSON.stringify(payload)}`
- );
- }
- const { type, data } = payload.payload;
- return { type, data };
- }
- if (isWorkflowExecutionDataChunkType(payload)) {
- if (!("data" in payload.payload)) {
- throw new Error(
- `UI Messages require a data property when using data- prefixed chunks
- ${JSON.stringify(payload)}`
- );
- }
- const { type, data } = payload.payload;
- return { type, data };
- }
  return null;
  }
  }
  }

- // src/to-ai-sdk-format.ts
- function toAISdkFormat(stream, options = {
+ // src/convert-streams.ts
+ function toAISdkV5Stream(stream, options = {
  from: "agent",
  sendStart: true,
  sendFinish: true
  }) {
  const from = options?.from;
  if (from === "workflow") {
- const includeTextStreamParts = options?.includeTextStreamParts ?? false;
+ const includeTextStreamParts = options?.includeTextStreamParts ?? true;
  return stream.pipeThrough(
  WorkflowStreamToAISDKTransformer({ includeTextStreamParts })
  );
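Note: this hunk renames the converter (now implemented as toAISdkV5Stream and re-exported as toAISdkStream, see the export changes at the end of this diff) and flips the workflow default for includeTextStreamParts from false to true. A minimal consumer-side sketch, assuming a Mastra instance exported from ./mastra and an agent registered as "myAgent" (both placeholders):

    import { toAISdkStream } from "@mastra/ai-sdk";
    import { createUIMessageStream } from "ai";
    import { mastra } from "./mastra"; // assumed: your Mastra instance

    const myAgent = mastra.getAgentById("myAgent"); // placeholder agent id
    const result = await myAgent.stream("Hello!");

    // Convert Mastra stream chunks into AI SDK v5 UI message parts.
    const uiStream = createUIMessageStream({
      execute: async ({ writer }) => {
        for await (const part of toAISdkStream(result, { from: "agent" })) {
          writer.write(part);
        }
      },
    });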
@@ -1233,6 +1258,57 @@ function toAISdkFormat(stream, options = {
  }

  // src/chat-route.ts
+ async function handleChatStream({
+ mastra,
+ agentId,
+ params,
+ defaultOptions,
+ sendStart = true,
+ sendFinish = true,
+ sendReasoning = false,
+ sendSources = false
+ }) {
+ const { messages, resumeData, runId, requestContext, ...rest } = params;
+ if (resumeData && !runId) {
+ throw new Error("runId is required when resumeData is provided");
+ }
+ const agentObj = mastra.getAgentById(agentId);
+ if (!agentObj) {
+ throw new Error(`Agent ${agentId} not found`);
+ }
+ if (!Array.isArray(messages)) {
+ throw new Error("Messages must be an array of UIMessage objects");
+ }
+ const mergedOptions = {
+ ...defaultOptions,
+ ...rest,
+ ...runId && { runId },
+ requestContext: requestContext || defaultOptions?.requestContext
+ };
+ const result = resumeData ? await agentObj.resumeStream(resumeData, mergedOptions) : await agentObj.stream(messages, mergedOptions);
+ let lastMessageId;
+ if (messages.length) {
+ const lastMessage = messages[messages.length - 1];
+ if (lastMessage?.role === "assistant") {
+ lastMessageId = lastMessage.id;
+ }
+ }
+ return ai.createUIMessageStream({
+ originalMessages: messages,
+ execute: async ({ writer }) => {
+ for await (const part of toAISdkV5Stream(result, {
+ from: "agent",
+ lastMessageId,
+ sendStart,
+ sendFinish,
+ sendReasoning,
+ sendSources
+ })) {
+ writer.write(part);
+ }
+ }
+ });
+ }
  function chatRoute({
  path = "/chat/:agentId",
  agent,
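Note: handleChatStream is a new export that wraps the agent lookup, the resumeData/runId validation and the UI message stream construction shown above. A sketch of calling it outside chatRoute, e.g. from a custom HTTP handler; the mastra import and the "myAgent" id are placeholders:

    import { handleChatStream } from "@mastra/ai-sdk";
    import { createUIMessageStreamResponse } from "ai";
    import { mastra } from "./mastra"; // assumed: your Mastra instance

    export async function POST(req: Request): Promise<Response> {
      // Body shape mirrors the route schema: messages plus optional
      // resumeData, runId and requestContext (runId is required with resumeData).
      const params = await req.json();
      const stream = await handleChatStream({
        mastra,
        agentId: "myAgent", // placeholder
        params,
        defaultOptions: {},
      });
      return createUIMessageStreamResponse({ stream });
    }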
@@ -1269,6 +1345,14 @@ function chatRoute({
  schema: {
  type: "object",
  properties: {
+ resumeData: {
+ type: "object",
+ description: "Resume data for the agent"
+ },
+ runId: {
+ type: "string",
+ description: "The run ID required when resuming an agent execution"
+ },
  messages: {
  type: "array",
  description: "Array of messages in the conversation",
@@ -1339,9 +1423,9 @@ function chatRoute({
  }
  },
  handler: async (c) => {
- const { messages, ...rest } = await c.req.json();
+ const params = await c.req.json();
  const mastra = c.get("mastra");
- const runtimeContext = c.get("runtimeContext");
+ const contextRequestContext = c.get("requestContext");
  let agentToUse = agent;
  if (!agent) {
  const agentId = c.req.param("agentId");
@@ -1352,39 +1436,24 @@ function chatRoute({
  `Fixed agent ID was set together with an agentId path parameter. This can lead to unexpected behavior.`
  );
  }
- if (runtimeContext && defaultOptions?.runtimeContext) {
- mastra.getLogger()?.warn(`"runtimeContext" set in the route options will be overridden by the request's "runtimeContext".`);
+ if (contextRequestContext && defaultOptions?.requestContext) {
+ mastra.getLogger()?.warn(`"requestContext" set in the route options will be overridden by the request's "requestContext".`);
  }
  if (!agentToUse) {
  throw new Error("Agent ID is required");
  }
- const agentObj = mastra.getAgentById(agentToUse);
- if (!agentObj) {
- throw new Error(`Agent ${agentToUse} not found`);
- }
- const result = await agentObj.stream(messages, {
- ...defaultOptions,
- ...rest,
- runtimeContext: runtimeContext || defaultOptions?.runtimeContext
- });
- let lastMessageId;
- if (messages.length > 0 && messages[messages.length - 1].role === "assistant") {
- lastMessageId = messages[messages.length - 1].id;
- }
- const uiMessageStream = ai.createUIMessageStream({
- originalMessages: messages,
- execute: async ({ writer }) => {
- for await (const part of toAISdkFormat(result, {
- from: "agent",
- lastMessageId,
- sendStart,
- sendFinish,
- sendReasoning,
- sendSources
- })) {
- writer.write(part);
- }
- }
+ const uiMessageStream = await handleChatStream({
+ mastra,
+ agentId: agentToUse,
+ params: {
+ ...params,
+ requestContext: contextRequestContext || params.requestContext
+ },
+ defaultOptions,
+ sendStart,
+ sendFinish,
+ sendReasoning,
+ sendSources
  });
  return ai.createUIMessageStreamResponse({
  stream: uiMessageStream
@@ -1392,10 +1461,31 @@ function chatRoute({
  }
  });
  }
+ async function handleWorkflowStream({
+ mastra,
+ workflowId,
+ params,
+ includeTextStreamParts = true
+ }) {
+ const { runId, resourceId, inputData, resumeData, requestContext, ...rest } = params;
+ const workflowObj = mastra.getWorkflowById(workflowId);
+ if (!workflowObj) {
+ throw new Error(`Workflow ${workflowId} not found`);
+ }
+ const run = await workflowObj.createRun({ runId, resourceId, ...rest });
+ const stream = resumeData ? run.resumeStream({ resumeData, ...rest, requestContext }) : run.stream({ inputData, ...rest, requestContext });
+ return ai.createUIMessageStream({
+ execute: async ({ writer }) => {
+ for await (const part of toAISdkV5Stream(stream, { from: "workflow", includeTextStreamParts })) {
+ writer.write(part);
+ }
+ }
+ });
+ }
  function workflowRoute({
  path = "/api/workflows/:workflowId/stream",
  workflow,
- includeTextStreamParts = false
+ includeTextStreamParts = true
  }) {
  if (!workflow && !path.includes("/:workflowId")) {
  throw new Error("Path must include :workflowId to route to the correct workflow or pass the workflow explicitly");
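Note: workflowRoute's includeTextStreamParts now defaults to true, and the handler body moves into the exported handleWorkflowStream helper. A sketch of calling it directly, with a placeholder mastra import and workflow id:

    import { handleWorkflowStream } from "@mastra/ai-sdk";
    import { createUIMessageStreamResponse } from "ai";
    import { mastra } from "./mastra"; // assumed: your Mastra instance

    export async function POST(req: Request): Promise<Response> {
      // Body mirrors the route schema below: inputData or resumeData (+ runId),
      // plus optional resourceId and requestContext.
      const params = await req.json();
      const stream = await handleWorkflowStream({
        mastra,
        workflowId: "myWorkflow", // placeholder
        params,
        includeTextStreamParts: true, // now the default
      });
      return createUIMessageStreamResponse({ stream });
    }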
@@ -1426,7 +1516,7 @@ function workflowRoute({
  resourceId: { type: "string" },
  inputData: { type: "object", additionalProperties: true },
  resumeData: { type: "object", additionalProperties: true },
- runtimeContext: { type: "object", additionalProperties: true },
+ requestContext: { type: "object", additionalProperties: true },
  tracingOptions: { type: "object", additionalProperties: true },
  step: { type: "string" }
  }
@@ -1446,9 +1536,9 @@ function workflowRoute({
  }
  },
  handler: async (c) => {
- const { runId, resourceId, inputData, resumeData, ...rest } = await c.req.json();
+ const params = await c.req.json();
  const mastra = c.get("mastra");
- const runtimeContext = c.get("runtimeContext");
+ const contextRequestContext = c.get("requestContext");
  let workflowToUse = workflow;
  if (!workflow) {
  const workflowId = c.req.param("workflowId");
@@ -1462,28 +1552,47 @@ function workflowRoute({
  if (!workflowToUse) {
  throw new Error("Workflow ID is required");
  }
- const workflowObj = mastra.getWorkflowById(workflowToUse);
- if (!workflowObj) {
- throw new Error(`Workflow ${workflowToUse} not found`);
- }
- if (runtimeContext && rest.runtimeContext) {
+ if (contextRequestContext && params.requestContext) {
  mastra.getLogger()?.warn(
- `"runtimeContext" from the request body will be ignored because "runtimeContext" is already set in the route options.`
+ `"requestContext" from the request body will be ignored because "requestContext" is already set in the route options.`
  );
  }
- const run = await workflowObj.createRunAsync({ runId, resourceId, ...rest });
- const stream = resumeData ? run.resumeStream({ resumeData, ...rest, runtimeContext: runtimeContext || rest.runtimeContext }) : run.stream({ inputData, ...rest, runtimeContext: runtimeContext || rest.runtimeContext });
- const uiMessageStream = ai.createUIMessageStream({
- execute: async ({ writer }) => {
- for await (const part of toAISdkFormat(stream, { from: "workflow", includeTextStreamParts })) {
- writer.write(part);
- }
- }
+ const uiMessageStream = await handleWorkflowStream({
+ mastra,
+ workflowId: workflowToUse,
+ params: {
+ ...params,
+ requestContext: contextRequestContext || params.requestContext
+ },
+ includeTextStreamParts
  });
  return ai.createUIMessageStreamResponse({ stream: uiMessageStream });
  }
  });
  }
+ async function handleNetworkStream({
+ mastra,
+ agentId,
+ params,
+ defaultOptions
+ }) {
+ const { messages, ...rest } = params;
+ const agentObj = mastra.getAgentById(agentId);
+ if (!agentObj) {
+ throw new Error(`Agent ${agentId} not found`);
+ }
+ const result = await agentObj.network(messages, {
+ ...defaultOptions,
+ ...rest
+ });
+ return ai.createUIMessageStream({
+ execute: async ({ writer }) => {
+ for await (const part of toAISdkV5Stream(result, { from: "network" })) {
+ writer.write(part);
+ }
+ }
+ });
+ }
  function networkRoute({
  path = "/network/:agentId",
  agent,
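Note: handleNetworkStream mirrors the other two helpers for agent.network() streams. The three route factories are still registered the same way; a sketch, assuming the Mastra server's apiRoutes config (paths and agent/workflow ids are placeholders):

    import { Mastra } from "@mastra/core";
    import { chatRoute, networkRoute, workflowRoute } from "@mastra/ai-sdk";

    export const mastra = new Mastra({
      // agents / workflows omitted for brevity
      server: {
        apiRoutes: [
          chatRoute({ path: "/chat/:agentId" }),
          networkRoute({ path: "/network/:agentId" }),
          workflowRoute({ path: "/api/workflows/:workflowId/stream" }),
        ],
      },
    });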
@@ -1515,13 +1624,12 @@ function networkRoute({
  type: "object",
  properties: {
  messages: { type: "array", items: { type: "object" } },
- runtimeContext: { type: "object", additionalProperties: true },
+ requestContext: { type: "object", additionalProperties: true },
  runId: { type: "string" },
  maxSteps: { type: "number" },
  threadId: { type: "string" },
  resourceId: { type: "string" },
  modelSettings: { type: "object", additionalProperties: true },
- telemetry: { type: "object", additionalProperties: true },
  tools: { type: "array", items: { type: "object" } }
  },
  required: ["messages"]
@@ -1545,7 +1653,7 @@ function networkRoute({
  }
  },
  handler: async (c) => {
- const { messages, ...rest } = await c.req.json();
+ const params = await c.req.json();
  const mastra = c.get("mastra");
  let agentToUse = agent;
  if (!agent) {
@@ -1560,29 +1668,481 @@ function networkRoute({
  if (!agentToUse) {
  throw new Error("Agent ID is required");
  }
- const agentObj = mastra.getAgentById(agentToUse);
- if (!agentObj) {
- throw new Error(`Agent ${agentToUse} not found`);
+ const uiMessageStream = await handleNetworkStream({
+ mastra,
+ agentId: agentToUse,
+ params,
+ defaultOptions
+ });
+ return ai.createUIMessageStreamResponse({ stream: uiMessageStream });
+ }
+ });
+ }
+ function withMastra(model, options = {}) {
+ const { memory, inputProcessors = [], outputProcessors = [] } = options;
+ const allInputProcessors = [...inputProcessors];
+ const allOutputProcessors = [...outputProcessors];
+ if (memory) {
+ const { storage, lastMessages, semanticRecall, workingMemory } = memory;
+ const isWorkingMemoryEnabled = typeof workingMemory === "object" && workingMemory.enabled !== false;
+ if (isWorkingMemoryEnabled && typeof workingMemory === "object") {
+ let template;
+ if (workingMemory.template) {
+ template = {
+ format: "markdown",
+ content: workingMemory.template
+ };
  }
- const result = await agentObj.network(messages, {
- ...defaultOptions,
- ...rest
+ const workingMemoryProcessor = new processors.WorkingMemory({
+ storage,
+ template,
+ scope: workingMemory.scope,
+ useVNext: "version" in workingMemory && workingMemory.version === "vnext"
+ });
+ allInputProcessors.push(workingMemoryProcessor);
+ }
+ if (lastMessages !== false && lastMessages !== void 0) {
+ const messageHistory = new processors.MessageHistory({
+ storage,
+ lastMessages: typeof lastMessages === "number" ? lastMessages : void 0
+ });
+ allInputProcessors.push(messageHistory);
+ allOutputProcessors.push(messageHistory);
+ }
+ if (semanticRecall) {
+ const { vector, embedder, indexName, ...semanticConfig } = semanticRecall;
+ const semanticRecallProcessor = new processors.SemanticRecall({
+ storage,
+ vector,
+ embedder,
+ indexName: indexName || "memory_messages",
+ ...semanticConfig
+ });
+ allInputProcessors.push(semanticRecallProcessor);
+ allOutputProcessors.push(semanticRecallProcessor);
+ }
+ }
+ return ai.wrapLanguageModel({
+ model,
+ middleware: createProcessorMiddleware({
+ inputProcessors: allInputProcessors,
+ outputProcessors: allOutputProcessors,
+ memory: memory ? {
+ threadId: memory.threadId,
+ resourceId: memory.resourceId
+ } : void 0
+ })
+ });
+ }
+ function createProcessorMiddleware(options) {
+ const { inputProcessors = [], outputProcessors = [], memory } = options;
+ const requestContext = new di.RequestContext();
+ if (memory) {
+ requestContext.set("MastraMemory", {
+ thread: memory.threadId ? { id: memory.threadId } : void 0,
+ resourceId: memory.resourceId,
+ memoryConfig: memory.config
+ });
+ }
+ return {
+ middlewareVersion: "v2",
+ /**
+ * Transform params runs input processors (processInput)
+ */
+ async transformParams({ params }) {
+ const messageList = new agent.MessageList({
+ threadId: memory?.threadId,
+ resourceId: memory?.resourceId
  });
- const uiMessageStream = ai.createUIMessageStream({
- execute: async ({ writer }) => {
- for await (const part of toAISdkFormat(result, { from: "network" })) {
- writer.write(part);
+ for (const msg of params.prompt) {
+ if (msg.role === "system") {
+ messageList.addSystem(msg.content);
+ } else {
+ messageList.add(msg, "input");
+ }
+ }
+ for (const processor of inputProcessors) {
+ if (processor.processInput) {
+ try {
+ await processor.processInput({
+ messages: messageList.get.input.db(),
+ systemMessages: messageList.getAllSystemMessages(),
+ messageList,
+ requestContext,
+ abort: (reason) => {
+ throw new agent.TripWire(reason || "Aborted by processor");
+ }
+ });
+ } catch (error) {
+ if (error instanceof agent.TripWire) {
+ return {
+ ...params,
+ providerOptions: {
+ ...params.providerOptions,
+ mastraProcessors: {
+ tripwire: true,
+ reason: error.message
+ }
+ }
+ };
+ }
+ throw error;
  }
  }
+ }
+ const newPrompt = messageList.get.all.aiV5.prompt().map(agent.MessageList.aiV5ModelMessageToV2PromptMessage);
+ return {
+ ...params,
+ prompt: newPrompt
+ };
+ },
+ /**
+ * Wrap generate for non-streaming output processing
+ */
+ async wrapGenerate({ doGenerate, params }) {
+ const processorState = params.providerOptions?.mastraProcessors;
+ if (processorState?.tripwire) {
+ const reason = processorState.reason || "Blocked by processor";
+ return {
+ content: [{ type: "text", text: reason }],
+ finishReason: "stop",
+ usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+ warnings: [{ type: "other", message: `Tripwire: ${reason}` }]
+ };
+ }
+ const result = await doGenerate();
+ if (!outputProcessors.length) return result;
+ const messageList = new agent.MessageList({
+ threadId: memory?.threadId,
+ resourceId: memory?.resourceId
  });
- return ai.createUIMessageStreamResponse({ stream: uiMessageStream });
+ for (const msg of params.prompt) {
+ if (msg.role === "system") {
+ messageList.addSystem(msg.content);
+ } else {
+ messageList.add(msg, "input");
+ }
+ }
+ const textContent = result.content.filter((c) => c.type === "text").map((c) => c.text).join("");
+ const responseMessage = {
+ id: crypto.randomUUID(),
+ role: "assistant",
+ content: {
+ format: 2,
+ parts: [{ type: "text", text: textContent }]
+ },
+ createdAt: /* @__PURE__ */ new Date(),
+ ...memory?.threadId && { threadId: memory.threadId },
+ ...memory?.resourceId && { resourceId: memory.resourceId }
+ };
+ messageList.add(responseMessage, "response");
+ for (const processor of outputProcessors) {
+ if (processor.processOutputResult) {
+ try {
+ await processor.processOutputResult({
+ messages: messageList.get.all.db(),
+ messageList,
+ requestContext,
+ abort: (reason) => {
+ throw new agent.TripWire(reason || "Aborted by processor");
+ }
+ });
+ } catch (error) {
+ if (error instanceof agent.TripWire) {
+ return {
+ content: [{ type: "text", text: error.message }],
+ finishReason: "stop",
+ usage: result.usage,
+ warnings: [{ type: "other", message: `Output blocked: ${error.message}` }]
+ };
+ }
+ throw error;
+ }
+ }
+ }
+ const processedText = messageList.get.response.db().map((m) => extractTextFromMastraMessage(m)).join("");
+ return {
+ ...result,
+ content: [{ type: "text", text: processedText }]
+ };
+ },
+ /**
+ * Wrap stream for streaming output processing
+ */
+ async wrapStream({ doStream, params }) {
+ const processorState = params.providerOptions?.mastraProcessors;
+ if (processorState?.tripwire) {
+ const reason = processorState.reason || "Blocked by processor";
+ return {
+ stream: createBlockedStream(reason)
+ };
+ }
+ const { stream: stream$1, ...rest } = await doStream();
+ if (!outputProcessors.length) return { stream: stream$1, ...rest };
+ const processorStates = /* @__PURE__ */ new Map();
+ const runId = crypto.randomUUID();
+ const transformedStream = stream$1.pipeThrough(
+ new TransformStream({
+ async transform(chunk, controller) {
+ let mastraChunk = stream.convertFullStreamChunkToMastra(
+ chunk,
+ { runId }
+ );
+ if (!mastraChunk) {
+ controller.enqueue(chunk);
+ return;
+ }
+ for (const processor of outputProcessors) {
+ if (processor.processOutputStream && mastraChunk) {
+ let state = processorStates.get(processor.id);
+ if (!state) {
+ state = { streamParts: [], customState: {} };
+ processorStates.set(processor.id, state);
+ }
+ state.streamParts.push(mastraChunk);
+ try {
+ const result = await processor.processOutputStream({
+ part: mastraChunk,
+ streamParts: state.streamParts,
+ state: state.customState,
+ requestContext,
+ abort: (reason) => {
+ throw new agent.TripWire(reason || "Aborted by processor");
+ }
+ });
+ if (result === null || result === void 0) {
+ mastraChunk = void 0;
+ } else {
+ mastraChunk = result;
+ }
+ } catch (error) {
+ if (error instanceof agent.TripWire) {
+ controller.enqueue({
+ type: "error",
+ error: new Error(error.message)
+ });
+ controller.terminate();
+ return;
+ }
+ throw error;
+ }
+ }
+ }
+ if (mastraChunk) {
+ const aiChunk = convertMastraChunkToAISDKStreamPart(mastraChunk);
+ if (aiChunk) {
+ controller.enqueue(aiChunk);
+ }
+ }
+ }
+ })
+ );
+ return { stream: transformedStream, ...rest };
+ }
+ };
+ }
+ function createBlockedStream(reason) {
+ return new ReadableStream({
+ start(controller) {
+ const id = crypto.randomUUID();
+ controller.enqueue({
+ type: "text-start",
+ id
+ });
+ controller.enqueue({
+ type: "text-delta",
+ id,
+ delta: reason
+ });
+ controller.enqueue({
+ type: "text-end",
+ id
+ });
+ controller.enqueue({
+ type: "finish",
+ finishReason: "stop",
+ usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
+ });
+ controller.close();
  }
  });
  }
+ function extractTextFromMastraMessage(msg) {
+ const content = msg.content;
+ if (typeof content === "string") {
+ return content;
+ }
+ if (content?.parts) {
+ return content.parts.filter((p) => p.type === "text" && "text" in p).map((p) => p.text).join("");
+ }
+ return "";
+ }
+ function convertMastraChunkToAISDKStreamPart(chunk) {
+ switch (chunk.type) {
+ // Text streaming
+ case "text-start":
+ return {
+ type: "text-start",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "text-delta":
+ return {
+ type: "text-delta",
+ id: chunk.payload.id || crypto.randomUUID(),
+ delta: chunk.payload.text,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "text-end":
+ return {
+ type: "text-end",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Reasoning streaming
+ case "reasoning-start":
+ return {
+ type: "reasoning-start",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "reasoning-delta":
+ return {
+ type: "reasoning-delta",
+ id: chunk.payload.id || crypto.randomUUID(),
+ delta: chunk.payload.text,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "reasoning-end":
+ return {
+ type: "reasoning-end",
+ id: chunk.payload.id || crypto.randomUUID(),
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Tool call (complete)
+ case "tool-call":
+ return {
+ type: "tool-call",
+ toolCallId: chunk.payload.toolCallId,
+ toolName: chunk.payload.toolName,
+ input: JSON.stringify(chunk.payload.args),
+ providerExecuted: chunk.payload.providerExecuted,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Tool call input streaming
+ case "tool-call-input-streaming-start":
+ return {
+ type: "tool-input-start",
+ id: chunk.payload.toolCallId,
+ toolName: chunk.payload.toolName,
+ providerExecuted: chunk.payload.providerExecuted,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "tool-call-delta":
+ return {
+ type: "tool-input-delta",
+ id: chunk.payload.toolCallId,
+ delta: chunk.payload.argsTextDelta,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ case "tool-call-input-streaming-end":
+ return {
+ type: "tool-input-end",
+ id: chunk.payload.toolCallId,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Tool result
+ case "tool-result":
+ return {
+ type: "tool-result",
+ toolCallId: chunk.payload.toolCallId,
+ toolName: chunk.payload.toolName,
+ result: { type: "json", value: chunk.payload.result },
+ isError: chunk.payload.isError,
+ providerExecuted: chunk.payload.providerExecuted,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ // Source (citations)
+ case "source":
+ if (chunk.payload.sourceType === "url") {
+ return {
+ type: "source",
+ sourceType: "url",
+ id: chunk.payload.id,
+ url: chunk.payload.url,
+ title: chunk.payload.title,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ } else {
+ return {
+ type: "source",
+ sourceType: "document",
+ id: chunk.payload.id,
+ mediaType: chunk.payload.mimeType,
+ title: chunk.payload.title,
+ filename: chunk.payload.filename,
+ providerMetadata: chunk.payload.providerMetadata
+ };
+ }
+ // File output
+ case "file":
+ return {
+ type: "file",
+ data: chunk.payload.data || chunk.payload.base64,
+ mediaType: chunk.payload.mimeType
+ };
+ // Response metadata
+ case "response-metadata":
+ return {
+ type: "response-metadata",
+ ...chunk.payload
+ };
+ // Raw provider data
+ case "raw":
+ return {
+ type: "raw",
+ rawValue: chunk.payload
+ };
+ // Finish
+ case "finish": {
+ const usage = chunk.payload.output?.usage;
+ return {
+ type: "finish",
+ finishReason: chunk.payload.stepResult?.reason || "stop",
+ usage: usage ? {
+ inputTokens: usage.inputTokens || 0,
+ outputTokens: usage.outputTokens || 0,
+ totalTokens: usage.totalTokens || 0
+ } : { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+ providerMetadata: chunk.payload.metadata?.providerMetadata
+ };
+ }
+ // Error
+ case "error":
+ return {
+ type: "error",
+ error: chunk.payload.error || chunk.payload
+ };
+ default:
+ return null;
+ }
+ }
+
+ // src/to-ai-sdk-format.ts
+ function toAISdkFormat() {
+ throw new Error(
+ 'toAISdkFormat() has been deprecated. Please use toAISdkStream() instead.\n\nMigration:\n import { toAISdkFormat } from "@mastra/ai-sdk";\n // Change to:\n import { toAISdkStream } from "@mastra/ai-sdk";\n\nThe function signature remains the same.'
+ );
+ }

  exports.chatRoute = chatRoute;
+ exports.handleChatStream = handleChatStream;
+ exports.handleNetworkStream = handleNetworkStream;
+ exports.handleWorkflowStream = handleWorkflowStream;
  exports.networkRoute = networkRoute;
  exports.toAISdkFormat = toAISdkFormat;
+ exports.toAISdkStream = toAISdkV5Stream;
+ exports.withMastra = withMastra;
  exports.workflowRoute = workflowRoute;
  //# sourceMappingURL=index.cjs.map
  //# sourceMappingURL=index.cjs.map
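Note: the tail of this diff adds withMastra, which wraps an AI SDK v5 language model with processor middleware (working memory, message history, semantic recall), and turns toAISdkFormat into a throwing deprecation stub in favor of toAISdkStream. A usage sketch; the model choice, the LibSQL storage and the thread/resource ids are illustrative assumptions, while the memory option keys match what withMastra reads above:

    import { withMastra } from "@mastra/ai-sdk";
    import { openai } from "@ai-sdk/openai"; // assumed provider choice
    import { generateText } from "ai";
    import { LibSQLStore } from "@mastra/libsql"; // assumed storage adapter

    const storage = new LibSQLStore({ url: "file:./mastra.db" });

    // Wrap the model so the MessageHistory processor loads prior messages
    // before the call and records the exchange afterwards, scoped to a
    // thread/resource pair (ids are placeholders).
    const model = withMastra(openai("gpt-4o-mini"), {
      memory: {
        storage,
        threadId: "thread-1",
        resourceId: "user-1",
        lastMessages: 10, // enables the MessageHistory processor
      },
    });

    const { text } = await generateText({ model, prompt: "Hello!" });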