@mastra/mcp-docs-server 0.13.31 → 0.13.32-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +14 -14
  2. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +8 -8
  3. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +14 -14
  4. package/.docs/organized/changelogs/%40mastra%2Fcore.md +45 -45
  5. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +9 -9
  6. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +9 -9
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +9 -9
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +9 -9
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +11 -11
  10. package/.docs/organized/changelogs/%40mastra%2Fevals.md +10 -10
  11. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +14 -14
  12. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +8 -8
  13. package/.docs/organized/changelogs/%40mastra%2Fpg.md +12 -12
  14. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +14 -14
  15. package/.docs/organized/changelogs/%40mastra%2Freact.md +7 -0
  16. package/.docs/organized/changelogs/%40mastra%2Fserver.md +10 -10
  17. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +11 -11
  18. package/.docs/organized/changelogs/create-mastra.md +3 -3
  19. package/.docs/organized/changelogs/mastra.md +11 -11
  20. package/.docs/organized/code-examples/agui.md +2 -2
  21. package/.docs/organized/code-examples/ai-elements.md +2 -2
  22. package/.docs/organized/code-examples/ai-sdk-useChat.md +2 -2
  23. package/.docs/organized/code-examples/ai-sdk-v5.md +2 -2
  24. package/.docs/organized/code-examples/assistant-ui.md +2 -2
  25. package/.docs/organized/code-examples/bird-checker-with-nextjs-and-eval.md +2 -2
  26. package/.docs/organized/code-examples/bird-checker-with-nextjs.md +2 -2
  27. package/.docs/organized/code-examples/client-side-tools.md +2 -2
  28. package/.docs/organized/code-examples/crypto-chatbot.md +2 -2
  29. package/.docs/organized/code-examples/heads-up-game.md +2 -2
  30. package/.docs/organized/code-examples/openapi-spec-writer.md +2 -2
  31. package/.docs/raw/agents/agent-memory.mdx +48 -31
  32. package/.docs/raw/agents/guardrails.mdx +8 -1
  33. package/.docs/raw/agents/networks.mdx +197 -128
  34. package/.docs/raw/agents/overview.mdx +10 -9
  35. package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +92 -1
  36. package/.docs/raw/getting-started/installation.mdx +61 -68
  37. package/.docs/raw/memory/conversation-history.mdx +2 -2
  38. package/.docs/raw/memory/semantic-recall.mdx +36 -10
  39. package/.docs/raw/rag/chunking-and-embedding.mdx +19 -7
  40. package/.docs/raw/reference/client-js/agents.mdx +44 -25
  41. package/.docs/raw/reference/scorers/answer-relevancy.mdx +3 -6
  42. package/.docs/raw/reference/scorers/answer-similarity.mdx +7 -13
  43. package/.docs/raw/reference/scorers/bias.mdx +3 -6
  44. package/.docs/raw/reference/scorers/completeness.mdx +3 -6
  45. package/.docs/raw/reference/scorers/context-precision.mdx +6 -9
  46. package/.docs/raw/reference/scorers/context-relevance.mdx +12 -18
  47. package/.docs/raw/reference/scorers/faithfulness.mdx +3 -6
  48. package/.docs/raw/reference/scorers/hallucination.mdx +3 -6
  49. package/.docs/raw/reference/scorers/noise-sensitivity.mdx +13 -23
  50. package/.docs/raw/reference/scorers/prompt-alignment.mdx +16 -20
  51. package/.docs/raw/reference/scorers/tool-call-accuracy.mdx +4 -5
  52. package/.docs/raw/reference/scorers/toxicity.mdx +3 -6
  53. package/.docs/raw/reference/workflows/step.mdx +1 -1
  54. package/.docs/raw/reference/workflows/workflow-methods/sendEvent.mdx +23 -2
  55. package/.docs/raw/reference/workflows/workflow-methods/sleep.mdx +22 -4
  56. package/.docs/raw/reference/workflows/workflow-methods/sleepUntil.mdx +14 -4
  57. package/.docs/raw/reference/workflows/workflow-methods/waitForEvent.mdx +18 -1
  58. package/.docs/raw/server-db/runtime-context.mdx +13 -3
  59. package/.docs/raw/streaming/tool-streaming.mdx +30 -0
  60. package/.docs/raw/tools-mcp/overview.mdx +1 -1
  61. package/.docs/raw/workflows/overview.mdx +1 -1
  62. package/.docs/raw/workflows/suspend-and-resume.mdx +34 -23
  63. package/CHANGELOG.md +7 -0
  64. package/package.json +4 -4
  65. package/.docs/raw/workflows/pausing-execution.mdx +0 -142
@@ -1,166 +1,235 @@
  ---
- title: "Handling Complex LLM Operations | Networks | Mastra"
- description: "Networks in Mastra help you execute individual or multiple Mastra primitives in a non-deterministic way using a single API."
+ title: "Agent Networks | Agents | Mastra Docs"
+ description: Learn how to coordinate multiple agents, workflows, and tools using agent networks for complex, non-deterministic task execution.
  ---

- # Agent.network()
+ # Agent Networks

- `Agent.network()` introduces a flexible, composable and non-deterministic way to orchestrate multiple specialized agents and workflows, enabling complex, reasoning and task completion.
+ Agent networks in Mastra coordinate multiple agents, workflows, and tools to handle tasks that aren't clearly defined upfront but can be inferred from the user's message or context. A top-level **routing agent** (a Mastra agent with other agents, workflows, and tools configured) uses an LLM to interpret the request and decide which primitives (sub-agents, workflows, or tools) to call, in what order, and with what data.

- There are two main problem areas that this system is designed to solve:
+ ## When to use networks

- - Scenarios where a single agent is insufficient, and tasks require collaboration, routing, or sequential/parallel execution across multiple agents and workflows.
- - Scenarios where the task is not fully defined and is initiated with unstructured input. A network allows your Agent to figure out which primitive to call and turn unstructured input into a structured task.
+ Use networks for complex tasks that require coordination across multiple primitives. Unlike workflows, which follow a predefined sequence, networks rely on LLM reasoning to interpret the request and decide what to run.

- ## Differences from Workflows
+ ## Core principles

- - Workflows are linear or branched sequences of steps. This creates a deterministic flow of execution.
- - `Agent.network()` adds a layer of non-deterministic LLM-based orchestration, allowing dynamic, multi-agent collaboration and routing. This creates a non-deterministic flow of execution.
+ Mastra agent networks operate using these principles:

- ## Important details
+ - Memory is required when using `.network()` and is used to store task history and determine when a task is complete.
+ - Primitives are selected based on their descriptions. Clear, specific descriptions improve routing. For workflows and tools, the input schema helps determine the right inputs at runtime.
+ - If multiple primitives have overlapping functionality, the agent favors the more specific one, using a combination of schema and descriptions to decide which to run.

- - Providing memory to the Agent when using `network()` is _not_ optional, as it is required to store the task history. Memory is the core primitive used for any decisions on which primitives to run, as well as determine task completion.
- - Any available primitives (agents, workflows) are used based on their descriptions. The better the description, the better the routing agent will be able to select the right primitive. For workflows, the input schema is also used to determine which inputs to use when calling the workflow. More descriptive naming yields better results.
- - When primitives with overlapping capabilities are available, the agent will use the most specific primitive. For example, if both an agent and a workflow can do research, it will use the input schema of the workflow to determine which primitive to select.
+ ## Creating an agent network

- ## Turning an Agent into a Network
+ An agent network is built around a top-level routing agent that delegates tasks to agents, workflows, and tools defined in its configuration. Memory is configured on the routing agent using the `memory` option, and `instructions` define the agent's routing behavior.

- As an example, we have an Agent with 3 primitives at its disposal:
+ ```typescript {22-23,26,29} filename="src/mastra/agents/routing-agent.ts" showLineNumbers copy
+ import { openai } from "@ai-sdk/openai";
+ import { Agent } from "@mastra/core/agent";
+ import { Memory } from "@mastra/memory";
+ import { LibSQLStore } from "@mastra/libsql";

- - `agent1`: A general research agent that can do research on a given topic.
- - `agent2`: A general writing agent that can write a full report based on the researched material.
- - `workflow1`: A workflow that can research a given city and write a full report based on the researched material (using both agent1 and agent2).
+ import { researchAgent } from "./research-agent";
+ import { writingAgent } from "./writing-agent";

- We use the `network` method to create a task that requires multiple primitives. The Agent will, using memory, figure out which primitives to call and in which order, as well as when the task is complete.
+ import { cityWorkflow } from "../workflows/city-workflow";
+ import { weatherTool } from "../tools/weather-tool";

+ export const routingAgent = new Agent({
+   name: "routing-agent",
+   instructions: `
+     You are a network of writers and researchers.
+     The user will ask you to research a topic.
+     Always respond with a complete report—no bullet points.
+     Write in full paragraphs, like a blog post.
+     Do not answer with incomplete or uncertain information.`,
+   model: openai("gpt-4o-mini"),
+   agents: {
+     researchAgent,
+     writingAgent
+   },
+   workflows: {
+     cityWorkflow
+   },
+   tools: {
+     weatherTool
+   },
+   memory: new Memory({
+     storage: new LibSQLStore({
+       url: "file:../mastra.db"
+     })
+   })
+ });
+ ```
 
- ```typescript
- import { Agent } from '@mastra/core/agent';
- import { createStep, createWorkflow } from '@mastra/core/workflows';
- import { RuntimeContext } from '@mastra/core/runtime-context';
- import { Memory } from '@mastra/memory';
- import { openai } from '@ai-sdk/openai';
- import { LibSQLStore } from '@mastra/libsql';
- import { z } from 'zod';
+ ### Writing descriptions for network primitives

- const memory = new Memory({
-   storage: new LibSQLStore({
-     url: 'file:../mastra.db', // Or your database URL
-   }),
- });
+ When configuring a Mastra agent network, each primitive (agent, workflow, or tool) needs a clear description to help the routing agent decide which to use. The routing agent uses each primitive's description and schema to determine what it does and how to use it. Clear descriptions and well-defined input and output schemas improve routing accuracy.

- const agentStep1 = createStep({
-   id: 'agent-step',
-   description: 'This step is used to do research and text synthesis.',
-   inputSchema: z.object({
-     city: z.string().describe('The city to research'),
-   }),
-   outputSchema: z.object({
-     text: z.string(),
-   }),
-   execute: async ({ inputData }) => {
-     const resp = await agent1.generate(inputData.city, {
-       structuredOutput: {
-         schema: z.object({
-           text: z.string(),
-         })
-       },
-     });
-
-     return { text: resp.object.text };
-   },
+ #### Agent descriptions
+
+ Each agent in a network should include a clear `description` that explains what the agent does.
+
+ ```typescript filename="src/mastra/agents/research-agent.ts" showLineNumbers
+ export const researchAgent = new Agent({
+   name: "research-agent",
+   description: `This agent gathers concise research insights in bullet-point form.
+     It's designed to extract key facts without generating full
+     responses or narrative content.`,
+   // ...
  });
+ ```
+ ```typescript filename="src/mastra/agents/writing-agent.ts" showLineNumbers
+ export const writingAgent = new Agent({
+   name: "writing-agent",
+   description: `This agent turns researched material into well-structured
+     written content. It produces full-paragraph reports with no bullet points,
+     suitable for use in articles, summaries, or blog posts.`,
+   // ...
+ });
+ ```
 
- const agentStep2 = createStep({
-   id: 'agent-step-two',
-   description: 'This step is used to do research and text synthesis.',
+ #### Workflow descriptions
+
+ Workflows in a network should include a `description` to explain their purpose, along with `inputSchema` and `outputSchema` to describe the expected data.
+
+ ```typescript filename="src/mastra/workflows/city-workflow.ts" showLineNumbers
+ export const cityWorkflow = createWorkflow({
+   id: "city-workflow",
+   description: `This workflow handles city-specific research tasks.
+     It first gathers factual information about the city, then synthesizes
+     that research into a full written report. Use it when the user input
+     includes a city to be researched.`,
    inputSchema: z.object({
-     text: z.string().describe('The city to research'),
+     city: z.string()
    }),
    outputSchema: z.object({
-     text: z.string(),
-   }),
-   execute: async ({ inputData }) => {
-     const resp = await agent2.generate(inputData.text, {
-       structuredOutput: {
-         schema: z.object({
-           text: z.string(),
-         })
-       },
-     });
-
-     return { text: resp.object.text };
-   },
- });
+     text: z.string()
+   })
+   //...
+ })
+ ```
 
- const workflow1 = createWorkflow({
-   id: 'workflow1',
-   description:
-     'This workflow is perfect for researching a specific city. It should be used when you have a city in mind to research.',
-   steps: [],
+ #### Tool descriptions
+
+ Tools in a network should include a `description` to explain their purpose, along with `inputSchema` and `outputSchema` to describe the expected data.
+
+ ```typescript filename="src/mastra/tools/weather-tool.ts" showLineNumbers
+ export const weatherTool = createTool({
+   id: "weather-tool",
+   description: ` Retrieves current weather information using the wttr.in API.
+     Accepts a city or location name as input and returns a short weather summary.
+     Use this tool whenever up-to-date weather data is requested.
+   `,
    inputSchema: z.object({
-     city: z.string(),
+     location: z.string()
    }),
    outputSchema: z.object({
-     text: z.string(),
+     weather: z.string()
    }),
- })
-   .then(agentStep1)
-   .then(agentStep2)
-   .commit();
-
- const agent1 = new Agent({
-   name: 'agent1',
-   instructions:
-     'This agent is used to do research, but not create full responses. Answer in bullet points only and be concise.',
-   description:
-     'This agent is used to do research, but not create full responses. Answer in bullet points only and be concise.',
-   model: openai('gpt-4o'),
+   // ...
  });
+ ```
 
- const agent2 = new Agent({
-   name: 'agent2',
-   description:
-     'This agent is used to do text synthesis on researched material. Write a full report based on the researched material. Writes reports in full paragraphs. Should be used to synthesize text from different sources together as a final report.',
-   instructions:
-     'This agent is used to do text synthesis on researched material. Write a full report based on the researched material. Do not use bullet points. Write full paragraphs. There should not be a single bullet point in the final report.',
-   model: openai('gpt-4o'),
- });
+ ## Calling agent networks

- const routingAgent = new Agent({
-   id: 'test-network',
-   name: 'Test Network',
-   instructions:
-     'You are a network of writers and researchers. The user will ask you to research a topic. You always need to answer with a full report. Bullet points are NOT a full report. WRITE FULL PARAGRAPHS like this is a blog post or something similar. You should not rely on partial information.',
-   model: openai('gpt-4o'),
-   agents: {
-     agent1,
-     agent2,
-   },
-   workflows: {
-     workflow1,
-   },
-   memory: memory,
- });
+ Call a Mastra agent network using `.network()` with a user message. The method returns a stream of events that you can iterate over to track execution progress and retrieve the final result.
+
+ ### Agent example
+
+ In this example, the network interprets the message and would route the request to both the `researchAgent` and `writingAgent` to generate a complete response.
+
+ ```typescript showLineNumbers copy
+ const result = await routingAgent.network("Tell me three cool ways to use Mastra");
+
+ for await (const chunk of result) {
+   console.log(chunk.type);
+   if (chunk.type === "network-execution-event-step-finish") {
+     console.log(chunk.payload.result);
+   }
+ }
+ ```
+
+ #### Agent output
+
+ The following `chunk.type` events are emitted during this request:
+
+ ```text
+ routing-agent-start
+ routing-agent-end
+ agent-execution-start
+ agent-execution-event-start
+ agent-execution-event-step-start
+ agent-execution-event-text-start
+ agent-execution-event-text-delta
+ agent-execution-event-text-end
+ agent-execution-event-step-finish
+ agent-execution-event-finish
+ agent-execution-end
+ network-execution-event-step-finish
+ ```
+
+ ### Workflow example
+
+ In this example, the routing agent recognizes the city name in the message and runs the `cityWorkflow`. The workflow defines steps that call the `researchAgent` to gather facts, then the `writingAgent` to generate the final text.
+
+ ```typescript showLineNumbers copy
+ const result = await routingAgent.network("Tell me some historical facts about London");

- const runtimeContext = new RuntimeContext();
+ for await (const chunk of result) {
+   console.log(chunk.type);
+   if (chunk.type === "network-execution-event-step-finish") {
+     console.log(chunk.payload.result);
+   }
+ }
+ ```
+
+ #### Workflow output
+
+ The following `chunk.type` events are emitted during this request:

- console.log(
-   // specifying the task, note that there is a mention here about using an agent for synthesis. This is because the routing agent can actually do some synthesis on results on its own, so this will force it to use agent2 instead
-   await routingAgent.network(
-     'What are the biggest cities in France? Give me 3. How are they like? Find cities, then do thorough research on each city, and give me a final full report synthesizing all that information. Make sure to use an agent for synthesis.',
-     { runtimeContext },
-   ),
- );
+ ```text
+ routing-agent-end
+ workflow-execution-start
+ workflow-execution-event-workflow-start
+ workflow-execution-event-workflow-step-start
+ workflow-execution-event-workflow-step-result
+ workflow-execution-event-workflow-finish
+ workflow-execution-end
+ routing-agent-start
+ network-execution-event-step-finish
  ```

- For the given task (research 3 biggest cities in France and write a full report), the AgentNetwork will call the following primitives:
+ ### Tool example
+
+ In this example, the routing agent skips the `researchAgent`, `writingAgent`, and `cityWorkflow`, and calls the `weatherTool` directly to complete the task.
+
+ ```typescript showLineNumbers copy
+ const result = await routingAgent.network("What's the weather in London?");
+
+ for await (const chunk of result) {
+   console.log(chunk.type);
+   if (chunk.type === "network-execution-event-step-finish") {
+     console.log(chunk.payload.result);
+   }
+ }
+ ```
+
+ #### Tool output
+
+ The following `chunk.type` events are emitted during this request:
+
+ ```text
+ routing-agent-start
+ routing-agent-end
+ tool-execution-start
+ tool-execution-end
+ network-execution-event-step-finish
+ ```

- 1. `agent1` to find the 3 biggest cities in France.
- 2. `workflow1` to research each city one by one. The workflow uses `memory` to figure out which cities have already been researched and makes sure it has researched all of them before proceeding.
- 3. `agent2` to synthesize the final report.
+ ## Related

- ### How It Works
+ - [Agent Memory](./agent-memory.mdx)
+ - [Workflows Overview](../workflows/overview.mdx)
+ - [Runtime Context](../server-db/runtime-context.mdx)

- - The underlying engine is a Mastra workflow that wraps the single call `generate` workflow.
- - The workflow will repeatedly call the network execution workflow with a `dountil` structure, until the routing model determines the task is complete. This check is used as the `dountil` condition.
@@ -13,12 +13,12 @@ Agents use LLMs and tools to solve open-ended tasks. They reason about goals, de

  > **📹 Watch**: → An introduction to agents, and how they compare to workflows [YouTube (7 minutes)](https://youtu.be/0jg2g3sNvgw)

- ## Getting started
+ ## Setting up agents

  <Tabs items={["Mastra model router", "Vercel AI SDK"]}>
  <Tabs.Tab>
  <Steps>
- ### Install dependencies
+ ### Install dependencies [#install-dependencies-mastra-router]

  Add the Mastra core package to your project:

@@ -26,7 +26,7 @@ Add the Mastra core package to your project:
  npm install @mastra/core
  ```

- ### Set your API key
+ ### Set your API key [#set-api-key-mastra-router]

  Mastra's model router auto-detects environment variables for your chosen provider. For OpenAI, set `OPENAI_API_KEY`:

@@ -36,7 +36,7 @@ OPENAI_API_KEY=<your-api-key>

  > Mastra supports more than 600 models. Choose from the full list [here](/models).

- ### Create an agent
+ ### Creating an agent [#creating-an-agent-mastra-router]

  Create an agent by instantiating the `Agent` class with system `instructions` and a `model`:
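The code block that completes this step falls outside the hunk. As a minimal sketch (not part of the diff), an agent defined with the model router might look like the following, reusing the `testAgent` name visible in the next hunk header; the string model ID is an assumption:

```typescript
import { Agent } from "@mastra/core/agent";

export const testAgent = new Agent({
  name: "test-agent",
  instructions: "You are a helpful assistant.",
  // Model router form: a provider/model string instead of an SDK client (assumed)
  model: "openai/gpt-4o-mini",
});
```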
 
@@ -53,7 +53,8 @@ export const testAgent = new Agent({
  </Tabs.Tab>
  <Tabs.Tab>
  <Steps>
- ### Install dependencies
+
+ ### Install dependencies [#install-dependencies-ai-sdk]

  Include the Mastra core package alongside the Vercel AI SDK provider you want to use:

@@ -61,7 +62,7 @@ Include the Mastra core package alongside the Vercel AI SDK provider you want to
  npm install @mastra/core @ai-sdk/openai
  ```

- ### Set your API key
+ ### Set your API key [#set-api-key-ai-sdk]

  Set the corresponding environment variable for your provider. For OpenAI via the AI SDK:

@@ -71,7 +72,7 @@ OPENAI_API_KEY=<your-api-key>

  > See the [AI SDK Providers](https://ai-sdk.dev/providers/ai-sdk-providers) in the Vercel AI SDK docs for additional configuration options.

- ### Create an agent
+ ### Creating an agent [#creating-an-agent-ai-sdk]

  To create an agent in Mastra, use the `Agent` class. Every agent must include `instructions` to define its behavior, and a `model` parameter to specify the LLM provider and model. When using the Vercel AI SDK, provide the client to your agent's `model` field:
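The accompanying code block is truncated by the hunk. A minimal sketch (not part of the diff) of the AI SDK variant, using the same `openai()` client pattern as the routing-agent example earlier in this diff:

```typescript
import { openai } from "@ai-sdk/openai";
import { Agent } from "@mastra/core/agent";

export const testAgent = new Agent({
  name: "test-agent",
  instructions: "You are a helpful assistant.",
  // The AI SDK provider client is passed directly to the model field
  model: openai("gpt-4o-mini"),
});
```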
 
@@ -125,8 +126,8 @@ instructions: {
    content:
      "You are an expert code reviewer. Analyze code for bugs, performance issues, and best practices.",
    providerOptions: {
-     openai: { reasoning_effort: "high" }, // OpenAI's reasoning models
-     anthropic: { cache_control: { type: "ephemeral" } } // Anthropic's prompt caching
+     openai: { reasoningEffort: "high" }, // OpenAI's reasoning models
+     anthropic: { cacheControl: { type: "ephemeral" } } // Anthropic's prompt caching
    }
  }
  ```
@@ -82,6 +82,30 @@ const { error, status, sendMessage, messages, regenerate, stop } =
  }),
  });
  ```
+
+ Pass extra agent stream execution options:
+
+ ```typescript
+ const { error, status, sendMessage, messages, regenerate, stop } =
+   useChat({
+     transport: new DefaultChatTransport({
+       api: 'http://localhost:4111/chat',
+       prepareSendMessagesRequest({ messages }) {
+         return {
+           body: {
+             messages,
+             // Pass memory config
+             memory: {
+               thread: "user-1",
+               resource: "user-1"
+             }
+           },
+         }
+       }
+     }),
+   });
+ ```
+
  ### `workflowRoute()`

  Use the `workflowRoute()` utility to create a route handler that automatically formats the workflow stream into an AI SDK-compatible format.
@@ -155,7 +179,7 @@ const { error, status, sendMessage, messages, regenerate, stop } =

  ### Custom UI

- The `@mastra/ai-sdk` package transforms and emits Mastra streams (e.g workflow, network streams) into AI SDK-compatible format.
+ The `@mastra/ai-sdk` package transforms and emits Mastra streams (e.g workflow, network streams) into AI SDK-compatible [uiMessages DataParts](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#datauipart) format.

  - **Top-level parts**: These are streamed via direct workflow and network stream transformations (e.g in `workflowRoute()` and `networkRoute()`)
    - `data-workflow`: Aggregates a workflow run with step inputs/outputs and final usage.
@@ -221,6 +245,38 @@ export const AgentTool = ({ id, text, status }: AgentDataPart) => {
  );
  };
  ```
+ ### Custom Tool streaming
+ To stream custom data parts from within your tool execution function, use the
+ `writer.custom()` method.
+
+ ```typescript {5,8,15} showLineNumbers copy
+ import { createTool } from "@mastra/core/tools";
+
+ export const testTool = createTool({
+   // ...
+   execute: async ({ context, writer }) => {
+     const { value } = context;
+
+     await writer?.custom({
+       type: "data-tool-progress",
+       status: "pending"
+     });
+
+     const response = await fetch(...);
+
+     await writer?.custom({
+       type: "data-tool-progress",
+       status: "success"
+     });
+
+     return {
+       value: ""
+     };
+   }
+ });
+ ```
+
+ For more information about tool streaming see [Tool streaming documentation](/docs/streaming/tool-streaming)

  ### Stream Transformations

@@ -252,6 +308,41 @@ export async function POST(req: Request) {
  }
  }
  ```

+ ### Client Side Stream Transformations
+
+ If you have a client-side `response` from `agent.stream(...)` and want AI SDK-formatted parts without custom SSE parsing, wrap `response.processDataStream` into a `ReadableStream<ChunkType>` and pipe it through `toAISdkFormat`:
+
+ ```typescript filename="client-stream-to-ai-sdk.ts" copy
+ import { createUIMessageStream } from 'ai';
+ import { toAISdkFormat } from '@mastra/ai-sdk';
+ import type { ChunkType, MastraModelOutput } from '@mastra/core/stream';
+
+ // Client SDK agent stream
+ const response = await agent.stream({ messages: 'What is the weather in Tokyo' });
+
+ const chunkStream: ReadableStream<ChunkType> = new ReadableStream<ChunkType>({
+   start(controller) {
+     response.processDataStream({
+       onChunk: async (chunk) => {
+         controller.enqueue(chunk as ChunkType);
+       },
+     }).finally(() => controller.close());
+   },
+ });
+
+ const uiMessageStream = createUIMessageStream({
+   execute: async ({ writer }) => {
+     for await (const part of toAISdkFormat(chunkStream as unknown as MastraModelOutput, { from: 'agent' })) {
+       writer.write(part);
+     }
+   },
+ });
+
+ for await (const part of uiMessageStream) {
+   console.log(part);
+ }
+ ```
+
  ## UI Hooks

  Mastra supports AI SDK UI hooks for connecting frontend components directly to agents using HTTP streams.
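The hook wiring itself is truncated here. A minimal sketch (not part of the diff) reusing the `useChat` transport pattern shown earlier in this file; the endpoint URL is illustrative:

```typescript
import { useChat } from "@ai-sdk/react";
import { DefaultChatTransport } from "ai";

export function Chat() {
  // Connect the component to a Mastra agent route over an HTTP stream
  const { messages, sendMessage, status } = useChat({
    transport: new DefaultChatTransport({
      api: "http://localhost:4111/chat", // same illustrative endpoint as the example above
    }),
  });

  // Render messages and an input bound to sendMessage here
  return null;
}
```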