@mastra/mcp-docs-server 1.0.0-beta.7 → 1.0.0-beta.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63) hide show
  1. package/.docs/organized/changelogs/%40internal%2Fchangeset-cli.md +1 -15
  2. package/.docs/organized/changelogs/%40internal%2Fexternal-types.md +1 -7
  3. package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +1 -55
  4. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +12 -12
  5. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +48 -48
  6. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +17 -17
  7. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +17 -17
  8. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +18 -18
  9. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +17 -17
  10. package/.docs/organized/changelogs/%40mastra%2Fcodemod.md +6 -0
  11. package/.docs/organized/changelogs/%40mastra%2Fconvex.md +16 -0
  12. package/.docs/organized/changelogs/%40mastra%2Fcore.md +78 -78
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +18 -18
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +23 -23
  15. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +17 -17
  16. package/.docs/organized/changelogs/%40mastra%2Flance.md +17 -17
  17. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +65 -65
  18. package/.docs/organized/changelogs/%40mastra%2Floggers.md +29 -29
  19. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +16 -16
  20. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +12 -12
  21. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +10 -10
  22. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +17 -17
  23. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +17 -17
  24. package/.docs/organized/changelogs/%40mastra%2Fpg.md +69 -69
  25. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +31 -31
  26. package/.docs/organized/changelogs/%40mastra%2Freact.md +14 -0
  27. package/.docs/organized/changelogs/%40mastra%2Fserver.md +56 -56
  28. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +17 -17
  29. package/.docs/organized/changelogs/create-mastra.md +13 -13
  30. package/.docs/organized/changelogs/mastra.md +21 -21
  31. package/.docs/organized/code-examples/mcp-server-adapters.md +1 -2
  32. package/.docs/organized/code-examples/processors-with-ai-sdk.md +14 -0
  33. package/.docs/organized/code-examples/server-app-access.md +1 -1
  34. package/.docs/organized/code-examples/server-hono-adapter.md +1 -1
  35. package/.docs/raw/getting-started/studio.mdx +4 -2
  36. package/.docs/raw/guides/agent-frameworks/ai-sdk.mdx +161 -0
  37. package/.docs/raw/guides/build-your-ui/ai-sdk-ui.mdx +381 -431
  38. package/.docs/raw/guides/getting-started/quickstart.mdx +11 -0
  39. package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +3 -3
  40. package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +31 -0
  41. package/.docs/raw/reference/ai-sdk/chat-route.mdx +127 -0
  42. package/.docs/raw/reference/ai-sdk/handle-chat-stream.mdx +117 -0
  43. package/.docs/raw/reference/ai-sdk/handle-network-stream.mdx +64 -0
  44. package/.docs/raw/reference/ai-sdk/handle-workflow-stream.mdx +116 -0
  45. package/.docs/raw/reference/ai-sdk/network-route.mdx +99 -0
  46. package/.docs/raw/reference/ai-sdk/to-ai-sdk-stream.mdx +289 -0
  47. package/.docs/raw/reference/ai-sdk/workflow-route.mdx +110 -0
  48. package/.docs/raw/reference/client-js/agents.mdx +251 -67
  49. package/.docs/raw/reference/client-js/mastra-client.mdx +2 -2
  50. package/.docs/raw/reference/client-js/memory.mdx +4 -1
  51. package/.docs/raw/reference/core/getMemory.mdx +73 -0
  52. package/.docs/raw/reference/core/getStoredAgentById.mdx +183 -0
  53. package/.docs/raw/reference/core/listMemory.mdx +70 -0
  54. package/.docs/raw/reference/core/listStoredAgents.mdx +151 -0
  55. package/.docs/raw/reference/core/mastra-class.mdx +8 -0
  56. package/.docs/raw/reference/server/express-adapter.mdx +52 -0
  57. package/.docs/raw/reference/server/hono-adapter.mdx +54 -0
  58. package/.docs/raw/server-db/custom-api-routes.mdx +5 -5
  59. package/.docs/raw/server-db/server-adapters.mdx +94 -91
  60. package/.docs/raw/streaming/tool-streaming.mdx +10 -14
  61. package/.docs/raw/workflows/workflow-state.mdx +4 -5
  62. package/CHANGELOG.md +15 -0
  63. package/package.json +4 -4
@@ -8,7 +8,7 @@ import TabItem from "@theme/TabItem";
8
8
 
9
9
  # Using AI SDK UI
10
10
 
11
- [AI SDK UI](https://sdk.vercel.ai) is a free open-source library that gives you the tools you need to build AI-powered products. Mastra has great integration with AI SDK UI, including model routing, streaming support, custom React hooks, custom tool/UI, and more.
11
+ [AI SDK UI](https://sdk.vercel.ai) is a library of React utilities and components for building AI-powered interfaces. In this guide, you'll learn how to use `@mastra/ai-sdk` to convert Mastra's output to AI SDK-compatible formats, enabling you to use its hooks and components in your frontend.
12
12
 
13
13
  :::note
14
14
  Migrating from AI SDK v4 to v5? See the [migration guide](/guides/v1/migrations/ai-sdk-v4-to-v5).
@@ -16,40 +16,69 @@ Migrating from AI SDK v4 to v5? See the [migration guide](/guides/v1/migrations/
16
16
 
17
17
  :::tip
18
18
 
19
- Visit Mastra's [**UI Dojo**](https://ui-dojo.mastra.ai/) to see real-world examples of AI SDK integrated with Mastra.
19
+ Want to see more examples? Visit Mastra's [**UI Dojo**](https://ui-dojo.mastra.ai/) or the [Next.js quickstart guide](/guides/v1/getting-started/next-js).
20
20
 
21
21
  :::
22
22
 
23
- ## Streaming
23
+ ## Getting Started
24
24
 
25
- The recommended way of using Mastra and AI SDK together is by installing the `@mastra/ai-sdk` package. `@mastra/ai-sdk` provides custom API routes and utilities for streaming Mastra agents in AI SDK-compatible formats. Including chat, workflow, and network route handlers, along with utilities and exported types for UI integrations.
25
+ Use Mastra and AI SDK UI together by installing the `@mastra/ai-sdk` package. `@mastra/ai-sdk` provides custom API routes and utilities for streaming Mastra agents in AI SDK-compatible formats. This includes chat, workflow, and network route handlers, along with utilities and exported types for UI integrations.
26
+
27
+ `@mastra/ai-sdk` integrates with AI SDK UI's three main hooks: [`useChat()`](https://ai-sdk.dev/docs/ai-sdk-ui/chatbot), [`useCompletion()`](https://ai-sdk.dev/docs/ai-sdk-ui/completion), and [`useObject()`](https://ai-sdk.dev/docs/ai-sdk-ui/object-generation).
28
+
29
+ Install the required packages to get started:
26
30
 
27
31
  <Tabs>
28
32
  <TabItem value="npm" label="npm">
29
33
  ```bash copy
30
- npm install @mastra/ai-sdk@beta
34
+ npm install @mastra/ai-sdk@beta @ai-sdk/react ai
31
35
  ```
32
36
  </TabItem>
33
37
  <TabItem value="pnpm" label="pnpm">
34
38
  ```bash copy
35
- pnpm add @mastra/ai-sdk@beta
39
+ pnpm add @mastra/ai-sdk@beta @ai-sdk/react ai
36
40
  ```
37
41
  </TabItem>
38
42
  <TabItem value="yarn" label="yarn">
39
43
  ```bash copy
40
- yarn add @mastra/ai-sdk@beta
44
+ yarn add @mastra/ai-sdk@beta @ai-sdk/react ai
41
45
  ```
42
46
  </TabItem>
43
47
  <TabItem value="bun" label="bun">
44
48
  ```bash copy
45
- bun add @mastra/ai-sdk@beta
49
+ bun add @mastra/ai-sdk@beta @ai-sdk/react ai
46
50
  ```
47
51
  </TabItem>
48
52
  </Tabs>
49
53
 
50
- ### `chatRoute()`
54
+ You're now ready to follow the integration guides and recipes below!
55
+
56
+ ## Integration Guides
57
+
58
+ Typically, you'll set up API routes that stream Mastra content in AI SDK-compatible format, and then use those routes in AI SDK UI hooks like `useChat()`. Below you'll find two main approaches to achieve this:
59
+
60
+ - [Mastra's server](#mastras-server)
61
+ - [Framework-agnostic](#framework-agnostic)
62
+
63
+ Once you have your API routes set up, you can use them in the [`useChat()`](#usechat) hook.
64
+
65
+ ### Mastra's server
66
+
67
+ Run Mastra as a standalone server and connect your frontend (e.g. using Vite + React) to its API endpoints. You'll be using Mastra's [custom API routes](/docs/v1/server-db/custom-api-routes) feature for this.
68
+
69
+ :::info
70
+
71
+ Mastra's [**UI Dojo**](https://ui-dojo.mastra.ai/) is an example of this setup.
72
+
73
+ :::
51
74
 
52
- When setting up a [custom API route](/docs/v1/server-db/custom-api-routes), use the `chatRoute()` utility to create a route handler that automatically formats the agent stream into an AI SDK-compatible format.
75
+ You can use [`chatRoute()`](/reference/v1/ai-sdk/chat-route), [`workflowRoute()`](/reference/v1/ai-sdk/workflow-route), and [`networkRoute()`](/reference/v1/ai-sdk/network-route) to create API routes that stream Mastra content in AI SDK-compatible format. Once implemented, you can use these API routes in [`useChat()`](#usechat).
76
+
77
+ <Tabs>
78
+
79
+ <TabItem value="chatRoute" label="chatRoute()">
80
+
81
+ This example shows how to set up a chat route at the `/chat` endpoint that uses an agent with the ID `weatherAgent`.
53
82
 
54
83
  ```typescript title="src/mastra/index.ts" copy
55
84
  import { Mastra } from "@mastra/core";
@@ -67,41 +96,13 @@ export const mastra = new Mastra({
67
96
  });
68
97
  ```
69
98
 
70
- Once you have your `/chat` API route set up, you can call the `useChat()` hook in your application.
99
+ You can also use dynamic agent routing; see the [`chatRoute()` reference documentation](/reference/v1/ai-sdk/chat-route) for more details.
71
100
 
72
- ```typescript
73
- const { error, status, sendMessage, messages, regenerate, stop } = useChat({
74
- transport: new DefaultChatTransport({
75
- api: "http://localhost:4111/chat",
76
- }),
77
- });
78
- ```
101
+ </TabItem>
79
102
 
80
- Pass extra agent stream execution options:
81
-
82
- ```typescript
83
- const { error, status, sendMessage, messages, regenerate, stop } = useChat({
84
- transport: new DefaultChatTransport({
85
- api: "http://localhost:4111/chat",
86
- prepareSendMessagesRequest({ messages }) {
87
- return {
88
- body: {
89
- messages,
90
- // Pass memory config
91
- memory: {
92
- thread: "user-1",
93
- resource: "user-1",
94
- },
95
- },
96
- };
97
- },
98
- }),
99
- });
100
- ```
103
+ <TabItem value="workflowRoute" label="workflowRoute()">
101
104
 
102
- ### `workflowRoute()`
103
-
104
- Use the `workflowRoute()` utility to create a route handler that automatically formats the workflow stream into an AI SDK-compatible format.
105
+ This example shows how to set up a workflow route at the `/workflow` endpoint that uses a workflow with the ID `weatherWorkflow`.
105
106
 
106
107
  ```typescript title="src/mastra/index.ts" copy
107
108
  import { Mastra } from "@mastra/core";
@@ -112,45 +113,28 @@ export const mastra = new Mastra({
112
113
  apiRoutes: [
113
114
  workflowRoute({
114
115
  path: "/workflow",
115
- agent: "weatherAgent",
116
+ workflow: "weatherWorkflow",
116
117
  }),
117
118
  ],
118
119
  },
119
120
  });
120
121
  ```
121
122
 
122
- Once you have your `/workflow` API route set up, you can call the `useChat()` hook in your application.
123
-
124
- ```typescript
125
- const { error, status, sendMessage, messages, regenerate, stop } = useChat({
126
- transport: new DefaultChatTransport({
127
- api: "http://localhost:4111/workflow",
128
- prepareSendMessagesRequest({ messages }) {
129
- return {
130
- body: {
131
- inputData: {
132
- city: messages[messages.length - 1].parts[0].text,
133
- },
134
- //Or resumeData for resuming a suspended workflow
135
- resumeData: {
136
- confirmation: messages[messages.length - 1].parts[0].text
137
- }
138
- },
139
- };
140
- },
141
- }),
142
- });
143
- ```
123
+ You can also use dynamic workflow routing; see the [`workflowRoute()` reference documentation](/reference/v1/ai-sdk/workflow-route) for more details.
144
124
 
145
125
  :::tip Agent streaming in workflows
146
- When a workflow step pipes an agent's stream to the workflow writer (e.g., `await response.fullStream.pipeTo(writer)`), the agent's text chunks and tool calls are automatically streamed to the UI in real-time. This provides a seamless streaming experience even when agents are running inside workflow steps.
147
126
 
148
- Learn more in [Workflow Streaming](/docs/v1/streaming/workflow-streaming#streaming-agent-text-chunks-to-ui).
127
+ When a workflow step pipes an agent's stream to the workflow writer (e.g., `await response.fullStream.pipeTo(writer)`), the agent's text chunks and tool calls are forwarded to the UI stream in real time, even when the agent runs inside workflow steps.
128
+
129
+ See [Workflow Streaming](/docs/v1/streaming/workflow-streaming#streaming-agent-text-chunks-to-ui) for more details.
130
+
149
131
  :::
150
132
 
151
- ### `networkRoute()`
133
+ </TabItem>
152
134
 
153
- Use the `networkRoute()` utility to create a route handler that automatically formats the agent network stream into an AI SDK-compatible format.
135
+ <TabItem value="networkRoute" label="networkRoute()">
136
+
137
+ This example shows how to set up a network route at the `/network` endpoint that uses an agent with the ID `weatherAgent`.
154
138
 
155
139
  ```typescript title="src/mastra/index.ts" copy
156
140
  import { Mastra } from "@mastra/core";
@@ -168,333 +152,106 @@ export const mastra = new Mastra({
168
152
  });
169
153
  ```
170
154
 
171
- Once you have your `/network` API route set up, you can call the `useChat()` hook in your application.
172
-
173
- ```typescript
174
- const { error, status, sendMessage, messages, regenerate, stop } = useChat({
175
- transport: new DefaultChatTransport({
176
- api: "http://localhost:4111/network",
177
- }),
178
- });
179
- ```
155
+ You can also use dynamic network routing; see the [`networkRoute()` reference documentation](/reference/v1/ai-sdk/network-route) for more details.
180
156
 
181
- ### Custom UI
157
+ </TabItem>
182
158
 
183
- The `@mastra/ai-sdk` package transforms and emits Mastra streams (e.g workflow, network streams) into AI SDK-compatible [uiMessages DataParts](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#datauipart) format.
184
-
185
- - **Top-level parts**: These are streamed via direct workflow and network stream transformations (e.g in `workflowRoute()` and `networkRoute()`)
186
- - `data-workflow`: Aggregates a workflow run with step inputs/outputs and final usage.
187
- - `data-network`: Aggregates a routing/network run with ordered steps (agent/workflow/tool executions) and outputs.
188
-
189
- - **Nested parts**: These are streamed via nested and merged streams from within a tool's `execute()` method.
190
- - `data-tool-workflow`: Nested workflow emitted from within a tool stream.
191
- - `data-tool-network`: Nested network emitted from within an tool stream.
192
- - `data-tool-agent`: Nested agent emitted from within an tool stream.
193
-
194
- Here's an example: For a [nested agent stream within a tool](/docs/v1/streaming/tool-streaming#tool-using-an-agent), `data-tool-agent` UI message parts will be emitted and can be leveraged on the client as documented below:
195
-
196
- ```typescript title="app/page.tsx" copy
197
- "use client";
198
-
199
- import { useChat } from "@ai-sdk/react";
200
- import { AgentTool } from '../ui/agent-tool';
201
- import { DefaultChatTransport } from 'ai';
202
- import type { AgentDataPart } from "@mastra/ai-sdk";
203
-
204
- export default function Page() {
205
- const { messages } = useChat({
206
- transport: new DefaultChatTransport({
207
- api: 'http://localhost:4111/chat',
208
- }),
209
- });
210
-
211
- return (
212
- <div>
213
- {messages.map((message) => (
214
- <div key={message.id}>
215
- {message.parts.map((part, i) => {
216
- switch (part.type) {
217
- case 'data-tool-agent':
218
- return (
219
- <AgentTool {...part.data as AgentDataPart} key={`${message.id}-${i}`} />
220
- );
221
- default:
222
- return null;
223
- }
224
- })}
225
- </div>
226
- ))}
227
- </div>
228
- );
229
- }
230
- ```
231
-
232
- ```typescript title="ui/agent-tool.ts" copy
233
- import { Tool, ToolContent, ToolHeader, ToolOutput } from "../ai-elements/tool";
234
- import type { AgentDataPart } from "@mastra/ai-sdk";
235
-
236
- export const AgentTool = ({ id, text, status }: AgentDataPart) => {
237
- return (
238
- <Tool>
239
- <ToolHeader
240
- type={`${id}`}
241
- state={status === 'finished' ? 'output-available' : 'input-available'}
242
- />
243
- <ToolContent>
244
- <ToolOutput output={text} />
245
- </ToolContent>
246
- </Tool>
247
- );
248
- };
249
- ```
250
-
251
- ### Custom Tool streaming
252
-
253
- To stream custom data parts from within your tool execution function, use the
254
- `writer.custom()` method.
159
+ </Tabs>
255
160
 
256
- ```typescript {5,8,15} showLineNumbers copy
257
- import { createTool } from "@mastra/core/tools";
161
+ ### Framework-agnostic
258
162
 
259
- export const testTool = createTool({
260
- // ...
261
- execute: async ({ context, writer }) => {
262
- const { value } = context;
163
+ If you don't want to run Mastra's server and instead use frameworks like Next.js or Express, you can use the [`handleChatStream()`](/reference/v1/ai-sdk/handle-chat-stream), [`handleWorkflowStream()`](/reference/v1/ai-sdk/handle-workflow-stream), and [`handleNetworkStream()`](/reference/v1/ai-sdk/handle-network-stream) functions in your own API route handlers.
263
164
 
264
- await writer?.custom({
265
- type: "data-tool-progress",
266
- status: "pending"
267
- });
165
+ They return a `ReadableStream` that you can wrap with [`createUIMessageStreamResponse()`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/create-ui-message-stream-response).
268
166
 
269
- const response = await fetch(...);
270
-
271
- await writer?.custom({
272
- type: "data-tool-progress",
273
- status: "success"
274
- });
167
+ The examples below show how to use them with the Next.js App Router.
275
168
 
276
- return {
277
- value: ""
278
- };
279
- }
280
- });
281
- ```
282
-
283
- For more information about tool streaming see [Tool streaming documentation](/docs/v1/streaming/tool-streaming)
169
+ <Tabs>
284
170
 
285
- ### Stream Transformations
171
+ <TabItem value="handleChatStream" label="handleChatStream()">
286
172
 
287
- To manually transform Mastra's streams to AI SDK-compatible format, use the `toAISdkStream()` utility.
173
+ This example shows how to set up a chat route at the `/chat` endpoint that uses an agent with the ID `weatherAgent`.
288
174
 
289
- ```typescript title="app/api/chat/route.ts" copy {3,13}
290
- import { mastra } from "../../mastra";
291
- import { createUIMessageStream, createUIMessageStreamResponse } from "ai";
292
- import { toAISdkStream } from "@mastra/ai-sdk";
175
+ ```typescript title="app/chat/route.ts" copy
176
+ import { handleChatStream } from '@mastra/ai-sdk';
177
+ import { createUIMessageStreamResponse } from 'ai';
178
+ import { mastra } from '@/src/mastra';
293
179
 
294
180
  export async function POST(req: Request) {
295
- const { messages } = await req.json();
296
- const myAgent = mastra.getAgent("weatherAgent");
297
- const stream = await myAgent.stream(messages);
298
-
299
- // Transform stream into AI SDK format and create UI messages stream
300
- const uiMessageStream = createUIMessageStream({
301
- originalMessages: messages,
302
- execute: async ({ writer }) => {
303
- for await (const part of toAISdkStream(stream, { from: "agent" })!) {
304
- writer.write(part);
305
- }
306
- },
307
- });
308
-
309
- // Create a Response that streams the UI message stream to the client
310
- return createUIMessageStreamResponse({
311
- stream: uiMessageStream,
181
+ const params = await req.json();
182
+ const stream = await handleChatStream({
183
+ mastra,
184
+ agentId: 'weatherAgent',
185
+ params,
312
186
  });
187
+ return createUIMessageStreamResponse({ stream });
313
188
  }
314
189
  ```
315
190
 
316
- #### Stream Transformation Options
191
+ </TabItem>
317
192
 
318
- The `toAISdkStream()` function accepts the following options:
193
+ <TabItem value="handleWorkflowStream" label="handleWorkflowStream()">
319
194
 
320
- <PropertiesTable
321
- content={[
322
- {
323
- name: "from",
324
- type: "'agent' | 'network' | 'workflow'",
325
- isRequired: true,
326
- description: "The type of Mastra stream being converted.",
327
- },
328
- {
329
- name: "lastMessageId",
330
- type: "string",
331
- isOptional: true,
332
- description: "(Agent only) The ID of the last message in the conversation.",
333
- },
334
- {
335
- name: "sendStart",
336
- type: "boolean",
337
- isOptional: true,
338
- defaultValue: "true",
339
- description: "(Agent only) Whether to send start events.",
340
- },
341
- {
342
- name: "sendFinish",
343
- type: "boolean",
344
- isOptional: true,
345
- defaultValue: "true",
346
- description: "(Agent only) Whether to send finish events.",
347
- },
348
- {
349
- name: "sendReasoning",
350
- type: "boolean",
351
- isOptional: true,
352
- defaultValue: "false",
353
- description: "(Agent only) Whether to include reasoning-delta chunks in the stream. Set to true to stream the actual reasoning content from models that support extended thinking.",
354
- },
355
- {
356
- name: "sendSources",
357
- type: "boolean",
358
- isOptional: true,
359
- defaultValue: "false",
360
- description: "(Agent only) Whether to include source citations in the output.",
361
- },
362
- {
363
- name: "messageMetadata",
364
- type: "Function",
365
- isOptional: true,
366
- description: "(Agent only) A function that receives the current stream part and returns metadata to attach to start and finish chunks.",
367
- },
368
- {
369
- name: "onError",
370
- type: "Function",
371
- isOptional: true,
372
- description: "(Agent only) A function to handle errors during stream conversion. Receives the error and should return a string representation.",
373
- },
374
- ]}
375
- />
195
+ This example shows how to set up a workflow route at the `/workflow` endpoint that uses a workflow with the ID `weatherWorkflow`.
376
196
 
377
- **Example with reasoning enabled:**
378
-
379
- ```typescript title="app/api/chat/route.ts" copy {11-14}
380
- import { mastra } from "../../mastra";
381
- import { createUIMessageStream, createUIMessageStreamResponse } from "ai";
382
- import { toAISdkStream } from "@mastra/ai-sdk";
197
+ ```typescript title="app/workflow/route.ts" copy
198
+ import { handleWorkflowStream } from '@mastra/ai-sdk';
199
+ import { createUIMessageStreamResponse } from 'ai';
200
+ import { mastra } from '@/src/mastra';
383
201
 
384
202
  export async function POST(req: Request) {
385
- const { messages } = await req.json();
386
- const myAgent = mastra.getAgent("reasoningAgent");
387
- const stream = await myAgent.stream(messages, {
388
- providerOptions: {
389
- openai: { reasoningEffort: "high" },
390
- },
391
- });
392
-
393
- const uiMessageStream = createUIMessageStream({
394
- initialMessages: messages,
395
- execute: async ({ writer }) => {
396
- for await (const part of toAISdkStream(stream, {
397
- from: "agent",
398
- sendReasoning: true, // Enable reasoning content streaming
399
- })!) {
400
- writer.write(part);
401
- }
402
- },
403
- });
404
-
405
- return createUIMessageStreamResponse({
406
- stream: uiMessageStream,
203
+ const params = await req.json();
204
+ const stream = await handleWorkflowStream({
205
+ mastra,
206
+ workflowId: 'weatherWorkflow',
207
+ params,
407
208
  });
209
+ return createUIMessageStreamResponse({ stream });
408
210
  }
409
211
  ```
410
212
 
411
- ### Client Side Stream Transformations
213
+ </TabItem>
412
214
 
413
- If you have a client-side `response` from `agent.stream(...)` and want AI SDK-formatted parts without custom SSE parsing, wrap `response.processDataStream` into a `ReadableStream<ChunkType>` and pipe it through `toAISdkStream`:
215
+ <TabItem value="handleNetworkStream" label="handleNetworkStream()">
414
216
 
415
- ```typescript title="client-stream-to-ai-sdk.ts" copy
416
- import { createUIMessageStream } from "ai";
417
- import { toAISdkStream } from "@mastra/ai-sdk";
418
- import type { ChunkType, MastraModelOutput } from "@mastra/core/stream";
217
+ This example shows how to set up a network route at the `/network` endpoint that uses an agent with the ID `routingAgent`.
419
218
 
420
- // Client SDK agent stream
421
- const response = await agent.stream({
422
- messages: "What is the weather in Tokyo",
423
- });
424
-
425
- const chunkStream: ReadableStream<ChunkType> = new ReadableStream<ChunkType>({
426
- start(controller) {
427
- response
428
- .processDataStream({
429
- onChunk: async (chunk) => {
430
- controller.enqueue(chunk as ChunkType);
431
- },
432
- })
433
- .finally(() => controller.close());
434
- },
435
- });
219
+ ```typescript title="app/network/route.ts" copy
220
+ import { handleNetworkStream } from '@mastra/ai-sdk';
221
+ import { createUIMessageStreamResponse } from 'ai';
222
+ import { mastra } from '@/src/mastra';
436
223
 
437
- const uiMessageStream = createUIMessageStream({
438
- execute: async ({ writer }) => {
439
- for await (const part of toAISdkStream(
440
- chunkStream as unknown as MastraModelOutput,
441
- { from: "agent" },
442
- )) {
443
- writer.write(part);
444
- }
445
- },
446
- });
447
-
448
- for await (const part of uiMessageStream) {
449
- console.log(part);
224
+ export async function POST(req: Request) {
225
+ const params = await req.json();
226
+ const stream = await handleNetworkStream({
227
+ mastra,
228
+ agentId: 'routingAgent',
229
+ params,
230
+ });
231
+ return createUIMessageStreamResponse({ stream });
450
232
  }
451
233
  ```
452
234
 
453
- ## UI Hooks
454
-
455
- Mastra supports AI SDK UI hooks for connecting frontend components directly to agents using HTTP streams.
235
+ </TabItem>
456
236
 
457
- Install the required AI SDK React package:
458
-
459
- <Tabs>
460
- <TabItem value="npm" label="npm">
461
- ```bash copy
462
- npm install @ai-sdk/react
463
- ```
464
- </TabItem>
465
- <TabItem value="pnpm" label="pnpm">
466
- ```bash copy
467
- pnpm add @ai-sdk/react
468
- ```
469
- </TabItem>
470
- <TabItem value="yarn" label="yarn">
471
- ```bash copy
472
- yarn add @ai-sdk/react
473
- ```
474
- </TabItem>
475
- <TabItem value="bun" label="bun">
476
- ```bash copy
477
- bun add @ai-sdk/react
478
- ```
479
- </TabItem>
480
237
  </Tabs>
481
238
 
482
- ### Using `useChat()`
239
+ ### `useChat()`
483
240
 
484
- The `useChat()` hook handles real-time chat interactions between your frontend and a Mastra agent, enabling you to send prompts and receive streaming responses over HTTP.
241
+ Whether you created API routes through [Mastra's server](#mastras-server) or used a [framework of your choice](#framework-agnostic), you can now use the API endpoints in the `useChat()` hook.
485
242
 
486
- ```typescript {8-12} title="app/test/chat.tsx" copy
487
- "use client";
243
+ Assuming you set up a route at `/chat` that uses a weather agent, you can ask it questions as seen below. It's important that you set the correct `api` URL.
488
244
 
245
+ ```ts {9}
489
246
  import { useChat } from "@ai-sdk/react";
490
247
  import { useState } from "react";
491
- import { DefaultChatTransport } from 'ai';
248
+ import { DefaultChatTransport } from "ai";
492
249
 
493
- export function Chat() {
494
- const [inputValue, setInputValue] = useState('')
495
- const { messages, sendMessage} = useChat({
250
+ export default function Chat() {
251
+ const [inputValue, setInputValue] = useState("")
252
+ const { messages, sendMessage } = useChat({
496
253
  transport: new DefaultChatTransport({
497
- api: 'http://localhost:4111/chat',
254
+ api: "http://localhost:4111/chat",
498
255
  }),
499
256
  });
500
257
 
@@ -507,86 +264,154 @@ export function Chat() {
507
264
  <div>
508
265
  <pre>{JSON.stringify(messages, null, 2)}</pre>
509
266
  <form onSubmit={handleFormSubmit}>
510
- <input value={inputValue} onChange={e=>setInputValue(e.target.value)} placeholder="Name of city" />
267
+ <input value={inputValue} onChange={e => setInputValue(e.target.value)} placeholder="Name of the city" />
511
268
  </form>
512
269
  </div>
513
270
  );
514
271
  }
515
272
  ```
516
273
 
517
- Requests sent using the `useChat()` hook are handled by a standard server route. This example shows how to define a POST route using a Next.js Route Handler.
518
-
519
- ```typescript title="app/api/chat/route.ts" copy
520
- import { mastra } from "../../mastra";
274
+ Use [`prepareSendMessagesRequest`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat#transport.default-chat-transport.prepare-send-messages-request) to customize the request sent to the chat route, for example to pass additional configuration to the agent.
521
275
 
522
- export async function POST(req: Request) {
523
- const { messages } = await req.json();
524
- const myAgent = mastra.getAgent("weatherAgent");
525
- const stream = await myAgent.stream(messages, { format: "aisdk" });
526
-
527
- return stream.toUIMessageStreamResponse();
528
- }
529
- ```
530
-
531
- > When using `useChat()` with agent memory, refer to the [Agent Memory section](/docs/v1/agents/agent-memory) for key implementation details.
532
-
533
- ### Using `useCompletion()`
276
+ ### `useCompletion()`
534
277
 
535
278
  The `useCompletion()` hook handles single-turn completions between your frontend and a Mastra agent, allowing you to send a prompt and receive a streamed response over HTTP.
536
279
 
537
- ```typescript {6-8} title="app/test/completion.tsx" copy
538
- "use client";
280
+ Your frontend could look like this:
539
281
 
540
- import { useCompletion } from "@ai-sdk/react";
282
+ ```typescript title="app/page.tsx" copy
283
+ import { useCompletion } from '@ai-sdk/react';
541
284
 
542
- export function Completion() {
285
+ export default function Page() {
543
286
  const { completion, input, handleInputChange, handleSubmit } = useCompletion({
544
- api: "api/completion"
287
+ api: '/api/completion',
545
288
  });
546
289
 
547
290
  return (
548
- <div>
549
- <form onSubmit={handleSubmit}>
550
- <input value={input} onChange={handleInputChange} placeholder="Name of city" />
551
- </form>
552
- <p>Completion result: {completion}</p>
553
- </div>
291
+ <form onSubmit={handleSubmit}>
292
+ <input
293
+ name="prompt"
294
+ value={input}
295
+ onChange={handleInputChange}
296
+ id="input"
297
+ />
298
+ <button type="submit">Submit</button>
299
+ <div>{completion}</div>
300
+ </form>
554
301
  );
555
302
  }
556
303
  ```
557
304
 
558
- Requests sent using the `useCompletion()` hook are handled by a standard server route. This example shows how to define a POST route using a Next.js Route Handler.
305
+ Below are two approaches to implementing the backend:
306
+
307
+ <Tabs>
559
308
 
560
- ```typescript title="app/api/completion/route.ts" copy
561
- import { mastra } from "../../../mastra";
309
+ <TabItem value="mastra-server" label="Mastra Server">
310
+
311
+ ```ts title="src/mastra/index.ts" copy
312
+ import { Mastra } from '@mastra/core/mastra';
313
+ import { registerApiRoute } from '@mastra/core/server';
314
+ import { handleChatStream } from '@mastra/ai-sdk';
315
+ import { createUIMessageStreamResponse } from 'ai';
316
+
317
+ export const mastra = new Mastra({
318
+ server: {
319
+ apiRoutes: [
320
+ registerApiRoute('/completion', {
321
+ method: 'POST',
322
+ handler: async (c) => {
323
+ const { prompt } = await c.req.json();
324
+ const mastra = c.get('mastra');
325
+ const stream = await handleChatStream({
326
+ mastra,
327
+ agentId: 'weatherAgent',
328
+ params: {
329
+ messages: [
330
+ {
331
+ id: "1",
332
+ role: 'user',
333
+ parts: [
334
+ {
335
+ type: 'text',
336
+ text: prompt
337
+ }
338
+ ]
339
+ }
340
+ ],
341
+ }
342
+ })
343
+
344
+ return createUIMessageStreamResponse({ stream });
345
+ }
346
+ })
347
+ ]
348
+ }
349
+ });
350
+ ```
351
+
352
+ </TabItem>
353
+
354
+ <TabItem value="nextjs" label="Next.js">
355
+
356
+ ```ts title="app/completion/route.ts" copy
357
+ import { handleChatStream } from '@mastra/ai-sdk';
358
+ import { createUIMessageStreamResponse } from 'ai';
359
+ import { mastra } from '@/src/mastra';
360
+
361
+ // Allow streaming responses up to 30 seconds
362
+ export const maxDuration = 30;
562
363
 
563
364
  export async function POST(req: Request) {
564
- const { prompt } = await req.json();
565
- const myAgent = mastra.getAgent("weatherAgent");
566
- const stream = await myAgent.stream([{ role: "user", content: prompt }], {
567
- format: "aisdk",
365
+ const { prompt }: { prompt: string } = await req.json();
366
+
367
+ const stream = await handleChatStream({
368
+ mastra,
369
+ agentId: 'weatherAgent',
370
+ params: {
371
+ messages: [
372
+ {
373
+ id: "1",
374
+ role: 'user',
375
+ parts: [
376
+ {
377
+ type: 'text',
378
+ text: prompt
379
+ }
380
+ ]
381
+ }
382
+ ],
383
+ },
568
384
  });
569
-
570
- return stream.toUIMessageStreamResponse();
385
+ return createUIMessageStreamResponse({ stream });
571
386
  }
572
387
  ```
573
388
 
389
+ </TabItem>
390
+
391
+ </Tabs>
392
+
393
+ ## Recipes
394
+
395
+ ### Stream transformations
396
+
397
+ To manually transform Mastra's streams to AI SDK-compatible format, use the [`toAISdkStream()`](/reference/v1/ai-sdk/to-ai-sdk-stream) utility. See the [examples](/reference/v1/ai-sdk/to-ai-sdk-stream#examples) for concrete usage patterns.
398
+
574
399
  ### Passing additional data
575
400
 
576
- `sendMessage()` allows you to pass additional data from the frontend to Mastra. This data can then be used on the server as `RequestContext`.
401
+ [`sendMessage()`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat#send-message) allows you to pass additional data from the frontend to Mastra. This data can then be used on the server as [`RequestContext`](/docs/v1/server-db/request-context).
577
402
 
578
- ```typescript {16-26} title="app/test/chat-extra.tsx" copy
579
- "use client";
403
+ Here's an example of the frontend code:
580
404
 
405
+ ```typescript {15-25} copy
581
406
  import { useChat } from "@ai-sdk/react";
582
407
  import { useState } from "react";
583
408
  import { DefaultChatTransport } from 'ai';
584
409
 
585
- export function ChatExtra() {
410
+ export function ChatAdditional() {
586
411
  const [inputValue, setInputValue] = useState('')
587
412
  const { messages, sendMessage } = useChat({
588
413
  transport: new DefaultChatTransport({
589
- api: 'http://localhost:4111/chat',
414
+ api: 'http://localhost:4111/chat-extra',
590
415
  }),
591
416
  });
592
417
 
@@ -609,20 +434,65 @@ export function ChatExtra() {
609
434
  <div>
610
435
  <pre>{JSON.stringify(messages, null, 2)}</pre>
611
436
  <form onSubmit={handleFormSubmit}>
612
- <input value={inputValue} onChange={e=>setInputValue(e.target.value)} placeholder="Name of city" />
437
+ <input value={inputValue} onChange={e => setInputValue(e.target.value)} placeholder="Name of the city" />
613
438
  </form>
614
439
  </div>
615
440
  );
616
441
  }
617
442
  ```
618
443
 
619
- ```typescript {8,12} title="app/api/chat-extra/route.ts" copy
620
- import { mastra } from "../../../mastra";
444
+ Here are two examples of how to implement the backend portion.
445
+
446
+ <Tabs>
447
+
448
+ <TabItem value="mastra-server" label="Mastra Server">
449
+
450
+ Add a `chatRoute()` to your Mastra configuration like shown above. Then, add a server-level middleware:
451
+
452
+ ```typescript title="src/mastra/index.ts" copy
453
+ import { Mastra } from "@mastra/core";
454
+
455
+ export const mastra = new Mastra({
456
+ server: {
457
+ middleware: [
458
+ async (c, next) => {
459
+ const requestContext = c.get("requestContext");
460
+
461
+ if (c.req.method === "POST") {
462
+ const clonedReq = c.req.raw.clone();
463
+ const body = await clonedReq.json();
464
+
465
+ if (body?.data) {
466
+ for (const [key, value] of Object.entries(body.data)) {
467
+ requestContext.set(key, value);
468
+ }
469
+ }
470
+ }
471
+ await next();
472
+ },
473
+ ],
474
+ },
475
+ });
476
+ ```
477
+
478
+ :::info
479
+
480
+ You can access this data in your tools via the `requestContext` parameter. See the [Request Context documentation](/docs/v1/server-db/request-context) for more details.
481
+
482
+ :::
483
+
484
+ </TabItem>
485
+
486
+ <TabItem value="nextjs" label="Next.js">
487
+
488
+ ```typescript title="app/chat-extra/route.ts" copy
489
+ import { handleChatStream } from '@mastra/ai-sdk';
621
490
  import { RequestContext } from "@mastra/core/request-context";
491
+ import { createUIMessageStreamResponse } from 'ai';
492
+ import { mastra } from '@/src/mastra';
622
493
 
623
494
  export async function POST(req: Request) {
624
495
  const { messages, data } = await req.json();
625
- const myAgent = mastra.getAgent("weatherAgent");
626
496
 
627
497
  const requestContext = new RequestContext();
628
498
 
@@ -632,46 +502,126 @@ export async function POST(req: Request) {
632
502
  }
633
503
  }
634
504
 
635
- const stream = await myAgent.stream(messages, {
636
- requestContext,
637
- format: "aisdk",
505
+ const stream = await handleChatStream({
506
+ mastra,
507
+ agentId: 'weatherAgent',
508
+ params: {
509
+ messages,
510
+ requestContext,
511
+ },
638
512
  });
639
- return stream.toUIMessageStreamResponse();
513
+ return createUIMessageStreamResponse({ stream });
640
514
  }
641
515
  ```
642
516
 
643
- ### Handling `requestContext` with `server.middleware`
517
+ </TabItem>
644
518
 
645
- You can also populate the `RequestContext` by reading custom data in a server middleware:
519
+ </Tabs>
646
520
 
647
- ```typescript {8,17} title="mastra/index.ts" copy
648
- import { Mastra } from "@mastra/core";
521
+ ### Custom UI
649
522
 
650
- export const mastra = new Mastra({
651
- agents: { weatherAgent },
652
- server: {
653
- middleware: [
654
- async (c, next) => {
655
- const requestContext = c.get("requestContext");
523
+ The `@mastra/ai-sdk` package transforms and emits Mastra streams (e.g. workflow and network streams) into AI SDK-compatible [uiMessages DataParts](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#datauipart) format.
656
524
 
657
- if (c.req.method === "POST") {
658
- try {
659
- const clonedReq = c.req.raw.clone();
660
- const body = await clonedReq.json();
661
-
662
- if (body?.data) {
663
- for (const [key, value] of Object.entries(body.data)) {
664
- requestContext.set(key, value);
665
- }
525
+ - **Top-level parts**: These are streamed via direct workflow and network stream transformations (e.g. in `workflowRoute()` and `networkRoute()`)
526
+ - `data-workflow`: Aggregates a workflow run with step inputs/outputs and final usage.
527
+ - `data-network`: Aggregates a routing/network run with ordered steps (agent/workflow/tool executions) and outputs.
528
+
529
+ - **Nested parts**: These are streamed via nested and merged streams from within a tool's `execute()` method.
530
+ - `data-tool-workflow`: Nested workflow emitted from within a tool stream.
531
+ - `data-tool-network`: Nested network emitted from within a tool stream.
532
+ - `data-tool-agent`: Nested agent emitted from within a tool stream.
533
+
534
+ Here's an example: For a [nested agent stream within a tool](/docs/v1/streaming/tool-streaming#tool-using-an-agent), `data-tool-agent` UI message parts will be emitted and can be leveraged on the client as documented below:
535
+
536
+ ```typescript title="app/page.tsx" copy
537
+ "use client";
538
+
539
+ import { useChat } from "@ai-sdk/react";
540
+ import { AgentTool } from '../ui/agent-tool';
541
+ import { DefaultChatTransport } from 'ai';
542
+ import type { AgentDataPart } from "@mastra/ai-sdk";
543
+
544
+ export default function Page() {
545
+ const { messages } = useChat({
546
+ transport: new DefaultChatTransport({
547
+ api: 'http://localhost:4111/chat',
548
+ }),
549
+ });
550
+
551
+ return (
552
+ <div>
553
+ {messages.map((message) => (
554
+ <div key={message.id}>
555
+ {message.parts.map((part, i) => {
556
+ switch (part.type) {
557
+ case 'data-tool-agent':
558
+ return (
559
+ <AgentTool {...part.data as AgentDataPart} key={`${message.id}-${i}`} />
560
+ );
561
+ default:
562
+ return null;
666
563
  }
667
- } catch {}
668
- }
669
- await next();
670
- },
671
- ],
672
- },
673
- });
564
+ })}
565
+ </div>
566
+ ))}
567
+ </div>
568
+ );
569
+ }
570
+ ```
571
+
572
+ ```typescript title="ui/agent-tool.ts" copy
573
+ import { Tool, ToolContent, ToolHeader, ToolOutput } from "../ai-elements/tool";
574
+ import type { AgentDataPart } from "@mastra/ai-sdk";
575
+
576
+ export const AgentTool = ({ id, text, status }: AgentDataPart) => {
577
+ return (
578
+ <Tool>
579
+ <ToolHeader
580
+ type={`${id}`}
581
+ state={status === 'finished' ? 'output-available' : 'input-available'}
582
+ />
583
+ <ToolContent>
584
+ <ToolOutput output={text} />
585
+ </ToolContent>
586
+ </Tool>
587
+ );
588
+ };
674
589
  ```
675
590
 
676
- > You can then access this data in your tools via the `requestContext` parameter. See the [Request Context documentation](/docs/v1/server-db/request-context) for more details.
591
+ ### Custom Tool streaming
592
+
593
+ To stream custom data parts from within your tool execution function, use the `writer.custom()` method.
594
+
595
+ :::tip
596
+
597
+ It is important that you `await` the `writer.custom()` call.
598
+
599
+ :::
600
+
601
+ ```typescript {4,7-10,14-17} copy
602
+ import { createTool } from "@mastra/core/tools";
603
+
604
+ export const testTool = createTool({
605
+ execute: async (inputData, context) => {
606
+ const { value } = inputData;
607
+
608
+ await context?.writer?.custom({
609
+ type: "data-tool-progress",
610
+ status: "pending"
611
+ });
612
+
613
+ const response = await fetch(...);
614
+
615
+ await context?.writer?.custom({
616
+ type: "data-tool-progress",
617
+ status: "success"
618
+ });
619
+
620
+ return {
621
+ value: ""
622
+ };
623
+ }
624
+ });
625
+ ```
677
626
 
627
+ For more information about tool streaming, see the [Tool streaming documentation](/docs/v1/streaming/tool-streaming).