@mastra/mcp-docs-server 1.0.0-beta.6 → 1.0.0-beta.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +48 -48
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +65 -65
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +48 -48
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +48 -48
- package/.docs/organized/changelogs/%40mastra%2Fcodemod.md +6 -0
- package/.docs/organized/changelogs/%40mastra%2Fconvex.md +47 -0
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +196 -196
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +27 -27
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +48 -48
- package/.docs/organized/changelogs/%40mastra%2Felasticsearch.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Flance.md +48 -48
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +48 -48
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +40 -40
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +43 -43
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +48 -48
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +48 -48
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +52 -52
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +41 -41
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Freact.md +14 -0
- package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +65 -65
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +48 -48
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +10 -10
- package/.docs/organized/changelogs/create-mastra.md +7 -7
- package/.docs/organized/changelogs/mastra.md +14 -14
- package/.docs/organized/code-examples/ai-elements.md +1 -1
- package/.docs/organized/code-examples/ai-sdk-useChat.md +1 -1
- package/.docs/organized/code-examples/ai-sdk-v5.md +1 -1
- package/.docs/organized/code-examples/assistant-ui.md +1 -1
- package/.docs/organized/code-examples/bird-checker-with-nextjs-and-eval.md +1 -1
- package/.docs/organized/code-examples/bird-checker-with-nextjs.md +1 -1
- package/.docs/organized/code-examples/crypto-chatbot.md +1 -1
- package/.docs/organized/code-examples/mcp-server-adapters.md +2 -2
- package/.docs/organized/code-examples/server-app-access.md +2 -2
- package/.docs/organized/code-examples/server-express-adapter.md +87 -0
- package/.docs/organized/code-examples/server-hono-adapter.md +85 -0
- package/.docs/raw/agents/overview.mdx +2 -111
- package/.docs/raw/agents/processors.mdx +1 -1
- package/.docs/raw/agents/structured-output.mdx +224 -0
- package/.docs/raw/guides/build-your-ui/ai-sdk-ui.mdx +381 -431
- package/.docs/raw/guides/getting-started/quickstart.mdx +11 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +40 -1
- package/.docs/raw/memory/working-memory.mdx +1 -0
- package/.docs/raw/observability/tracing/bridges/otel.mdx +25 -1
- package/.docs/raw/observability/tracing/exporters/arize.mdx +19 -0
- package/.docs/raw/observability/tracing/exporters/langfuse.mdx +63 -0
- package/.docs/raw/observability/tracing/exporters/otel.mdx +30 -19
- package/.docs/raw/observability/tracing/exporters/posthog.mdx +20 -0
- package/.docs/raw/observability/tracing/overview.mdx +6 -1
- package/.docs/raw/reference/ai-sdk/chat-route.mdx +127 -0
- package/.docs/raw/reference/ai-sdk/handle-chat-stream.mdx +117 -0
- package/.docs/raw/reference/ai-sdk/handle-network-stream.mdx +64 -0
- package/.docs/raw/reference/ai-sdk/handle-workflow-stream.mdx +116 -0
- package/.docs/raw/reference/ai-sdk/network-route.mdx +99 -0
- package/.docs/raw/reference/ai-sdk/to-ai-sdk-stream.mdx +289 -0
- package/.docs/raw/reference/ai-sdk/workflow-route.mdx +110 -0
- package/.docs/raw/reference/observability/tracing/bridges/otel.mdx +27 -0
- package/.docs/raw/reference/observability/tracing/exporters/arize.mdx +25 -0
- package/.docs/raw/reference/observability/tracing/exporters/langfuse.mdx +43 -0
- package/.docs/raw/reference/observability/tracing/exporters/otel.mdx +27 -43
- package/.docs/raw/reference/server/create-route.mdx +314 -0
- package/.docs/raw/reference/server/express-adapter.mdx +193 -0
- package/.docs/raw/reference/server/hono-adapter.mdx +174 -0
- package/.docs/raw/reference/server/mastra-server.mdx +316 -0
- package/.docs/raw/reference/server/routes.mdx +250 -0
- package/.docs/raw/reference/streaming/workflows/timeTravelStream.mdx +170 -0
- package/.docs/raw/reference/tools/mcp-client.mdx +54 -1
- package/.docs/raw/reference/workflows/run-methods/timeTravel.mdx +310 -0
- package/.docs/raw/reference/workflows/run.mdx +14 -0
- package/.docs/raw/server-db/custom-adapters.mdx +380 -0
- package/.docs/raw/server-db/custom-api-routes.mdx +5 -5
- package/.docs/raw/server-db/mastra-server.mdx +11 -32
- package/.docs/raw/server-db/server-adapters.mdx +286 -0
- package/.docs/raw/workflows/workflow-state.mdx +4 -5
- package/CHANGELOG.md +15 -0
- package/package.json +4 -4
|
@@ -8,7 +8,7 @@ import TabItem from "@theme/TabItem";
|
|
|
8
8
|
|
|
9
9
|
# Using AI SDK UI
|
|
10
10
|
|
|
11
|
-
[AI SDK UI](https://sdk.vercel.ai) is a
|
|
11
|
+
[AI SDK UI](https://sdk.vercel.ai) is a library of React utilities and components for building AI-powered interfaces. In this guide, you'll learn how to use `@mastra/ai-sdk` to convert Mastra's output to AI SDK-compatible formats, enabling you to use its hooks and components in your frontend.
|
|
12
12
|
|
|
13
13
|
:::note
|
|
14
14
|
Migrating from AI SDK v4 to v5? See the [migration guide](/guides/v1/migrations/ai-sdk-v4-to-v5).
|
|
@@ -16,40 +16,69 @@ Migrating from AI SDK v4 to v5? See the [migration guide](/guides/v1/migrations/
|
|
|
16
16
|
|
|
17
17
|
:::tip
|
|
18
18
|
|
|
19
|
-
Visit Mastra's [**UI Dojo**](https://ui-dojo.mastra.ai/)
|
|
19
|
+
Want to see more examples? Visit Mastra's [**UI Dojo**](https://ui-dojo.mastra.ai/) or the [Next.js quickstart guide](/guides/v1/getting-started/next-js).
|
|
20
20
|
|
|
21
21
|
:::
|
|
22
22
|
|
|
23
|
-
##
|
|
23
|
+
## Getting Started
|
|
24
24
|
|
|
25
|
-
|
|
25
|
+
Use Mastra and AI SDK UI together by installing the `@mastra/ai-sdk` package. `@mastra/ai-sdk` provides custom API routes and utilities for streaming Mastra agents in AI SDK-compatible formats. This includes chat, workflow, and network route handlers, along with utilities and exported types for UI integrations.
|
|
26
|
+
|
|
27
|
+
`@mastra/ai-sdk` integrates with AI SDK UI's three main hooks: [`useChat()`](https://ai-sdk.dev/docs/ai-sdk-ui/chatbot), [`useCompletion()`](https://ai-sdk.dev/docs/ai-sdk-ui/completion), and [`useObject()`](https://ai-sdk.dev/docs/ai-sdk-ui/object-generation).
|
|
28
|
+
|
|
29
|
+
Install the required packages to get started:
|
|
26
30
|
|
|
27
31
|
<Tabs>
|
|
28
32
|
<TabItem value="npm" label="npm">
|
|
29
33
|
```bash copy
|
|
30
|
-
npm install @mastra/ai-sdk@beta
|
|
34
|
+
npm install @mastra/ai-sdk@beta @ai-sdk/react ai
|
|
31
35
|
```
|
|
32
36
|
</TabItem>
|
|
33
37
|
<TabItem value="pnpm" label="pnpm">
|
|
34
38
|
```bash copy
|
|
35
|
-
pnpm add @mastra/ai-sdk@beta
|
|
39
|
+
pnpm add @mastra/ai-sdk@beta @ai-sdk/react ai
|
|
36
40
|
```
|
|
37
41
|
</TabItem>
|
|
38
42
|
<TabItem value="yarn" label="yarn">
|
|
39
43
|
```bash copy
|
|
40
|
-
yarn add @mastra/ai-sdk@beta
|
|
44
|
+
yarn add @mastra/ai-sdk@beta @ai-sdk/react ai
|
|
41
45
|
```
|
|
42
46
|
</TabItem>
|
|
43
47
|
<TabItem value="bun" label="bun">
|
|
44
48
|
```bash copy
|
|
45
|
-
bun add @mastra/ai-sdk@beta
|
|
49
|
+
bun add @mastra/ai-sdk@beta @ai-sdk/react ai
|
|
46
50
|
```
|
|
47
51
|
</TabItem>
|
|
48
52
|
</Tabs>
|
|
49
53
|
|
|
50
|
-
|
|
54
|
+
You're now ready to follow the integration guides and recipes below!
|
|
55
|
+
|
|
56
|
+
## Integration Guides
|
|
57
|
+
|
|
58
|
+
Typically, you'll set up API routes that stream Mastra content in AI SDK-compatible format, and then use those routes in AI SDK UI hooks like `useChat()`. Below you'll find two main approaches to achieve this:
|
|
59
|
+
|
|
60
|
+
- [Mastra's server](#mastras-server)
|
|
61
|
+
- [Framework-agnostic](#framework-agnostic)
|
|
62
|
+
|
|
63
|
+
Once you have your API routes set up, you can use them in the [`useChat()`](#usechat) hook.
|
|
64
|
+
|
|
65
|
+
### Mastra's server
|
|
66
|
+
|
|
67
|
+
Run Mastra as a standalone server and connect your frontend (e.g. using Vite + React) to its API endpoints. You'll be using Mastra's [custom API routes](/docs/v1/server-db/custom-api-routes) feature for this.
|
|
68
|
+
|
|
69
|
+
:::info
|
|
70
|
+
|
|
71
|
+
Mastra's [**UI Dojo**](https://ui-dojo.mastra.ai/) is an example of this setup.
|
|
72
|
+
|
|
73
|
+
:::
|
|
51
74
|
|
|
52
|
-
|
|
75
|
+
You can use [`chatRoute()`](/reference/v1/ai-sdk/chat-route), [`workflowRoute()`](/reference/v1/ai-sdk/workflow-route), and [`networkRoute()`](/reference/v1/ai-sdk/network-route) to create API routes that stream Mastra content in AI SDK-compatible format. Once implemented, you can use these API routes in [`useChat()`](#usechat).
|
|
76
|
+
|
|
77
|
+
<Tabs>
|
|
78
|
+
|
|
79
|
+
<TabItem value="chatRoute" label="chatRoute()">
|
|
80
|
+
|
|
81
|
+
This example shows how to set up a chat route at the `/chat` endpoint that uses an agent with the ID `weatherAgent`.
|
|
53
82
|
|
|
54
83
|
```typescript title="src/mastra/index.ts" copy
|
|
55
84
|
import { Mastra } from "@mastra/core";
|
|
@@ -67,41 +96,13 @@ export const mastra = new Mastra({
|
|
|
67
96
|
});
|
|
68
97
|
```
|
|
69
98
|
|
|
70
|
-
|
|
99
|
+
You can also use dynamic agent routing, see the [`chatRoute()` reference documentation](/reference/v1/ai-sdk/chat-route) for more details.
|
|
71
100
|
|
|
72
|
-
|
|
73
|
-
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
|
|
74
|
-
transport: new DefaultChatTransport({
|
|
75
|
-
api: "http://localhost:4111/chat",
|
|
76
|
-
}),
|
|
77
|
-
});
|
|
78
|
-
```
|
|
101
|
+
</TabItem>
|
|
79
102
|
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
```typescript
|
|
83
|
-
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
|
|
84
|
-
transport: new DefaultChatTransport({
|
|
85
|
-
api: "http://localhost:4111/chat",
|
|
86
|
-
prepareSendMessagesRequest({ messages }) {
|
|
87
|
-
return {
|
|
88
|
-
body: {
|
|
89
|
-
messages,
|
|
90
|
-
// Pass memory config
|
|
91
|
-
memory: {
|
|
92
|
-
thread: "user-1",
|
|
93
|
-
resource: "user-1",
|
|
94
|
-
},
|
|
95
|
-
},
|
|
96
|
-
};
|
|
97
|
-
},
|
|
98
|
-
}),
|
|
99
|
-
});
|
|
100
|
-
```
|
|
103
|
+
<TabItem value="workflowRoute" label="workflowRoute()">
|
|
101
104
|
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
Use the `workflowRoute()` utility to create a route handler that automatically formats the workflow stream into an AI SDK-compatible format.
|
|
105
|
+
This example shows how to set up a workflow route at the `/workflow` endpoint that uses a workflow with the ID `weatherWorkflow`.
|
|
105
106
|
|
|
106
107
|
```typescript title="src/mastra/index.ts" copy
|
|
107
108
|
import { Mastra } from "@mastra/core";
|
|
@@ -112,45 +113,28 @@ export const mastra = new Mastra({
|
|
|
112
113
|
apiRoutes: [
|
|
113
114
|
workflowRoute({
|
|
114
115
|
path: "/workflow",
|
|
115
|
-
|
|
116
|
+
workflow: "weatherWorkflow",
|
|
116
117
|
}),
|
|
117
118
|
],
|
|
118
119
|
},
|
|
119
120
|
});
|
|
120
121
|
```
|
|
121
122
|
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
```typescript
|
|
125
|
-
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
|
|
126
|
-
transport: new DefaultChatTransport({
|
|
127
|
-
api: "http://localhost:4111/workflow",
|
|
128
|
-
prepareSendMessagesRequest({ messages }) {
|
|
129
|
-
return {
|
|
130
|
-
body: {
|
|
131
|
-
inputData: {
|
|
132
|
-
city: messages[messages.length - 1].parts[0].text,
|
|
133
|
-
},
|
|
134
|
-
//Or resumeData for resuming a suspended workflow
|
|
135
|
-
resumeData: {
|
|
136
|
-
confirmation: messages[messages.length - 1].parts[0].text
|
|
137
|
-
}
|
|
138
|
-
},
|
|
139
|
-
};
|
|
140
|
-
},
|
|
141
|
-
}),
|
|
142
|
-
});
|
|
143
|
-
```
|
|
123
|
+
You can also use dynamic workflow routing, see the [`workflowRoute()` reference documentation](/reference/v1/ai-sdk/workflow-route) for more details.
|
|
144
124
|
|
|
145
125
|
:::tip Agent streaming in workflows
|
|
146
|
-
When a workflow step pipes an agent's stream to the workflow writer (e.g., `await response.fullStream.pipeTo(writer)`), the agent's text chunks and tool calls are automatically streamed to the UI in real-time. This provides a seamless streaming experience even when agents are running inside workflow steps.
|
|
147
126
|
|
|
148
|
-
|
|
127
|
+
When a workflow step pipes an agent's stream to the workflow writer (e.g., `await response.fullStream.pipeTo(writer)`), the agent's text chunks and tool calls are forwarded to the UI stream in real time, even when the agent runs inside workflow steps.
|
|
128
|
+
|
|
129
|
+
See [Workflow Streaming](/docs/v1/streaming/workflow-streaming#streaming-agent-text-chunks-to-ui) for more details.
|
|
130
|
+
|
|
149
131
|
:::
|
|
150
132
|
|
|
151
|
-
|
|
133
|
+
</TabItem>
|
|
152
134
|
|
|
153
|
-
|
|
135
|
+
<TabItem value="networkRoute" label="networkRoute()">
|
|
136
|
+
|
|
137
|
+
This example shows how to set up a network route at the `/network` endpoint that uses an agent with the ID `weatherAgent`.
|
|
154
138
|
|
|
155
139
|
```typescript title="src/mastra/index.ts" copy
|
|
156
140
|
import { Mastra } from "@mastra/core";
|
|
@@ -168,333 +152,106 @@ export const mastra = new Mastra({
|
|
|
168
152
|
});
|
|
169
153
|
```
|
|
170
154
|
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
```typescript
|
|
174
|
-
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
|
|
175
|
-
transport: new DefaultChatTransport({
|
|
176
|
-
api: "http://localhost:4111/network",
|
|
177
|
-
}),
|
|
178
|
-
});
|
|
179
|
-
```
|
|
180
|
-
|
|
181
|
-
### Custom UI
|
|
182
|
-
|
|
183
|
-
The `@mastra/ai-sdk` package transforms and emits Mastra streams (e.g workflow, network streams) into AI SDK-compatible [uiMessages DataParts](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#datauipart) format.
|
|
184
|
-
|
|
185
|
-
- **Top-level parts**: These are streamed via direct workflow and network stream transformations (e.g in `workflowRoute()` and `networkRoute()`)
|
|
186
|
-
- `data-workflow`: Aggregates a workflow run with step inputs/outputs and final usage.
|
|
187
|
-
- `data-network`: Aggregates a routing/network run with ordered steps (agent/workflow/tool executions) and outputs.
|
|
188
|
-
|
|
189
|
-
- **Nested parts**: These are streamed via nested and merged streams from within a tool's `execute()` method.
|
|
190
|
-
- `data-tool-workflow`: Nested workflow emitted from within a tool stream.
|
|
191
|
-
- `data-tool-network`: Nested network emitted from within an tool stream.
|
|
192
|
-
- `data-tool-agent`: Nested agent emitted from within an tool stream.
|
|
193
|
-
|
|
194
|
-
Here's an example: For a [nested agent stream within a tool](/docs/v1/streaming/tool-streaming#tool-using-an-agent), `data-tool-agent` UI message parts will be emitted and can be leveraged on the client as documented below:
|
|
195
|
-
|
|
196
|
-
```typescript title="app/page.tsx" copy
|
|
197
|
-
"use client";
|
|
198
|
-
|
|
199
|
-
import { useChat } from "@ai-sdk/react";
|
|
200
|
-
import { AgentTool } from '../ui/agent-tool';
|
|
201
|
-
import { DefaultChatTransport } from 'ai';
|
|
202
|
-
import type { AgentDataPart } from "@mastra/ai-sdk";
|
|
203
|
-
|
|
204
|
-
export default function Page() {
|
|
205
|
-
const { messages } = useChat({
|
|
206
|
-
transport: new DefaultChatTransport({
|
|
207
|
-
api: 'http://localhost:4111/chat',
|
|
208
|
-
}),
|
|
209
|
-
});
|
|
210
|
-
|
|
211
|
-
return (
|
|
212
|
-
<div>
|
|
213
|
-
{messages.map((message) => (
|
|
214
|
-
<div key={message.id}>
|
|
215
|
-
{message.parts.map((part, i) => {
|
|
216
|
-
switch (part.type) {
|
|
217
|
-
case 'data-tool-agent':
|
|
218
|
-
return (
|
|
219
|
-
<AgentTool {...part.data as AgentDataPart} key={`${message.id}-${i}`} />
|
|
220
|
-
);
|
|
221
|
-
default:
|
|
222
|
-
return null;
|
|
223
|
-
}
|
|
224
|
-
})}
|
|
225
|
-
</div>
|
|
226
|
-
))}
|
|
227
|
-
</div>
|
|
228
|
-
);
|
|
229
|
-
}
|
|
230
|
-
```
|
|
155
|
+
You can also use dynamic network routing, see the [`networkRoute()` reference documentation](/reference/v1/ai-sdk/network-route) for more details.
|
|
231
156
|
|
|
232
|
-
|
|
233
|
-
import { Tool, ToolContent, ToolHeader, ToolOutput } from "../ai-elements/tool";
|
|
234
|
-
import type { AgentDataPart } from "@mastra/ai-sdk";
|
|
157
|
+
</TabItem>
|
|
235
158
|
|
|
236
|
-
|
|
237
|
-
return (
|
|
238
|
-
<Tool>
|
|
239
|
-
<ToolHeader
|
|
240
|
-
type={`${id}`}
|
|
241
|
-
state={status === 'finished' ? 'output-available' : 'input-available'}
|
|
242
|
-
/>
|
|
243
|
-
<ToolContent>
|
|
244
|
-
<ToolOutput output={text} />
|
|
245
|
-
</ToolContent>
|
|
246
|
-
</Tool>
|
|
247
|
-
);
|
|
248
|
-
};
|
|
249
|
-
```
|
|
250
|
-
|
|
251
|
-
### Custom Tool streaming
|
|
252
|
-
|
|
253
|
-
To stream custom data parts from within your tool execution function, use the
|
|
254
|
-
`writer.custom()` method.
|
|
255
|
-
|
|
256
|
-
```typescript {5,8,15} showLineNumbers copy
|
|
257
|
-
import { createTool } from "@mastra/core/tools";
|
|
159
|
+
</Tabs>
|
|
258
160
|
|
|
259
|
-
|
|
260
|
-
// ...
|
|
261
|
-
execute: async ({ context, writer }) => {
|
|
262
|
-
const { value } = context;
|
|
161
|
+
### Framework-agnostic
|
|
263
162
|
|
|
264
|
-
|
|
265
|
-
type: "data-tool-progress",
|
|
266
|
-
status: "pending"
|
|
267
|
-
});
|
|
163
|
+
If you don't want to run Mastra's server and instead use frameworks like Next.js or Express, you can use the [`handleChatStream()`](/reference/v1/ai-sdk/handle-chat-stream), [`handleWorkflowStream()`](/reference/v1/ai-sdk/handle-workflow-stream), and [`handleNetworkStream()`](/reference/v1/ai-sdk/handle-network-stream) functions in your own API route handlers.
|
|
268
164
|
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
await writer?.custom({
|
|
272
|
-
type: "data-tool-progress",
|
|
273
|
-
status: "success"
|
|
274
|
-
});
|
|
165
|
+
They return a `ReadableStream` that you can wrap with [`createUIMessageStreamResponse()`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/create-ui-message-stream-response).
|
|
275
166
|
|
|
276
|
-
|
|
277
|
-
value: ""
|
|
278
|
-
};
|
|
279
|
-
}
|
|
280
|
-
});
|
|
281
|
-
```
|
|
167
|
+
The examples below show you how to use them with Next.js App Router.
|
|
282
168
|
|
|
283
|
-
|
|
169
|
+
<Tabs>
|
|
284
170
|
|
|
285
|
-
|
|
171
|
+
<TabItem value="handleChatStream" label="handleChatStream()">
|
|
286
172
|
|
|
287
|
-
|
|
173
|
+
This example shows how to set up a chat route at the `/chat` endpoint that uses an agent with the ID `weatherAgent`.
|
|
288
174
|
|
|
289
|
-
```typescript title="app/
|
|
290
|
-
import {
|
|
291
|
-
import {
|
|
292
|
-
import {
|
|
175
|
+
```typescript title="app/chat/route.ts" copy
|
|
176
|
+
import { handleChatStream } from '@mastra/ai-sdk';
|
|
177
|
+
import { createUIMessageStreamResponse } from 'ai';
|
|
178
|
+
import { mastra } from '@/src/mastra';
|
|
293
179
|
|
|
294
180
|
export async function POST(req: Request) {
|
|
295
|
-
const
|
|
296
|
-
const
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
const uiMessageStream = createUIMessageStream({
|
|
301
|
-
originalMessages: messages,
|
|
302
|
-
execute: async ({ writer }) => {
|
|
303
|
-
for await (const part of toAISdkStream(stream, { from: "agent" })!) {
|
|
304
|
-
writer.write(part);
|
|
305
|
-
}
|
|
306
|
-
},
|
|
307
|
-
});
|
|
308
|
-
|
|
309
|
-
// Create a Response that streams the UI message stream to the client
|
|
310
|
-
return createUIMessageStreamResponse({
|
|
311
|
-
stream: uiMessageStream,
|
|
181
|
+
const params = await req.json();
|
|
182
|
+
const stream = await handleChatStream({
|
|
183
|
+
mastra,
|
|
184
|
+
agentId: 'weatherAgent',
|
|
185
|
+
params,
|
|
312
186
|
});
|
|
187
|
+
return createUIMessageStreamResponse({ stream });
|
|
313
188
|
}
|
|
314
189
|
```
|
|
315
190
|
|
|
316
|
-
|
|
191
|
+
</TabItem>
|
|
317
192
|
|
|
318
|
-
|
|
193
|
+
<TabItem value="handleWorkflowStream" label="handleWorkflowStream()">
|
|
319
194
|
|
|
320
|
-
|
|
321
|
-
content={[
|
|
322
|
-
{
|
|
323
|
-
name: "from",
|
|
324
|
-
type: "'agent' | 'network' | 'workflow'",
|
|
325
|
-
isRequired: true,
|
|
326
|
-
description: "The type of Mastra stream being converted.",
|
|
327
|
-
},
|
|
328
|
-
{
|
|
329
|
-
name: "lastMessageId",
|
|
330
|
-
type: "string",
|
|
331
|
-
isOptional: true,
|
|
332
|
-
description: "(Agent only) The ID of the last message in the conversation.",
|
|
333
|
-
},
|
|
334
|
-
{
|
|
335
|
-
name: "sendStart",
|
|
336
|
-
type: "boolean",
|
|
337
|
-
isOptional: true,
|
|
338
|
-
defaultValue: "true",
|
|
339
|
-
description: "(Agent only) Whether to send start events.",
|
|
340
|
-
},
|
|
341
|
-
{
|
|
342
|
-
name: "sendFinish",
|
|
343
|
-
type: "boolean",
|
|
344
|
-
isOptional: true,
|
|
345
|
-
defaultValue: "true",
|
|
346
|
-
description: "(Agent only) Whether to send finish events.",
|
|
347
|
-
},
|
|
348
|
-
{
|
|
349
|
-
name: "sendReasoning",
|
|
350
|
-
type: "boolean",
|
|
351
|
-
isOptional: true,
|
|
352
|
-
defaultValue: "false",
|
|
353
|
-
description: "(Agent only) Whether to include reasoning-delta chunks in the stream. Set to true to stream the actual reasoning content from models that support extended thinking.",
|
|
354
|
-
},
|
|
355
|
-
{
|
|
356
|
-
name: "sendSources",
|
|
357
|
-
type: "boolean",
|
|
358
|
-
isOptional: true,
|
|
359
|
-
defaultValue: "false",
|
|
360
|
-
description: "(Agent only) Whether to include source citations in the output.",
|
|
361
|
-
},
|
|
362
|
-
{
|
|
363
|
-
name: "messageMetadata",
|
|
364
|
-
type: "Function",
|
|
365
|
-
isOptional: true,
|
|
366
|
-
description: "(Agent only) A function that receives the current stream part and returns metadata to attach to start and finish chunks.",
|
|
367
|
-
},
|
|
368
|
-
{
|
|
369
|
-
name: "onError",
|
|
370
|
-
type: "Function",
|
|
371
|
-
isOptional: true,
|
|
372
|
-
description: "(Agent only) A function to handle errors during stream conversion. Receives the error and should return a string representation.",
|
|
373
|
-
},
|
|
374
|
-
]}
|
|
375
|
-
/>
|
|
195
|
+
This example shows how to set up a workflow route at the `/workflow` endpoint that uses a workflow with the ID `weatherWorkflow`.
|
|
376
196
|
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
import { mastra } from
|
|
381
|
-
import { createUIMessageStream, createUIMessageStreamResponse } from "ai";
|
|
382
|
-
import { toAISdkStream } from "@mastra/ai-sdk";
|
|
197
|
+
```typescript title="app/workflow/route.ts" copy
|
|
198
|
+
import { handleWorkflowStream } from '@mastra/ai-sdk';
|
|
199
|
+
import { createUIMessageStreamResponse } from 'ai';
|
|
200
|
+
import { mastra } from '@/src/mastra';
|
|
383
201
|
|
|
384
202
|
export async function POST(req: Request) {
|
|
385
|
-
const
|
|
386
|
-
const
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
},
|
|
391
|
-
});
|
|
392
|
-
|
|
393
|
-
const uiMessageStream = createUIMessageStream({
|
|
394
|
-
initialMessages: messages,
|
|
395
|
-
execute: async ({ writer }) => {
|
|
396
|
-
for await (const part of toAISdkStream(stream, {
|
|
397
|
-
from: "agent",
|
|
398
|
-
sendReasoning: true, // Enable reasoning content streaming
|
|
399
|
-
})!) {
|
|
400
|
-
writer.write(part);
|
|
401
|
-
}
|
|
402
|
-
},
|
|
403
|
-
});
|
|
404
|
-
|
|
405
|
-
return createUIMessageStreamResponse({
|
|
406
|
-
stream: uiMessageStream,
|
|
203
|
+
const params = await req.json();
|
|
204
|
+
const stream = await handleWorkflowStream({
|
|
205
|
+
mastra,
|
|
206
|
+
workflowId: 'weatherWorkflow',
|
|
207
|
+
params,
|
|
407
208
|
});
|
|
209
|
+
return createUIMessageStreamResponse({ stream });
|
|
408
210
|
}
|
|
409
211
|
```
|
|
410
212
|
|
|
411
|
-
|
|
213
|
+
</TabItem>
|
|
412
214
|
|
|
413
|
-
|
|
215
|
+
<TabItem value="handleNetworkStream" label="handleNetworkStream()">
|
|
414
216
|
|
|
415
|
-
|
|
416
|
-
import { createUIMessageStream } from "ai";
|
|
417
|
-
import { toAISdkStream } from "@mastra/ai-sdk";
|
|
418
|
-
import type { ChunkType, MastraModelOutput } from "@mastra/core/stream";
|
|
217
|
+
This example shows how to set up a network route at the `/network` endpoint that uses an agent with the ID `routingAgent`.
|
|
419
218
|
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
}
|
|
424
|
-
|
|
425
|
-
const chunkStream: ReadableStream<ChunkType> = new ReadableStream<ChunkType>({
|
|
426
|
-
start(controller) {
|
|
427
|
-
response
|
|
428
|
-
.processDataStream({
|
|
429
|
-
onChunk: async (chunk) => {
|
|
430
|
-
controller.enqueue(chunk as ChunkType);
|
|
431
|
-
},
|
|
432
|
-
})
|
|
433
|
-
.finally(() => controller.close());
|
|
434
|
-
},
|
|
435
|
-
});
|
|
219
|
+
```typescript title="app/network/route.ts" copy
|
|
220
|
+
import { handleNetworkStream } from '@mastra/ai-sdk';
|
|
221
|
+
import { createUIMessageStreamResponse } from 'ai';
|
|
222
|
+
import { mastra } from '@/src/mastra';
|
|
436
223
|
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
},
|
|
446
|
-
});
|
|
447
|
-
|
|
448
|
-
for await (const part of uiMessageStream) {
|
|
449
|
-
console.log(part);
|
|
224
|
+
export async function POST(req: Request) {
|
|
225
|
+
const params = await req.json();
|
|
226
|
+
const stream = await handleNetworkStream({
|
|
227
|
+
mastra,
|
|
228
|
+
agentId: 'routingAgent',
|
|
229
|
+
params,
|
|
230
|
+
});
|
|
231
|
+
return createUIMessageStreamResponse({ stream });
|
|
450
232
|
}
|
|
451
233
|
```
|
|
452
234
|
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
Mastra supports AI SDK UI hooks for connecting frontend components directly to agents using HTTP streams.
|
|
235
|
+
</TabItem>
|
|
456
236
|
|
|
457
|
-
Install the required AI SDK React package:
|
|
458
|
-
|
|
459
|
-
<Tabs>
|
|
460
|
-
<TabItem value="npm" label="npm">
|
|
461
|
-
```bash copy
|
|
462
|
-
npm install @ai-sdk/react
|
|
463
|
-
```
|
|
464
|
-
</TabItem>
|
|
465
|
-
<TabItem value="pnpm" label="pnpm">
|
|
466
|
-
```bash copy
|
|
467
|
-
pnpm add @ai-sdk/react
|
|
468
|
-
```
|
|
469
|
-
</TabItem>
|
|
470
|
-
<TabItem value="yarn" label="yarn">
|
|
471
|
-
```bash copy
|
|
472
|
-
yarn add @ai-sdk/react
|
|
473
|
-
```
|
|
474
|
-
</TabItem>
|
|
475
|
-
<TabItem value="bun" label="bun">
|
|
476
|
-
```bash copy
|
|
477
|
-
bun add @ai-sdk/react
|
|
478
|
-
```
|
|
479
|
-
</TabItem>
|
|
480
237
|
</Tabs>
|
|
481
238
|
|
|
482
|
-
###
|
|
239
|
+
### `useChat()`
|
|
483
240
|
|
|
484
|
-
|
|
241
|
+
Whether you created API routes through [Mastra's server](#mastras-server) or used a [framework of your choice](#framework-agnostic), you can now use the API endpoints in the `useChat()` hook.
|
|
485
242
|
|
|
486
|
-
|
|
487
|
-
"use client";
|
|
243
|
+
Assuming you set up a route at `/chat` that uses a weather agent, you can ask it questions as seen below. It's important that you set the correct `api` URL.
|
|
488
244
|
|
|
245
|
+
```ts {9}
|
|
489
246
|
import { useChat } from "@ai-sdk/react";
|
|
490
247
|
import { useState } from "react";
|
|
491
|
-
import { DefaultChatTransport } from
|
|
248
|
+
import { DefaultChatTransport } from "ai";
|
|
492
249
|
|
|
493
|
-
export function Chat() {
|
|
494
|
-
const [inputValue, setInputValue] = useState(
|
|
495
|
-
const { messages, sendMessage} = useChat({
|
|
250
|
+
export default function Chat() {
|
|
251
|
+
const [inputValue, setInputValue] = useState("")
|
|
252
|
+
const { messages, sendMessage } = useChat({
|
|
496
253
|
transport: new DefaultChatTransport({
|
|
497
|
-
api:
|
|
254
|
+
api: "http://localhost:4111/chat",
|
|
498
255
|
}),
|
|
499
256
|
});
|
|
500
257
|
|
|
@@ -507,86 +264,154 @@ export function Chat() {
|
|
|
507
264
|
<div>
|
|
508
265
|
<pre>{JSON.stringify(messages, null, 2)}</pre>
|
|
509
266
|
<form onSubmit={handleFormSubmit}>
|
|
510
|
-
<input value={inputValue} onChange={e=>setInputValue(e.target.value)} placeholder="Name of city" />
|
|
267
|
+
<input value={inputValue} onChange={e => setInputValue(e.target.value)} placeholder="Name of the city" />
|
|
511
268
|
</form>
|
|
512
269
|
</div>
|
|
513
270
|
);
|
|
514
271
|
}
|
|
515
272
|
```
|
|
516
273
|
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
```typescript title="app/api/chat/route.ts" copy
|
|
520
|
-
import { mastra } from "../../mastra";
|
|
274
|
+
Use [`prepareSendMessagesRequest`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat#transport.default-chat-transport.prepare-send-messages-request) to customize the request sent to the chat route, for example to pass additional configuration to the agent.
|
|
521
275
|
|
|
522
|
-
|
|
523
|
-
const { messages } = await req.json();
|
|
524
|
-
const myAgent = mastra.getAgent("weatherAgent");
|
|
525
|
-
const stream = await myAgent.stream(messages, { format: "aisdk" });
|
|
526
|
-
|
|
527
|
-
return stream.toUIMessageStreamResponse();
|
|
528
|
-
}
|
|
529
|
-
```
|
|
530
|
-
|
|
531
|
-
> When using `useChat()` with agent memory, refer to the [Agent Memory section](/docs/v1/agents/agent-memory) for key implementation details.
|
|
532
|
-
|
|
533
|
-
### Using `useCompletion()`
|
|
276
|
+
### `useCompletion()`
|
|
534
277
|
|
|
535
278
|
The `useCompletion()` hook handles single-turn completions between your frontend and a Mastra agent, allowing you to send a prompt and receive a streamed response over HTTP.
|
|
536
279
|
|
|
537
|
-
|
|
538
|
-
"use client";
|
|
280
|
+
Your frontend could look like this:
|
|
539
281
|
|
|
540
|
-
|
|
282
|
+
```typescript title="app/page.tsx" copy
|
|
283
|
+
import { useCompletion } from '@ai-sdk/react';
|
|
541
284
|
|
|
542
|
-
export function
|
|
285
|
+
export default function Page() {
|
|
543
286
|
const { completion, input, handleInputChange, handleSubmit } = useCompletion({
|
|
544
|
-
api:
|
|
287
|
+
api: '/api/completion',
|
|
545
288
|
});
|
|
546
289
|
|
|
547
290
|
return (
|
|
548
|
-
<
|
|
549
|
-
<
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
291
|
+
<form onSubmit={handleSubmit}>
|
|
292
|
+
<input
|
|
293
|
+
name="prompt"
|
|
294
|
+
value={input}
|
|
295
|
+
onChange={handleInputChange}
|
|
296
|
+
id="input"
|
|
297
|
+
/>
|
|
298
|
+
<button type="submit">Submit</button>
|
|
299
|
+
<div>{completion}</div>
|
|
300
|
+
</form>
|
|
554
301
|
);
|
|
555
302
|
}
|
|
556
303
|
```
|
|
557
304
|
|
|
558
|
-
|
|
305
|
+
Below are two approaches to implementing the backend:
|
|
306
|
+
|
|
307
|
+
<Tabs>
|
|
308
|
+
|
|
309
|
+
<TabItem value="mastra-server" label="Mastra Server">
|
|
310
|
+
|
|
311
|
+
```ts title="src/mastra/index.ts" copy
|
|
312
|
+
import { Mastra } from '@mastra/core/mastra';
|
|
313
|
+
import { registerApiRoute } from '@mastra/core/server';
|
|
314
|
+
import { handleChatStream } from '@mastra/ai-sdk';
|
|
315
|
+
import { createUIMessageStreamResponse } from 'ai';
|
|
316
|
+
|
|
317
|
+
export const mastra = new Mastra({
|
|
318
|
+
server: {
|
|
319
|
+
apiRoutes: [
|
|
320
|
+
registerApiRoute('/completion', {
|
|
321
|
+
method: 'POST',
|
|
322
|
+
handler: async (c) => {
|
|
323
|
+
const { prompt } = await c.req.json();
|
|
324
|
+
const mastra = c.get('mastra');
|
|
325
|
+
const stream = await handleChatStream({
|
|
326
|
+
mastra,
|
|
327
|
+
agentId: 'weatherAgent',
|
|
328
|
+
params: {
|
|
329
|
+
messages: [
|
|
330
|
+
{
|
|
331
|
+
id: "1",
|
|
332
|
+
role: 'user',
|
|
333
|
+
parts: [
|
|
334
|
+
{
|
|
335
|
+
type: 'text',
|
|
336
|
+
text: prompt
|
|
337
|
+
}
|
|
338
|
+
]
|
|
339
|
+
}
|
|
340
|
+
],
|
|
341
|
+
}
|
|
342
|
+
})
|
|
343
|
+
|
|
344
|
+
return createUIMessageStreamResponse({ stream });
|
|
345
|
+
}
|
|
346
|
+
})
|
|
347
|
+
]
|
|
348
|
+
}
|
|
349
|
+
});
|
|
350
|
+
```
|
|
351
|
+
|
|
352
|
+
</TabItem>
|
|
559
353
|
|
|
560
|
-
|
|
561
|
-
|
|
354
|
+
<TabItem value="nextjs" label="Next.js">
|
|
355
|
+
|
|
356
|
+
```ts title="app/completion/route.ts" copy
|
|
357
|
+
import { handleChatStream } from '@mastra/ai-sdk';
|
|
358
|
+
import { createUIMessageStreamResponse } from 'ai';
|
|
359
|
+
import { mastra } from '@/src/mastra';
|
|
360
|
+
|
|
361
|
+
// Allow streaming responses up to 30 seconds
|
|
362
|
+
export const maxDuration = 30;
|
|
562
363
|
|
|
563
364
|
export async function POST(req: Request) {
|
|
564
|
-
const { prompt } = await req.json();
|
|
565
|
-
|
|
566
|
-
const stream = await
|
|
567
|
-
|
|
365
|
+
const { prompt }: { prompt: string } = await req.json();
|
|
366
|
+
|
|
367
|
+
const stream = await handleChatStream({
|
|
368
|
+
mastra,
|
|
369
|
+
agentId: 'weatherAgent',
|
|
370
|
+
params: {
|
|
371
|
+
messages: [
|
|
372
|
+
{
|
|
373
|
+
id: "1",
|
|
374
|
+
role: 'user',
|
|
375
|
+
parts: [
|
|
376
|
+
{
|
|
377
|
+
type: 'text',
|
|
378
|
+
text: prompt
|
|
379
|
+
}
|
|
380
|
+
]
|
|
381
|
+
}
|
|
382
|
+
],
|
|
383
|
+
},
|
|
568
384
|
});
|
|
569
|
-
|
|
570
|
-
return stream.toUIMessageStreamResponse();
|
|
385
|
+
return createUIMessageStreamResponse({ stream });
|
|
571
386
|
}
|
|
572
387
|
```
|
|
573
388
|
|
|
389
|
+
</TabItem>
|
|
390
|
+
|
|
391
|
+
</Tabs>
|
|
392
|
+
|
|
393
|
+
## Recipes
|
|
394
|
+
|
|
395
|
+
### Stream transformations
|
|
396
|
+
|
|
397
|
+
To manually transform Mastra's streams to AI SDK-compatible format, use the [`toAISdkStream()`](/reference/v1/ai-sdk/to-ai-sdk-stream) utility. See the [examples](/reference/v1/ai-sdk/to-ai-sdk-stream#examples) for concrete usage patterns.
|
|
398
|
+
|
|
574
399
|
### Passing additional data
|
|
575
400
|
|
|
576
|
-
`sendMessage()` allows you to pass additional data from the frontend to Mastra. This data can then be used on the server as `RequestContext
|
|
401
|
+
[`sendMessage()`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat#send-message) allows you to pass additional data from the frontend to Mastra. This data can then be used on the server as [`RequestContext`](/docs/v1/server-db/request-context).
|
|
577
402
|
|
|
578
|
-
|
|
579
|
-
"use client";
|
|
403
|
+
Here's an example of the frontend code:
|
|
580
404
|
|
|
405
|
+
```typescript {15-25} copy
|
|
581
406
|
import { useChat } from "@ai-sdk/react";
|
|
582
407
|
import { useState } from "react";
|
|
583
408
|
import { DefaultChatTransport } from 'ai';
|
|
584
409
|
|
|
585
|
-
export function
|
|
410
|
+
export function ChatAdditional() {
|
|
586
411
|
const [inputValue, setInputValue] = useState('')
|
|
587
412
|
const { messages, sendMessage } = useChat({
|
|
588
413
|
transport: new DefaultChatTransport({
|
|
589
|
-
api: 'http://localhost:4111/chat',
|
|
414
|
+
api: 'http://localhost:4111/chat-extra',
|
|
590
415
|
}),
|
|
591
416
|
});
|
|
592
417
|
|
|
@@ -609,20 +434,65 @@ export function ChatExtra() {
|
|
|
609
434
|
<div>
|
|
610
435
|
<pre>{JSON.stringify(messages, null, 2)}</pre>
|
|
611
436
|
<form onSubmit={handleFormSubmit}>
|
|
612
|
-
<input value={inputValue} onChange={e=>setInputValue(e.target.value)} placeholder="Name of city" />
|
|
437
|
+
<input value={inputValue} onChange={e => setInputValue(e.target.value)} placeholder="Name of the city" />
|
|
613
438
|
</form>
|
|
614
439
|
</div>
|
|
615
440
|
);
|
|
616
441
|
}
|
|
617
442
|
```
|
|
618
443
|
|
|
619
|
-
|
|
620
|
-
|
|
444
|
+
Below are two examples of how to implement the backend portion:
|
|
445
|
+
|
|
446
|
+
<Tabs>
|
|
447
|
+
|
|
448
|
+
<TabItem value="mastra-server" label="Mastra Server">
|
|
449
|
+
|
|
450
|
+
Add a `chatRoute()` to your Mastra configuration like shown above. Then, add a server-level middleware:
|
|
451
|
+
|
|
452
|
+
```typescript title="src/mastra/index.ts" copy
|
|
453
|
+
import { Mastra } from "@mastra/core";
|
|
454
|
+
|
|
455
|
+
export const mastra = new Mastra({
|
|
456
|
+
server: {
|
|
457
|
+
middleware: [
|
|
458
|
+
async (c, next) => {
|
|
459
|
+
const requestContext = c.get("requestContext");
|
|
460
|
+
|
|
461
|
+
if (c.req.method === "POST") {
|
|
462
|
+
const clonedReq = c.req.raw.clone();
|
|
463
|
+
const body = await clonedReq.json();
|
|
464
|
+
|
|
465
|
+
if (body?.data) {
|
|
466
|
+
for (const [key, value] of Object.entries(body.data)) {
|
|
467
|
+
requestContext.set(key, value);
|
|
468
|
+
}
|
|
469
|
+
}
|
|
470
|
+
}
|
|
471
|
+
await next();
|
|
472
|
+
},
|
|
473
|
+
],
|
|
474
|
+
},
|
|
475
|
+
});
|
|
476
|
+
```
|
|
477
|
+
|
|
478
|
+
:::info
|
|
479
|
+
|
|
480
|
+
You can access this data in your tools via the `requestContext` parameter. See the [Request Context documentation](/docs/v1/server-db/request-context) for more details.
|
|
481
|
+
|
|
482
|
+
:::
|
|
483
|
+
|
|
484
|
+
</TabItem>
|
|
485
|
+
|
|
486
|
+
<TabItem value="nextjs" label="Next.js">
|
|
487
|
+
|
|
488
|
+
```typescript title="app/chat-extra/route.ts" copy
|
|
489
|
+
import { handleChatStream } from '@mastra/ai-sdk';
|
|
621
490
|
import { RequestContext } from "@mastra/core/request-context";
|
|
491
|
+
import { createUIMessageStreamResponse } from 'ai';
|
|
492
|
+
import { mastra } from '@/src/mastra';
|
|
622
493
|
|
|
623
494
|
export async function POST(req: Request) {
|
|
624
495
|
const { messages, data } = await req.json();
|
|
625
|
-
const myAgent = mastra.getAgent("weatherAgent");
|
|
626
496
|
|
|
627
497
|
const requestContext = new RequestContext();
|
|
628
498
|
|
|
@@ -632,46 +502,126 @@ export async function POST(req: Request) {
|
|
|
632
502
|
}
|
|
633
503
|
}
|
|
634
504
|
|
|
635
|
-
const stream = await
|
|
636
|
-
|
|
637
|
-
|
|
505
|
+
const stream = await handleChatStream({
|
|
506
|
+
mastra,
|
|
507
|
+
agentId: 'weatherAgent',
|
|
508
|
+
params: {
|
|
509
|
+
messages,
|
|
510
|
+
requestContext,
|
|
511
|
+
},
|
|
638
512
|
});
|
|
639
|
-
return stream
|
|
513
|
+
return createUIMessageStreamResponse({ stream });
|
|
640
514
|
}
|
|
641
515
|
```
|
|
642
516
|
|
|
643
|
-
|
|
517
|
+
</TabItem>
|
|
644
518
|
|
|
645
|
-
|
|
519
|
+
</Tabs>
|
|
646
520
|
|
|
647
|
-
|
|
648
|
-
import { Mastra } from "@mastra/core";
|
|
521
|
+
### Custom UI
|
|
649
522
|
|
|
650
|
-
|
|
651
|
-
agents: { weatherAgent },
|
|
652
|
-
server: {
|
|
653
|
-
middleware: [
|
|
654
|
-
async (c, next) => {
|
|
655
|
-
const requestContext = c.get("requestContext");
|
|
523
|
+
The `@mastra/ai-sdk` package transforms and emits Mastra streams (e.g workflow, network streams) into AI SDK-compatible [uiMessages DataParts](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#datauipart) format.
|
|
656
524
|
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
525
|
+
- **Top-level parts**: These are streamed via direct workflow and network stream transformations (e.g in `workflowRoute()` and `networkRoute()`)
|
|
526
|
+
- `data-workflow`: Aggregates a workflow run with step inputs/outputs and final usage.
|
|
527
|
+
- `data-network`: Aggregates a routing/network run with ordered steps (agent/workflow/tool executions) and outputs.
|
|
528
|
+
|
|
529
|
+
- **Nested parts**: These are streamed via nested and merged streams from within a tool's `execute()` method.
|
|
530
|
+
- `data-tool-workflow`: Nested workflow emitted from within a tool stream.
|
|
531
|
+
- `data-tool-network`: Nested network emitted from within an tool stream.
|
|
532
|
+
- `data-tool-agent`: Nested agent emitted from within an tool stream.
|
|
533
|
+
|
|
534
|
+
Here's an example: For a [nested agent stream within a tool](/docs/v1/streaming/tool-streaming#tool-using-an-agent), `data-tool-agent` UI message parts will be emitted and can be leveraged on the client as documented below:
|
|
535
|
+
|
|
536
|
+
```typescript title="app/page.tsx" copy
|
|
537
|
+
"use client";
|
|
538
|
+
|
|
539
|
+
import { useChat } from "@ai-sdk/react";
|
|
540
|
+
import { AgentTool } from '../ui/agent-tool';
|
|
541
|
+
import { DefaultChatTransport } from 'ai';
|
|
542
|
+
import type { AgentDataPart } from "@mastra/ai-sdk";
|
|
543
|
+
|
|
544
|
+
export default function Page() {
|
|
545
|
+
const { messages } = useChat({
|
|
546
|
+
transport: new DefaultChatTransport({
|
|
547
|
+
api: 'http://localhost:4111/chat',
|
|
548
|
+
}),
|
|
549
|
+
});
|
|
550
|
+
|
|
551
|
+
return (
|
|
552
|
+
<div>
|
|
553
|
+
{messages.map((message) => (
|
|
554
|
+
<div key={message.id}>
|
|
555
|
+
{message.parts.map((part, i) => {
|
|
556
|
+
switch (part.type) {
|
|
557
|
+
case 'data-tool-agent':
|
|
558
|
+
return (
|
|
559
|
+
<AgentTool {...part.data as AgentDataPart} key={`${message.id}-${i}`} />
|
|
560
|
+
);
|
|
561
|
+
default:
|
|
562
|
+
return null;
|
|
666
563
|
}
|
|
667
|
-
}
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
});
|
|
564
|
+
})}
|
|
565
|
+
</div>
|
|
566
|
+
))}
|
|
567
|
+
</div>
|
|
568
|
+
);
|
|
569
|
+
}
|
|
674
570
|
```
|
|
675
571
|
|
|
676
|
-
|
|
572
|
+
```typescript title="ui/agent-tool.ts" copy
|
|
573
|
+
import { Tool, ToolContent, ToolHeader, ToolOutput } from "../ai-elements/tool";
|
|
574
|
+
import type { AgentDataPart } from "@mastra/ai-sdk";
|
|
575
|
+
|
|
576
|
+
export const AgentTool = ({ id, text, status }: AgentDataPart) => {
|
|
577
|
+
return (
|
|
578
|
+
<Tool>
|
|
579
|
+
<ToolHeader
|
|
580
|
+
type={`${id}`}
|
|
581
|
+
state={status === 'finished' ? 'output-available' : 'input-available'}
|
|
582
|
+
/>
|
|
583
|
+
<ToolContent>
|
|
584
|
+
<ToolOutput output={text} />
|
|
585
|
+
</ToolContent>
|
|
586
|
+
</Tool>
|
|
587
|
+
);
|
|
588
|
+
};
|
|
589
|
+
```
|
|
590
|
+
|
|
591
|
+
### Custom Tool streaming
|
|
592
|
+
|
|
593
|
+
To stream custom data parts from within your tool execution function, use the `writer.custom()` method.
|
|
594
|
+
|
|
595
|
+
:::tip
|
|
596
|
+
|
|
597
|
+
It is important that you `await` the `writer.custom()` call.
|
|
598
|
+
|
|
599
|
+
:::
|
|
600
|
+
|
|
601
|
+
```typescript {4,7-10,14-17} copy
|
|
602
|
+
import { createTool } from "@mastra/core/tools";
|
|
603
|
+
|
|
604
|
+
export const testTool = createTool({
|
|
605
|
+
execute: async ({ context, writer }) => {
|
|
606
|
+
const { value } = context;
|
|
607
|
+
|
|
608
|
+
await writer?.custom({
|
|
609
|
+
type: "data-tool-progress",
|
|
610
|
+
status: "pending"
|
|
611
|
+
});
|
|
612
|
+
|
|
613
|
+
const response = await fetch(...);
|
|
614
|
+
|
|
615
|
+
await writer?.custom({
|
|
616
|
+
type: "data-tool-progress",
|
|
617
|
+
status: "success"
|
|
618
|
+
});
|
|
619
|
+
|
|
620
|
+
return {
|
|
621
|
+
value: ""
|
|
622
|
+
};
|
|
623
|
+
}
|
|
624
|
+
});
|
|
625
|
+
```
|
|
677
626
|
|
|
627
|
+
For more information about tool streaming see [Tool streaming documentation](/docs/v1/streaming/tool-streaming).
|