@mastra/mcp-docs-server 0.13.12 → 0.13.13-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75) hide show
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +24 -24
  2. package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
  3. package/.docs/organized/changelogs/%40mastra%2Fastra.md +25 -25
  4. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +26 -26
  5. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +39 -39
  6. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +41 -41
  7. package/.docs/organized/changelogs/%40mastra%2Fcloud.md +25 -25
  8. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +35 -35
  9. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +34 -34
  10. package/.docs/organized/changelogs/%40mastra%2Fcore.md +48 -48
  11. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +25 -25
  12. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +45 -45
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +45 -45
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +45 -45
  15. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +63 -63
  16. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +34 -34
  17. package/.docs/organized/changelogs/%40mastra%2Fevals.md +26 -26
  18. package/.docs/organized/changelogs/%40mastra%2Ffastembed.md +7 -0
  19. package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +27 -27
  20. package/.docs/organized/changelogs/%40mastra%2Fgithub.md +25 -25
  21. package/.docs/organized/changelogs/%40mastra%2Flance.md +34 -34
  22. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +38 -38
  23. package/.docs/organized/changelogs/%40mastra%2Floggers.md +25 -25
  24. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +42 -42
  25. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +25 -25
  26. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +27 -27
  27. package/.docs/organized/changelogs/%40mastra%2Fmem0.md +25 -25
  28. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +44 -44
  29. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +34 -34
  30. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +33 -0
  31. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +25 -25
  32. package/.docs/organized/changelogs/%40mastra%2Fpg.md +35 -35
  33. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +25 -25
  34. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +47 -47
  35. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +25 -25
  36. package/.docs/organized/changelogs/%40mastra%2Frag.md +29 -29
  37. package/.docs/organized/changelogs/%40mastra%2Fragie.md +25 -25
  38. package/.docs/organized/changelogs/%40mastra%2Fserver.md +47 -47
  39. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +25 -25
  40. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +34 -34
  41. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +25 -25
  42. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +25 -25
  43. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +25 -25
  44. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +25 -25
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +25 -25
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +24 -0
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +27 -27
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +25 -25
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +25 -25
  50. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +25 -25
  51. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +25 -25
  52. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +25 -25
  53. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +25 -25
  54. package/.docs/organized/changelogs/create-mastra.md +13 -13
  55. package/.docs/organized/changelogs/mastra.md +58 -58
  56. package/.docs/organized/code-examples/a2a.md +1 -1
  57. package/.docs/organized/code-examples/agent-network.md +1 -1
  58. package/.docs/organized/code-examples/agent.md +4 -4
  59. package/.docs/organized/code-examples/agui.md +1 -1
  60. package/.docs/organized/code-examples/ai-sdk-v5.md +2 -2
  61. package/.docs/organized/code-examples/client-side-tools.md +1 -1
  62. package/.docs/organized/code-examples/experimental-auth-weather-agent.md +1 -1
  63. package/.docs/organized/code-examples/fireworks-r1.md +1 -1
  64. package/.docs/organized/code-examples/mcp-registry-registry.md +1 -1
  65. package/.docs/organized/code-examples/memory-per-resource-example.md +1 -1
  66. package/.docs/organized/code-examples/memory-with-context.md +1 -1
  67. package/.docs/organized/code-examples/memory-with-processors.md +1 -1
  68. package/.docs/organized/code-examples/openapi-spec-writer.md +1 -1
  69. package/.docs/organized/code-examples/weather-agent.md +1 -1
  70. package/.docs/raw/agents/output-processors.mdx +20 -68
  71. package/.docs/raw/agents/streaming.mdx +50 -20
  72. package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +41 -59
  73. package/.docs/raw/reference/agents/streamVNext.mdx +147 -70
  74. package/.docs/raw/server-db/storage.mdx +8 -0
  75. package/package.json +4 -4
@@ -3,7 +3,7 @@
3
3
  {
4
4
  "name": "examples-mcp-registry-registry",
5
5
  "dependencies": {
6
- "@ai-sdk/openai": "^1.3.22",
6
+ "@ai-sdk/openai": "^1.3.24",
7
7
  "@mastra/core": "latest",
8
8
  "@mastra/mcp": "latest",
9
9
  "@mastra/mcp-registry-registry": "latest",
@@ -3,7 +3,7 @@
3
3
  {
4
4
  "name": "memory-per-resource-example",
5
5
  "dependencies": {
6
- "@ai-sdk/openai": "^1.3.22",
6
+ "@ai-sdk/openai": "^1.3.24",
7
7
  "@mastra/core": "latest",
8
8
  "@mastra/memory": "latest",
9
9
  "@mastra/libsql": "latest",
@@ -3,7 +3,7 @@
3
3
  {
4
4
  "name": "memory-with-context",
5
5
  "dependencies": {
6
- "@ai-sdk/openai": "^1.3.22",
6
+ "@ai-sdk/openai": "^1.3.24",
7
7
  "@mastra/core": "latest",
8
8
  "@mastra/memory": "latest",
9
9
  "chalk": "^5.4.1",
@@ -6,7 +6,7 @@
6
6
  "@ai-sdk/openai": "latest",
7
7
  "@mastra/core": "latest",
8
8
  "@mastra/memory": "latest",
9
- "ai": "^4.3.16",
9
+ "ai": "^4.3.19",
10
10
  "chalk": "^5.4.1",
11
11
  "dotenv": "^16.3.1",
12
12
  "js-tiktoken": "^1.0.13",
@@ -3,7 +3,7 @@
3
3
  {
4
4
  "name": "examples-openapi-spec-writer",
5
5
  "dependencies": {
6
- "@ai-sdk/openai": "^1.3.22",
6
+ "@ai-sdk/openai": "^1.3.24",
7
7
  "@mastra/core": "latest",
8
8
  "@mastra/firecrawl": "latest",
9
9
  "@mastra/github": "latest",
@@ -3,7 +3,7 @@
3
3
  {
4
4
  "name": "examples-weather-agent",
5
5
  "dependencies": {
6
- "@ai-sdk/openai": "^1.3.22",
6
+ "@ai-sdk/openai": "^1.3.24",
7
7
  "@mastra/core": "latest",
8
8
  "@mastra/libsql": "latest",
9
9
  "zod": "^3.25.67"
@@ -82,42 +82,6 @@ Strategies available:
82
82
  - `filter`: Remove messages containing PII
83
83
  - `redact`: Replace PII with placeholder values
84
84
 
85
- ### `StructuredOutputProcessor`
86
-
87
- This processor converts unstructured LLM text responses into structured data using an internal agent. It preserves the original text while adding structured data to the response metadata as well as to result.object.
88
-
89
- ```typescript copy showLineNumbers {5-15}
90
- import { StructuredOutputProcessor } from "@mastra/core/processors";
91
- import { z } from "zod";
92
-
93
- const agent = new Agent({
94
- outputProcessors: [
95
- new StructuredOutputProcessor({
96
- schema: z.object({
97
- sentiment: z.enum(['positive', 'negative', 'neutral']),
98
- confidence: z.number().min(0).max(1),
99
- topics: z.array(z.string()),
100
- }),
101
- model: openai("gpt-4o-mini"),
102
- errorStrategy: 'warn', // Log warnings but continue on errors
103
- instructions: 'Analyze the sentiment and extract key topics from the response',
104
- }),
105
- ],
106
- });
107
-
108
- const result = await agent.generate("Some conversational text")
109
-
110
- console.log(result.object) // { sentiment: "positive", confidence: 0.6, topics: ["foo", "bar"] }
111
- ```
112
-
113
- Available options:
114
- - `schema`: Zod schema defining the expected structured output (required)
115
- - `model`: Language model for the internal structuring agent (required)
116
- - `errorStrategy`: Strategy when parsing or validation fails ('strict' | 'warn' | 'fallback', default: 'strict')
117
- - `fallbackValue`: Fallback value when errorStrategy is 'fallback'
118
- - `instructions`: Custom instructions for the structuring agent
119
-
120
- The structured data is stored in `result.object` and the original text is preserved in `result.text`.
121
85
 
122
86
  ### `BatchPartsProcessor`
123
87
 
@@ -199,21 +163,15 @@ You can chain multiple output processors. They execute sequentially in the order
199
163
  ```typescript copy showLineNumbers {9-18}
200
164
  import { Agent } from "@mastra/core/agent";
201
165
  import {
202
- UnicodeNormalizer,
203
166
  ModerationProcessor,
204
- PromptInjectionDetector,
205
167
  PIIDetector
206
168
  } from "@mastra/core/processors";
207
169
 
208
170
  const secureAgent = new Agent({
209
171
  outputProcessors: [
210
- // 1. Normalize text first
211
- new UnicodeNormalizer({ stripControlChars: true }),
212
- // 2. Check for security threats
213
- new PromptInjectionDetector({ model: openai("gpt-4.1-nano") }),
214
- // 3. Moderate content
172
+ // 1. Check for security threats
215
173
  new ModerationProcessor({ model: openai("gpt-4.1-nano") }),
216
- // 4. Handle PII last
174
+ // 2. Handle PII
217
175
  new PIIDetector({ model: openai("gpt-4.1-nano"), strategy: 'redact' }),
218
176
  ],
219
177
  });
@@ -226,40 +184,34 @@ You can create custom output processors by implementing the `Processor` interfac
226
184
  ### Streaming Output Processor
227
185
 
228
186
  ```typescript copy showLineNumbers {4-25}
229
- import type { Processor, MastraMessageV2, TripWire } from "@mastra/core/processors";
230
- import type { TextStreamPart, ObjectStreamPart } from 'ai';
187
+ import type { Processor, MastraMessageV2 } from "@mastra/core/processors";
188
+ import type { ChunkType } from "@mastra/core/stream";
231
189
 
232
190
  class ResponseLengthLimiter implements Processor {
233
- readonly name = 'response-length-limiter-and-lower-case-results';
191
+ readonly name = 'response-length-limiter';
234
192
 
235
193
  constructor(private maxLength: number = 1000) {}
236
194
 
237
- async processOutputStream({ chunk, streamParts, state, abort }: { // will run on every stream part emitted from the LLM
238
- chunk: TextStreamPart<any> | ObjectStreamPart<any>;
239
- streamParts: (TextStreamPart<any> | ObjectStreamPart<any>)[];
195
+ async processOutputStream({ part, streamParts, state, abort }: {
196
+ part: ChunkType;
197
+ streamParts: ChunkType[];
240
198
  state: Record<string, any>;
241
199
  abort: (reason?: string) => never;
242
- }): Promise<TextStreamPart<any> | ObjectStreamPart<any> | null> {
200
+ }): Promise<ChunkType | null | undefined> {
243
201
  // Track cumulative length in state, each processor gets its own state
244
202
  if (!state.cumulativeLength) {
245
203
  state.cumulativeLength = 0;
246
204
  }
247
205
 
248
- const shouldEmitChunk = chunk?.textDelta?.includes('foo');
249
-
250
- if (chunk.type === 'text-delta') {
251
- state.cumulativeLength += chunk.textDelta.length;
206
+ if (part.type === 'text-delta') {
207
+ state.cumulativeLength += part.payload.text.length;
252
208
 
253
209
  if (state.cumulativeLength > this.maxLength) {
254
210
  abort(`Response too long: ${state.cumulativeLength} characters (max: ${this.maxLength})`);
255
211
  }
256
212
  }
257
213
 
258
- if (shouldEmitChunk) {
259
- return chunk; // Emit the chunk
260
- } else {
261
- return null; // Emit nothing
262
- }
214
+ return part; // Emit the part
263
215
  }
264
216
  }
265
217
  ```
@@ -267,7 +219,7 @@ class ResponseLengthLimiter implements Processor {
267
219
  ### Final Result Processor
268
220
 
269
221
  ```typescript copy showLineNumbers {4-19}
270
- import type { Processor, MastraMessageV2, TripWire } from "@mastra/core/processors";
222
+ import type { Processor, MastraMessageV2 } from "@mastra/core/processors";
271
223
 
272
224
  class ResponseValidator implements Processor {
273
225
  readonly name = 'response-validator';
@@ -299,9 +251,9 @@ class ResponseValidator implements Processor {
299
251
  ```
300
252
 
301
253
  When creating custom output processors:
302
- - Always return the processed data (chunks or messages)
254
+ - Always return the processed data (parts or messages)
303
255
  - Use `abort(reason)` to terminate processing early. Abort is used to simulate blocking a response. Errors thrown with `abort` will be instances of `TripWire`.
304
- - For streaming processors, return `null` or `undefined` to skip emitting a chunk
256
+ - For streaming processors, return `null` or `undefined` to skip emitting a part
305
257
  - Keep processors focused on a single responsibility
306
258
  - If using an agent inside your processor, use a fast model, limit the size of the response from it as much as possible, and make the system prompt as concise as possible.
307
259
 
@@ -315,10 +267,10 @@ const result = await agent.generate('Hello');
315
267
  console.log(result.text); // Processed text
316
268
  console.log(result.object); // Structured data if applicable
317
269
 
318
- // Processors also run during streamVNext() for each chunk
270
+ // Processors also run during streamVNext() for each part
319
271
  const stream = await agent.streamVNext('Hello');
320
- for await (const chunk of stream) {
321
- console.log(chunk); // Processed chunks
272
+ for await (const part of stream) {
273
+ console.log(part); // Processed parts
322
274
  }
323
275
  ```
324
276
 
@@ -342,9 +294,9 @@ const stream = await agent.streamVNext('Hello', {
342
294
  });
343
295
  ```
344
296
 
345
- ### Structured Output with Better DX
297
+ ### Structured Output Processor
346
298
 
347
- For better developer experience with structured output, you can use the `structuredOutput` option:
299
+ To use the `StructuredOutputProcessor`, pass the `structuredOutput` option:
348
300
 
349
301
  ```typescript copy showLineNumbers
350
302
  import { z } from "zod";
@@ -5,46 +5,76 @@ description: Documentation on how to stream agents
5
5
 
6
6
  import { Callout } from "nextra/components";
7
7
 
8
- # Agent Streaming
8
+ # Agent Streaming (VNext)
9
9
 
10
10
  Agents in Mastra support streaming responses for real-time interaction with clients. This enables progressive rendering of responses and better user experience.
11
11
 
12
12
  <Callout type="info">
13
- **Experimental API**: The `streamVNext` method shown in this guide is an experimental feature that will replace the current `stream()` method after additional testing and refinement. For production use, consider using the stable [`stream()` method](/docs/agents/overview#2-generating-and-streaming-text) until `streamVNext` is finalized.
13
+ **Experimental API**: The `streamVNext` method shown in this guide is an experimental feature with enhanced streaming format support. It will replace the current `stream()` method after additional testing and refinement. For production use, consider using the stable [`stream()` method](/docs/agents/overview#streaming-responses) until `streamVNext` is finalized.
14
14
  </Callout>
15
15
 
16
16
  ## Usage
17
17
 
18
- The experimental streaming protocol uses the `streamVNext` method on an agent. This method returns a custom MastraAgentStream that extends ReadableStream with additional utilities.
18
+ The experimental streaming protocol uses the `streamVNext` method on an agent. This method now supports multiple output stream formats: Mastra (default) and AI SDK v5.
19
+
20
+
21
+ ## Format Parameter
22
+
23
+ The `format` parameter determines the output stream type:
24
+
25
+ - **`'mastra'` (default)**: Returns `MastraModelOutput` - Mastra's native streaming format
26
+ - **`'aisdk'`**: Returns `AISDKV5OutputStream` - Compatible with AI SDK v5 interfaces like useChat.
27
+
28
+ ```typescript
29
+ // Mastra format (default)
30
+ const mastraStream = await agent.streamVNext("Hello");
31
+
32
+ // AI SDK v5 format
33
+ const aiSdkStream = await agent.streamVNext("Hello", {
34
+ format: 'aisdk'
35
+ });
36
+ ```
37
+
38
+ ### Default Mastra Format
39
+
40
+ By default, `streamVNext` returns a `MastraModelOutput` stream:
19
41
 
20
42
  ```typescript
21
- const stream = await agent.streamVNext({ role: "user", content: "Tell me a story." });
43
+ const stream = await agent.streamVNext("Tell me a story.");
22
44
 
23
- for await (const chunk of stream) {
45
+ // Access the text stream
46
+ for await (const chunk of stream.textStream) {
24
47
  console.log(chunk);
25
48
  }
49
+
50
+ // Or get the full text after streaming
51
+ const fullText = await stream.text;
26
52
  ```
27
53
 
28
- Each chunk is a JSON object with the following properties:
54
+ ### AI SDK v5 Compatibility
29
55
 
30
- ```json
31
- {
32
- type: string;
33
- runId: string;
34
- from: string;
35
- payload: Record<string, any>;
56
+ For integration with AI SDK v5, use the `format: 'aisdk'` parameter to get an `AISDKV5OutputStream`:
57
+
58
+ ```typescript
59
+ const stream = await agent.streamVNext("Tell me a story.", {
60
+ format: 'aisdk'
61
+ });
62
+
63
+ // The stream is now compatible with AI SDK v5 interfaces
64
+ for await (const chunk of stream.fullStream) {
65
+ // Process AI SDK v5 formatted chunks
66
+ console.log(chunk);
36
67
  }
37
68
  ```
38
69
 
39
- The stream provides several utility functions for working with streaming responses:
70
+ ## Stream Properties
71
+
72
+ Both stream formats provide access to various response properties:
40
73
 
41
- - `stream.finishReason` - The reason the agent stopped streaming.
42
- - `stream.toolCalls` - The tool calls made by the agent.
43
- - `stream.toolResults` - The tool results returned by the agent.
44
- - `stream.usage` - The total token usage of the agent, including agents/workflows as a tool.
45
- - `stream.text` - The full text of the agent's response.
46
- - `stream.object` - The object of the agent's response, if you use output or experimental output.
47
- - `stream.textStream` - A readable stream that will emit the text of the agent's response.
74
+ - `stream.textStream` - A readable stream that emits text chunks
75
+ - `stream.text` - Promise that resolves to the full text response
76
+ - `stream.finishReason` - The reason the agent stopped streaming
77
+ - `stream.usage` - Token usage information
48
78
 
49
79
  ### How to use the stream in a tool
50
80
 
@@ -9,9 +9,9 @@ import { Callout, Tabs } from "nextra/components";
9
9
 
10
10
  Mastra integrates with [Vercel's AI SDK](https://sdk.vercel.ai) to support model routing, React Hooks, and data streaming methods.
11
11
 
12
- ## AI SDK v5 (beta)
12
+ ## AI SDK v5
13
13
 
14
- Mastra also supports AI SDK v5 (beta) see the following section for v5 specific methods: [Vercel AI SDK v5](/docs/frameworks/agentic-uis/ai-sdk#vercel-ai-sdk-v5)
14
+ Mastra also supports AI SDK v5; see the following section for v5-specific methods: [Vercel AI SDK v5](/docs/frameworks/agentic-uis/ai-sdk#vercel-ai-sdk-v5)
15
15
 
16
16
  <Callout type="warning">
17
17
  The code examples contained with this page assume you're using the Next.js App Router at the root of your
@@ -380,94 +380,76 @@ export async function POST(req: Request) {
380
380
 
381
381
  ## Vercel AI SDK v5
382
382
 
383
- This guide covers Mastra-specific considerations when migrating from AI SDK v4 to v5 beta.
383
+ This guide covers Mastra-specific considerations when migrating from AI SDK v4 to v5.
384
384
 
385
385
  > Please add any feedback or bug reports to the [AI SDK v5 mega issue in Github.](https://github.com/mastra-ai/mastra/issues/5470)
386
386
 
387
+ ### Experimental streamVNext Support
388
+
389
+ Mastra's experimental `streamVNext` method now includes native AI SDK v5 support through the `format` parameter. This provides seamless integration with AI SDK v5's streaming interfaces without requiring compatibility wrappers.
390
+
391
+ ```typescript
392
+ // Use streamVNext with AI SDK v5 format
393
+ const stream = await agent.streamVNext(messages, {
394
+ format: 'aisdk' // Enable AI SDK v5 compatibility
395
+ });
396
+
397
+ // The stream is now compatible with AI SDK v5 interfaces
398
+ return stream.toUIMessageStreamResponse();
399
+ ```
400
+
387
401
  ### Official migration guide
388
402
 
389
403
  Follow the official [AI SDK v5 Migration Guide](https://v5.ai-sdk.dev/docs/migration-guides/migration-guide-5-0) for all AI SDK core breaking changes, package updates, and API changes.
390
404
 
391
405
  This guide covers only the Mastra-specific aspects of the migration.
392
406
 
393
- - **Data compatibility**: New data stored in v5 format will no longer work if you downgrade from the beta
394
- - **Backup recommendation**: Keep DB backups from before you upgrade to v5 beta
395
- - **Production use**: Wait for the AI SDK v5 stable release before using in production applications
396
- - **Prerelease status**: The Mastra `ai-v5` tag is a prerelease version and may have bugs
407
+ - **Data compatibility**: New data stored in v5 format will no longer work if you downgrade from v5 to v4
408
+ - **Backup recommendation**: Keep DB backups from before you upgrade to v5
397
409
 
398
410
  ### Memory and Storage
399
411
 
400
412
  Mastra automatically handles AI SDK v4 data using its internal `MessageList` class, which manages format conversion—including v4 to v5. No database migrations are required; your existing messages are translated on the fly and continue working after you upgrade.
401
413
 
402
- ### Migration strategy
403
-
404
- Migrating to AI SDK v5 with Mastra involves updating both your **backend** (Mastra server) and **frontend**.
405
- We provide a compatibility mode to handle stream format conversion during the transition.
406
-
407
- ### Upgrade dependencies
414
+ ### Message Format Conversion
408
415
 
409
- Install the `@ai-v5` prerelease version of all Mastra packages:
410
-
411
- <Tabs items={["npm", "yarn", "pnpm", "bun"]}>
412
- <Tabs.Tab>
413
- ```bash copy
414
- npm install mastra@ai-v5 @mastra/core@ai-v5 @mastra/memory@ai-v5
415
- ```
416
- </Tabs.Tab>
417
- <Tabs.Tab>
418
- ```bash copy
419
- yarn add mastra@ai-v5 @mastra/core@ai-v5 @mastra/memory@ai-v5
420
- ```
421
- </Tabs.Tab>
422
- <Tabs.Tab>
423
- ```bash copy
424
- pnpm add mastra@ai-v5 @mastra/core@ai-v5 @mastra/memory@ai-v5
425
- ```
426
- </Tabs.Tab>
427
- <Tabs.Tab>
428
- ```bash copy
429
- bun add mastra@ai-v5 @mastra/core@ai-v5 @mastra/memory@ai-v5
430
- ```
431
- </Tabs.Tab>
432
- </Tabs>
416
+ For cases where you need to manually convert messages between AI SDK and Mastra formats, use the `convertMessages` utility:
433
417
 
434
- Configure your Mastra instance with `v4` compatibility so your frontend, and the Mastra Playground will continue to work:
418
+ ```typescript
419
+ import { convertMessages } from '@mastra/core';
435
420
 
436
- ```typescript {7} filename="mastra/index.ts" showLineNumbers copy
437
- import { Mastra } from "@mastra/core/mastra";
421
+ // Convert AI SDK v4 messages to v5
422
+ const aiv5Messages = convertMessages(aiv4Messages).to('AIV5.UI');
438
423
 
439
- import { weatherAgent } from "./agents/weather-agent";
424
+ // Convert Mastra messages to AI SDK v5
425
+ const aiv5CoreMessages = convertMessages(mastraMessages).to('AIV5.Core');
440
426
 
441
- export const mastra = new Mastra({
442
- agents: { weatherAgent },
443
- aiSdkCompat: 'v4',
444
- });
427
+ // Supported output formats:
428
+ // 'Mastra.V2', 'AIV4.UI', 'AIV5.UI', 'AIV5.Core', 'AIV5.Model'
445
429
  ```
446
430
 
447
- ### Enabling stream compatibility
431
+ This utility is helpful when you want to fetch messages directly from your storage DB and convert them for use in AI SDK.
448
432
 
449
- If your frontend calls a Mastra agent using a custom API route, wrap the `toUIMessageStreamResponse()` result with `createV4CompatibleResponse` to maintain AI SDK `v4` compatibility.
433
+ ### Enabling stream compatibility
450
434
 
435
+ To enable AI SDK v5 compatibility, use the experimental `streamVNext` method with the `format` parameter:
451
436
 
452
437
  ```typescript filename="app/api/chat/route.ts" showLineNumbers copy
453
438
  import { mastra } from "../../../mastra";
454
- import { createV4CompatibleResponse } from "@mastra/core/agent";
455
439
 
456
440
  export async function POST(req: Request) {
457
441
  const { messages } = await req.json();
458
442
  const myAgent = mastra.getAgent("weatherAgent");
459
- const stream = await myAgent.stream(messages);
443
+
444
+ // Use streamVNext with AI SDK v5 format (experimental)
445
+ const stream = await myAgent.streamVNext(messages, {
446
+ format: 'aisdk'
447
+ });
460
448
 
461
- return createV4CompatibleResponse(stream.toUIMessageStreamResponse().body!);
449
+ return stream.toUIMessageStreamResponse();
462
450
  }
463
451
  ```
464
452
 
465
- ### Frontend upgrade
466
-
467
- When you're ready, remove the compatibility flag and upgrade your frontend:
468
-
469
- 1. Remove `aiSdkCompat: 'v4'` from your Mastra configuration
470
- 2. Follow the AI SDK guide on upgrading your frontend dependencies
471
- 3. Update your frontend code for v5 breaking changes
472
-
473
-
453
+ <Callout type="info">
454
+ **Note**: The `streamVNext` method with format support is experimental and may change as we refine the feature based on feedback. See the [Agent Streaming documentation](/docs/agents/streaming) for more details about streamVNext.
455
+ </Callout>