@mastra/mcp-docs-server 0.13.13-alpha.0 → 0.13.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +8 -8
- package/.docs/raw/agents/streaming.mdx +50 -20
- package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +41 -59
- package/.docs/raw/reference/agents/streamVNext.mdx +147 -70
- package/package.json +4 -4
package/.docs/organized/changelogs/%40mastra%2Fclient-js.md
@@ -1,5 +1,12 @@
 # @mastra/client-js

+## 0.10.23-alpha.1
+
+### Patch Changes
+
+- Updated dependencies [[`0f00e17`](https://github.com/mastra-ai/mastra/commit/0f00e172953ccdccadb35ed3d70f5e4d89115869), [`217cd7a`](https://github.com/mastra-ai/mastra/commit/217cd7a4ce171e9a575c41bb8c83300f4db03236)]:
+  - @mastra/core@0.14.1-alpha.1
+
 ## 0.10.23-alpha.0

 ### Patch Changes
@@ -291,12 +298,5 @@
 - b641ba3: fix: save score params
 - 9802f42: Added types and tests to ensure client-js and hono endpoints can save memory messages where the input is either a v1 or v2 mastra message
 - 1ac8f6b: deduplicate message
-
-
-- Single message ID as string: `deleteMessages('msg-123')`
-- Array of message IDs: `deleteMessages(['msg-1', 'msg-2'])`
-- Message object with id property: `deleteMessages({ id: 'msg-123' })`
-- Array of message objects: `deleteMessages([{ id: 'msg-1' }, { id: 'msg-2' }])`
-- Implemented in all storage adapters (LibSQL, PostgreSQL, Upstash, InMemory)
-
-... 2101 more lines hidden. See full changelog in package directory.
+
+... 2108 more lines hidden. See full changelog in package directory.
package/.docs/organized/changelogs/%40mastra%2Fcore.md
@@ -1,5 +1,13 @@
 # @mastra/core

+## 0.14.1-alpha.1
+
+### Patch Changes
+
+- [#6864](https://github.com/mastra-ai/mastra/pull/6864) [`0f00e17`](https://github.com/mastra-ai/mastra/commit/0f00e172953ccdccadb35ed3d70f5e4d89115869) Thanks [@TylerBarnes](https://github.com/TylerBarnes)! - Added a convertMessages(from).to("Mastra.V2" | "AIV\*") util for operating on DB messages directly
+
+- [#6927](https://github.com/mastra-ai/mastra/pull/6927) [`217cd7a`](https://github.com/mastra-ai/mastra/commit/217cd7a4ce171e9a575c41bb8c83300f4db03236) Thanks [@DanielSLew](https://github.com/DanielSLew)! - Fix output processors to match new stream types.
+
 ## 0.14.1-alpha.0

 ### Patch Changes
@@ -290,13 +298,5 @@

 ## 0.12.0

-### Minor Changes
-
-- 2ecf658: Added the option to provide a custom ID generator when creating an instance of Mastra. If the generator is not provided, a fallback of using UUID is used to generate IDs instead.
-
-### Patch Changes
-
-- 510e2c8: dependencies updates:
-  - Updated dependency [`radash@^12.1.1` ↗︎](https://www.npmjs.com/package/radash/v/12.1.1) (from `^12.1.0`, in `dependencies`)

-...
+... 2677 more lines hidden. See full changelog in package directory.
package/.docs/organized/changelogs/%40mastra%2Fdeployer.md
@@ -1,5 +1,13 @@
 # @mastra/deployer

+## 0.14.1-alpha.1
+
+### Patch Changes
+
+- Updated dependencies [[`0f00e17`](https://github.com/mastra-ai/mastra/commit/0f00e172953ccdccadb35ed3d70f5e4d89115869), [`217cd7a`](https://github.com/mastra-ai/mastra/commit/217cd7a4ce171e9a575c41bb8c83300f4db03236)]:
+  - @mastra/core@0.14.1-alpha.1
+  - @mastra/server@0.14.1-alpha.1
+
 ## 0.14.1-alpha.0

 ### Patch Changes
@@ -290,13 +298,5 @@

 - d0496e6: dependencies updates:
   - Updated dependency [`hono@^4.8.12` ↗︎](https://www.npmjs.com/package/hono/v/4.8.12) (from `^4.8.11`, in `dependencies`)
-- Updated dependencies [d0496e6]
-  - @mastra/core@0.13.0-alpha.3
-  - @mastra/server@0.13.0-alpha.3
-
-## 0.13.0-alpha.2
-
-### Patch Changes
-

-...
+... 3542 more lines hidden. See full changelog in package directory.
package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md
@@ -1,5 +1,12 @@
 # @mastra/mcp-docs-server

+## 0.13.13-alpha.1
+
+### Patch Changes
+
+- Updated dependencies [[`0f00e17`](https://github.com/mastra-ai/mastra/commit/0f00e172953ccdccadb35ed3d70f5e4d89115869), [`217cd7a`](https://github.com/mastra-ai/mastra/commit/217cd7a4ce171e9a575c41bb8c83300f4db03236)]:
+  - @mastra/core@0.14.1-alpha.1
+
 ## 0.13.13-alpha.0

 ### Patch Changes
@@ -291,12 +298,5 @@
 - Updated dependencies [3f89307]
 - Updated dependencies [9eda7d4]
 - Updated dependencies [9d49408]
-
-
-- Updated dependencies [254a36b]
-- Updated dependencies [2ecf658]
-- Updated dependencies [7a7754f]
-- Updated dependencies [fc92d80]
-- Updated dependencies [e0f73c6]
-
-... 1376 more lines hidden. See full changelog in package directory.
+
+... 1383 more lines hidden. See full changelog in package directory.
package/.docs/organized/changelogs/%40mastra%2Fserver.md
@@ -1,5 +1,12 @@
 # @mastra/server

+## 0.14.1-alpha.1
+
+### Patch Changes
+
+- Updated dependencies [[`0f00e17`](https://github.com/mastra-ai/mastra/commit/0f00e172953ccdccadb35ed3d70f5e4d89115869), [`217cd7a`](https://github.com/mastra-ai/mastra/commit/217cd7a4ce171e9a575c41bb8c83300f4db03236)]:
+  - @mastra/core@0.14.1-alpha.1
+
 ## 0.14.1-alpha.0

 ### Patch Changes
@@ -292,11 +299,4 @@

 ## 0.12.0

-
-
-- f42c4c2: update peer deps for packages to latest core range
-
-### Patch Changes
-
-
-... 2910 more lines hidden. See full changelog in package directory.
+... 2917 more lines hidden. See full changelog in package directory.
package/.docs/raw/agents/streaming.mdx
@@ -5,46 +5,76 @@ description: Documentation on how to stream agents

 import { Callout } from "nextra/components";

-# Agent Streaming
+# Agent Streaming (VNext)

 Agents in Mastra support streaming responses for real-time interaction with clients. This enables progressive rendering of responses and better user experience.

 <Callout type="info">
-  **Experimental API**: The `streamVNext` method shown in this guide is an experimental feature
+  **Experimental API**: The `streamVNext` method shown in this guide is an experimental feature with enhanced streaming format support. It will replace the current `stream()` method after additional testing and refinement. For production use, consider using the stable [`stream()` method](/docs/agents/overview#streaming-responses) until `streamVNext` is finalized.
 </Callout>

 ## Usage

-The experimental streaming protocol uses the `streamVNext` method on an agent. This method
+The experimental streaming protocol uses the `streamVNext` method on an agent. This method now supports multiple output stream formats, for Mastra (default) and AI SDK v5.
+
+
+## Format Parameter
+
+The `format` parameter determines the output stream type:
+
+- **`'mastra'` (default)**: Returns `MastraModelOutput` - Mastra's native streaming format
+- **`'aisdk'`**: Returns `AISDKV5OutputStream` - Compatible with AI SDK v5 interfaces like useChat.
+
+```typescript
+// Mastra format (default)
+const mastraStream = await agent.streamVNext("Hello");
+
+// AI SDK v5 format
+const aiSdkStream = await agent.streamVNext("Hello", {
+  format: 'aisdk'
+});
+```
+
+### Default Mastra Format
+
+By default, `streamVNext` returns a `MastraModelOutput` stream:

 ```typescript
-const stream = await agent.streamVNext(
+const stream = await agent.streamVNext("Tell me a story.");

-
+// Access the text stream
+for await (const chunk of stream.textStream) {
   console.log(chunk);
 }
+
+// Or get the full text after streaming
+const fullText = await stream.text;
 ```

-
+### AI SDK v5 Compatibility

-
-
-
-
-
-
+For integration with AI SDK v5, use the `format: 'aisdk'` parameter to get an `AISDKV5OutputStream`:
+
+```typescript
+const stream = await agent.streamVNext("Tell me a story.", {
+  format: 'aisdk'
+});
+
+// The stream is now compatible with AI SDK v5 interfaces
+for await (const chunk of stream.fullStream) {
+  // Process AI SDK v5 formatted chunks
+  console.log(chunk);
 }
 ```

-
+## Stream Properties
+
+Both stream formats provide access to various response properties:

-- `stream.
-- `stream.
-- `stream.
-- `stream.usage` -
-- `stream.text` - The full text of the agent's response.
-- `stream.object` - The object of the agent's response, if you use output or experimental output.
-- `stream.textStream` - A readable stream that will emit the text of the agent's response.
+- `stream.textStream` - A readable stream that emits text chunks
+- `stream.text` - Promise that resolves to the full text response
+- `stream.finishReason` - The reason the agent stopped streaming
+- `stream.usage` - Token usage information

 ### How to use the stream in a tool

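To put the stream properties listed above in context, here is a small illustrative sketch that is not part of the packaged docs: it drains the text stream and then reads the aggregate fields. It assumes a configured Mastra `agent` and uses only the properties named in the diff; the exact shape of `usage` is left unspecified there.

```typescript
// Illustrative sketch based on the stream properties documented in the diff above.
// Assumes a configured Mastra `agent`; `finishReason` and `usage` are awaited
// defensively since the diff does not state whether they resolve as promises.
const stream = await agent.streamVNext("Summarize the latest release notes.");

for await (const chunk of stream.textStream) {
  process.stdout.write(chunk); // progressive rendering of text chunks
}

console.log(await stream.text);         // full text response
console.log(await stream.finishReason); // why the agent stopped streaming
console.log(await stream.usage);        // token usage information
```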
package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx
@@ -9,9 +9,9 @@ import { Callout, Tabs } from "nextra/components";

 Mastra integrates with [Vercel's AI SDK](https://sdk.vercel.ai) to support model routing, React Hooks, and data streaming methods.

-## AI SDK v5
+## AI SDK v5

-Mastra also supports AI SDK v5
+Mastra also supports AI SDK v5 see the following section for v5 specific methods: [Vercel AI SDK v5](/docs/frameworks/agentic-uis/ai-sdk#vercel-ai-sdk-v5)

 <Callout type="warning">
   The code examples contained with this page assume you're using the Next.js App Router at the root of your
@@ -380,94 +380,76 @@ export async function POST(req: Request) {

 ## Vercel AI SDK v5

-This guide covers Mastra-specific considerations when migrating from AI SDK v4 to v5
+This guide covers Mastra-specific considerations when migrating from AI SDK v4 to v5.

 > Please add any feedback or bug reports to the [AI SDK v5 mega issue in Github.](https://github.com/mastra-ai/mastra/issues/5470)

+### Experimental streamVNext Support
+
+Mastra's experimental `streamVNext` method now includes native AI SDK v5 support through the `format` parameter. This provides seamless integration with AI SDK v5's streaming interfaces without requiring compatibility wrappers.
+
+```typescript
+// Use streamVNext with AI SDK v5 format
+const stream = await agent.streamVNext(messages, {
+  format: 'aisdk' // Enable AI SDK v5 compatibility
+});
+
+// The stream is now compatible with AI SDK v5 interfaces
+return stream.toUIMessageStreamResponse();
+```
+
 ### Official migration guide

 Follow the official [AI SDK v5 Migration Guide](https://v5.ai-sdk.dev/docs/migration-guides/migration-guide-5-0) for all AI SDK core breaking changes, package updates, and API changes.

 This guide covers only the Mastra-specific aspects of the migration.

-- **Data compatibility**: New data stored in v5 format will no longer work if you downgrade from
-- **Backup recommendation**: Keep DB backups from before you upgrade to v5
-- **Production use**: Wait for the AI SDK v5 stable release before using in production applications
-- **Prerelease status**: The Mastra `ai-v5` tag is a prerelease version and may have bugs
+- **Data compatibility**: New data stored in v5 format will no longer work if you downgrade from v5 to v4
+- **Backup recommendation**: Keep DB backups from before you upgrade to v5

 ### Memory and Storage

 Mastra automatically handles AI SDK v4 data using its internal `MessageList` class, which manages format conversion—including v4 to v5. No database migrations are required; your existing messages are translated on the fly and continue working after you upgrade.

-###
-
-Migrating to AI SDK v5 with Mastra involves updating both your **backend** (Mastra server) and **frontend**.
-We provide a compatibility mode to handle stream format conversion during the transition.
-
-### Upgrade dependencies
+### Message Format Conversion

-
-
-<Tabs items={["npm", "yarn", "pnpm", "bun"]}>
-  <Tabs.Tab>
-    ```bash copy
-    npm install mastra@ai-v5 @mastra/core@ai-v5 @mastra/memory@ai-v5
-    ```
-  </Tabs.Tab>
-  <Tabs.Tab>
-    ```bash copy
-    yarn add mastra@ai-v5 @mastra/core@ai-v5 @mastra/memory@ai-v5
-    ```
-  </Tabs.Tab>
-  <Tabs.Tab>
-    ```bash copy
-    pnpm add mastra@ai-v5 @mastra/core@ai-v5 @mastra/memory@ai-v5
-    ```
-  </Tabs.Tab>
-  <Tabs.Tab>
-    ```bash copy
-    bun add mastra@ai-v5 @mastra/core@ai-v5 @mastra/memory@ai-v5
-    ```
-  </Tabs.Tab>
-</Tabs>
+For cases where you need to manually convert messages between AI SDK and Mastra formats, use the `convertMessages` utility:

-
+```typescript
+import { convertMessages } from '@mastra/core';

-
-
+// Convert AI SDK v4 messages to v5
+const aiv5Messages = convertMessages(aiv4Messages).to('AIV5.UI');

-
+// Convert Mastra messages to AI SDK v5
+const aiv5Messages = convertMessages(mastraMessages).to('AIV5.Core');

-
-
-  aiSdkCompat: 'v4',
-});
+// Supported output formats:
+// 'Mastra.V2', 'AIV4.UI', 'AIV5.UI', 'AIV5.Core', 'AIV5.Model'
 ```

-
+This utility is helpful when you want to fetch messages directly from your storage DB and convert them for use in AI SDK.

-
+### Enabling stream compatibility

+To enable AI SDK v5 compatibility, use the experimental `streamVNext` method with the `format` parameter:

 ```typescript filename="app/api/chat/route.ts" showLineNumbers copy
 import { mastra } from "../../../mastra";
-import { createV4CompatibleResponse } from "@mastra/core/agent";

 export async function POST(req: Request) {
   const { messages } = await req.json();
   const myAgent = mastra.getAgent("weatherAgent");
-
+
+  // Use streamVNext with AI SDK v5 format (experimental)
+  const stream = await myAgent.streamVNext(messages, {
+    format: 'aisdk'
+  });

-  return
+  return stream.toUIMessageStreamResponse();
 }
 ```

-
-
-
-
-1. Remove `aiSdkCompat: 'v4'` from your Mastra configuration
-2. Follow the AI SDK guide on upgrading your frontend dependencies
-3. Update your frontend code for v5 breaking changes
-
-
+<Callout type="info">
+  **Note**: The `streamVNext` method with format support is experimental and may change as we refine the feature based on feedback. See the [Agent Streaming documentation](/docs/agents/streaming) for more details about streamVNext.
+</Callout>
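The updated guide above notes that the `'aisdk'` format is compatible with AI SDK v5 interfaces such as `useChat`. As a rough client-side counterpart to the `app/api/chat/route.ts` handler shown in the diff, the sketch below assumes AI SDK v5's React bindings (`@ai-sdk/react`); the hook and message shapes come from AI SDK v5 rather than from this package, so treat it as an approximation.

```tsx
"use client";
// Hypothetical client for the route handler shown in the diff above.
// Assumes AI SDK v5's `useChat`, which posts UI messages to /api/chat by default.
import { useChat } from "@ai-sdk/react";

export default function Chat() {
  const { messages, sendMessage } = useChat();

  return (
    <div>
      {messages.map((message) => (
        <div key={message.id}>
          {message.parts.map((part, i) =>
            part.type === "text" ? <span key={i}>{part.text}</span> : null,
          )}
        </div>
      ))}
      <button onClick={() => sendMessage({ text: "What's the weather like?" })}>
        Send
      </button>
    </div>
  );
}
```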
package/.docs/raw/reference/agents/streamVNext.mdx
@@ -8,15 +8,21 @@ import { Callout } from "nextra/components";
 # Agent.streamVNext() (Experimental)

 <Callout type="warning">
-  **Experimental Feature**: This is a new streaming implementation
+  **Experimental Feature**: This is a new streaming implementation with support for multiple output formats, including AI SDK v5 compatibility. It will replace the existing `stream()` method once battle-tested. The API may change as we refine the feature based on feedback.
 </Callout>

-The `.streamVNext()` method enables real-time streaming of responses from an agent with enhanced capabilities. This method accepts messages and optional streaming options, providing a next-generation streaming experience
+The `.streamVNext()` method enables real-time streaming of responses from an agent with enhanced capabilities and format flexibility. This method accepts messages and optional streaming options, providing a next-generation streaming experience with support for both Mastra's native format and AI SDK v5 compatibility.

 ## Usage example

 ```typescript copy
-
+// Default Mastra format
+const mastraStream = await agent.streamVNext("message for agent");
+
+// AI SDK v5 compatible format
+const aiSdkStream = await agent.streamVNext("message for agent", {
+  format: 'aisdk'
+});
 ```

 ## Parameters
@@ -30,7 +36,7 @@ await agent.streamVNext("message for agent");
     },
     {
       name: "options",
-      type: "AgentExecutionOptions<Output, StructuredOutput>",
+      type: "AgentExecutionOptions<Output, StructuredOutput, Format>",
       isOptional: true,
       description: "Optional configuration for the streaming process.",
     },
@@ -41,6 +47,13 @@ await agent.streamVNext("message for agent");

 <PropertiesTable
   content={[
+    {
+      name: "format",
+      type: "'mastra' | 'aisdk'",
+      isOptional: true,
+      defaultValue: "'mastra'",
+      description: "Determines the output stream format. Use 'mastra' for Mastra's native format (default) or 'aisdk' for AI SDK v5 compatibility.",
+    },
     {
       name: "abortSignal",
       type: "AbortSignal",
@@ -167,13 +180,6 @@ await agent.streamVNext("message for agent");
         }
       ]
     },
-    {
-      name: "maxSteps",
-      type: "number",
-      isOptional: true,
-      defaultValue: "5",
-      description: "Maximum number of execution steps allowed.",
-    },
     {
       name: "maxRetries",
       type: "number",
@@ -251,11 +257,77 @@ await agent.streamVNext("message for agent");
       ]
     },
     {
-      name: "
-      type: "
+      name: "modelSettings",
+      type: "CallSettings",
       isOptional: true,
       description:
-        "
+        "Model-specific settings like temperature, maxTokens, topP, etc. These are passed to the underlying language model.",
+      properties: [
+        {
+          parameters: [{
+            name: "temperature",
+            type: "number",
+            isOptional: true,
+            description: "Controls randomness in the model's output. Higher values (e.g., 0.8) make the output more random, lower values (e.g., 0.2) make it more focused and deterministic."
+          }]
+        },
+        {
+          parameters: [{
+            name: "maxTokens",
+            type: "number",
+            isOptional: true,
+            description: "Maximum number of tokens to generate."
+          }]
+        },
+        {
+          parameters: [{
+            name: "topP",
+            type: "number",
+            isOptional: true,
+            description: "Nucleus sampling. This is a number between 0 and 1. It is recommended to set either temperature or topP, but not both."
+          }]
+        },
+        {
+          parameters: [{
+            name: "topK",
+            type: "number",
+            isOptional: true,
+            description: "Only sample from the top K options for each subsequent token. Used to remove 'long tail' low probability responses."
+          }]
+        },
+        {
+          parameters: [{
+            name: "presencePenalty",
+            type: "number",
+            isOptional: true,
+            description: "Presence penalty setting. It affects the likelihood of the model to repeat information that is already in the prompt. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition)."
+          }]
+        },
+        {
+          parameters: [{
+            name: "frequencyPenalty",
+            type: "number",
+            isOptional: true,
+            description: "Frequency penalty setting. It affects the likelihood of the model to repeatedly use the same words or phrases. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition)."
+          }]
+        },
+        {
+          parameters: [{
+            name: "stopSequences",
+            type: "string[]",
+            isOptional: true,
+            description: "Stop sequences. If set, the model will stop generating text when one of the stop sequences is generated."
+          }]
+        },
+        {
+          parameters: [{
+            name: "seed",
+            type: "number",
+            isOptional: true,
+            description: "The seed (integer) to use for random sampling. If set and supported by the model, calls will generate deterministic results."
+          }]
+        }
+      ]
     },
     {
       name: "threadId",
@@ -380,64 +452,16 @@ await agent.streamVNext("message for agent");
       description: "Generate a unique ID for each message.",
     },
     {
-      name: "
-      type: "
+      name: "stopWhen",
+      type: "StopCondition | StopCondition[]",
       isOptional: true,
-      description: "
-    },
-    {
-      name: "topP",
-      type: "number",
-      isOptional: true,
-      description: "Nucleus sampling. This is a number between 0 and 1. It is recommended to set either `temperature` or `topP`, but not both.",
-    },
-    {
-      name: "topK",
-      type: "number",
-      isOptional: true,
-      description: "Only sample from the top K options for each subsequent token. Used to remove 'long tail' low probability responses.",
-    },
-    {
-      name: "presencePenalty",
-      type: "number",
-      isOptional: true,
-      description: "Presence penalty setting. It affects the likelihood of the model to repeat information that is already in the prompt. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).",
-    },
-    {
-      name: "frequencyPenalty",
-      type: "number",
-      isOptional: true,
-      description: "Frequency penalty setting. It affects the likelihood of the model to repeatedly use the same words or phrases. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).",
-    },
-    {
-      name: "stopSequences",
-      type: "string[]",
-      isOptional: true,
-      description: "Stop sequences. If set, the model will stop generating text when one of the stop sequences is generated.",
-    },
-    {
-      name: "seed",
-      type: "number",
-      isOptional: true,
-      description: "The seed (integer) to use for random sampling. If set and supported by the model, calls will generate deterministic results.",
+      description: "Condition(s) that determine when to stop the agent's execution. Can be a single condition or array of conditions.",
     },
     {
       name: "headers",
       type: "Record<string, string | undefined>",
       isOptional: true,
       description: "Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.",
-    },
-    {
-      name: "system",
-      type: "string",
-      isOptional: true,
-      description: "System message to include in the prompt. Can be used with `prompt` or `messages`.",
-    },
-    {
-      name: "prompt",
-      type: "string",
-      isOptional: true,
-      description: "A simple text prompt. You can either use `prompt` or `messages` but not both.",
     }
   ]}
 />
@@ -448,25 +472,78 @@ await agent.streamVNext("message for agent");
   content={[
     {
       name: "stream",
-      type: "
-      description: "
+      type: "MastraModelOutput<Output> | AISDKV5OutputStream<Output>",
+      description: "Returns a streaming interface based on the format parameter. When format is 'mastra' (default), returns MastraModelOutput. When format is 'aisdk', returns AISDKV5OutputStream for AI SDK v5 compatibility.",
     },
   ]}
 />

 ## Extended usage example

+### Mastra Format (Default)
+
+```typescript showLineNumbers copy
+import { stepCountIs } from 'ai-v5';
+
+const stream = await agent.streamVNext("Tell me a story", {
+  stopWhen: stepCountIs(3), // Stop after 3 steps
+  modelSettings: {
+    temperature: 0.7,
+  },
+});
+
+// Access text stream
+for await (const chunk of stream.textStream) {
+  console.log(chunk);
+}
+
+// Get full text after streaming
+const fullText = await stream.text;
+```
+
+### AI SDK v5 Format
+
+```typescript showLineNumbers copy
+import { stepCountIs } from 'ai-v5';
+
+const stream = await agent.streamVNext("Tell me a story", {
+  format: 'aisdk',
+  stopWhen: stepCountIs(3), // Stop after 3 steps
+  modelSettings: {
+    temperature: 0.7,
+  },
+});
+
+// Use with AI SDK v5 compatible interfaces
+for await (const part of stream.fullStream) {
+  if (part.type === 'text-delta') {
+    console.log(part.text);
+  }
+}
+
+// In an API route for frontend integration
+return stream.toUIMessageStreamResponse();
+```
+
+### Advanced Example with Options
+
 ```typescript showLineNumbers copy
 import { z } from "zod";
-import {
+import { stepCountIs } from 'ai-v5';

 await agent.streamVNext("message for agent", {
-
-
+  format: 'aisdk', // Enable AI SDK v5 compatibility
+  stopWhen: stepCountIs(3), // Stop after 3 steps
+  modelSettings: {
+    temperature: 0.7,
+    modelSettings: {
+      temperature: 0.7,
+    },
   memory: {
     thread: "user-123",
     resource: "test-app"
   },
+  },
   toolChoice: "auto",
   // Structured output with better DX
   structuredOutput: {
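Pulling the reference entries above together, the sketch below is illustrative rather than taken from the package: it combines the documented `format`, `stopWhen`, and several `modelSettings` fields, assumes a configured `agent`, and imports `stepCountIs` exactly as the examples in the diff do.

```typescript
import { stepCountIs } from 'ai-v5';

// Illustrative sketch: combines parameters documented in the reference diff above.
const stream = await agent.streamVNext("Draft a short release announcement", {
  format: 'aisdk',            // 'mastra' (default) | 'aisdk'
  stopWhen: [stepCountIs(5)], // a single condition or an array of conditions
  modelSettings: {
    temperature: 0.2,         // lower values give more deterministic output
    maxTokens: 512,           // cap on generated tokens
    stopSequences: ["\n##"],  // stop when a new markdown section starts
    seed: 42,                 // deterministic sampling where supported
  },
});

for await (const part of stream.fullStream) {
  if (part.type === 'text-delta') process.stdout.write(part.text);
}
```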
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mastra/mcp-docs-server",
-  "version": "0.13.13-alpha.0",
+  "version": "0.13.13",
   "description": "MCP server for accessing Mastra.ai documentation, changelogs, and news.",
   "type": "module",
   "main": "dist/index.js",
@@ -32,7 +32,7 @@
     "uuid": "^11.1.0",
     "zod": "^3.25.67",
     "zod-to-json-schema": "^3.24.5",
-    "@mastra/core": "0.14.1
+    "@mastra/core": "0.14.1",
     "@mastra/mcp": "^0.10.12"
   },
   "devDependencies": {
@@ -48,8 +48,8 @@
     "tsx": "^4.19.4",
     "typescript": "^5.8.3",
     "vitest": "^3.2.4",
-    "@
-    "@
+    "@mastra/core": "0.14.1",
+    "@internal/lint": "0.0.31"
   },
   "scripts": {
     "prepare-docs": "cross-env PREPARE=true node dist/prepare-docs/prepare.js",