@mastra/mcp-docs-server 0.13.20 → 0.13.21-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +8 -8
- package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +8 -0
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Fcloud.md +13 -13
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +41 -41
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +21 -21
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +22 -22
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +21 -21
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +6 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +17 -17
- package/.docs/organized/changelogs/create-mastra.md +5 -5
- package/.docs/organized/changelogs/mastra.md +21 -21
- package/.docs/organized/code-examples/agent.md +34 -9
- package/.docs/organized/code-examples/ai-sdk-v5.md +16 -14
- package/.docs/organized/code-examples/heads-up-game.md +3 -3
- package/.docs/raw/auth/clerk.mdx +142 -0
- package/.docs/raw/getting-started/installation.mdx +3 -1
- package/.docs/raw/getting-started/model-providers.mdx +31 -31
- package/.docs/raw/observability/ai-tracing.mdx +123 -24
- package/.docs/raw/reference/agents/ChunkType.mdx +857 -0
- package/.docs/raw/reference/agents/MastraModelOutput.mdx +321 -0
- package/.docs/raw/reference/agents/generateVNext.mdx +519 -0
- package/.docs/raw/reference/agents/streamVNext.mdx +6 -20
- package/.docs/raw/reference/auth/clerk.mdx +71 -0
- package/.docs/raw/reference/scorers/answer-similarity.mdx +179 -0
- package/.docs/raw/scorers/off-the-shelf-scorers.mdx +1 -0
- package/.docs/raw/server-db/production-server.mdx +27 -2
- package/.docs/raw/tools-mcp/mcp-overview.mdx +144 -159
- package/.docs/raw/workflows/control-flow.mdx +1 -1
- package/.docs/raw/workflows/suspend-and-resume.mdx +5 -0
- package/CHANGELOG.md +9 -0
- package/dist/stdio.js +11 -4
- package/dist/tools/docs.d.ts.map +1 -1
- package/package.json +4 -4
- /package/.docs/raw/reference/{templates.mdx → templates/overview.mdx} +0 -0
package/.docs/raw/reference/agents/MastraModelOutput.mdx
@@ -0,0 +1,321 @@
+---
+title: "Reference: MastraModelOutput | Agents | Mastra Docs"
+description: "Complete reference for MastraModelOutput - the stream object returned by agent.streamVNext() with streaming and promise-based access to model outputs."
+---
+
+import { Callout } from "nextra/components";
+import { PropertiesTable } from "@/components/properties-table";
+
+# MastraModelOutput
+
+<Callout type="warning">
+**Experimental API**: This type is part of the experimental [`streamVNext()`](/reference/agents/streamVNext) method. The API may change as we refine the feature based on feedback.
+</Callout>
+
+The `MastraModelOutput` class is returned by [`agent.streamVNext()`](/reference/agents/streamVNext) and provides both streaming and promise-based access to model outputs. It supports structured output generation, tool calls, reasoning, and comprehensive usage tracking.
+
+```typescript
+// MastraModelOutput is returned by agent.streamVNext()
+const stream = await agent.streamVNext("Hello world");
+```
+
+For setup and basic usage, see the [streamVNext() method documentation](/reference/agents/streamVNext).
+
+## Streaming Properties
+
+These properties provide real-time access to model outputs as they're generated:
+
+<PropertiesTable
+  content={[
+    {
+      name: "fullStream",
+      type: "ReadableStream<ChunkType<OUTPUT>>",
+      description: "Complete stream of all chunk types including text, tool calls, reasoning, metadata, and control chunks. Provides granular access to every aspect of the model's response.",
+      properties: [{
+        type: "ReadableStream",
+        parameters: [
+          { name: "ChunkType", type: "ChunkType<OUTPUT>", description: "All possible chunk types that can be emitted during streaming" }
+        ]
+      }]
+    },
+    {
+      name: "textStream",
+      type: "ReadableStream<string>",
+      description: "Stream of incremental text content only. Filters out all metadata, tool calls, and control chunks to provide just the text being generated."
+    },
+    {
+      name: "objectStream",
+      type: "ReadableStream<PartialSchemaOutput<OUTPUT>>",
+      description: "Stream of progressive structured object updates when using output schemas. Emits partial objects as they're built up, allowing real-time visualization of structured data generation.",
+      properties: [{
+        type: "ReadableStream",
+        parameters: [
+          { name: "PartialSchemaOutput", type: "PartialSchemaOutput<OUTPUT>", description: "Partially completed object matching the defined schema" }
+        ]
+      }]
+    },
+    {
+      name: "elementStream",
+      type: "ReadableStream<InferSchemaOutput<OUTPUT> extends (infer T)[] ? T : never>",
+      description: "Stream of individual array elements when the output schema defines an array type. Each element is emitted as it's completed rather than waiting for the entire array."
+    }
+  ]}
+/>
+
+## Promise-based Properties
+
+These properties resolve to final values after the stream completes:
+
+<PropertiesTable
+  content={[
+    {
+      name: "text",
+      type: "Promise<string>",
+      description: "The complete concatenated text response from the model. Resolves when text generation is finished."
+    },
+    {
+      name: "object",
+      type: "Promise<InferSchemaOutput<OUTPUT>>",
+      description: "The complete structured object response when using output schemas. Validated against the schema before resolving. Rejects if validation fails.",
+      properties: [{
+        type: "Promise",
+        parameters: [
+          { name: "InferSchemaOutput", type: "InferSchemaOutput<OUTPUT>", description: "Fully typed object matching the exact schema definition" }
+        ]
+      }]
+    },
+    {
+      name: "reasoning",
+      type: "Promise<string>",
+      description: "Complete reasoning text for models that support reasoning (like OpenAI's o1 series). Returns empty string for models without reasoning capability."
+    },
+    {
+      name: "reasoningText",
+      type: "Promise<string | undefined>",
+      description: "Alternative access to reasoning content. May be undefined for models that don't support reasoning, while 'reasoning' returns empty string."
+    },
+    {
+      name: "toolCalls",
+      type: "Promise<ToolCallChunk[]>",
+      description: "Array of all tool call chunks made during execution. Each chunk contains tool metadata and execution details.",
+      properties: [{
+        type: "ToolCallChunk",
+        parameters: [
+          { name: "type", type: "'tool-call'", description: "Chunk type identifier" },
+          { name: "runId", type: "string", description: "Execution run identifier" },
+          { name: "from", type: "ChunkFrom", description: "Source of the chunk (AGENT, WORKFLOW, etc.)" },
+          { name: "payload", type: "ToolCallPayload", description: "Tool call data including toolCallId, toolName, args, and execution details" }
+        ]
+      }]
+    },
+    {
+      name: "toolResults",
+      type: "Promise<ToolResultChunk[]>",
+      description: "Array of all tool result chunks corresponding to the tool calls. Contains execution results and error information.",
+      properties: [{
+        type: "ToolResultChunk",
+        parameters: [
+          { name: "type", type: "'tool-result'", description: "Chunk type identifier" },
+          { name: "runId", type: "string", description: "Execution run identifier" },
+          { name: "from", type: "ChunkFrom", description: "Source of the chunk (AGENT, WORKFLOW, etc.)" },
+          { name: "payload", type: "ToolResultPayload", description: "Tool result data including toolCallId, toolName, result, and error status" }
+        ]
+      }]
+    },
+    {
+      name: "usage",
+      type: "Promise<Record<string, number>>",
+      description: "Token usage statistics including input tokens, output tokens, total tokens, and reasoning tokens (for reasoning models).",
+      properties: [{
+        type: "Record",
+        parameters: [
+          { name: "inputTokens", type: "number", description: "Tokens consumed by the input prompt" },
+          { name: "outputTokens", type: "number", description: "Tokens generated in the response" },
+          { name: "totalTokens", type: "number", description: "Sum of input and output tokens" },
+          { name: "reasoningTokens", type: "number", isOptional: true, description: "Hidden reasoning tokens (for reasoning models)" }
+        ]
+      }]
+    },
+    {
+      name: "finishReason",
+      type: "Promise<string | undefined>",
+      description: "Reason why generation stopped (e.g., 'stop', 'length', 'tool_calls', 'content_filter'). Undefined if the stream hasn't finished.",
+      properties: [{
+        type: "enum",
+        parameters: [
+          { name: "stop", type: "'stop'", description: "Model finished naturally" },
+          { name: "length", type: "'length'", description: "Hit maximum token limit" },
+          { name: "tool_calls", type: "'tool_calls'", description: "Model called tools" },
+          { name: "content_filter", type: "'content_filter'", description: "Content was filtered" }
+        ]
+      }]
+    }
+  ]}
+/>
+
+## Error Properties
+
+<PropertiesTable
+  content={[
+    {
+      name: "error",
+      type: "string | Error | { message: string; stack: string; } | undefined",
+      description: "Error information if the stream encountered an error. Undefined if no errors occurred. Can be a string message, Error object, or serialized error with stack trace."
+    }
+  ]}
+/>
+
+## Methods
+
+<PropertiesTable
+  content={[
+    {
+      name: "getFullOutput",
+      type: "() => Promise<FullOutput>",
+      description: "Returns a comprehensive output object containing all results: text, structured object, tool calls, usage statistics, reasoning, and metadata. Convenient single method to access all stream results.",
+      properties: [{
+        type: "FullOutput",
+        parameters: [
+          { name: "text", type: "string", description: "Complete text response" },
+          { name: "object", type: "InferSchemaOutput<OUTPUT>", isOptional: true, description: "Structured output if schema was provided" },
+          { name: "toolCalls", type: "ToolCallChunk[]", description: "All tool call chunks made" },
+          { name: "toolResults", type: "ToolResultChunk[]", description: "All tool result chunks" },
+          { name: "usage", type: "Record<string, number>", description: "Token usage statistics" },
+          { name: "reasoning", type: "string", isOptional: true, description: "Reasoning text if available" },
+          { name: "finishReason", type: "string", isOptional: true, description: "Why generation finished" }
+        ]
+      }]
+    },
+    {
+      name: "consumeStream",
+      type: "(options?: ConsumeStreamOptions) => Promise<void>",
+      description: "Manually consume the entire stream without processing chunks. Useful when you only need the final promise-based results and want to trigger stream consumption.",
+      properties: [{
+        type: "ConsumeStreamOptions",
+        parameters: [
+          { name: "onError", type: "(error: Error) => void", isOptional: true, description: "Callback for handling stream errors" }
+        ]
+      }]
+    }
+  ]}
+/>
+
+## Usage Examples
+
+### Basic Text Streaming
+
+```typescript
+const stream = await agent.streamVNext("Write a haiku");
+
+// Stream text as it's generated
+for await (const text of stream.textStream) {
+  process.stdout.write(text);
+}
+
+// Or get the complete text
+const fullText = await stream.text;
+console.log(fullText);
+```
+
+### Structured Output Streaming
+
+```typescript
+const stream = await agent.streamVNext("Generate user data", {
+  output: z.object({
+    name: z.string(),
+    age: z.number(),
+    email: z.string()
+  })
+});
+
+// Stream partial objects
+for await (const partial of stream.objectStream) {
+  console.log("Progress:", partial); // { name: "John" }, { name: "John", age: 30 }, ...
+}
+
+// Get final validated object
+const user = await stream.object;
+console.log("Final:", user); // { name: "John", age: 30, email: "john@example.com" }
+```
+
+### Tool Calls and Results
+
+```typescript
+const stream = await agent.streamVNext("What's the weather in NYC?", {
+  tools: { weather: weatherTool }
+});
+
+// Monitor tool calls
+const toolCalls = await stream.toolCalls;
+const toolResults = await stream.toolResults;
+
+console.log("Tools called:", toolCalls);
+console.log("Results:", toolResults);
+```
+
+### Complete Output Access
+
+```typescript
+const stream = await agent.streamVNext("Analyze this data");
+
+const output = await stream.getFullOutput();
+console.log({
+  text: output.text,
+  usage: output.usage,
+  reasoning: output.reasoning,
+  finishReason: output.finishReason
+});
+```
+
+### Full Stream Processing
+
+```typescript
+const stream = await agent.streamVNext("Complex task");
+
+for await (const chunk of stream.fullStream) {
+  switch (chunk.type) {
+    case 'text-delta':
+      process.stdout.write(chunk.payload.text);
+      break;
+    case 'tool-call':
+      console.log(`Calling ${chunk.payload.toolName}...`);
+      break;
+    case 'reasoning-delta':
+      console.log(`Reasoning: ${chunk.payload.text}`);
+      break;
+    case 'finish':
+      console.log(`Done! Reason: ${chunk.payload.stepResult.reason}`);
+      break;
+  }
+}
+```
+
+### Error Handling
+
+```typescript
+const stream = await agent.streamVNext("Analyze this data");
+
+try {
+  // Option 1: Handle errors in consumeStream
+  await stream.consumeStream({
+    onError: (error) => {
+      console.error("Stream error:", error);
+    }
+  });
+
+  const result = await stream.text;
+} catch (error) {
+  console.error("Failed to get result:", error);
+}
+
+// Option 2: Check error property
+const result = await stream.getFullOutput();
+if (stream.error) {
+  console.error("Stream had errors:", stream.error);
+}
+```
+
+## Related Types
+
+- [ChunkType](/reference/agents/ChunkType) - All possible chunk types in the full stream
+- [Agent.streamVNext()](/reference/agents/streamVNext) - Method that returns MastraModelOutput