@mastra/mcp-docs-server 0.13.26 → 0.13.27
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +9 -9
- package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +13 -13
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +11 -0
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +23 -23
- package/.docs/organized/changelogs/%40mastra%2Fcloud.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +61 -61
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +24 -24
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +20 -20
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +20 -20
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +22 -22
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +45 -45
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Flance.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +65 -65
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Frag.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Freact.md +18 -0
- package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +12 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +27 -27
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +14 -14
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +10 -10
- package/.docs/organized/changelogs/create-mastra.md +47 -47
- package/.docs/organized/changelogs/mastra.md +63 -63
- package/.docs/organized/code-examples/agent.md +19 -14
- package/.docs/organized/code-examples/heads-up-game.md +5 -5
- package/.docs/raw/agents/agent-memory.mdx +86 -125
- package/.docs/raw/agents/input-processors.mdx +3 -3
- package/.docs/raw/agents/output-processors.mdx +4 -4
- package/.docs/raw/agents/overview.mdx +11 -52
- package/.docs/raw/agents/using-tools-and-mcp.mdx +34 -29
- package/.docs/raw/deployment/serverless-platforms/vercel-deployer.mdx +18 -0
- package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +4 -8
- package/.docs/raw/getting-started/installation.mdx +2 -3
- package/.docs/raw/getting-started/model-providers.mdx +28 -123
- package/.docs/raw/memory/semantic-recall.mdx +1 -1
- package/.docs/raw/reference/agents/agent.mdx +1 -1
- package/.docs/raw/reference/agents/generate.mdx +255 -218
- package/.docs/raw/reference/agents/generateLegacy.mdx +528 -0
- package/.docs/raw/reference/agents/getDefaultStreamOptions.mdx +3 -4
- package/.docs/raw/reference/agents/migration-guide.mdx +291 -0
- package/.docs/raw/reference/client-js/agents.mdx +14 -8
- package/.docs/raw/reference/deployer/vercel.mdx +26 -0
- package/.docs/raw/reference/streaming/ChunkType.mdx +4 -4
- package/.docs/raw/reference/streaming/agents/MastraModelOutput.mdx +13 -13
- package/.docs/raw/reference/streaming/agents/stream.mdx +351 -178
- package/.docs/raw/reference/streaming/agents/streamLegacy.mdx +515 -0
- package/.docs/raw/reference/{rag → vectors}/astra.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/chroma.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/couchbase.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/lance.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/libsql.mdx +2 -4
- package/.docs/raw/reference/{rag → vectors}/mongodb.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/opensearch.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/pg.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/pinecone.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/qdrant.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/s3vectors.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/turbopuffer.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/upstash.mdx +2 -2
- package/.docs/raw/reference/{rag → vectors}/vectorize.mdx +2 -2
- package/.docs/raw/streaming/events.mdx +4 -4
- package/.docs/raw/streaming/overview.mdx +9 -11
- package/.docs/raw/streaming/tool-streaming.mdx +3 -3
- package/.docs/raw/streaming/workflow-streaming.mdx +3 -3
- package/CHANGELOG.md +16 -0
- package/package.json +5 -5
- package/.docs/organized/code-examples/agent-network.md +0 -322
- package/.docs/raw/getting-started/model-capability.mdx +0 -11
- package/.docs/raw/reference/agents/generateVNext.mdx +0 -557
- package/.docs/raw/reference/agents/getDefaultVNextStreamOptions.mdx +0 -67
- package/.docs/raw/reference/streaming/agents/streamVNext.mdx +0 -673
package/.docs/raw/reference/streaming/agents/stream.mdx

@@ -1,18 +1,32 @@
  ---
  title: "Reference: Agent.stream() | Agents | Mastra Docs"
- description: "Documentation for the `Agent.stream()` method in Mastra agents, which enables real-time streaming of responses."
+ description: "Documentation for the `Agent.stream()` method in Mastra agents, which enables real-time streaming of responses with enhanced capabilities."
  ---

-
+ import { Callout } from 'nextra/components';

-
+ # Agent.stream()
+
+
+ The `.stream()` method enables real-time streaming of responses from an agent with enhanced capabilities and format flexibility. This method accepts messages and optional streaming options, providing a next-generation streaming experience with support for both Mastra's native format and AI SDK v5 compatibility.

  ## Usage example

- ```
-
+ ```ts filename="index.ts" copy
+ // Default Mastra format
+ const mastraStream = await agent.stream("message for agent");
+
+ // AI SDK v5 compatible format
+ const aiSdkStream = await agent.stream("message for agent", {
+   format: 'aisdk'
+ });
  ```

+ <Callout type="info">
+ **Model Compatibility**: This method is designed for V2 models. V1 models should use the [`.streamLegacy()`](./streamLegacy.mdx) method. The framework automatically detects your model version and will throw an error if there's a mismatch.
+ </Callout>
+
+
  ## Parameters

  <PropertiesTable
@@ -24,36 +38,187 @@ await agent.stream("message for agent");
  },
  {
  name: "options",
- type: "
+ type: "AgentExecutionOptions<Output, StructuredOutput, Format>",
  isOptional: true,
  description: "Optional configuration for the streaming process.",
  },
  ]}
  />

-
+
+
+ ### Options

  <PropertiesTable
  content={[
+ {
+ name: "format",
+ type: "'mastra' | 'aisdk'",
+ isOptional: true,
+ defaultValue: "'mastra'",
+ description: "Determines the output stream format. Use 'mastra' for Mastra's native format (default) or 'aisdk' for AI SDK v5 compatibility.",
+ },
+ {
+ name: "maxSteps",
+ type: "number",
+ isOptional: true,
+ description: "Maximum number of steps to run during execution.",
+ },
+ {
+ name: "scorers",
+ type: "MastraScorers | Record<string, { scorer: MastraScorer['name']; sampling?: ScoringSamplingConfig }>",
+ isOptional: true,
+ description: "Evaluation scorers to run on the execution results.",
+ properties: [
+ {
+ parameters: [{
+ name: "scorer",
+ type: "string",
+ isOptional: false,
+ description: "Name of the scorer to use."
+ }]
+ },
+ {
+ parameters: [{
+ name: "sampling",
+ type: "ScoringSamplingConfig",
+ isOptional: true,
+ description: "Sampling configuration for the scorer.",
+ properties: [
+ {
+ parameters: [{
+ name: "type",
+ type: "'none' | 'ratio'",
+ isOptional: false,
+ description: "Type of sampling strategy. Use 'none' to disable sampling or 'ratio' for percentage-based sampling."
+ }]
+ },
+ {
+ parameters: [{
+ name: "rate",
+ type: "number",
+ isOptional: true,
+ description: "Sampling rate (0-1). Required when type is 'ratio'."
+ }]
+ }
+ ]
+ }]
+ }
+ ]
+ },
+ {
+ name: "tracingContext",
+ type: "TracingContext",
+ isOptional: true,
+ description: "AI tracing context for span hierarchy and metadata.",
+ },
+ {
+ name: "returnScorerData",
+ type: "boolean",
+ isOptional: true,
+ description: "Whether to return detailed scoring data in the response.",
+ },
+ {
+ name: "onChunk",
+ type: "(chunk: ChunkType) => Promise<void> | void",
+ isOptional: true,
+ description: "Callback function called for each chunk during streaming.",
+ },
+ {
+ name: "onError",
+ type: "({ error }: { error: Error | string }) => Promise<void> | void",
+ isOptional: true,
+ description: "Callback function called when an error occurs during streaming.",
+ },
+ {
+ name: "onAbort",
+ type: "(event: any) => Promise<void> | void",
+ isOptional: true,
+ description: "Callback function called when the stream is aborted.",
+ },
  {
  name: "abortSignal",
  type: "AbortSignal",
  isOptional: true,
- description:
-
+ description: "Signal object that allows you to abort the agent's execution. When the signal is aborted, all ongoing operations will be terminated.",
+ },
+ {
+ name: "activeTools",
+ type: "Array<keyof ToolSet> | undefined",
+ isOptional: true,
+ description: "Array of active tool names that can be used during execution.",
+ },
+ {
+ name: "prepareStep",
+ type: "PrepareStepFunction<any>",
+ isOptional: true,
+ description: "Callback function called before each step of multi-step execution.",
  },
  {
  name: "context",
- type: "
+ type: "ModelMessage[]",
  isOptional: true,
  description: "Additional context messages to provide to the agent.",
  },
  {
- name: "
- type: "
+ name: "structuredOutput",
+ type: "StructuredOutputOptions<S extends ZodTypeAny = ZodTypeAny>",
  isOptional: true,
- description:
-
+ description: "Enables structured output generation with better developer experience. Automatically creates and uses a StructuredOutputProcessor internally.",
+ properties: [
+ {
+ parameters: [{
+ name: "schema",
+ type: "z.ZodSchema<S>",
+ isOptional: false,
+ description: "Zod schema to validate the output against."
+ }]
+ },
+ {
+ parameters: [{
+ name: "model",
+ type: "MastraLanguageModel",
+ isOptional: true,
+ description: "Model to use for the internal structuring agent. If not provided, falls back to the agent's model."
+ }]
+ },
+ {
+ parameters: [{
+ name: "errorStrategy",
+ type: "'strict' | 'warn' | 'fallback'",
+ isOptional: true,
+ description: "Strategy when parsing or validation fails. Defaults to 'strict'."
+ }]
+ },
+ {
+ parameters: [{
+ name: "fallbackValue",
+ type: "<S extends ZodTypeAny>",
+ isOptional: true,
+ description: "Fallback value when errorStrategy is 'fallback'."
+ }]
+ },
+ {
+ parameters: [{
+ name: "instructions",
+ type: "string",
+ isOptional: true,
+ description: "Custom instructions for the structuring agent."
+ }]
+ }
+ ]
+ },
+ {
+ name: "outputProcessors",
+ type: "Processor[]",
+ isOptional: true,
+ description: "Overrides the output processors set on the agent. Output processors that can modify or validate messages from the agent before they are returned to the user. Must implement either (or both) of the `processOutputResult` and `processOutputStream` functions.",
+ },
+ {
+ name: "inputProcessors",
+ type: "Processor[]",
+ isOptional: true,
+ description: "Overrides the input processors set on the agent. Input processors that can modify or validate messages before they are processed by the agent. Must implement the `processInput` function.",
  },
  {
  name: "instructions",
@@ -62,12 +227,18 @@ await agent.stream("message for agent");
  description:
  "Custom instructions that override the agent's default instructions for this specific generation. Useful for dynamically modifying agent behavior without creating a new agent instance.",
  },
+ {
+ name: "system",
+ type: "string | string[] | CoreSystemMessage | SystemModelMessage | CoreSystemMessage[] | SystemModelMessage[]",
+ isOptional: true,
+ description: "Custom system message(s) to include in the prompt. Can be a single string, message object, or array of either. System messages provide additional context or behavior instructions that supplement the agent's main instructions.",
+ },
  {
  name: "output",
  type: "Zod schema | JsonSchema7",
  isOptional: true,
  description:
- "Defines the expected structure of the output. Can be a JSON Schema object or a Zod schema.",
+ "**Deprecated.** Use structuredOutput with maxSteps:1 to achieve the same thing. Defines the expected structure of the output. Can be a JSON Schema object or a Zod schema.",
  },
  {
  name: "memory",
@@ -101,61 +272,6 @@ await agent.stream("message for agent");
  }
  ]
  },
- {
- name: "maxSteps",
- type: "number",
- isOptional: true,
- defaultValue: "5",
- description: "Maximum number of execution steps allowed.",
- },
- {
- name: "maxRetries",
- type: "number",
- isOptional: true,
- defaultValue: "2",
- description: "Maximum number of retries. Set to 0 to disable retries.",
- },
- {
- name: "memoryOptions",
- type: "MemoryConfig",
- isOptional: true,
- description:
- "**Deprecated.** Use `memory.options` instead. Configuration options for memory management.",
- properties: [
- {
- parameters: [{
- name: "lastMessages",
- type: "number | false",
- isOptional: true,
- description: "Number of recent messages to include in context, or false to disable."
- }]
- },
- {
- parameters: [{
- name: "semanticRecall",
- type: "boolean | { topK: number; messageRange: number | { before: number; after: number }; scope?: 'thread' | 'resource' }",
- isOptional: true,
- description: "Enable semantic recall to find relevant past messages. Can be a boolean or detailed configuration."
- }]
- },
- {
- parameters: [{
- name: "workingMemory",
- type: "WorkingMemory",
- isOptional: true,
- description: "Configuration for working memory functionality."
- }]
- },
- {
- parameters: [{
- name: "threads",
- type: "{ generateTitle?: boolean | { model: DynamicArgument<MastraLanguageModel>; instructions?: DynamicArgument<string> } }",
- isOptional: true,
- description: "Thread-specific configuration, including automatic title generation."
- }]
- }
- ]
- },
  {
  name: "onFinish",
  type: "StreamTextOnFinishCallback<any> | StreamObjectOnFinishCallback<OUTPUT>",
@@ -219,11 +335,69 @@ await agent.stream("message for agent");
  ]
  },
  {
- name: "
- type: "
+ name: "modelSettings",
+ type: "CallSettings",
  isOptional: true,
  description:
- "
+ "Model-specific settings like temperature, maxTokens, topP, etc. These are passed to the underlying language model.",
+ properties: [
+ {
+ parameters: [{
+ name: "temperature",
+ type: "number",
+ isOptional: true,
+ description: "Controls randomness in the model's output. Higher values (e.g., 0.8) make the output more random, lower values (e.g., 0.2) make it more focused and deterministic."
+ }]
+ },
+ {
+ parameters: [{
+ name: "maxRetries",
+ type: "number",
+ isOptional: true,
+ description: "Maximum number of retries for failed requests."
+ }]
+ },
+ {
+ parameters: [{
+ name: "topP",
+ type: "number",
+ isOptional: true,
+ description: "Nucleus sampling. This is a number between 0 and 1. It is recommended to set either temperature or topP, but not both."
+ }]
+ },
+ {
+ parameters: [{
+ name: "topK",
+ type: "number",
+ isOptional: true,
+ description: "Only sample from the top K options for each subsequent token. Used to remove 'long tail' low probability responses."
+ }]
+ },
+ {
+ parameters: [{
+ name: "presencePenalty",
+ type: "number",
+ isOptional: true,
+ description: "Presence penalty setting. It affects the likelihood of the model to repeat information that is already in the prompt. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition)."
+ }]
+ },
+ {
+ parameters: [{
+ name: "frequencyPenalty",
+ type: "number",
+ isOptional: true,
+ description: "Frequency penalty setting. It affects the likelihood of the model to repeatedly use the same words or phrases. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition)."
+ }]
+ },
+ {
+ parameters: [{
+ name: "stopSequences",
+ type: "string[]",
+ isOptional: true,
+ description: "Stop sequences. If set, the model will stop generating text when one of the stop sequences is generated."
+ }]
+ },
+ ]
  },
  {
  name: "threadId",
@@ -377,50 +551,8 @@ await agent.stream("message for agent");
  name: "maxTokens",
  type: "number",
  isOptional: true,
- description: "
- },
- {
- name: "topP",
- type: "number",
- isOptional: true,
- description: "Nucleus sampling. This is a number between 0 and 1. It is recommended to set either `temperature` or `topP`, but not both.",
- },
- {
- name: "topK",
- type: "number",
- isOptional: true,
- description: "Only sample from the top K options for each subsequent token. Used to remove 'long tail' low probability responses.",
- },
- {
- name: "presencePenalty",
- type: "number",
- isOptional: true,
- description: "Presence penalty setting. It affects the likelihood of the model to repeat information that is already in the prompt. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).",
- },
- {
- name: "frequencyPenalty",
- type: "number",
- isOptional: true,
- description: "Frequency penalty setting. It affects the likelihood of the model to repeatedly use the same words or phrases. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).",
- },
- {
- name: "stopSequences",
- type: "string[]",
- isOptional: true,
- description: "Stop sequences. If set, the model will stop generating text when one of the stop sequences is generated.",
- },
- {
- name: "seed",
- type: "number",
- isOptional: true,
- description: "The seed (integer) to use for random sampling. If set and supported by the model, calls will generate deterministic results.",
+ description: "Condition(s) that determine when to stop the agent's execution. Can be a single condition or array of conditions.",
  },
- {
- name: "headers",
- type: "Record<string, string | undefined>",
- isOptional: true,
- description: "Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.",
- }
  ]}
  />

@@ -429,64 +561,9 @@ await agent.stream("message for agent");
  <PropertiesTable
  content={[
  {
- name: "
- type: "
-
- description:
- "Async generator that yields text chunks as they become available.",
- },
- {
- name: "fullStream",
- type: "Promise<ReadableStream>",
- isOptional: true,
- description:
- "Promise that resolves to a ReadableStream for the complete response.",
- },
- {
- name: "text",
- type: "Promise<string>",
- isOptional: true,
- description:
- "Promise that resolves to the complete text response.",
- },
- {
- name: "usage",
- type: "Promise<{ totalTokens: number; promptTokens: number; completionTokens: number }>",
- isOptional: true,
- description:
- "Promise that resolves to token usage information.",
- },
- {
- name: "finishReason",
- type: "Promise<string>",
- isOptional: true,
- description:
- "Promise that resolves to the reason why the stream finished.",
- },
- {
- name: "toolCalls",
- type: "Promise<Array<ToolCall>>",
- isOptional: true,
- description:
- "Promise that resolves to the tool calls made during the streaming process.",
- properties: [
- {
- parameters: [{
- name: "toolName",
- type: "string",
- required: true,
- description: "The name of the tool invoked."
- }]
- },
- {
- parameters: [{
- name: "args",
- type: "any",
- required: true,
- description: "The arguments passed to the tool."
- }]
- }
- ]
+ name: "stream",
+ type: "MastraModelOutput<Output> | AISDKV5OutputStream<Output>",
+ description: "Returns a streaming interface based on the format parameter. When format is 'mastra' (default), returns MastraModelOutput. When format is 'aisdk', returns AISDKV5OutputStream for AI SDK v5 compatibility.",
  },
  {
  name: "traceId",
@@ -499,15 +576,111 @@ await agent.stream("message for agent");

  ## Extended usage example

-
+ ### Mastra Format (Default)
+
+ ```ts filename="index.ts" showLineNumbers copy
+ import { stepCountIs } from 'ai-v5';
+
+ const stream = await agent.stream("Tell me a story", {
+   stopWhen: stepCountIs(3), // Stop after 3 steps
+   modelSettings: {
+     temperature: 0.7,
+   },
+ });
+
+ // Access text stream
+ for await (const chunk of stream.textStream) {
+   console.log(chunk);
+ }
+
+ // Get full text after streaming
+ const fullText = await stream.text;
+ ```
+
+ ### AI SDK v5 Format
+
+ ```ts filename="index.ts" showLineNumbers copy
+ import { stepCountIs } from 'ai-v5';
+
+ const stream = await agent.stream("Tell me a story", {
+   format: 'aisdk',
+   stopWhen: stepCountIs(3), // Stop after 3 steps
+   modelSettings: {
+     temperature: 0.7,
+   },
+ });
+
+ // Use with AI SDK v5 compatible interfaces
+ for await (const part of stream.fullStream) {
+   if (part.type === 'text-delta') {
+     console.log(part.text);
+   }
+ }
+
+ // In an API route for frontend integration
+ return stream.toUIMessageStreamResponse();
+ ```
+
+ ### Using Callbacks
+
+ All callback functions are now available as top-level properties for a cleaner API experience.
+
+ ```ts filename="index.ts" showLineNumbers copy
+ const stream = await agent.stream("Tell me a story", {
+   onFinish: (result) => {
+     console.log('Streaming finished:', result);
+   },
+   onStepFinish: (step) => {
+     console.log('Step completed:', step);
+   },
+   onChunk: (chunk) => {
+     console.log('Received chunk:', chunk);
+   },
+   onError: ({ error }) => {
+     console.error('Streaming error:', error);
+   },
+   onAbort: (event) => {
+     console.log('Stream aborted:', event);
+   },
+ });
+
+ // Process the stream
+ for await (const chunk of stream.textStream) {
+   console.log(chunk);
+ }
+ ```
+
+ ### Advanced Example with Options
+
+ ```ts filename="index.ts" showLineNumbers copy
+ import { z } from "zod";
+ import { stepCountIs } from 'ai-v5';
+
  await agent.stream("message for agent", {
-
-
+   format: 'aisdk', // Enable AI SDK v5 compatibility
+   stopWhen: stepCountIs(3), // Stop after 3 steps
+   modelSettings: {
+     temperature: 0.7,
+   },
    memory: {
      thread: "user-123",
      resource: "test-app"
    },
-   toolChoice: "auto"
+   toolChoice: "auto",
+   // Structured output with better DX
+   structuredOutput: {
+     schema: z.object({
+       sentiment: z.enum(['positive', 'negative', 'neutral']),
+       confidence: z.number(),
+     }),
+     model: openai("gpt-4o-mini"),
+     errorStrategy: 'warn',
+   },
+   // Output processors for streaming response validation
+   outputProcessors: [
+     new ModerationProcessor({ model: openai("gpt-4.1-nano") }),
+     new BatchPartsProcessor({ maxBatchSize: 3, maxWaitTime: 100 }),
+   ],
  });
  ```

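Taken together, the updated reference replaces the legacy `maxSteps`, `memoryOptions`, and flat per-call model settings with `stopWhen`, `memory.options`, and `modelSettings`, and deprecates `output` in favor of `structuredOutput`. Below is a minimal end-to-end sketch of a call using the new options, assuming an agent defined via `@mastra/core/agent`, a V2 model from `@ai-sdk/openai` (per the Model Compatibility callout), and AI SDK v5 installed under the `ai-v5` alias used in the diff; the agent name, instructions, and prompt are illustrative, not taken from the diff.

```ts
// Illustrative sketch only — agent definition and model choice are assumptions.
import { Agent } from "@mastra/core/agent";
import { openai } from "@ai-sdk/openai";
import { stepCountIs } from "ai-v5";
import { z } from "zod";

const agent = new Agent({
  name: "release-notes-agent",
  instructions: "Summarize release notes clearly and briefly.",
  model: openai("gpt-4o-mini"), // assumed V2 model, see compatibility callout
});

const stream = await agent.stream("Summarize the 0.13.27 changes", {
  stopWhen: stepCountIs(3),            // replaces the removed maxSteps option
  modelSettings: { temperature: 0.7 }, // replaces the removed flat temperature/topP settings
  structuredOutput: {                  // replaces the deprecated output option
    schema: z.object({ summary: z.string() }),
    errorStrategy: "warn",
  },
});

for await (const chunk of stream.textStream) {
  process.stdout.write(chunk); // print text chunks as they arrive
}
```

The `format: 'aisdk'` switch and the `toUIMessageStreamResponse()` helper shown in the diff apply on top of this same call shape when AI SDK v5 interop is needed.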