@mastra/mcp-docs-server 0.13.22-alpha.3 → 0.13.22-alpha.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +10 -0
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +14 -14
- package/.docs/organized/changelogs/%40mastra%2Fcloud.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +13 -13
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +11 -11
- package/.docs/organized/changelogs/create-mastra.md +3 -3
- package/.docs/organized/changelogs/mastra.md +11 -11
- package/.docs/organized/code-examples/agent-network.md +2 -1
- package/.docs/organized/code-examples/quick-start.md +2 -1
- package/.docs/organized/code-examples/workflow-ai-recruiter.md +2 -1
- package/.docs/raw/course/04-workflows/08-running-workflows-programmatically.md +2 -2
- package/.docs/raw/reference/agents/network.mdx +258 -0
- package/.docs/raw/reference/storage/dynamodb.mdx +0 -4
- package/.docs/raw/reference/{agents → streaming}/ChunkType.mdx +5 -5
- package/.docs/raw/reference/{agents → streaming}/MastraModelOutput.mdx +10 -10
- package/.docs/raw/reference/tools/mcp-server.mdx +1 -1
- package/.docs/raw/server-db/snapshots.mdx +155 -114
- package/.docs/raw/workflows/inngest-workflow.mdx +82 -0
- package/CHANGELOG.md +8 -0
- package/package.json +4 -4
- /package/.docs/raw/reference/{agents → streaming}/stream.mdx +0 -0
- /package/.docs/raw/reference/{agents → streaming}/streamVNext.mdx +0 -0

package/.docs/raw/reference/agents/network.mdx
@@ -0,0 +1,258 @@
+---
+title: "Reference: Agent.network() (Experimental) | Agents | Mastra Docs"
+description: "Documentation for the `Agent.network()` method in Mastra agents, which enables multi-agent collaboration and routing."
+---
+
+import { NetworkCallout } from "@/components/network-callout.tsx"
+
+# Agent.network()
+
+<NetworkCallout />
+
+The `.network()` method enables multi-agent collaboration and routing. This method accepts messages and optional execution options.
+
+## Usage example
+
+```typescript copy
+import { Agent } from '@mastra/core/agent';
+import { openai } from '@ai-sdk/openai';
+import { agent1, agent2 } from './agents';
+import { workflow1 } from './workflows';
+import { tool1, tool2 } from './tools';
+
+const agent = new Agent({
+  name: 'network-agent',
+  instructions: 'You are a network agent that can help users with a variety of tasks.',
+  model: openai('gpt-4o'),
+  agents: {
+    agent1,
+    agent2,
+  },
+  workflows: {
+    workflow1,
+  },
+  tools: {
+    tool1,
+    tool2,
+  },
+})
+
+await agent.network(`
+  Find me the weather in Tokyo.
+  Based on the weather, plan an activity for me.
+`);
+```
+
+## Parameters
+
+<PropertiesTable
+  content={[
+    {
+      name: "messages",
+      type: "string | string[] | CoreMessage[] | AiMessageType[] | UIMessageWithMetadata[]",
+      description: "The messages to send to the agent. Can be a single string, array of strings, or structured message objects.",
+    },
+    {
+      name: "options",
+      type: "MultiPrimitiveExecutionOptions",
+      isOptional: true,
+      description: "Optional configuration for the network process.",
+    },
+  ]}
+/>
+
+### Options
+
+<PropertiesTable
+  content={[
+    {
+      name: "maxSteps",
+      type: "number",
+      isOptional: true,
+      description: "Maximum number of steps to run during execution.",
+    },
+    {
+      name: "memory",
+      type: "object",
+      isOptional: true,
+      description: "Configuration for memory. This is the preferred way to manage memory.",
+      properties: [
+        {
+          parameters: [{
+            name: "thread",
+            type: "string | { id: string; metadata?: Record<string, any>, title?: string }",
+            isOptional: false,
+            description: "The conversation thread, as a string ID or an object with an `id` and optional `metadata`."
+          }]
+        },
+        {
+          parameters: [{
+            name: "resource",
+            type: "string",
+            isOptional: false,
+            description: "Identifier for the user or resource associated with the thread."
+          }]
+        },
+        {
+          parameters: [{
+            name: "options",
+            type: "MemoryConfig",
+            isOptional: true,
+            description: "Configuration for memory behavior, like message history and semantic recall."
+          }]
+        }
+      ]
+    },
+    {
+      name: "tracingContext",
+      type: "TracingContext",
+      isOptional: true,
+      description: "AI tracing context for span hierarchy and metadata.",
+    },
+    {
+      name: "telemetry",
+      type: "TelemetrySettings",
+      isOptional: true,
+      description:
+        "Settings for telemetry collection during streaming.",
+      properties: [
+        {
+          parameters: [{
+            name: "isEnabled",
+            type: "boolean",
+            isOptional: true,
+            description: "Enable or disable telemetry. Disabled by default while experimental."
+          }]
+        },
+        {
+          parameters: [{
+            name: "recordInputs",
+            type: "boolean",
+            isOptional: true,
+            description: "Enable or disable input recording. Enabled by default. You might want to disable input recording to avoid recording sensitive information."
+          }]
+        },
+        {
+          parameters: [{
+            name: "recordOutputs",
+            type: "boolean",
+            isOptional: true,
+            description: "Enable or disable output recording. Enabled by default. You might want to disable output recording to avoid recording sensitive information."
+          }]
+        },
+        {
+          parameters: [{
+            name: "functionId",
+            type: "string",
+            isOptional: true,
+            description: "Identifier for this function. Used to group telemetry data by function."
+          }]
+        }
+      ]
+    },
+    {
+      name: "modelSettings",
+      type: "CallSettings",
+      isOptional: true,
+      description:
+        "Model-specific settings like temperature, maxTokens, topP, etc. These are passed to the underlying language model.",
+      properties: [
+        {
+          parameters: [{
+            name: "temperature",
+            type: "number",
+            isOptional: true,
+            description: "Controls randomness in the model's output. Higher values (e.g., 0.8) make the output more random, lower values (e.g., 0.2) make it more focused and deterministic."
+          }]
+        },
+        {
+          parameters: [{
+            name: "maxRetries",
+            type: "number",
+            isOptional: true,
+            description: "Maximum number of retries for failed requests."
+          }]
+        },
+        {
+          parameters: [{
+            name: "topP",
+            type: "number",
+            isOptional: true,
+            description: "Nucleus sampling. This is a number between 0 and 1. It is recommended to set either temperature or topP, but not both."
+          }]
+        },
+        {
+          parameters: [{
+            name: "topK",
+            type: "number",
+            isOptional: true,
+            description: "Only sample from the top K options for each subsequent token. Used to remove 'long tail' low probability responses."
+          }]
+        },
+        {
+          parameters: [{
+            name: "presencePenalty",
+            type: "number",
+            isOptional: true,
+            description: "Presence penalty setting. It affects the likelihood of the model to repeat information that is already in the prompt. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition)."
+          }]
+        },
+        {
+          parameters: [{
+            name: "frequencyPenalty",
+            type: "number",
+            isOptional: true,
+            description: "Frequency penalty setting. It affects the likelihood of the model to repeatedly use the same words or phrases. A number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition)."
+          }]
+        },
+        {
+          parameters: [{
+            name: "stopSequences",
+            type: "string[]",
+            isOptional: true,
+            description: "Stop sequences. If set, the model will stop generating text when one of the stop sequences is generated."
+          }]
+        },
+      ]
+    },
+    {
+      name: "runId",
+      type: "string",
+      isOptional: true,
+      description: "Unique ID for this generation run. Useful for tracking and debugging purposes.",
+    },
+    {
+      name: "runtimeContext",
+      type: "RuntimeContext",
+      isOptional: true,
+      description: "Runtime context for dependency injection and contextual information.",
+    },
+  ]}
+/>
+
+## Returns
+
+<PropertiesTable
+  content={[
+    {
+      name: "stream",
+      type: "MastraAgentNetworkStream<NetworkChunkType>",
+      description: "A custom stream that extends ReadableStream<NetworkChunkType> with additional network-specific properties",
+    },
+    {
+      name: "status",
+      type: "Promise<RunStatus>",
+      description: "A promise that resolves to the current workflow run status",
+    },
+    {
+      name: "result",
+      type: "Promise<WorkflowResult<TOutput, TSteps>>",
+      description: "A promise that resolves to the final workflow result",
+    },
+    {
+      name: "usage",
+      type: "Promise<{ promptTokens: number; completionTokens: number; totalTokens: number }>",
+      description: "A promise that resolves to token usage statistics",
+    },
+  ]}
+/>
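
For orientation, a minimal sketch of consuming the return value documented in the `Returns` table above might look like the following. Only the property names (`stream`, `status`, `result`, `usage`) come from the table; the `run` variable and the exact runtime shape are illustrative assumptions, not part of the published docs.

```typescript
// Hypothetical continuation of the usage example above.
const run = await agent.network(`
  Find me the weather in Tokyo.
  Based on the weather, plan an activity for me.
`);

// `stream` extends ReadableStream<NetworkChunkType>, so it can be read chunk by chunk.
for await (const chunk of run.stream) {
  console.log(chunk.type);
}

// The remaining properties are promises that settle once the run finishes.
console.log(await run.status);
console.log(await run.result);
console.log(await run.usage); // { promptTokens, completionTokens, totalTokens }
```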

package/.docs/raw/reference/storage/dynamodb.mdx
@@ -194,7 +194,3 @@ This implementation uses a single-table design pattern with ElectroDB, which off
 3. **Simplified administration:** Fewer distinct tables to monitor, back up, and manage.
 4. **Reduced complexity in access patterns:** ElectroDB helps manage the complexity of item types and access patterns on a single table.
 5. **Transaction support:** DynamoDB transactions can be used across different "entity" types stored within the same table if needed.
-
-## License
-
-This package is distributed under the MIT License. See [LICENSE.md](https://github.com/mastra-ai/mastra/blob/main/LICENSE.md) for more information.

package/.docs/raw/reference/{agents → streaming}/ChunkType.mdx
@@ -1,15 +1,15 @@
 ---
-title: "Reference: ChunkType | Agents | Mastra Docs"
+title: "Reference: ChunkType (Experimental) | Agents | Mastra Docs"
 description: "Documentation for the ChunkType type used in Mastra streaming responses, defining all possible chunk types and their payloads."
 ---
 
 import { Callout } from "nextra/components";
 import { PropertiesTable } from "@/components/properties-table";
 
-# ChunkType
+# ChunkType (Experimental)
 
-<Callout type="
-
+<Callout type="important">
+  <strong className="block">Experimental API: </strong>This type is part of the experimental `.streamVNext()` method. The API may change as we refine the feature based on feedback.
 </Callout>
 
 The `ChunkType` type defines the mastra format of stream chunks that can be emitted during streaming responses from agents.

@@ -854,4 +854,4 @@ for await (const chunk of stream.fullStream) {
 ## Related Types
 
 - [MastraModelOutput](./MastraModelOutput.mdx) - The stream object that emits these chunks
-- [
+- [.streamVNext()](./streamVNext.mdx) - Method that returns streams emitting these chunks
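
As context for the `ChunkType` changes above, a short sketch of consuming chunks from `stream.fullStream` (the loop the hunk header references) might look like this. The `'tool-result'` type appears elsewhere in this diff; `'text-delta'` and the `payload` field names are assumptions and may differ between versions.

```typescript
const stream = await agent.streamVNext("What's the weather in Tokyo?");

for await (const chunk of stream.fullStream) {
  switch (chunk.type) {
    case "text-delta":
      // Assumed payload shape; see ChunkType.mdx for the exact fields.
      process.stdout.write(chunk.payload?.text ?? "");
      break;
    case "tool-result":
      console.log("tool result:", chunk.payload);
      break;
    default:
      // Other ChunkType variants (reasoning, errors, finish, etc.) land here.
      break;
  }
}
```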

package/.docs/raw/reference/{agents → streaming}/MastraModelOutput.mdx
@@ -1,25 +1,25 @@
 ---
-title: "Reference: MastraModelOutput | Agents | Mastra Docs"
+title: "Reference: MastraModelOutput (Experimental) | Agents | Mastra Docs"
 description: "Complete reference for MastraModelOutput - the stream object returned by agent.streamVNext() with streaming and promise-based access to model outputs."
 ---
 
 import { Callout } from "nextra/components";
 import { PropertiesTable } from "@/components/properties-table";
 
-# MastraModelOutput
+# MastraModelOutput (Experimental)
 
-<Callout type="
-
+<Callout type="important">
+  <strong className="block">Experimental API: </strong>This type is part of the experimental `.streamVNext()` method. The API may change as we refine the feature based on feedback.
 </Callout>
 
-The `MastraModelOutput` class is returned by [
+The `MastraModelOutput` class is returned by [.streamVNext()](./streamVNext.mdx) and provides both streaming and promise-based access to model outputs. It supports structured output generation, tool calls, reasoning, and comprehensive usage tracking.
 
 ```typescript
 // MastraModelOutput is returned by agent.streamVNext()
 const stream = await agent.streamVNext("Hello world");
 ```
 
-For setup and basic usage, see the [streamVNext() method documentation
+For setup and basic usage, see the [.streamVNext()](./streamVNext.mdx) method documentation.
 
 ## Streaming Properties
 
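
To illustrate the "promise-based access" the updated intro describes, a minimal sketch could look like the following; it reuses the `agent.streamVNext()` call from the snippet above and the `stream.text` accessor and error message that appear later in this file, and assumes `agent` is an existing Agent instance.

```typescript
try {
  const stream = await agent.streamVNext("Hello world");

  // Promise-based access: `text` resolves once streaming has completed.
  const text = await stream.text;
  console.log(text);
} catch (error) {
  console.error("Failed to get result:", error);
}
```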

@@ -113,7 +113,7 @@ These properties resolve to final values after the stream completes:
       type: "Promise<ToolResultChunk[]>",
       description: "Array of all tool result chunks corresponding to the tool calls. Contains execution results and error information.",
       properties: [{
-      type: "ToolResultChunk",
+        type: "ToolResultChunk",
         parameters: [
           { name: "type", type: "'tool-result'", description: "Chunk type identifier" },
           { name: "runId", type: "string", description: "Execution run identifier" },

@@ -302,7 +302,7 @@ try {
       console.error("Stream error:", error);
     }
   });
-
+
   const result = await stream.text;
 } catch (error) {
   console.error("Failed to get result:", error);

@@ -317,5 +317,5 @@ if (stream.error) {
 
 ## Related Types
 
-- [ChunkType](
-- [
+- [ChunkType](./ChunkType.mdx) - All possible chunk types in the full stream
+- [.streamVNext()](./streamVNext.mdx) - Method that returns MastraModelOutput

package/.docs/raw/reference/tools/mcp-server.mdx
@@ -86,7 +86,7 @@ The constructor accepts an `MCPServerConfig` object with the following propertie
       type: "Record<string, Workflow>",
       isOptional: true,
       description:
-        "An object where keys are workflow identifiers and values are Mastra Workflow instances. Each workflow is converted into a tool named `run_<workflowKey>`. The workflow's `inputSchema` becomes the tool's input schema. The workflow **must** have a non-empty `description` string property, which is used for the tool's description. If a workflow's description is missing or empty, an error will be thrown. The tool executes the workflow by calling `workflow.
+        "An object where keys are workflow identifiers and values are Mastra Workflow instances. Each workflow is converted into a tool named `run_<workflowKey>`. The workflow's `inputSchema` becomes the tool's input schema. The workflow **must** have a non-empty `description` string property, which is used for the tool's description. If a workflow's description is missing or empty, an error will be thrown. The tool executes the workflow by calling `workflow.createRunAsync()` followed by `run.start({ inputData: <tool_input> })`. If a tool name derived from an agent or workflow (e.g., `ask_myAgent` or `run_myWorkflow`) collides with an explicitly defined tool name or another derived name, the explicitly defined tool takes precedence, and a warning is logged. Agents/workflows leading to subsequent collisions are skipped.",
       },
       {
         name: "id",