@mastra/memory 1.0.0-beta.1 → 1.0.0-beta.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +355 -0
- package/dist/_types/@internal_ai-sdk-v4/dist/index.d.ts +7549 -0
- package/dist/chunk-DGUM43GV.js +10 -0
- package/dist/chunk-DGUM43GV.js.map +1 -0
- package/dist/chunk-JEQ2X3Z6.cjs +12 -0
- package/dist/chunk-JEQ2X3Z6.cjs.map +1 -0
- package/dist/chunk-KMQS2YEC.js +79 -0
- package/dist/chunk-KMQS2YEC.js.map +1 -0
- package/dist/chunk-MMUHFOCG.js +79 -0
- package/dist/chunk-MMUHFOCG.js.map +1 -0
- package/dist/chunk-QY6BZOPJ.js +250 -0
- package/dist/chunk-QY6BZOPJ.js.map +1 -0
- package/dist/chunk-SG3GRV3O.cjs +84 -0
- package/dist/chunk-SG3GRV3O.cjs.map +1 -0
- package/dist/chunk-W72AYUIF.cjs +252 -0
- package/dist/chunk-W72AYUIF.cjs.map +1 -0
- package/dist/chunk-WC4XBMZT.js +250 -0
- package/dist/chunk-WC4XBMZT.js.map +1 -0
- package/dist/chunk-YMNW6DEN.cjs +252 -0
- package/dist/chunk-YMNW6DEN.cjs.map +1 -0
- package/dist/chunk-ZUQPUTTO.cjs +84 -0
- package/dist/chunk-ZUQPUTTO.cjs.map +1 -0
- package/dist/docs/README.md +36 -0
- package/dist/docs/SKILL.md +42 -0
- package/dist/docs/SOURCE_MAP.json +31 -0
- package/dist/docs/agents/01-agent-memory.md +160 -0
- package/dist/docs/agents/02-networks.md +236 -0
- package/dist/docs/agents/03-agent-approval.md +317 -0
- package/dist/docs/core/01-reference.md +114 -0
- package/dist/docs/memory/01-overview.md +76 -0
- package/dist/docs/memory/02-storage.md +181 -0
- package/dist/docs/memory/03-working-memory.md +386 -0
- package/dist/docs/memory/04-semantic-recall.md +235 -0
- package/dist/docs/memory/05-memory-processors.md +319 -0
- package/dist/docs/memory/06-reference.md +617 -0
- package/dist/docs/processors/01-reference.md +81 -0
- package/dist/docs/storage/01-reference.md +972 -0
- package/dist/docs/vectors/01-reference.md +929 -0
- package/dist/index.cjs +14845 -115
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +145 -5
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +14807 -119
- package/dist/index.js.map +1 -1
- package/dist/token-6GSAFR2W-JV3TZR4M.cjs +63 -0
- package/dist/token-6GSAFR2W-JV3TZR4M.cjs.map +1 -0
- package/dist/token-6GSAFR2W-K2BTU23I.js +61 -0
- package/dist/token-6GSAFR2W-K2BTU23I.js.map +1 -0
- package/dist/token-6GSAFR2W-VLY2XUPA.js +61 -0
- package/dist/token-6GSAFR2W-VLY2XUPA.js.map +1 -0
- package/dist/token-6GSAFR2W-YCB5SK2Z.cjs +63 -0
- package/dist/token-6GSAFR2W-YCB5SK2Z.cjs.map +1 -0
- package/dist/token-util-NEHG7TUY-7IL6JUVY.cjs +10 -0
- package/dist/token-util-NEHG7TUY-7IL6JUVY.cjs.map +1 -0
- package/dist/token-util-NEHG7TUY-HF7KBP2H.cjs +10 -0
- package/dist/token-util-NEHG7TUY-HF7KBP2H.cjs.map +1 -0
- package/dist/token-util-NEHG7TUY-KSXDO2NO.js +8 -0
- package/dist/token-util-NEHG7TUY-KSXDO2NO.js.map +1 -0
- package/dist/token-util-NEHG7TUY-TIJ3LMSH.js +8 -0
- package/dist/token-util-NEHG7TUY-TIJ3LMSH.js.map +1 -0
- package/dist/tools/working-memory.d.ts +10 -2
- package/dist/tools/working-memory.d.ts.map +1 -1
- package/package.json +19 -25
- package/dist/processors/index.cjs +0 -165
- package/dist/processors/index.cjs.map +0 -1
- package/dist/processors/index.d.ts +0 -3
- package/dist/processors/index.d.ts.map +0 -1
- package/dist/processors/index.js +0 -158
- package/dist/processors/index.js.map +0 -1
- package/dist/processors/token-limiter.d.ts +0 -32
- package/dist/processors/token-limiter.d.ts.map +0 -1
- package/dist/processors/tool-call-filter.d.ts +0 -20
- package/dist/processors/tool-call-filter.d.ts.map +0 -1
package/dist/docs/agents/02-networks.md
@@ -0,0 +1,236 @@
> Learn how to coordinate multiple agents, workflows, and tools using agent networks for complex, non-deterministic task execution.

# Agent Networks

Agent networks in Mastra coordinate multiple agents, workflows, and tools to handle tasks that aren't clearly defined upfront but can be inferred from the user's message or context. A top-level **routing agent** (a Mastra agent with other agents, workflows, and tools configured) uses an LLM to interpret the request and decide which primitives (sub-agents, workflows, or tools) to call, in what order, and with what data.

## When to use networks

Use networks for complex tasks that require coordination across multiple primitives. Unlike workflows, which follow a predefined sequence, networks rely on LLM reasoning to interpret the request and decide what to run.

## Core principles

Mastra agent networks operate using these principles:

- Memory is required when using `.network()` and is used to store task history and determine when a task is complete.
- Primitives are selected based on their descriptions. Clear, specific descriptions improve routing. For workflows and tools, the input schema helps determine the right inputs at runtime.
- If multiple primitives have overlapping functionality, the agent favors the more specific one, using a combination of schema and descriptions to decide which to run.

## Creating an agent network

An agent network is built around a top-level routing agent that delegates tasks to agents, workflows, and tools defined in its configuration. Memory is configured on the routing agent using the `memory` option, and `instructions` define the agent's routing behavior.

```typescript {22-23,26,29} title="src/mastra/agents/routing-agent.ts"
import { Agent } from "@mastra/core/agent";
import { Memory } from "@mastra/memory";
import { LibSQLStore } from "@mastra/libsql";

import { researchAgent } from "./research-agent";
import { writingAgent } from "./writing-agent";

import { cityWorkflow } from "../workflows/city-workflow";
import { weatherTool } from "../tools/weather-tool";

export const routingAgent = new Agent({
  id: "routing-agent",
  name: "Routing Agent",
  instructions: `
    You are a network of writers and researchers.
    The user will ask you to research a topic.
    Always respond with a complete report—no bullet points.
    Write in full paragraphs, like a blog post.
    Do not answer with incomplete or uncertain information.`,
  model: "openai/gpt-5.1",
  agents: {
    researchAgent,
    writingAgent,
  },
  workflows: {
    cityWorkflow,
  },
  tools: {
    weatherTool,
  },
  memory: new Memory({
    storage: new LibSQLStore({
      id: "mastra-storage",
      url: "file:../mastra.db",
    }),
  }),
});
```

### Writing descriptions for network primitives

When configuring a Mastra agent network, each primitive (agent, workflow, or tool) needs a clear description to help the routing agent decide which to use. The routing agent uses each primitive's description and schema to determine what it does and how to use it. Clear descriptions and well-defined input and output schemas improve routing accuracy.

#### Agent descriptions

Each agent in a network should include a clear `description` that explains what the agent does.

```typescript title="src/mastra/agents/research-agent.ts"
export const researchAgent = new Agent({
  id: "research-agent",
  name: "Research Agent",
  description: `This agent gathers concise research insights in bullet-point form.
    It's designed to extract key facts without generating full
    responses or narrative content.`,
});
```

```typescript title="src/mastra/agents/writing-agent.ts"
export const writingAgent = new Agent({
  id: "writing-agent",
  name: "Writing Agent",
  description: `This agent turns researched material into well-structured
    written content. It produces full-paragraph reports with no bullet points,
    suitable for use in articles, summaries, or blog posts.`,
});
```

#### Workflow descriptions

Workflows in a network should include a `description` to explain their purpose, along with `inputSchema` and `outputSchema` to describe the expected data.

```typescript title="src/mastra/workflows/city-workflow.ts"
export const cityWorkflow = createWorkflow({
  id: "city-workflow",
  description: `This workflow handles city-specific research tasks.
    It first gathers factual information about the city, then synthesizes
    that research into a full written report. Use it when the user input
    includes a city to be researched.`,
  inputSchema: z.object({
    city: z.string(),
  }),
  outputSchema: z.object({
    text: z.string(),
  }),
});
```

#### Tool descriptions

Tools in a network should include a `description` to explain their purpose, along with `inputSchema` and `outputSchema` to describe the expected data.

```typescript title="src/mastra/tools/weather-tool.ts"
export const weatherTool = createTool({
  id: "weather-tool",
  description: `Retrieves current weather information using the wttr.in API.
    Accepts a city or location name as input and returns a short weather summary.
    Use this tool whenever up-to-date weather data is requested.
  `,
  inputSchema: z.object({
    location: z.string(),
  }),
  outputSchema: z.object({
    weather: z.string(),
  }),
});
```

## Calling agent networks

Call a Mastra agent network using `.network()` with a user message. The method returns a stream of events that you can iterate over to track execution progress and retrieve the final result.

### Agent example

In this example, the network interprets the message and routes the request to both the `researchAgent` and `writingAgent` to generate a complete response.

```typescript
const result = await routingAgent.network(
  "Tell me three cool ways to use Mastra",
);

for await (const chunk of result) {
  console.log(chunk.type);
  if (chunk.type === "network-execution-event-step-finish") {
    console.log(chunk.payload.result);
  }
}
```

#### Agent output

The following `chunk.type` events are emitted during this request:

```text
routing-agent-start
routing-agent-end
agent-execution-start
agent-execution-event-start
agent-execution-event-step-start
agent-execution-event-text-start
agent-execution-event-text-delta
agent-execution-event-text-end
agent-execution-event-step-finish
agent-execution-event-finish
agent-execution-end
network-execution-event-step-finish
```
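
The example above logs each `chunk.type` as it arrives. To also keep the final output around after the loop, a small variation of the same pattern works. This is a minimal sketch, assuming (as in the trace above) that the completed result arrives in the payload of the `network-execution-event-step-finish` chunk:

```typescript
// Sketch: capture the final network result while still logging progress.
// Assumes the last "network-execution-event-step-finish" chunk carries the
// completed result, as shown in the example trace above.
const result = await routingAgent.network("Tell me three cool ways to use Mastra");

let finalResult: unknown;
for await (const chunk of result) {
  console.log(chunk.type);
  if (chunk.type === "network-execution-event-step-finish") {
    finalResult = chunk.payload.result;
  }
}

console.log("Final result:", finalResult);
```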

### Workflow example

In this example, the routing agent recognizes the city name in the message and runs the `cityWorkflow`. The workflow defines steps that call the `researchAgent` to gather facts, then the `writingAgent` to generate the final text.

```typescript
const result = await routingAgent.network(
  "Tell me some historical facts about London",
);

for await (const chunk of result) {
  console.log(chunk.type);
  if (chunk.type === "network-execution-event-step-finish") {
    console.log(chunk.payload.result);
  }
}
```

#### Workflow output

The following `chunk.type` events are emitted during this request:

```text
routing-agent-end
workflow-execution-start
workflow-execution-event-workflow-start
workflow-execution-event-workflow-step-start
workflow-execution-event-workflow-step-result
workflow-execution-event-workflow-finish
workflow-execution-end
routing-agent-start
network-execution-event-step-finish
```

### Tool example

In this example, the routing agent skips the `researchAgent`, `writingAgent`, and `cityWorkflow`, and calls the `weatherTool` directly to complete the task.

```typescript
const result = await routingAgent.network("What's the weather in London?");

for await (const chunk of result) {
  console.log(chunk.type);
  if (chunk.type === "network-execution-event-step-finish") {
    console.log(chunk.payload.result);
  }
}
```

#### Tool output

The following `chunk.type` events are emitted during this request:

```text
routing-agent-start
routing-agent-end
tool-execution-start
tool-execution-end
network-execution-event-step-finish
```

## Related

- [Agent Memory](./agent-memory)
- [Workflows Overview](../workflows/overview)
- [Request Context](https://mastra.ai/docs/v1/server/request-context)
- [Supervisor example](https://github.com/mastra-ai/mastra/tree/main/examples/supervisor-agent)
package/dist/docs/agents/03-agent-approval.md
@@ -0,0 +1,317 @@
> Learn how to require approvals, suspend tool execution, and automatically resume suspended tools while keeping humans in control of agent workflows.

# Agent Approval

Agents sometimes require the same [human-in-the-loop](https://mastra.ai/docs/v1/workflows/human-in-the-loop) oversight used in workflows when calling tools that handle sensitive operations, like deleting resources or running long processes. With agent approval you can suspend a tool call and provide feedback to the user, or approve or decline a tool call based on conditions specific to your application.

## Tool call approval

Tool call approval can be enabled at the agent level, where it applies to every tool the agent uses, or at the tool level, which provides more granular control over individual tool calls.

### Storage

Agent approval uses a snapshot to capture the state of the request. Ensure you've enabled a storage provider in your main Mastra instance. If storage isn't enabled you'll see an error indicating that the snapshot could not be found.

```typescript title="src/mastra/index.ts"
import { Mastra } from "@mastra/core/mastra";
import { LibSQLStore } from "@mastra/libsql";

export const mastra = new Mastra({
  storage: new LibSQLStore({
    id: "mastra-storage",
    url: ":memory:"
  })
});
```

## Agent-level approval

When calling an agent using `.stream()`, set `requireToolApproval` to `true`. This prevents the agent from calling any of the tools defined in its configuration until the call is approved.

```typescript
const stream = await agent.stream("What's the weather in London?", {
  requireToolApproval: true
});
```

### Approving tool calls

To approve a tool call, call `approveToolCall` on the agent, passing in the `runId` of the stream. This lets the agent know it's now OK to call its tools.

```typescript
const handleApproval = async () => {
  const approvedStream = await agent.approveToolCall({ runId: stream.runId });

  for await (const chunk of approvedStream.textStream) {
    process.stdout.write(chunk);
  }
  process.stdout.write("\n");
};
```

### Declining tool calls

To decline a tool call, call `declineToolCall` on the agent. You will see the streamed response from the agent, but it won't call its tools.

```typescript
const handleDecline = async () => {
  const declinedStream = await agent.declineToolCall({ runId: stream.runId });

  for await (const chunk of declinedStream.textStream) {
    process.stdout.write(chunk);
  }
  process.stdout.write("\n");
};
```
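
Putting the two together, a minimal sketch of an approval gate. The decision itself (here a hypothetical `userApproved` flag, e.g. from a UI prompt or CLI confirmation) belongs to your application; only `approveToolCall` and `declineToolCall` come from the API shown above.

```typescript
// Sketch: gate the pending tool call behind an application-level decision.
// `userApproved` is a hypothetical value from your own UI or CLI prompt.
const handleDecision = async (userApproved: boolean) => {
  const nextStream = userApproved
    ? await agent.approveToolCall({ runId: stream.runId })
    : await agent.declineToolCall({ runId: stream.runId });

  for await (const chunk of nextStream.textStream) {
    process.stdout.write(chunk);
  }
  process.stdout.write("\n");
};
```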

## Tool-level approval

There are two ways to require approval at the tool level. The first uses `requireApproval`, a property on the tool definition (not to be confused with `requireToolApproval`, which is a parameter passed to `agent.stream()`). The second uses `suspend`, which lets the tool provide context or confirmation prompts so the user can decide whether the tool call should continue.

### Tool approval using `requireApproval`

In this approach, `requireApproval` is configured on the tool definition (shown below) rather than on the agent.

```typescript
export const testTool = createTool({
  id: "test-tool",
  description: "Fetches weather for a location",
  inputSchema: z.object({
    location: z.string()
  }),
  outputSchema: z.object({
    weather: z.string()
  }),
  resumeSchema: z.object({
    approved: z.boolean()
  }),
  execute: async ({ location }) => {
    const response = await fetch(`https://wttr.in/${location}?format=3`);
    const weather = await response.text();

    return { weather };
  },
  requireApproval: true
});
```

When `requireApproval` is true for a tool, the stream will include chunks of type `tool-call-approval` to indicate that the call is paused. To continue the call, invoke `resumeStream` with the required `resumeSchema` and the `runId`.

```typescript
const stream = await agent.stream("What's the weather in London?");

for await (const chunk of stream.fullStream) {
  if (chunk.type === "tool-call-approval") {
    console.log("Approval required.");
  }
}

const handleResume = async () => {
  const resumedStream = await agent.resumeStream({ approved: true }, { runId: stream.runId });

  for await (const chunk of resumedStream.textStream) {
    process.stdout.write(chunk);
  }
  process.stdout.write("\n");
};
```

### Tool approval using `suspend`

With this approach, neither the agent nor the tool uses `requireApproval`. Instead, the tool implementation calls `suspend` to pause execution and return context or confirmation prompts to the user.

```typescript
export const testToolB = createTool({
  id: "test-tool-b",
  description: "Fetches weather for a location",
  inputSchema: z.object({
    location: z.string()
  }),
  outputSchema: z.object({
    weather: z.string()
  }),
  resumeSchema: z.object({
    approved: z.boolean()
  }),
  suspendSchema: z.object({
    reason: z.string()
  }),
  execute: async ({ location }, { agent } = {}) => {
    const { resumeData: { approved } = {}, suspend } = agent ?? {};

    if (!approved) {
      return suspend?.({ reason: "Approval required." });
    }

    const response = await fetch(`https://wttr.in/${location}?format=3`);
    const weather = await response.text();

    return { weather };
  }
});
```

With this approach the stream will include a `tool-call-suspended` chunk, and the `suspendPayload` will contain the `reason` defined by the tool's `suspendSchema`. To continue the call, invoke `resumeStream` with the required `resumeSchema` and the `runId`.

```typescript
const stream = await agent.stream("What's the weather in London?");

for await (const chunk of stream.fullStream) {
  if (chunk.type === "tool-call-suspended") {
    console.log(chunk.payload.suspendPayload);
  }
}

const handleResume = async () => {
  const resumedStream = await agent.resumeStream({ approved: true }, { runId: stream.runId });

  for await (const chunk of resumedStream.textStream) {
    process.stdout.write(chunk);
  }
  process.stdout.write("\n");
};
```

## Automatic tool resumption

When using tools that call `suspend()`, you can enable automatic resumption so the agent resumes suspended tools based on the user's next message. This creates a conversational flow where users provide the required information naturally, without your application needing to call `resumeStream()` explicitly.

### Enabling auto-resume

Set `autoResumeSuspendedTools` to `true` in the agent's default options or when calling `stream()`:

```typescript
import { Agent } from "@mastra/core/agent";
import { Memory } from "@mastra/memory";

// Option 1: In agent configuration
const agent = new Agent({
  id: "my-agent",
  name: "My Agent",
  instructions: "You are a helpful assistant",
  model: "openai/gpt-4o-mini",
  tools: { weatherTool },
  memory: new Memory(),
  defaultOptions: {
    autoResumeSuspendedTools: true,
  },
});

// Option 2: Per-request
const stream = await agent.stream("What's the weather?", {
  autoResumeSuspendedTools: true,
});
```

### How it works

When `autoResumeSuspendedTools` is enabled:

1. A tool suspends execution by calling `suspend()` with a payload (e.g., requesting more information)
2. The suspension is persisted to memory along with the conversation
3. When the user sends their next message on the same thread, the agent:
   - Detects the suspended tool from message history
   - Extracts `resumeData` from the user's message based on the tool's `resumeSchema`
   - Automatically resumes the tool with the extracted data

### Example

```typescript
import { Agent } from "@mastra/core/agent";
import { Memory } from "@mastra/memory";
import { createTool } from "@mastra/core/tools";
import { z } from "zod";

export const weatherTool = createTool({
  id: "weather-info",
  description: "Fetches weather information for a city",
  suspendSchema: z.object({
    message: z.string(),
  }),
  resumeSchema: z.object({
    city: z.string(),
  }),
  execute: async (_inputData, context) => {
    // Check if this is a resume with data
    if (!context?.agent?.resumeData) {
      // First call - suspend and ask for the city
      return context?.agent?.suspend({
        message: "What city do you want to know the weather for?",
      });
    }

    // Resume call - city was extracted from user's message
    const { city } = context.agent.resumeData;
    const response = await fetch(`https://wttr.in/${city}?format=3`);
    const weather = await response.text();

    return { city, weather };
  },
});

const agent = new Agent({
  id: "my-agent",
  name: "My Agent",
  instructions: "You are a helpful assistant",
  model: "openai/gpt-4o-mini",
  tools: { weatherTool },
  memory: new Memory(),
  defaultOptions: {
    autoResumeSuspendedTools: true,
  },
});

const stream = await agent.stream("What's the weather like?");

for await (const chunk of stream.fullStream) {
  if (chunk.type === "tool-call-suspended") {
    console.log(chunk.payload.suspendPayload);
  }
}

const handleResume = async () => {
  const resumedStream = await agent.stream("San Francisco");

  for await (const chunk of resumedStream.textStream) {
    process.stdout.write(chunk);
  }
  process.stdout.write("\n");
};
```

**Conversation flow:**

```
User: "What's the weather like?"
Agent: "What city do you want to know the weather for?"

User: "San Francisco"
Agent: "The weather in San Francisco is: San Francisco: ☀️ +72°F"
```

The second message automatically resumes the suspended tool - the agent extracts `{ city: "San Francisco" }` from the user's message and passes it as `resumeData`.

### Requirements

For automatic tool resumption to work:

- **Memory configured**: The agent needs memory to track suspended tools across messages
- **Same thread**: The follow-up message must use the same memory thread and resource identifiers (see the sketch after this list)
- **`resumeSchema` defined**: The tool must define a `resumeSchema` so the agent knows what data structure to extract from the user's message
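
For example, a follow-up call on the same thread might look like the sketch below. The thread and resource identifiers are hypothetical, and this assumes they are passed via the `memory` option on `stream()`; see the Agent Memory doc for the exact option shape.

```typescript
// Sketch: both calls share the same (hypothetical) thread and resource
// identifiers so the agent can find the suspended tool in message history.
// The `memory` option shape is an assumption; check the Agent Memory doc.
const memoryOptions = {
  memory: { thread: "weather-thread", resource: "user-123" },
};

const firstStream = await agent.stream("What's the weather like?", memoryOptions);
// ...the tool suspends and asks which city...

const followUp = await agent.stream("San Francisco", memoryOptions);
for await (const chunk of followUp.textStream) {
  process.stdout.write(chunk);
}
```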

### Manual vs automatic resumption

| Approach | Use case |
|----------|----------|
| Manual (`resumeStream()`) | Programmatic control, webhooks, button clicks, external triggers |
| Automatic (`autoResumeSuspendedTools`) | Conversational flows where users provide resume data in natural language |

Both approaches work with the same tool definitions. Automatic resumption triggers only when suspended tools exist in the message history and the user sends a new message on the same thread.
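
For the manual path, the external trigger can be as simple as a function your server exposes. A minimal, framework-agnostic sketch: the handler name is hypothetical, and persisting `runId` between the original stream and the trigger is up to your application; only `resumeStream` comes from the API shown above.

```typescript
// Sketch: resume a paused tool call from an external trigger such as a
// webhook or button click. `runId` would have been saved when the original
// stream reported the paused tool call.
async function handleApprovalTrigger(runId: string, approved: boolean) {
  const resumedStream = await agent.resumeStream({ approved }, { runId });

  let text = "";
  for await (const chunk of resumedStream.textStream) {
    text += chunk;
  }
  return text;
}
```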

## Related

- [Using Tools](./using-tools)
- [Agent Overview](./overview)
- [Tools Overview](../mcp/overview)
- [Agent Memory](./agent-memory)
- [Request Context](https://mastra.ai/docs/v1/server/request-context)
package/dist/docs/core/01-reference.md
@@ -0,0 +1,114 @@
# Core API Reference

> API reference for core - 2 entries

---

## Reference: Mastra.getMemory()

> Documentation for the `Mastra.getMemory()` method in Mastra, which retrieves a registered memory instance by its registry key.

The `.getMemory()` method retrieves a memory instance from the Mastra registry by its key. Memory instances are registered in the Mastra constructor and can be referenced by stored agents.

## Usage example

```typescript
const memory = mastra.getMemory("conversationMemory");

// Use the memory instance
const thread = await memory.createThread({
  resourceId: "user-123",
  title: "New Conversation",
});
```

## Parameters

A single string argument: the registry key the memory instance was registered under in the Mastra constructor.

## Returns

The memory instance registered under the given key.

## Example: Registering and Retrieving Memory

```typescript
import { Mastra } from "@mastra/core";
import { Memory } from "@mastra/memory";
import { LibSQLStore } from "@mastra/libsql";

const conversationMemory = new Memory({
  storage: new LibSQLStore({ url: ":memory:" }),
});

const mastra = new Mastra({
  memory: {
    conversationMemory,
  },
});

// Later, retrieve the memory instance
const memory = mastra.getMemory("conversationMemory");
```

## Related

- [Mastra.listMemory()](https://mastra.ai/reference/v1/core/listMemory)
- [Memory overview](https://mastra.ai/docs/v1/memory/overview)
- [Agent Memory](https://mastra.ai/docs/v1/agents/agent-memory)

---

## Reference: Mastra.listMemory()

> Documentation for the `Mastra.listMemory()` method in Mastra, which returns all registered memory instances.

The `.listMemory()` method returns all memory instances registered with the Mastra instance.

## Usage example

```typescript
const memoryInstances = mastra.listMemory();

for (const [key, memory] of Object.entries(memoryInstances)) {
  console.log(`Memory "${key}": ${memory.id}`);
}
```

## Parameters

This method takes no parameters.

## Returns

An object mapping registry keys to their registered memory instances.

## Example: Checking Registered Memory

```typescript
import { Mastra } from "@mastra/core";
import { Memory } from "@mastra/memory";
import { LibSQLStore } from "@mastra/libsql";

const conversationMemory = new Memory({
  id: "conversation-memory",
  storage: new LibSQLStore({ url: ":memory:" }),
});

const analyticsMemory = new Memory({
  id: "analytics-memory",
  storage: new LibSQLStore({ url: ":memory:" }),
});

const mastra = new Mastra({
  memory: {
    conversationMemory,
    analyticsMemory,
  },
});

// List all registered memory instances
const allMemory = mastra.listMemory();
console.log(Object.keys(allMemory)); // ["conversationMemory", "analyticsMemory"]
```

## Related

- [Mastra.getMemory()](https://mastra.ai/reference/v1/core/getMemory)
- [Memory overview](https://mastra.ai/docs/v1/memory/overview)
- [Agent Memory](https://mastra.ai/docs/v1/agents/agent-memory)