@mastra/libsql 0.0.0-error-handler-fix-20251020202607 → 0.0.0-execa-dynamic-import-20260304221256
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +2839 -3
- package/LICENSE.md +15 -0
- package/README.md +30 -20
- package/dist/docs/SKILL.md +50 -0
- package/dist/docs/assets/SOURCE_MAP.json +6 -0
- package/dist/docs/references/docs-agents-agent-approval.md +588 -0
- package/dist/docs/references/docs-agents-agent-memory.md +209 -0
- package/dist/docs/references/docs-agents-network-approval.md +275 -0
- package/dist/docs/references/docs-agents-networks.md +299 -0
- package/dist/docs/references/docs-memory-memory-processors.md +314 -0
- package/dist/docs/references/docs-memory-message-history.md +260 -0
- package/dist/docs/references/docs-memory-overview.md +45 -0
- package/dist/docs/references/docs-memory-semantic-recall.md +288 -0
- package/dist/docs/references/docs-memory-storage.md +261 -0
- package/dist/docs/references/docs-memory-working-memory.md +400 -0
- package/dist/docs/references/docs-observability-overview.md +70 -0
- package/dist/docs/references/docs-observability-tracing-exporters-default.md +209 -0
- package/dist/docs/references/docs-rag-retrieval.md +515 -0
- package/dist/docs/references/docs-workflows-snapshots.md +238 -0
- package/dist/docs/references/guides-agent-frameworks-ai-sdk.md +140 -0
- package/dist/docs/references/reference-core-getMemory.md +50 -0
- package/dist/docs/references/reference-core-listMemory.md +56 -0
- package/dist/docs/references/reference-core-mastra-class.md +66 -0
- package/dist/docs/references/reference-memory-memory-class.md +147 -0
- package/dist/docs/references/reference-storage-composite.md +235 -0
- package/dist/docs/references/reference-storage-dynamodb.md +282 -0
- package/dist/docs/references/reference-storage-libsql.md +135 -0
- package/dist/docs/references/reference-vectors-libsql.md +305 -0
- package/dist/index.cjs +8693 -1962
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +8682 -1965
- package/dist/index.js.map +1 -1
- package/dist/storage/db/index.d.ts +306 -0
- package/dist/storage/db/index.d.ts.map +1 -0
- package/dist/storage/{domains → db}/utils.d.ts +21 -13
- package/dist/storage/db/utils.d.ts.map +1 -0
- package/dist/storage/domains/agents/index.d.ts +30 -0
- package/dist/storage/domains/agents/index.d.ts.map +1 -0
- package/dist/storage/domains/blobs/index.d.ts +17 -0
- package/dist/storage/domains/blobs/index.d.ts.map +1 -0
- package/dist/storage/domains/datasets/index.d.ts +43 -0
- package/dist/storage/domains/datasets/index.d.ts.map +1 -0
- package/dist/storage/domains/experiments/index.d.ts +29 -0
- package/dist/storage/domains/experiments/index.d.ts.map +1 -0
- package/dist/storage/domains/mcp-clients/index.d.ts +26 -0
- package/dist/storage/domains/mcp-clients/index.d.ts.map +1 -0
- package/dist/storage/domains/mcp-servers/index.d.ts +26 -0
- package/dist/storage/domains/mcp-servers/index.d.ts.map +1 -0
- package/dist/storage/domains/memory/index.d.ts +40 -53
- package/dist/storage/domains/memory/index.d.ts.map +1 -1
- package/dist/storage/domains/observability/index.d.ts +41 -29
- package/dist/storage/domains/observability/index.d.ts.map +1 -1
- package/dist/storage/domains/prompt-blocks/index.d.ts +25 -0
- package/dist/storage/domains/prompt-blocks/index.d.ts.map +1 -0
- package/dist/storage/domains/scorer-definitions/index.d.ts +26 -0
- package/dist/storage/domains/scorer-definitions/index.d.ts.map +1 -0
- package/dist/storage/domains/scores/index.d.ts +19 -31
- package/dist/storage/domains/scores/index.d.ts.map +1 -1
- package/dist/storage/domains/skills/index.d.ts +26 -0
- package/dist/storage/domains/skills/index.d.ts.map +1 -0
- package/dist/storage/domains/workflows/index.d.ts +20 -32
- package/dist/storage/domains/workflows/index.d.ts.map +1 -1
- package/dist/storage/domains/workspaces/index.d.ts +26 -0
- package/dist/storage/domains/workspaces/index.d.ts.map +1 -0
- package/dist/storage/index.d.ts +68 -259
- package/dist/storage/index.d.ts.map +1 -1
- package/dist/vector/index.d.ts +12 -4
- package/dist/vector/index.d.ts.map +1 -1
- package/dist/vector/sql-builder.d.ts.map +1 -1
- package/package.json +16 -12
- package/dist/storage/domains/legacy-evals/index.d.ts +0 -18
- package/dist/storage/domains/legacy-evals/index.d.ts.map +0 -1
- package/dist/storage/domains/operations/index.d.ts +0 -110
- package/dist/storage/domains/operations/index.d.ts.map +0 -1
- package/dist/storage/domains/traces/index.d.ts +0 -21
- package/dist/storage/domains/traces/index.d.ts.map +0 -1
- package/dist/storage/domains/utils.d.ts.map +0 -1
|
@@ -0,0 +1,588 @@
|
|
|
1
|
+
# Agent approval
|
|
2
|
+
|
|
3
|
+
Agents sometimes require the same [human-in-the-loop](https://mastra.ai/docs/workflows/human-in-the-loop) oversight used in workflows when calling tools that handle sensitive operations, like deleting resources or running long processes. With agent approval you can suspend a tool call before it executes so a human can approve or decline it, or let tools suspend themselves to request additional context from the user.
|
|
4
|
+
|
|
5
|
+
## How approval works
|
|
6
|
+
|
|
7
|
+
Mastra offers two distinct mechanisms for pausing tool calls: **pre-execution approval** and **runtime suspension**.
|
|
8
|
+
|
|
9
|
+
### Pre-execution approval
|
|
10
|
+
|
|
11
|
+
Pre-execution approval pauses a tool call _before_ its `execute` function runs. The LLM still decides which tool to call and provides arguments, but `execute` doesn't run until you explicitly approve.
|
|
12
|
+
|
|
13
|
+
Two flags control this, combined with OR logic. If _either_ is `true`, the call pauses:
|
|
14
|
+
|
|
15
|
+
| Flag | Where to set it | Scope |
|
|
16
|
+
| --------------------------- | --------------------------------- | ------------------------------------------- |
|
|
17
|
+
| `requireToolApproval: true` | `stream()` / `generate()` options | Pauses **every** tool call for that request |
|
|
18
|
+
| `requireApproval: true` | `createTool()` definition | Pauses calls to **that specific tool** |
|
|
19
|
+
|
|
20
|
+
The stream emits a `tool-call-approval` chunk when a call is paused this way. You then call `approveToolCall()` or `declineToolCall()` to continue.
|
|
21
|
+
|
|
22
|
+
### Runtime suspension with `suspend()`
|
|
23
|
+
|
|
24
|
+
A tool can also pause _during_ its `execute` function by calling `suspend()`. This is useful when the tool starts running and then discovers it needs additional user input or confirmation before it can finish.
|
|
25
|
+
|
|
26
|
+
The stream emits a `tool-call-suspended` chunk with a custom payload defined by the tool's `suspendSchema`. You resume by calling `resumeStream()` with data matching the tool's `resumeSchema`.
|
|
27
|
+
|
|
28
|
+
### Storage
|
|
29
|
+
|
|
30
|
+
Agent approval uses a snapshot to capture the state of the request. Ensure you've enabled a storage provider in your main Mastra instance. If storage isn't enabled, you'll see a "snapshot not found" error.
|
|
31
|
+
|
|
32
|
+
```typescript
|
|
33
|
+
import { Mastra } from '@mastra/core/mastra'
|
|
34
|
+
import { LibSQLStore } from '@mastra/libsql'
|
|
35
|
+
|
|
36
|
+
export const mastra = new Mastra({
|
|
37
|
+
storage: new LibSQLStore({
|
|
38
|
+
id: 'mastra-storage',
|
|
39
|
+
url: ':memory:',
|
|
40
|
+
}),
|
|
41
|
+
})
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
## Agent-level approval
|
|
45
|
+
|
|
46
|
+
Pass `requireToolApproval: true` to `stream()` or `generate()` to pause every tool call before execution. The LLM still decides which tools to call and with what arguments, but no tool runs until you approve or decline.
|
|
47
|
+
|
|
48
|
+
```typescript
|
|
49
|
+
const stream = await agent.stream("What's the weather in London?", {
|
|
50
|
+
requireToolApproval: true,
|
|
51
|
+
})
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
When a tool call is paused, the stream emits a `tool-call-approval` chunk containing the `toolCallId`, `toolName`, and `args`. Use this to inspect the pending call and decide whether to approve or decline:
|
|
55
|
+
|
|
56
|
+
```typescript
|
|
57
|
+
for await (const chunk of stream.fullStream) {
|
|
58
|
+
if (chunk.type === 'tool-call-approval') {
|
|
59
|
+
console.log('Tool:', chunk.payload.toolName)
|
|
60
|
+
console.log('Args:', chunk.payload.args)
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
### Approving tool calls
|
|
66
|
+
|
|
67
|
+
Call `approveToolCall()` on the agent with the `runId` of the stream to resume the suspended tool call and let it execute:
|
|
68
|
+
|
|
69
|
+
```typescript
|
|
70
|
+
const handleApproval = async () => {
|
|
71
|
+
const approvedStream = await agent.approveToolCall({ runId: stream.runId })
|
|
72
|
+
|
|
73
|
+
for await (const chunk of approvedStream.textStream) {
|
|
74
|
+
process.stdout.write(chunk)
|
|
75
|
+
}
|
|
76
|
+
process.stdout.write('\n')
|
|
77
|
+
}
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
### Declining tool calls
|
|
81
|
+
|
|
82
|
+
Call `declineToolCall()` on the agent to skip the tool call. The agent continues without executing the tool and responds accordingly:
|
|
83
|
+
|
|
84
|
+
```typescript
|
|
85
|
+
const handleDecline = async () => {
|
|
86
|
+
const declinedStream = await agent.declineToolCall({ runId: stream.runId })
|
|
87
|
+
|
|
88
|
+
for await (const chunk of declinedStream.textStream) {
|
|
89
|
+
process.stdout.write(chunk)
|
|
90
|
+
}
|
|
91
|
+
process.stdout.write('\n')
|
|
92
|
+
}
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
## Tool approval with generate()
|
|
96
|
+
|
|
97
|
+
Tool approval also works with the `generate()` method for non-streaming use cases. When a tool requires approval during a `generate()` call, the method returns immediately instead of executing the tool.
|
|
98
|
+
|
|
99
|
+
### How it works
|
|
100
|
+
|
|
101
|
+
When a tool requires approval during a `generate()` call, the response includes:
|
|
102
|
+
|
|
103
|
+
- `finishReason: 'suspended'`: Indicates the agent is waiting for approval
|
|
104
|
+
- `suspendPayload`: Contains tool call details (`toolCallId`, `toolName`, `args`)
|
|
105
|
+
- `runId`: Needed to approve or decline the tool call
|
|
106
|
+
|
|
107
|
+
### Approving tool calls
|
|
108
|
+
|
|
109
|
+
Use `approveToolCallGenerate()` to approve the tool call and get the final result:
|
|
110
|
+
|
|
111
|
+
```typescript
|
|
112
|
+
const output = await agent.generate('Find user John', {
|
|
113
|
+
requireToolApproval: true,
|
|
114
|
+
})
|
|
115
|
+
|
|
116
|
+
if (output.finishReason === 'suspended') {
|
|
117
|
+
console.log('Tool requires approval:', output.suspendPayload.toolName)
|
|
118
|
+
console.log('Arguments:', output.suspendPayload.args)
|
|
119
|
+
|
|
120
|
+
// Approve the tool call and get the final result
|
|
121
|
+
const result = await agent.approveToolCallGenerate({
|
|
122
|
+
runId: output.runId,
|
|
123
|
+
toolCallId: output.suspendPayload.toolCallId,
|
|
124
|
+
})
|
|
125
|
+
|
|
126
|
+
console.log('Final result:', result.text)
|
|
127
|
+
}
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
### Declining tool calls
|
|
131
|
+
|
|
132
|
+
Use `declineToolCallGenerate()` to skip the tool call:
|
|
133
|
+
|
|
134
|
+
```typescript
|
|
135
|
+
if (output.finishReason === 'suspended') {
|
|
136
|
+
const result = await agent.declineToolCallGenerate({
|
|
137
|
+
runId: output.runId,
|
|
138
|
+
toolCallId: output.suspendPayload.toolCallId,
|
|
139
|
+
})
|
|
140
|
+
|
|
141
|
+
// Agent responds acknowledging the declined tool
|
|
142
|
+
console.log(result.text)
|
|
143
|
+
}
|
|
144
|
+
```
|
|
145
|
+
|
|
146
|
+
### Stream vs generate comparison
|
|
147
|
+
|
|
148
|
+
| Aspect | `stream()` | `generate()` |
|
|
149
|
+
| ------------------ | ---------------------------- | ------------------------------------------------ |
|
|
150
|
+
| Response type | Streaming chunks | Complete response |
|
|
151
|
+
| Approval detection | `tool-call-approval` chunk | `finishReason: 'suspended'` |
|
|
152
|
+
| Approve method | `approveToolCall({ runId })` | `approveToolCallGenerate({ runId, toolCallId })` |
|
|
153
|
+
| Decline method | `declineToolCall({ runId })` | `declineToolCallGenerate({ runId, toolCallId })` |
|
|
154
|
+
| Result | Stream to iterate | Full output object |
|
|
155
|
+
|
|
156
|
+
## Tool-level approval
|
|
157
|
+
|
|
158
|
+
Instead of pausing every tool call at the agent level, you can mark individual tools as requiring approval. This gives you granular control — only specific tools pause, while others execute immediately.
|
|
159
|
+
|
|
160
|
+
### Approval using `requireApproval`
|
|
161
|
+
|
|
162
|
+
Set `requireApproval: true` on a tool definition. The tool pauses before execution regardless of whether `requireToolApproval` is set on the agent:
|
|
163
|
+
|
|
164
|
+
```typescript
|
|
165
|
+
export const testTool = createTool({
|
|
166
|
+
id: 'test-tool',
|
|
167
|
+
description: 'Fetches weather for a location',
|
|
168
|
+
inputSchema: z.object({
|
|
169
|
+
location: z.string(),
|
|
170
|
+
}),
|
|
171
|
+
outputSchema: z.object({
|
|
172
|
+
weather: z.string(),
|
|
173
|
+
}),
|
|
174
|
+
resumeSchema: z.object({
|
|
175
|
+
approved: z.boolean(),
|
|
176
|
+
}),
|
|
177
|
+
execute: async inputData => {
|
|
178
|
+
const response = await fetch(`https://wttr.in/${inputData.location}?format=3`)
|
|
179
|
+
const weather = await response.text()
|
|
180
|
+
|
|
181
|
+
return { weather }
|
|
182
|
+
},
|
|
183
|
+
requireApproval: true,
|
|
184
|
+
})
|
|
185
|
+
```
|
|
186
|
+
|
|
187
|
+
When `requireApproval` is `true`, the stream emits `tool-call-approval` chunks the same way agent-level approval does. Use `approveToolCall()` or `declineToolCall()` to continue:
|
|
188
|
+
|
|
189
|
+
```typescript
|
|
190
|
+
const stream = await agent.stream("What's the weather in London?")
|
|
191
|
+
|
|
192
|
+
for await (const chunk of stream.fullStream) {
|
|
193
|
+
if (chunk.type === 'tool-call-approval') {
|
|
194
|
+
console.log('Approval required for:', chunk.payload.toolName)
|
|
195
|
+
}
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
const handleApproval = async () => {
|
|
199
|
+
const approvedStream = await agent.approveToolCall({ runId: stream.runId })
|
|
200
|
+
|
|
201
|
+
for await (const chunk of approvedStream.textStream) {
|
|
202
|
+
process.stdout.write(chunk)
|
|
203
|
+
}
|
|
204
|
+
process.stdout.write('\n')
|
|
205
|
+
}
|
|
206
|
+
```
|
|
207
|
+
|
|
208
|
+
### Approval using `suspend`
|
|
209
|
+
|
|
210
|
+
With this approach, neither the agent nor the tool uses `requireApproval`. Instead, the tool's `execute` function calls `suspend` to pause at a specific point and return context or confirmation prompts to the user. This is useful when approval depends on runtime conditions rather than being unconditional.
|
|
211
|
+
|
|
212
|
+
```typescript
|
|
213
|
+
export const testToolB = createTool({
|
|
214
|
+
id: 'test-tool-b',
|
|
215
|
+
description: 'Fetches weather for a location',
|
|
216
|
+
inputSchema: z.object({
|
|
217
|
+
location: z.string(),
|
|
218
|
+
}),
|
|
219
|
+
outputSchema: z.object({
|
|
220
|
+
weather: z.string(),
|
|
221
|
+
}),
|
|
222
|
+
resumeSchema: z.object({
|
|
223
|
+
approved: z.boolean(),
|
|
224
|
+
}),
|
|
225
|
+
suspendSchema: z.object({
|
|
226
|
+
reason: z.string(),
|
|
227
|
+
}),
|
|
228
|
+
execute: async (inputData, context) => {
|
|
229
|
+
const { resumeData: { approved } = {}, suspend } = context?.agent ?? {}
|
|
230
|
+
|
|
231
|
+
if (!approved) {
|
|
232
|
+
return suspend?.({ reason: 'Approval required.' })
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
const response = await fetch(`https://wttr.in/${inputData.location}?format=3`)
|
|
236
|
+
const weather = await response.text()
|
|
237
|
+
|
|
238
|
+
return { weather }
|
|
239
|
+
},
|
|
240
|
+
})
|
|
241
|
+
```
|
|
242
|
+
|
|
243
|
+
With this approach the stream includes a `tool-call-suspended` chunk, and the `suspendPayload` contains the `reason` defined by the tool's `suspendSchema`. Call `resumeStream` with the `resumeSchema` data and `runId` to continue:
|
|
244
|
+
|
|
245
|
+
```typescript
|
|
246
|
+
const stream = await agent.stream("What's the weather in London?")
|
|
247
|
+
|
|
248
|
+
for await (const chunk of stream.fullStream) {
|
|
249
|
+
if (chunk.type === 'tool-call-suspended') {
|
|
250
|
+
console.log(chunk.payload.suspendPayload)
|
|
251
|
+
}
|
|
252
|
+
}
|
|
253
|
+
|
|
254
|
+
const handleResume = async () => {
|
|
255
|
+
const resumedStream = await agent.resumeStream({ approved: true }, { runId: stream.runId })
|
|
256
|
+
|
|
257
|
+
for await (const chunk of resumedStream.textStream) {
|
|
258
|
+
process.stdout.write(chunk)
|
|
259
|
+
}
|
|
260
|
+
process.stdout.write('\n')
|
|
261
|
+
}
|
|
262
|
+
```
|
|
263
|
+
|
|
264
|
+
## Automatic tool resumption
|
|
265
|
+
|
|
266
|
+
When using tools that call `suspend()`, you can enable automatic resumption so the agent resumes suspended tools based on the user's next message. This creates a conversational flow where users provide the required information naturally, without your application needing to call `resumeStream()` explicitly.
|
|
267
|
+
|
|
268
|
+
### Enabling auto-resume
|
|
269
|
+
|
|
270
|
+
Set `autoResumeSuspendedTools` to `true` in the agent's default options or when calling `stream()`:
|
|
271
|
+
|
|
272
|
+
```typescript
|
|
273
|
+
import { Agent } from '@mastra/core/agent'
|
|
274
|
+
import { Memory } from '@mastra/memory'
|
|
275
|
+
|
|
276
|
+
// Option 1: In agent configuration
|
|
277
|
+
const agent = new Agent({
|
|
278
|
+
id: 'my-agent',
|
|
279
|
+
name: 'My Agent',
|
|
280
|
+
instructions: 'You are a helpful assistant',
|
|
281
|
+
model: 'openai/gpt-4o-mini',
|
|
282
|
+
tools: { weatherTool },
|
|
283
|
+
memory: new Memory(),
|
|
284
|
+
defaultOptions: {
|
|
285
|
+
autoResumeSuspendedTools: true,
|
|
286
|
+
},
|
|
287
|
+
})
|
|
288
|
+
|
|
289
|
+
// Option 2: Per-request
|
|
290
|
+
const stream = await agent.stream("What's the weather?", {
|
|
291
|
+
autoResumeSuspendedTools: true,
|
|
292
|
+
})
|
|
293
|
+
```
|
|
294
|
+
|
|
295
|
+
### How it works
|
|
296
|
+
|
|
297
|
+
When `autoResumeSuspendedTools` is enabled:
|
|
298
|
+
|
|
299
|
+
1. A tool suspends execution by calling `suspend()` with a payload (e.g., requesting more information)
|
|
300
|
+
|
|
301
|
+
2. The suspension is persisted to memory along with the conversation
|
|
302
|
+
|
|
303
|
+
3. When the user sends their next message on the same thread, the agent:
|
|
304
|
+
|
|
305
|
+
- Detects the suspended tool from message history
|
|
306
|
+
- Extracts `resumeData` from the user's message based on the tool's `resumeSchema`
|
|
307
|
+
- Automatically resumes the tool with the extracted data
|
|
308
|
+
|
|
309
|
+
### Example
|
|
310
|
+
|
|
311
|
+
```typescript
|
|
312
|
+
import { createTool } from '@mastra/core/tools'
|
|
313
|
+
import { z } from 'zod'
|
|
314
|
+
|
|
315
|
+
export const weatherTool = createTool({
|
|
316
|
+
id: 'weather-info',
|
|
317
|
+
description: 'Fetches weather information for a city',
|
|
318
|
+
suspendSchema: z.object({
|
|
319
|
+
message: z.string(),
|
|
320
|
+
}),
|
|
321
|
+
resumeSchema: z.object({
|
|
322
|
+
city: z.string(),
|
|
323
|
+
}),
|
|
324
|
+
execute: async (_inputData, context) => {
|
|
325
|
+
// Check if this is a resume with data
|
|
326
|
+
if (!context?.agent?.resumeData) {
|
|
327
|
+
// First call - suspend and ask for the city
|
|
328
|
+
return context?.agent?.suspend({
|
|
329
|
+
message: 'What city do you want to know the weather for?',
|
|
330
|
+
})
|
|
331
|
+
}
|
|
332
|
+
|
|
333
|
+
// Resume call - city was extracted from user's message
|
|
334
|
+
const { city } = context.agent.resumeData
|
|
335
|
+
const response = await fetch(`https://wttr.in/${city}?format=3`)
|
|
336
|
+
const weather = await response.text()
|
|
337
|
+
|
|
338
|
+
return { city, weather }
|
|
339
|
+
},
|
|
340
|
+
})
|
|
341
|
+
|
|
342
|
+
const agent = new Agent({
|
|
343
|
+
id: 'my-agent',
|
|
344
|
+
name: 'My Agent',
|
|
345
|
+
instructions: 'You are a helpful assistant',
|
|
346
|
+
model: 'openai/gpt-4o-mini',
|
|
347
|
+
tools: { weatherTool },
|
|
348
|
+
memory: new Memory(),
|
|
349
|
+
defaultOptions: {
|
|
350
|
+
autoResumeSuspendedTools: true,
|
|
351
|
+
},
|
|
352
|
+
})
|
|
353
|
+
|
|
354
|
+
const stream = await agent.stream("What's the weather like?")
|
|
355
|
+
|
|
356
|
+
for await (const chunk of stream.fullStream) {
|
|
357
|
+
if (chunk.type === 'tool-call-suspended') {
|
|
358
|
+
console.log(chunk.payload.suspendPayload)
|
|
359
|
+
}
|
|
360
|
+
}
|
|
361
|
+
|
|
362
|
+
const handleResume = async () => {
|
|
363
|
+
const resumedStream = await agent.stream('San Francisco')
|
|
364
|
+
|
|
365
|
+
for await (const chunk of resumedStream.textStream) {
|
|
366
|
+
process.stdout.write(chunk)
|
|
367
|
+
}
|
|
368
|
+
process.stdout.write('\n')
|
|
369
|
+
}
|
|
370
|
+
```
|
|
371
|
+
|
|
372
|
+
**Conversation flow:**
|
|
373
|
+
|
|
374
|
+
```text
|
|
375
|
+
User: "What's the weather like?"
|
|
376
|
+
Agent: "What city do you want to know the weather for?"
|
|
377
|
+
|
|
378
|
+
User: "San Francisco"
|
|
379
|
+
Agent: "The weather in San Francisco is: San Francisco: ☀️ +72°F"
|
|
380
|
+
```
|
|
381
|
+
|
|
382
|
+
The second message automatically resumes the suspended tool: the agent extracts `{ city: "San Francisco" }` from the user's message and passes it as `resumeData`.
|
|
383
|
+
|
|
384
|
+
### Requirements
|
|
385
|
+
|
|
386
|
+
For automatic tool resumption to work:
|
|
387
|
+
|
|
388
|
+
- **Memory configured**: The agent needs memory to track suspended tools across messages
|
|
389
|
+
- **Same thread**: The follow-up message must use the same memory thread and resource identifiers
|
|
390
|
+
- **`resumeSchema` defined**: The tool must define a `resumeSchema` so the agent knows what data structure to extract from the user's message
|
|
391
|
+
|
|
392
|
+
### Manual vs automatic resumption
|
|
393
|
+
|
|
394
|
+
| Approach | Use case |
|
|
395
|
+
| -------------------------------------- | ------------------------------------------------------------------------ |
|
|
396
|
+
| Manual (`resumeStream()`) | Programmatic control, webhooks, button clicks, external triggers |
|
|
397
|
+
| Automatic (`autoResumeSuspendedTools`) | Conversational flows where users provide resume data in natural language |
|
|
398
|
+
|
|
399
|
+
Both approaches work with the same tool definitions. Automatic resumption triggers only when suspended tools exist in the message history and the user sends a new message on the same thread.
|
|
400
|
+
|
|
401
|
+
## Tool approval: Supervisor pattern
|
|
402
|
+
|
|
403
|
+
The [supervisor pattern](https://mastra.ai/docs/agents/networks) lets a supervisor agent coordinate multiple subagents using `.stream()` or `.generate()`. The supervisor delegates tasks to subagents, which may use tools that require approval. When this happens, tool approvals properly propagate through the delegation chain — the approval request surfaces at the supervisor level where you can handle it, regardless of which subagent triggered it.
|
|
404
|
+
|
|
405
|
+
### How it works
|
|
406
|
+
|
|
407
|
+
1. The supervisor agent delegates a task to a subagent.
|
|
408
|
+
2. The subagent calls a tool that has `requireApproval: true` or uses `suspend()`.
|
|
409
|
+
3. The approval request bubbles up through the delegation chain to the supervisor.
|
|
410
|
+
4. You handle the approval or decline at the supervisor level.
|
|
411
|
+
5. The decision propagates back down to the subagent, which continues or terminates accordingly.
|
|
412
|
+
|
|
413
|
+
### Example
|
|
414
|
+
|
|
415
|
+
The following example creates a subagent with a database lookup tool that requires approval. The supervisor delegates to this subagent, and when the tool triggers an approval request, it surfaces in the supervisor's stream as a `tool-call-approval` chunk. You then approve the tool call using `approveToolCall` with the stream's `runId`.
|
|
416
|
+
|
|
417
|
+
```typescript
|
|
418
|
+
import { Agent } from '@mastra/core/agent'
|
|
419
|
+
import { createTool } from '@mastra/core/tools'
|
|
420
|
+
import { Memory } from '@mastra/memory'
|
|
421
|
+
import { z } from 'zod'
|
|
422
|
+
|
|
423
|
+
// subagent with approval-required tool
|
|
424
|
+
const findUserTool = createTool({
|
|
425
|
+
id: 'find-user',
|
|
426
|
+
description: 'Finds user by ID in the database',
|
|
427
|
+
inputSchema: z.object({
|
|
428
|
+
userId: z.string(),
|
|
429
|
+
}),
|
|
430
|
+
outputSchema: z.object({
|
|
431
|
+
user: z.object({
|
|
432
|
+
id: z.string(),
|
|
433
|
+
name: z.string(),
|
|
434
|
+
email: z.string(),
|
|
435
|
+
}),
|
|
436
|
+
}),
|
|
437
|
+
requireApproval: true, // Requires approval before execution
|
|
438
|
+
execute: async input => {
|
|
439
|
+
const user = await database.findUser(input.userId)
|
|
440
|
+
return { user }
|
|
441
|
+
},
|
|
442
|
+
})
|
|
443
|
+
|
|
444
|
+
const dataAgent = new Agent({
|
|
445
|
+
id: 'data-agent',
|
|
446
|
+
name: 'Data Agent',
|
|
447
|
+
description: 'Handles database queries and user data retrieval',
|
|
448
|
+
model: 'openai/gpt-4o-mini',
|
|
449
|
+
tools: { findUserTool },
|
|
450
|
+
})
|
|
451
|
+
|
|
452
|
+
const supervisorAgent = new Agent({
|
|
453
|
+
id: 'supervisor',
|
|
454
|
+
name: 'Supervisor Agent',
|
|
455
|
+
instructions: `You coordinate data retrieval tasks.
|
|
456
|
+
Delegate to data-agent for user lookups.`,
|
|
457
|
+
model: 'openai/gpt-5.1',
|
|
458
|
+
agents: { dataAgent },
|
|
459
|
+
memory: new Memory(),
|
|
460
|
+
})
|
|
461
|
+
|
|
462
|
+
// When supervisor delegates to dataAgent and tool requires approval
|
|
463
|
+
const stream = await supervisorAgent.stream('Find user with ID 12345')
|
|
464
|
+
|
|
465
|
+
for await (const chunk of stream.fullStream) {
|
|
466
|
+
if (chunk.type === 'tool-call-approval') {
|
|
467
|
+
console.log('Tool requires approval:', chunk.payload.toolName)
|
|
468
|
+
console.log('Arguments:', chunk.payload.args)
|
|
469
|
+
|
|
470
|
+
// Approve the tool call
|
|
471
|
+
const resumeStream = await supervisorAgent.approveToolCall({
|
|
472
|
+
runId: stream.runId,
|
|
473
|
+
toolCallId: chunk.payload.toolCallId,
|
|
474
|
+
})
|
|
475
|
+
|
|
476
|
+
// Process resumed stream
|
|
477
|
+
for await (const resumeChunk of resumeStream.textStream) {
|
|
478
|
+
process.stdout.write(resumeChunk)
|
|
479
|
+
}
|
|
480
|
+
}
|
|
481
|
+
}
|
|
482
|
+
```
|
|
483
|
+
|
|
484
|
+
### Declining tool calls in supervisor pattern
|
|
485
|
+
|
|
486
|
+
Decline tool calls at the supervisor level by calling `declineToolCall`. The supervisor responds acknowledging the declined tool without executing it:
|
|
487
|
+
|
|
488
|
+
```typescript
|
|
489
|
+
for await (const chunk of stream.fullStream) {
|
|
490
|
+
if (chunk.type === 'tool-call-approval') {
|
|
491
|
+
console.log('Declining tool call:', chunk.payload.toolName)
|
|
492
|
+
|
|
493
|
+
// Decline the tool call
|
|
494
|
+
const declineStream = await supervisorAgent.declineToolCall({
|
|
495
|
+
runId: stream.runId,
|
|
496
|
+
toolCallId: chunk.payload.toolCallId,
|
|
497
|
+
})
|
|
498
|
+
|
|
499
|
+
// The supervisor responds acknowledging the declined tool
|
|
500
|
+
for await (const declineChunk of declineStream.textStream) {
|
|
501
|
+
process.stdout.write(declineChunk)
|
|
502
|
+
}
|
|
503
|
+
}
|
|
504
|
+
}
|
|
505
|
+
```
|
|
506
|
+
|
|
507
|
+
### Using suspend() in supervisor pattern
|
|
508
|
+
|
|
509
|
+
Tools can also use [`suspend()`](#approval-using-suspend) to pause execution and return context to the user. This approach works through the supervisor delegation chain the same way `requireApproval` does — the suspension surfaces at the supervisor level:
|
|
510
|
+
|
|
511
|
+
```typescript
|
|
512
|
+
const conditionalTool = createTool({
|
|
513
|
+
id: 'conditional-operation',
|
|
514
|
+
description: 'Performs an operation that may require confirmation',
|
|
515
|
+
inputSchema: z.object({
|
|
516
|
+
operation: z.string(),
|
|
517
|
+
}),
|
|
518
|
+
suspendSchema: z.object({
|
|
519
|
+
message: z.string(),
|
|
520
|
+
}),
|
|
521
|
+
resumeSchema: z.object({
|
|
522
|
+
confirmed: z.boolean(),
|
|
523
|
+
}),
|
|
524
|
+
execute: async (input, context) => {
|
|
525
|
+
const { resumeData } = context?.agent ?? {}
|
|
526
|
+
|
|
527
|
+
if (!resumeData?.confirmed) {
|
|
528
|
+
return context?.agent?.suspend({
|
|
529
|
+
message: `Confirm: ${input.operation}?`,
|
|
530
|
+
})
|
|
531
|
+
}
|
|
532
|
+
|
|
533
|
+
// Proceed with operation
|
|
534
|
+
return await performOperation(input.operation)
|
|
535
|
+
},
|
|
536
|
+
})
|
|
537
|
+
|
|
538
|
+
// When using this tool through a subagent in supervisor pattern
|
|
539
|
+
for await (const chunk of stream.fullStream) {
|
|
540
|
+
if (chunk.type === 'tool-call-suspended') {
|
|
541
|
+
console.log('Tool suspended:', chunk.payload.suspendPayload.message)
|
|
542
|
+
|
|
543
|
+
// Resume with confirmation
|
|
544
|
+
const resumeStream = await supervisorAgent.resumeStream(
|
|
545
|
+
{ confirmed: true },
|
|
546
|
+
{ runId: stream.runId },
|
|
547
|
+
)
|
|
548
|
+
|
|
549
|
+
for await (const resumeChunk of resumeStream.textStream) {
|
|
550
|
+
process.stdout.write(resumeChunk)
|
|
551
|
+
}
|
|
552
|
+
}
|
|
553
|
+
}
|
|
554
|
+
```
|
|
555
|
+
|
|
556
|
+
### Tool approval with generate()
|
|
557
|
+
|
|
558
|
+
Tool approval propagation also works with `generate()` in supervisor pattern:
|
|
559
|
+
|
|
560
|
+
```typescript
|
|
561
|
+
const output = await supervisorAgent.generate('Find user with ID 12345', {
|
|
562
|
+
maxSteps: 10,
|
|
563
|
+
})
|
|
564
|
+
|
|
565
|
+
if (output.finishReason === 'suspended') {
|
|
566
|
+
console.log('Tool requires approval:', output.suspendPayload.toolName)
|
|
567
|
+
|
|
568
|
+
// Approve
|
|
569
|
+
const result = await supervisorAgent.approveToolCallGenerate({
|
|
570
|
+
runId: output.runId,
|
|
571
|
+
toolCallId: output.suspendPayload.toolCallId,
|
|
572
|
+
})
|
|
573
|
+
|
|
574
|
+
console.log('Final result:', result.text)
|
|
575
|
+
}
|
|
576
|
+
```
|
|
577
|
+
|
|
578
|
+
### Multi-level delegation
|
|
579
|
+
|
|
580
|
+
Tool approvals propagate through multiple levels of delegation. For example, if a supervisor delegates to subagent A, which in turn delegates to subagent B that has a tool with `requireApproval: true`, the approval request still surfaces at the top-level supervisor. You handle the approval or decline there, and the result flows back down through the entire delegation chain to the tool that requested it.
|
|
581
|
+
|
|
582
|
+
## Related
|
|
583
|
+
|
|
584
|
+
- [Using Tools](https://mastra.ai/docs/agents/using-tools)
|
|
585
|
+
- [Agent Overview](https://mastra.ai/docs/agents/overview)
|
|
586
|
+
- [Tools Overview](https://mastra.ai/docs/mcp/overview)
|
|
587
|
+
- [Agent Memory](https://mastra.ai/docs/agents/agent-memory)
|
|
588
|
+
- [Request Context](https://mastra.ai/docs/server/request-context)
|