@mastra/mcp-docs-server 1.1.28 → 1.1.29-alpha.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/docs/agents/background-tasks.md +242 -0
- package/.docs/docs/agents/channels.md +2 -1
- package/.docs/docs/agents/supervisor-agents.md +35 -4
- package/.docs/docs/agents/using-tools.md +1 -0
- package/.docs/docs/browser/overview.md +1 -0
- package/.docs/docs/evals/custom-scorers.md +60 -0
- package/.docs/docs/streaming/background-task-streaming.md +80 -0
- package/.docs/docs/streaming/overview.md +3 -0
- package/.docs/docs/workspace/filesystem.md +3 -1
- package/.docs/docs/workspace/overview.md +1 -1
- package/.docs/docs/workspace/search.md +2 -2
- package/.docs/docs/workspace/skills.md +16 -16
- package/.docs/guides/build-your-ui/ai-sdk-ui.md +5 -3
- package/.docs/guides/guide/code-review-bot.md +2 -2
- package/.docs/guides/guide/dev-assistant.md +4 -4
- package/.docs/guides/guide/slack-assistant.md +191 -0
- package/.docs/models/gateways/azure-openai.md +25 -25
- package/.docs/models/gateways/mastra.md +64 -0
- package/.docs/models/gateways/netlify.md +5 -1
- package/.docs/models/gateways/openrouter.md +8 -1
- package/.docs/models/gateways/vercel.md +7 -1
- package/.docs/models/gateways.md +1 -0
- package/.docs/models/index.md +4 -4
- package/.docs/models/providers/abliteration-ai.md +71 -0
- package/.docs/models/providers/alibaba-cn.md +4 -1
- package/.docs/models/providers/alibaba.md +6 -1
- package/.docs/models/providers/deepseek.md +6 -4
- package/.docs/models/providers/huggingface.md +2 -1
- package/.docs/models/providers/llmgateway.md +4 -1
- package/.docs/models/providers/novita-ai.md +4 -1
- package/.docs/models/providers/nvidia.md +3 -1
- package/.docs/models/providers/ollama-cloud.md +3 -1
- package/.docs/models/providers/opencode-go.md +20 -18
- package/.docs/models/providers/opencode.md +3 -2
- package/.docs/models/providers/poe.md +3 -1
- package/.docs/models/providers/togetherai.md +2 -1
- package/.docs/models/providers/xiaomi-token-plan-ams.md +9 -7
- package/.docs/models/providers/xiaomi-token-plan-cn.md +9 -7
- package/.docs/models/providers/xiaomi-token-plan-sgp.md +9 -7
- package/.docs/models/providers/xiaomi.md +4 -2
- package/.docs/models/providers/zai-coding-plan.md +11 -20
- package/.docs/models/providers/zhipuai-coding-plan.md +11 -21
- package/.docs/models/providers.md +1 -0
- package/.docs/reference/client-js/agents.md +44 -0
- package/.docs/reference/configuration.md +63 -0
- package/.docs/reference/evals/create-scorer.md +2 -0
- package/.docs/reference/evals/filter-run.md +117 -0
- package/.docs/reference/index.md +3 -0
- package/.docs/reference/memory/clone-utilities.md +4 -2
- package/.docs/reference/memory/cloneThread.md +4 -2
- package/.docs/reference/processors/skill-search-processor.md +1 -1
- package/.docs/reference/server/routes.md +9 -8
- package/.docs/reference/streaming/ChunkType.md +140 -0
- package/.docs/reference/streaming/agents/streamUntilIdle.md +94 -0
- package/.docs/reference/workspace/azure-blob-filesystem.md +219 -0
- package/.docs/reference/workspace/gcs-filesystem.md +1 -0
- package/.docs/reference/workspace/s3-filesystem.md +1 -0
- package/.docs/reference/workspace/workspace-class.md +1 -1
- package/CHANGELOG.md +42 -0
- package/package.json +4 -4
|
@@ -36,6 +36,69 @@ export const mastra = new Mastra({
|
|
|
36
36
|
})
|
|
37
37
|
```
|
|
38
38
|
|
|
39
|
+
### backgroundTasks
|
|
40
|
+
|
|
41
|
+
**Type:** `BackgroundTaskManagerConfig`
|
|
42
|
+
|
|
43
|
+
Enables and configures the background task manager. When enabled, agents can dispatch long-running tool calls (including subagent invocations) to run asynchronously while the agentic loop continues. Tasks are persisted, so a configured `storage` backend is required.
|
|
44
|
+
|
|
45
|
+
Visit the [Background tasks documentation](https://mastra.ai/docs/agents/background-tasks) to learn more.
|
|
46
|
+
|
|
47
|
+
```typescript
|
|
48
|
+
import { Mastra } from '@mastra/core'
|
|
49
|
+
import { LibSQLStore } from '@mastra/libsql'
|
|
50
|
+
|
|
51
|
+
export const mastra = new Mastra({
|
|
52
|
+
storage: new LibSQLStore({
|
|
53
|
+
id: 'mastra-storage',
|
|
54
|
+
url: 'file:./mastra.db',
|
|
55
|
+
}),
|
|
56
|
+
backgroundTasks: {
|
|
57
|
+
enabled: true,
|
|
58
|
+
globalConcurrency: 10,
|
|
59
|
+
perAgentConcurrency: 5,
|
|
60
|
+
backpressure: 'queue',
|
|
61
|
+
defaultTimeoutMs: 300_000,
|
|
62
|
+
},
|
|
63
|
+
})
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
**enabled** (`boolean`): Whether background tasks are enabled. The manager only initializes when this is true and a storage backend is configured. (Default: `false`)
|
|
67
|
+
|
|
68
|
+
**globalConcurrency** (`number`): Maximum number of background tasks running concurrently across all agents. (Default: `10`)
|
|
69
|
+
|
|
70
|
+
**perAgentConcurrency** (`number`): Maximum number of background tasks running concurrently for a single agent. (Default: `5`)
|
|
71
|
+
|
|
72
|
+
**backpressure** (`'queue' | 'reject' | 'fallback-sync'`): Behavior when a concurrency limit is reached. 'queue' waits for a slot, 'reject' throws on enqueue, 'fallback-sync' runs the tool synchronously in the agentic loop instead. (Default: `'queue'`)
|
|
73
|
+
|
|
74
|
+
**defaultTimeoutMs** (`number`): Default per-task timeout in milliseconds. Can be overridden per-tool or per-call. (Default: `300000`)
|
|
75
|
+
|
|
76
|
+
**defaultRetries** (`RetryConfig`): Default retry policy applied to tasks that fail.
|
|
77
|
+
|
|
78
|
+
**defaultRetries.maxRetries** (`number`): Maximum retry attempts before the task is marked failed.
|
|
79
|
+
|
|
80
|
+
**defaultRetries.retryDelayMs** (`number`): Delay between retries in milliseconds.
|
|
81
|
+
|
|
82
|
+
**defaultRetries.backoffMultiplier** (`number`): Multiplier applied to retryDelayMs on each subsequent attempt.
|
|
83
|
+
|
|
84
|
+
**defaultRetries.maxRetryDelayMs** (`number`): Upper bound on the retry delay regardless of backoff.
|
|
85
|
+
|
|
86
|
+
**defaultRetries.retryableErrors** (`(error: Error) => boolean`): Predicate that decides whether a given error should be retried. Default: retry all errors.
|
|
87
|
+
|
|
88
|
+
**cleanup** (`CleanupConfig`): Controls how long task records are kept and how often the cleanup process runs.
|
|
89
|
+
|
|
90
|
+
**cleanup.completedTtlMs** (`number`): How long to keep completed task records, in milliseconds. Default: 1 hour.
|
|
91
|
+
|
|
92
|
+
**cleanup.failedTtlMs** (`number`): How long to keep failed task records, in milliseconds. Default: 24 hours.
|
|
93
|
+
|
|
94
|
+
**cleanup.cleanupIntervalMs** (`number`): How often the cleanup process runs, in milliseconds. Default: 1 minute.
|
|
95
|
+
|
|
96
|
+
**waitTimeoutMs** (`number`): How long the agentic loop waits for a background task to complete before moving on. If a task has not finished within this time, the loop proceeds without setting isContinued. Default: undefined (do not wait). Can be overridden per-agent or per-tool.
|
|
97
|
+
|
|
98
|
+
**onTaskComplete** (`(task: BackgroundTask) => void | Promise<void>`): Global callback invoked when any background task completes successfully. Fires in addition to per-tool and per-agent callbacks.
|
|
99
|
+
|
|
100
|
+
**onTaskFailed** (`(task: BackgroundTask) => void | Promise<void>`): Global callback invoked when any background task fails. Fires in addition to per-tool and per-agent callbacks.
|
|
101
|
+
|
|
39
102
|
### deployer
|
|
40
103
|
|
|
41
104
|
**Type:** `MastraDeployer`
|
|
@@ -51,6 +51,8 @@ const scorer = createScorer({
|
|
|
51
51
|
|
|
52
52
|
**type** (`string`): Type specification for input/output. Use 'agent' for automatic agent types. For custom types, use the generic approach instead.
|
|
53
53
|
|
|
54
|
+
**prepareRun** (`(run: ScorerRun) => ScorerRun | Promise<ScorerRun>`): Transform the scorer run data before the pipeline executes. Use this to filter messages, limit context size, or drop fields the scorer doesn't need. The \[\`filterRun()\`]\(/reference/evals/filter-run) utility creates this function from declarative options. Can be async.
|
|
55
|
+
|
|
54
56
|
This function returns a scorer builder that you can chain step methods onto. See the [MastraScorer reference](https://mastra.ai/reference/evals/mastra-scorer) for details on the `.run()` method and its input/output.
|
|
55
57
|
|
|
56
58
|
The judge only runs for steps defined as **prompt objects** (`preprocess`, `analyze`, `generateScore`, `generateReason` in prompt mode). If you use function steps only, the judge is never called and there is no LLM output to inspect. In that case, any score/reason must be produced by your functions.
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
# filterRun()
|
|
2
|
+
|
|
3
|
+
Creates a `prepareRun` function from declarative options. Pass the result to `createScorer()` to filter messages, limit context size, and drop unnecessary fields before the scorer pipeline runs.
|
|
4
|
+
|
|
5
|
+
Use [`filterRun()`](#usage-example) for declarative filtering. Write a custom `prepareRun` function directly when you need imperative logic that `filterRun()` doesn't cover. See [Custom scorers: input filtering](https://mastra.ai/docs/evals/custom-scorers) for more.
|
|
6
|
+
|
|
7
|
+
## Usage example
|
|
8
|
+
|
|
9
|
+
The following example creates a scorer that only sees tool invocations and text messages, limited to the 20 most recent context messages:
|
|
10
|
+
|
|
11
|
+
```typescript
|
|
12
|
+
import { createScorer, filterRun } from '@mastra/core/evals'
|
|
13
|
+
|
|
14
|
+
const toolScorer = createScorer({
|
|
15
|
+
id: 'tool-usage',
|
|
16
|
+
description: 'Evaluates tool usage patterns',
|
|
17
|
+
type: 'agent',
|
|
18
|
+
prepareRun: filterRun({
|
|
19
|
+
partTypes: ['tool-invocation', 'text'],
|
|
20
|
+
maxRememberedMessages: 20,
|
|
21
|
+
}),
|
|
22
|
+
}).generateScore(({ run }) => {
|
|
23
|
+
// run.input.rememberedMessages contains only tool and text messages
|
|
24
|
+
// run.output contains only tool and text messages
|
|
25
|
+
return 1
|
|
26
|
+
})
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
### Filter by tool name
|
|
30
|
+
|
|
31
|
+
Keep only messages involving specific tools:
|
|
32
|
+
|
|
33
|
+
```typescript
|
|
34
|
+
import { createScorer, filterRun } from '@mastra/core/evals'
|
|
35
|
+
|
|
36
|
+
const fileEditScorer = createScorer({
|
|
37
|
+
id: 'file-edit-quality',
|
|
38
|
+
description: 'Evaluates file editing patterns',
|
|
39
|
+
type: 'agent',
|
|
40
|
+
prepareRun: filterRun({
|
|
41
|
+
toolNames: ['write_file', 'string_replace_lsp', 'view'],
|
|
42
|
+
}),
|
|
43
|
+
}).generateScore(({ run }) => {
|
|
44
|
+
// Only messages with these tool calls remain
|
|
45
|
+
return 1
|
|
46
|
+
})
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
### Drop fields
|
|
50
|
+
|
|
51
|
+
Remove fields the scorer doesn't need:
|
|
52
|
+
|
|
53
|
+
```typescript
|
|
54
|
+
import { createScorer, filterRun } from '@mastra/core/evals'
|
|
55
|
+
|
|
56
|
+
const simpleScorer = createScorer({
|
|
57
|
+
id: 'response-length',
|
|
58
|
+
description: 'Checks response length',
|
|
59
|
+
type: 'agent',
|
|
60
|
+
prepareRun: filterRun({
|
|
61
|
+
dropRequestContext: true,
|
|
62
|
+
dropExpectedTrajectory: true,
|
|
63
|
+
dropGroundTruth: true,
|
|
64
|
+
maxOutputMessages: 5,
|
|
65
|
+
}),
|
|
66
|
+
}).generateScore(({ run }) => {
|
|
67
|
+
return run.output.length > 0 ? 1 : 0
|
|
68
|
+
})
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
## Parameters
|
|
72
|
+
|
|
73
|
+
**options** (`FilterRunOptions`): Configuration object that controls what data the scorer receives.
|
|
74
|
+
|
|
75
|
+
**options.partTypes** (`MastraPartType[]`): Keep only messages whose parts match these types. Each entry is prefix-matched against the message part's type. Plain text messages (no tool invocations) are always kept unless explicitly excluded. System messages and tagged system messages are never filtered.
|
|
76
|
+
|
|
77
|
+
**options.toolNames** (`string[]`): Keep only tool-invocation messages for these specific tools. Each entry is prefix-matched against the tool name. Non-tool messages (text, data) are unaffected.
|
|
78
|
+
|
|
79
|
+
**options.maxRememberedMessages** (`number`): Maximum number of messages to keep in remembered messages (context). Takes from the end (most recent). Applied after type and tool filtering.
|
|
80
|
+
|
|
81
|
+
**options.maxOutputMessages** (`number`): Maximum number of messages to keep in the output. Takes from the end. Applied after type and tool filtering.
|
|
82
|
+
|
|
83
|
+
**options.dropRequestContext** (`boolean`): Remove request context from the run entirely.
|
|
84
|
+
|
|
85
|
+
**options.dropExpectedTrajectory** (`boolean`): Remove expected trajectory from the run.
|
|
86
|
+
|
|
87
|
+
**options.dropGroundTruth** (`boolean`): Remove ground truth from the run.
|
|
88
|
+
|
|
89
|
+
**Returns:** `(run: ScorerRun) => ScorerRun` — A function suitable for the `prepareRun` option on [`createScorer()`](https://mastra.ai/reference/evals/create-scorer).
|
|
90
|
+
|
|
91
|
+
## Part types
|
|
92
|
+
|
|
93
|
+
The `partTypes` option accepts `MastraPartType` values. Each value is prefix-matched, so `'data-'` matches all data part types.
|
|
94
|
+
|
|
95
|
+
| Type | Description |
|
|
96
|
+
| ------------------- | -------------------------------------------- |
|
|
97
|
+
| `'text'` | Text content parts |
|
|
98
|
+
| `'tool-invocation'` | Tool call and result parts |
|
|
99
|
+
| `'reasoning'` | Chain-of-thought reasoning parts |
|
|
100
|
+
| `'step-start'` | Step marker parts |
|
|
101
|
+
| `'image'` | Image parts |
|
|
102
|
+
| `'file'` | File parts |
|
|
103
|
+
| `'source'` | Source reference parts |
|
|
104
|
+
| `'source-document'` | Source document parts |
|
|
105
|
+
| `'data-'` | All data parts (matches any `data-*` prefix) |
|
|
106
|
+
| `'data-om-'` | Observational memory data parts |
|
|
107
|
+
| `'data-workspace-'` | Workspace data parts |
|
|
108
|
+
| `'data-sandbox-'` | Sandbox data parts |
|
|
109
|
+
| `'data-tool-'` | Tool-related data parts |
|
|
110
|
+
|
|
111
|
+
## Filtering behavior
|
|
112
|
+
|
|
113
|
+
- **Part type filtering** applies to both `input.rememberedMessages` and `output` when they contain agent message arrays.
|
|
114
|
+
- **Tool name filtering** only affects messages that contain tool invocations. Text-only messages pass through.
|
|
115
|
+
- **System messages** (`systemMessages`, `taggedSystemMessages`) are never filtered, regardless of `partTypes` or `toolNames`.
|
|
116
|
+
- **Message limits** (`maxRememberedMessages`, `maxOutputMessages`) apply after type and tool filtering, taking the most recent messages.
|
|
117
|
+
- When both `partTypes` and `toolNames` are set, a message must satisfy both filters to be kept.
|
package/.docs/reference/index.md
CHANGED
|
@@ -96,6 +96,7 @@ The Reference section provides documentation of Mastra's API, including paramete
|
|
|
96
96
|
- [MastraEditor Class](https://mastra.ai/reference/editor/mastra-editor)
|
|
97
97
|
- [ToolProvider](https://mastra.ai/reference/editor/tool-provider)
|
|
98
98
|
- [createScorer()](https://mastra.ai/reference/evals/create-scorer)
|
|
99
|
+
- [filterRun()](https://mastra.ai/reference/evals/filter-run)
|
|
99
100
|
- [MastraScorer](https://mastra.ai/reference/evals/mastra-scorer)
|
|
100
101
|
- [runEvals()](https://mastra.ai/reference/evals/run-evals)
|
|
101
102
|
- [Scorer Utils](https://mastra.ai/reference/evals/scorer-utils)
|
|
@@ -208,6 +209,7 @@ The Reference section provides documentation of Mastra's API, including paramete
|
|
|
208
209
|
- [MastraModelOutput](https://mastra.ai/reference/streaming/agents/MastraModelOutput)
|
|
209
210
|
- [.stream()](https://mastra.ai/reference/streaming/agents/stream)
|
|
210
211
|
- [.streamLegacy()](https://mastra.ai/reference/streaming/agents/streamLegacy)
|
|
212
|
+
- [.streamUntilIdle()](https://mastra.ai/reference/streaming/agents/streamUntilIdle)
|
|
211
213
|
- [.observeStream()](https://mastra.ai/reference/streaming/workflows/observeStream)
|
|
212
214
|
- [.resumeStream()](https://mastra.ai/reference/streaming/workflows/resumeStream)
|
|
213
215
|
- [.stream()](https://mastra.ai/reference/streaming/workflows/stream)
|
|
@@ -285,6 +287,7 @@ The Reference section provides documentation of Mastra's API, including paramete
|
|
|
285
287
|
- [.startAsync()](https://mastra.ai/reference/workflows/run-methods/startAsync)
|
|
286
288
|
- [.timeTravel()](https://mastra.ai/reference/workflows/run-methods/timeTravel)
|
|
287
289
|
- [AgentFSFilesystem](https://mastra.ai/reference/workspace/agentfs-filesystem)
|
|
290
|
+
- [AzureBlobFilesystem](https://mastra.ai/reference/workspace/azure-blob-filesystem)
|
|
288
291
|
- [BlaxelSandbox](https://mastra.ai/reference/workspace/blaxel-sandbox)
|
|
289
292
|
- [DaytonaSandbox](https://mastra.ai/reference/workspace/daytona-sandbox)
|
|
290
293
|
- [DockerSandbox](https://mastra.ai/reference/workspace/docker-sandbox)
|
|
@@ -153,8 +153,10 @@ async function manageClones() {
|
|
|
153
153
|
|
|
154
154
|
// Have a conversation...
|
|
155
155
|
await agent.generate("Hello! Let's discuss project options.", {
|
|
156
|
-
|
|
157
|
-
|
|
156
|
+
memory: {
|
|
157
|
+
thread: originalThread.id,
|
|
158
|
+
resource: 'user-123',
|
|
159
|
+
},
|
|
158
160
|
})
|
|
159
161
|
|
|
160
162
|
// Create multiple branches (clones) to explore different paths
|
|
@@ -93,8 +93,10 @@ const { thread: dateFilteredClone } = await memory.cloneThread({
|
|
|
93
93
|
|
|
94
94
|
// Continue conversation on the cloned thread
|
|
95
95
|
const response = await agent.generate("Let's try a different approach", {
|
|
96
|
-
|
|
97
|
-
|
|
96
|
+
memory: {
|
|
97
|
+
thread: fullClone.id,
|
|
98
|
+
resource: fullClone.resourceId,
|
|
99
|
+
},
|
|
98
100
|
})
|
|
99
101
|
```
|
|
100
102
|
|
|
@@ -4,14 +4,15 @@ Server adapters register these routes when you call `server.init()`. All routes
|
|
|
4
4
|
|
|
5
5
|
## Agents
|
|
6
6
|
|
|
7
|
-
| Method | Path | Description
|
|
8
|
-
| ------ | -------------------------------------------- |
|
|
9
|
-
| `GET` | `/api/agents` | List all agents
|
|
10
|
-
| `GET` | `/api/agents/:agentId` | Get agent by ID (supports version query params)
|
|
11
|
-
| `POST` | `/api/agents/:agentId/generate` | Generate agent response
|
|
12
|
-
| `POST` | `/api/agents/:agentId/stream` | Stream agent response
|
|
13
|
-
| `
|
|
14
|
-
| `
|
|
7
|
+
| Method | Path | Description |
|
|
8
|
+
| ------ | -------------------------------------------- | ------------------------------------------------ |
|
|
9
|
+
| `GET` | `/api/agents` | List all agents |
|
|
10
|
+
| `GET` | `/api/agents/:agentId` | Get agent by ID (supports version query params) |
|
|
11
|
+
| `POST` | `/api/agents/:agentId/generate` | Generate agent response |
|
|
12
|
+
| `POST` | `/api/agents/:agentId/stream` | Stream agent response |
|
|
13
|
+
| `POST` | `/api/agents/:agentId/resume-stream` | Resume a suspended agent stream with custom data |
|
|
14
|
+
| `GET` | `/api/agents/:agentId/tools` | List agent tools |
|
|
15
|
+
| `POST` | `/api/agents/:agentId/tools/:toolId/execute` | Execute agent tool |
|
|
15
16
|
|
|
16
17
|
### Get agent query parameters
|
|
17
18
|
|
|
@@ -398,6 +398,146 @@ Contains output from workflow step execution, used primarily for usage tracking
|
|
|
398
398
|
|
|
399
399
|
**payload.output** (`ChunkType`): Nested chunk data from step execution, typically containing finish events or other step results
|
|
400
400
|
|
|
401
|
+
## Background task chunks
|
|
402
|
+
|
|
403
|
+
Emitted when a tool call is dispatched as a [background task](https://mastra.ai/docs/agents/background-tasks) and `streamUntilIdle()` is used.
|
|
404
|
+
|
|
405
|
+
### background-task-started
|
|
406
|
+
|
|
407
|
+
Emitted when a tool call is enqueued as a background task and assigned a `taskId`.
|
|
408
|
+
|
|
409
|
+
**type** (`"background-task-started"`): Chunk type identifier
|
|
410
|
+
|
|
411
|
+
**payload** (`BackgroundTaskStartedPayload`): Identifies the newly enqueued task
|
|
412
|
+
|
|
413
|
+
**payload.taskId** (`string`): Unique identifier for the background task
|
|
414
|
+
|
|
415
|
+
**payload.toolName** (`string`): Name of the tool being executed
|
|
416
|
+
|
|
417
|
+
**payload.toolCallId** (`string`): Tool-call ID from the originating LLM tool call
|
|
418
|
+
|
|
419
|
+
### background-task-running
|
|
420
|
+
|
|
421
|
+
Emitted when a worker picks up the task and execution begins.
|
|
422
|
+
|
|
423
|
+
**type** (`"background-task-running"`): Chunk type identifier
|
|
424
|
+
|
|
425
|
+
**payload** (`BackgroundTaskRunningPayload`): Details about the running task
|
|
426
|
+
|
|
427
|
+
**payload.taskId** (`string`): Unique identifier for the background task
|
|
428
|
+
|
|
429
|
+
**payload.toolName** (`string`): Name of the tool being executed
|
|
430
|
+
|
|
431
|
+
**payload.toolCallId** (`string`): Tool-call ID from the originating LLM tool call
|
|
432
|
+
|
|
433
|
+
**payload.runId** (`string`): Run ID of the agent that dispatched the task
|
|
434
|
+
|
|
435
|
+
**payload.agentId** (`string`): ID of the agent that dispatched the task
|
|
436
|
+
|
|
437
|
+
**payload.startedAt** (`Date`): Timestamp at which execution started
|
|
438
|
+
|
|
439
|
+
**payload.args** (`Record<string, unknown>`): Arguments passed to the tool's execute function
|
|
440
|
+
|
|
441
|
+
### background-task-progress
|
|
442
|
+
|
|
443
|
+
Periodic snapshot of how many background tasks are currently running across the agent.
|
|
444
|
+
|
|
445
|
+
**type** (`"background-task-progress"`): Chunk type identifier
|
|
446
|
+
|
|
447
|
+
**payload** (`BackgroundTaskProgressPayload`): Aggregate progress for all running tasks
|
|
448
|
+
|
|
449
|
+
**payload.taskIds** (`string[]`): IDs of all currently running background tasks
|
|
450
|
+
|
|
451
|
+
**payload.runningCount** (`number`): Number of background tasks currently running
|
|
452
|
+
|
|
453
|
+
**payload.elapsedMs** (`number`): Milliseconds elapsed since the agent run started
|
|
454
|
+
|
|
455
|
+
### background-task-output
|
|
456
|
+
|
|
457
|
+
A streamed output chunk emitted by the task's `execute` function. Wraps an inner [`tool-output`](#tool-output) chunk.
|
|
458
|
+
|
|
459
|
+
**type** (`"background-task-output"`): Chunk type identifier
|
|
460
|
+
|
|
461
|
+
**payload** (`BackgroundTaskOutputPayload`): Streamed output from the running task
|
|
462
|
+
|
|
463
|
+
**payload.taskId** (`string`): Unique identifier for the background task
|
|
464
|
+
|
|
465
|
+
**payload.toolName** (`string`): Name of the tool being executed
|
|
466
|
+
|
|
467
|
+
**payload.toolCallId** (`string`): Tool-call ID from the originating LLM tool call
|
|
468
|
+
|
|
469
|
+
**payload.runId** (`string`): Run ID of the agent that dispatched the task
|
|
470
|
+
|
|
471
|
+
**payload.agentId** (`string`): ID of the agent that dispatched the task
|
|
472
|
+
|
|
473
|
+
**payload.payload** (`ToolOutputChunk`): Inner tool-output chunk produced by the task
|
|
474
|
+
|
|
475
|
+
### background-task-completed
|
|
476
|
+
|
|
477
|
+
Emitted when the task finishes successfully. Triggers a continuation turn when consumed by [`Agent.streamUntilIdle()`](https://mastra.ai/reference/streaming/agents/streamUntilIdle).
|
|
478
|
+
|
|
479
|
+
**type** (`"background-task-completed"`): Chunk type identifier
|
|
480
|
+
|
|
481
|
+
**payload** (`BackgroundTaskResultPayload`): The completed task's result
|
|
482
|
+
|
|
483
|
+
**payload.taskId** (`string`): Unique identifier for the background task
|
|
484
|
+
|
|
485
|
+
**payload.toolName** (`string`): Name of the tool that was executed
|
|
486
|
+
|
|
487
|
+
**payload.toolCallId** (`string`): Tool-call ID from the originating LLM tool call
|
|
488
|
+
|
|
489
|
+
**payload.agentId** (`string`): ID of the agent that dispatched the task
|
|
490
|
+
|
|
491
|
+
**payload.runId** (`string`): Run ID of the agent that dispatched the task
|
|
492
|
+
|
|
493
|
+
**payload.result** (`unknown`): The tool's resolved return value
|
|
494
|
+
|
|
495
|
+
**payload.completedAt** (`Date`): Timestamp at which the task completed
|
|
496
|
+
|
|
497
|
+
**payload.isError** (`boolean`): True when the tool returned an error result rather than throwing
|
|
498
|
+
|
|
499
|
+
### background-task-failed
|
|
500
|
+
|
|
501
|
+
Emitted when the task throws or times out. Triggers a continuation turn when consumed by [`Agent.streamUntilIdle()`](https://mastra.ai/reference/streaming/agents/streamUntilIdle).
|
|
502
|
+
|
|
503
|
+
**type** (`"background-task-failed"`): Chunk type identifier
|
|
504
|
+
|
|
505
|
+
**payload** (`BackgroundTaskFailedPayload`): Failure details for the task
|
|
506
|
+
|
|
507
|
+
**payload.taskId** (`string`): Unique identifier for the background task
|
|
508
|
+
|
|
509
|
+
**payload.toolName** (`string`): Name of the tool that was executed
|
|
510
|
+
|
|
511
|
+
**payload.toolCallId** (`string`): Tool-call ID from the originating LLM tool call
|
|
512
|
+
|
|
513
|
+
**payload.runId** (`string`): Run ID of the agent that dispatched the task
|
|
514
|
+
|
|
515
|
+
**payload.agentId** (`string`): ID of the agent that dispatched the task
|
|
516
|
+
|
|
517
|
+
**payload.error** (`{ message: string }`): Error details thrown by the task
|
|
518
|
+
|
|
519
|
+
**payload.completedAt** (`Date`): Timestamp at which the task failed
|
|
520
|
+
|
|
521
|
+
### background-task-cancelled
|
|
522
|
+
|
|
523
|
+
Emitted when the task is cancelled before completing. Triggers a continuation turn when consumed by [`Agent.streamUntilIdle()`](https://mastra.ai/reference/streaming/agents/streamUntilIdle).
|
|
524
|
+
|
|
525
|
+
**type** (`"background-task-cancelled"`): Chunk type identifier
|
|
526
|
+
|
|
527
|
+
**payload** (`BackgroundTaskCancelledPayload`): Cancellation details for the task
|
|
528
|
+
|
|
529
|
+
**payload.taskId** (`string`): Unique identifier for the background task
|
|
530
|
+
|
|
531
|
+
**payload.toolName** (`string`): Name of the tool that was executed
|
|
532
|
+
|
|
533
|
+
**payload.toolCallId** (`string`): Tool-call ID from the originating LLM tool call
|
|
534
|
+
|
|
535
|
+
**payload.runId** (`string`): Run ID of the agent that dispatched the task
|
|
536
|
+
|
|
537
|
+
**payload.agentId** (`string`): ID of the agent that dispatched the task
|
|
538
|
+
|
|
539
|
+
**payload.completedAt** (`Date`): Timestamp at which the task was cancelled
|
|
540
|
+
|
|
401
541
|
## Metadata and special chunks
|
|
402
542
|
|
|
403
543
|
### response-metadata
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
# Agent.streamUntilIdle()
|
|
2
|
+
|
|
3
|
+
**Added in:** `@mastra/core@1.28.0`
|
|
4
|
+
|
|
5
|
+
`streamUntilIdle()` streams an agent's response and keeps the stream open until every background task dispatched during the run completes. When a task finishes, its result is written to memory and the agentic loop re-enters automatically so the LLM can react to it. The stream closes once no tasks are running and no completions are queued.
|
|
6
|
+
|
|
7
|
+
Use it when the agent dispatches background tasks (typically long-running tools or subagents) and you want a single stream that spans the initial response **plus** every continuation triggered by a task completion. For foreground-only runs, or if you prefer to manage the continuation yourself (manually prompting the agent to process the result), use [`Agent.stream()`](https://mastra.ai/reference/streaming/agents/stream).
|
|
8
|
+
|
|
9
|
+
## Usage example
|
|
10
|
+
|
|
11
|
+
```ts
|
|
12
|
+
const stream = await agent.streamUntilIdle('Research solana for me', {
|
|
13
|
+
memory: { thread: 't1', resource: 'u1' },
|
|
14
|
+
})
|
|
15
|
+
|
|
16
|
+
for await (const chunk of stream.fullStream) {
|
|
17
|
+
// chunks from the initial turn AND any continuation turns triggered by
|
|
18
|
+
// background task completions flow through here
|
|
19
|
+
}
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
> **Info:** `streamUntilIdle()` requires both a [`BackgroundTaskManager`](https://mastra.ai/reference/configuration) and a [memory](https://mastra.ai/docs/memory/overview) backend. Without either, it falls through to a plain `agent.stream()` call.
|
|
23
|
+
|
|
24
|
+
## Parameters
|
|
25
|
+
|
|
26
|
+
**messages** (`string | string[] | CoreMessage[] | AiMessageType[] | UIMessageWithMetadata[]`): The messages to send to the agent. Can be a single string, array of strings, or structured message objects.
|
|
27
|
+
|
|
28
|
+
**options** (`AgentExecutionOptions<Output> & { maxIdleMs?: number }`): Accepts every option that `Agent.stream()` accepts, plus `maxIdleMs`. See the `Agent.stream()` reference for the full list.
|
|
29
|
+
|
|
30
|
+
**options.maxIdleMs** (`number`): Closes the outer stream after this many ms of idleness between turns. The timer only runs while the wrapper is between turns, so a slow first token does not close the stream. Default: 5 minutes.
|
|
31
|
+
|
|
32
|
+
**options.memory** (`{ thread?: string | { id: string }; resource?: string }`): Memory thread and resource for the run. Required for continuations to write background task results back into the conversation.
|
|
33
|
+
|
|
34
|
+
**options.structuredOutput** (`PublicStructuredOutputOptions<Output>`): Schema-based structured output. Same shape as Agent.stream(). Note that aggregate properties resolve against the first turn only.
|
|
35
|
+
|
|
36
|
+
For every other option (`maxSteps`, `modelSettings`, `toolChoice`, `outputProcessors`, `onFinish`, `onChunk`, etc.), see the [`Agent.stream()` parameters](https://mastra.ai/reference/streaming/agents/stream). `streamUntilIdle()` forwards them to the initial turn.
|
|
37
|
+
|
|
38
|
+
## Returns
|
|
39
|
+
|
|
40
|
+
**stream** (`MastraModelOutput<Output>`): A MastraModelOutput where fullStream spans the initial turn plus every auto-continuation. Aggregate properties (text, toolCalls, toolResults, finishReason, messageList, getFullOutput()) resolve against the first turn only.
|
|
41
|
+
|
|
42
|
+
### Aggregate properties caveat
|
|
43
|
+
|
|
44
|
+
`streamUntilIdle()` returns a proxy over the first turn's `MastraModelOutput`. Only `fullStream` is replaced with a combined stream that spans every continuation. Every other property — `text`, `toolCalls`, `toolResults`, `finishReason`, `messageList`, `getFullOutput()` — resolves against the **first turn's** internal buffer.
|
|
45
|
+
|
|
46
|
+
If you need an aggregate view across all continuations, consume `fullStream` yourself and accumulate.
|
|
47
|
+
|
|
48
|
+
## Continuation behavior
|
|
49
|
+
|
|
50
|
+
Internally, `streamUntilIdle()`:
|
|
51
|
+
|
|
52
|
+
1. Runs the initial turn via `agent.stream(...)` and pipes its `fullStream` into the outer stream.
|
|
53
|
+
2. Subscribes to background-task completion events for the resolved memory scope.
|
|
54
|
+
3. Queues each terminal event (`background-task-completed`, `background-task-failed`, `background-task-cancelled`) and, when the outer wrapper is idle between turns, re-invokes `agent.stream([], ...)` with a directive listing the just-completed `toolCallId`s. The continuation turn flows into the same outer stream.
|
|
55
|
+
4. Closes the outer stream once no tasks are running and no completions are queued.
|
|
56
|
+
|
|
57
|
+
## Extended usage example
|
|
58
|
+
|
|
59
|
+
### Cap idle time between turns
|
|
60
|
+
|
|
61
|
+
```ts
|
|
62
|
+
const stream = await agent.streamUntilIdle('Kick off the long jobs', {
|
|
63
|
+
memory: { thread: 't1', resource: 'u1' },
|
|
64
|
+
maxIdleMs: 60_000, // close the stream after 1 minute of idleness between turns
|
|
65
|
+
})
|
|
66
|
+
|
|
67
|
+
for await (const chunk of stream.fullStream) {
|
|
68
|
+
if (chunk.type === 'background-task-completed') {
|
|
69
|
+
console.log('Task complete:', chunk.payload.taskId)
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
### Aggregate text across continuations
|
|
75
|
+
|
|
76
|
+
```ts
|
|
77
|
+
const stream = await agent.streamUntilIdle('Research and summarize', {
|
|
78
|
+
memory: { thread: 't1', resource: 'u1' },
|
|
79
|
+
})
|
|
80
|
+
|
|
81
|
+
let fullText = ''
|
|
82
|
+
for await (const chunk of stream.fullStream) {
|
|
83
|
+
if (chunk.type === 'text-delta') {
|
|
84
|
+
fullText += chunk.payload.text
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
## Related
|
|
90
|
+
|
|
91
|
+
- [Background tasks](https://mastra.ai/docs/agents/background-tasks)
|
|
92
|
+
- [`Agent.stream()` reference](https://mastra.ai/reference/streaming/agents/stream)
|
|
93
|
+
- [backgroundTasks configuration reference](https://mastra.ai/reference/configuration)
|
|
94
|
+
- [Stream chunk types](https://mastra.ai/reference/streaming/ChunkType)
|