@mastra/mcp-docs-server 1.1.26-alpha.8 → 1.1.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/docs/agents/structured-output.md +22 -0
- package/.docs/docs/agents/supervisor-agents.md +18 -0
- package/.docs/docs/editor/overview.md +69 -0
- package/.docs/docs/memory/storage.md +1 -0
- package/.docs/docs/observability/tracing/exporters/langfuse.md +31 -0
- package/.docs/guides/deployment/netlify.md +16 -1
- package/.docs/guides/getting-started/next-js.md +0 -4
- package/.docs/guides/migrations/mastra-cloud.md +2 -2
- package/.docs/models/gateways/netlify.md +2 -3
- package/.docs/models/gateways/openrouter.md +3 -1
- package/.docs/models/index.md +1 -1
- package/.docs/models/providers/302ai.md +32 -1
- package/.docs/models/providers/berget.md +9 -12
- package/.docs/models/providers/cloudflare-workers-ai.md +2 -1
- package/.docs/models/providers/cortecs.md +2 -1
- package/.docs/models/providers/deepinfra.md +4 -1
- package/.docs/models/providers/digitalocean.md +116 -0
- package/.docs/models/providers/fireworks-ai.md +2 -1
- package/.docs/models/providers/helicone.md +1 -2
- package/.docs/models/providers/huggingface.md +2 -1
- package/.docs/models/providers/kilo.md +2 -1
- package/.docs/models/providers/kimi-for-coding.md +2 -1
- package/.docs/models/providers/llmgateway.md +59 -77
- package/.docs/models/providers/moonshotai-cn.md +3 -2
- package/.docs/models/providers/moonshotai.md +3 -2
- package/.docs/models/providers/nano-gpt.md +8 -1
- package/.docs/models/providers/nvidia.md +2 -1
- package/.docs/models/providers/ollama-cloud.md +2 -1
- package/.docs/models/providers/openai.md +1 -2
- package/.docs/models/providers/opencode-go.md +2 -1
- package/.docs/models/providers/opencode.md +4 -1
- package/.docs/models/providers/ovhcloud.md +4 -7
- package/.docs/models/providers/poe.md +2 -1
- package/.docs/models/providers/tencent-token-plan.md +71 -0
- package/.docs/models/providers/tencent-tokenhub.md +71 -0
- package/.docs/models/providers/wafer.ai.md +72 -0
- package/.docs/models/providers/zenmux.md +2 -1
- package/.docs/models/providers.md +4 -0
- package/.docs/reference/agents/generate.md +8 -0
- package/.docs/reference/client-js/workflows.md +12 -0
- package/.docs/reference/core/mastra-class.md +9 -1
- package/.docs/reference/deployer/cloudflare.md +14 -1
- package/.docs/reference/deployer/netlify.md +50 -2
- package/.docs/reference/harness/harness-class.md +72 -49
- package/.docs/reference/index.md +2 -0
- package/.docs/reference/observability/tracing/exporters/langfuse.md +2 -0
- package/.docs/reference/processors/prefill-error-handler.md +5 -5
- package/.docs/reference/storage/cloudflare-d1.md +42 -42
- package/.docs/reference/storage/redis.md +266 -0
- package/.docs/reference/streaming/agents/stream.md +8 -0
- package/.docs/reference/streaming/workflows/resumeStream.md +2 -0
- package/.docs/reference/tools/tavily.md +307 -0
- package/.docs/reference/workflows/run-methods/resume.md +24 -0
- package/.docs/reference/workflows/workflow-methods/foreach.md +14 -1
- package/CHANGELOG.md +71 -0
- package/package.json +10 -10
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Poe
|
|
2
2
|
|
|
3
|
-
Access
|
|
3
|
+
Access 118 Poe models through Mastra's model router. Authentication is handled automatically using the `POE_API_KEY` environment variable.
|
|
4
4
|
|
|
5
5
|
Learn more in the [Poe documentation](https://creator.poe.com/docs/external-applications/openai-compatible-api).
|
|
6
6
|
|
|
@@ -41,6 +41,7 @@ for await (const chunk of stream) {
|
|
|
41
41
|
| `poe/anthropic/claude-opus-4.1` | 197K | | | | | | $13 | $64 |
|
|
42
42
|
| `poe/anthropic/claude-opus-4.5` | 197K | | | | | | $4 | $21 |
|
|
43
43
|
| `poe/anthropic/claude-opus-4.6` | 983K | | | | | | $4 | $21 |
|
|
44
|
+
| `poe/anthropic/claude-opus-4.7` | 1.0M | | | | | | $4 | $21 |
|
|
44
45
|
| `poe/anthropic/claude-sonnet-3.7` | 197K | | | | | | $3 | $13 |
|
|
45
46
|
| `poe/anthropic/claude-sonnet-4` | 983K | | | | | | $3 | $13 |
|
|
46
47
|
| `poe/anthropic/claude-sonnet-4.5` | 983K | | | | | | $3 | $13 |
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
# Tencent Token Plan
|
|
2
|
+
|
|
3
|
+
Access 1 Tencent Token Plan model through Mastra's model router. Authentication is handled automatically using the `TENCENT_TOKEN_PLAN_API_KEY` environment variable.
|
|
4
|
+
|
|
5
|
+
Learn more in the [Tencent Token Plan documentation](https://cloud.tencent.com/document/product/1823/130060).
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
TENCENT_TOKEN_PLAN_API_KEY=your-api-key
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
```typescript
|
|
12
|
+
import { Agent } from "@mastra/core/agent";
|
|
13
|
+
|
|
14
|
+
const agent = new Agent({
|
|
15
|
+
id: "my-agent",
|
|
16
|
+
name: "My Agent",
|
|
17
|
+
instructions: "You are a helpful assistant",
|
|
18
|
+
model: "tencent-token-plan/hy3-preview"
|
|
19
|
+
});
|
|
20
|
+
|
|
21
|
+
// Generate a response
|
|
22
|
+
const response = await agent.generate("Hello!");
|
|
23
|
+
|
|
24
|
+
// Stream a response
|
|
25
|
+
const stream = await agent.stream("Tell me a story");
|
|
26
|
+
for await (const chunk of stream) {
|
|
27
|
+
console.log(chunk);
|
|
28
|
+
}
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
> **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [Tencent Token Plan documentation](https://cloud.tencent.com/document/product/1823/130060) for details.
|
|
32
|
+
|
|
33
|
+
## Models
|
|
34
|
+
|
|
35
|
+
| Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
|
|
36
|
+
| -------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
|
|
37
|
+
| `tencent-token-plan/hy3-preview` | 256K | | | | | | — | — |
|
|
38
|
+
|
|
39
|
+
## Advanced configuration
|
|
40
|
+
|
|
41
|
+
### Custom headers
|
|
42
|
+
|
|
43
|
+
```typescript
|
|
44
|
+
const agent = new Agent({
|
|
45
|
+
id: "custom-agent",
|
|
46
|
+
name: "custom-agent",
|
|
47
|
+
model: {
|
|
48
|
+
url: "https://api.lkeap.cloud.tencent.com/plan/v3",
|
|
49
|
+
id: "tencent-token-plan/hy3-preview",
|
|
50
|
+
apiKey: process.env.TENCENT_TOKEN_PLAN_API_KEY,
|
|
51
|
+
headers: {
|
|
52
|
+
"X-Custom-Header": "value"
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
});
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
### Dynamic model selection
|
|
59
|
+
|
|
60
|
+
```typescript
|
|
61
|
+
const agent = new Agent({
|
|
62
|
+
id: "dynamic-agent",
|
|
63
|
+
name: "Dynamic Agent",
|
|
64
|
+
model: ({ requestContext }) => {
|
|
65
|
+
const useAdvanced = requestContext.task === "complex";
|
|
66
|
+
return useAdvanced
|
|
67
|
+
? "tencent-token-plan/hy3-preview"
|
|
68
|
+
: "tencent-token-plan/hy3-preview";
|
|
69
|
+
}
|
|
70
|
+
});
|
|
71
|
+
```
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
# Tencent TokenHub
|
|
2
|
+
|
|
3
|
+
Access 1 Tencent TokenHub model through Mastra's model router. Authentication is handled automatically using the `TENCENT_TOKENHUB_API_KEY` environment variable.
|
|
4
|
+
|
|
5
|
+
Learn more in the [Tencent TokenHub documentation](https://cloud.tencent.com/document/product/1823/130050).
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
TENCENT_TOKENHUB_API_KEY=your-api-key
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
```typescript
|
|
12
|
+
import { Agent } from "@mastra/core/agent";
|
|
13
|
+
|
|
14
|
+
const agent = new Agent({
|
|
15
|
+
id: "my-agent",
|
|
16
|
+
name: "My Agent",
|
|
17
|
+
instructions: "You are a helpful assistant",
|
|
18
|
+
model: "tencent-tokenhub/hy3-preview"
|
|
19
|
+
});
|
|
20
|
+
|
|
21
|
+
// Generate a response
|
|
22
|
+
const response = await agent.generate("Hello!");
|
|
23
|
+
|
|
24
|
+
// Stream a response
|
|
25
|
+
const stream = await agent.stream("Tell me a story");
|
|
26
|
+
for await (const chunk of stream) {
|
|
27
|
+
console.log(chunk);
|
|
28
|
+
}
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
> **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [Tencent TokenHub documentation](https://cloud.tencent.com/document/product/1823/130050) for details.
|
|
32
|
+
|
|
33
|
+
## Models
|
|
34
|
+
|
|
35
|
+
| Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
|
|
36
|
+
| ------------------------------ | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
|
|
37
|
+
| `tencent-tokenhub/hy3-preview` | 256K | | | | | | — | — |
|
|
38
|
+
|
|
39
|
+
## Advanced configuration
|
|
40
|
+
|
|
41
|
+
### Custom headers
|
|
42
|
+
|
|
43
|
+
```typescript
|
|
44
|
+
const agent = new Agent({
|
|
45
|
+
id: "custom-agent",
|
|
46
|
+
name: "custom-agent",
|
|
47
|
+
model: {
|
|
48
|
+
url: "https://tokenhub.tencentmaas.com/v1",
|
|
49
|
+
id: "tencent-tokenhub/hy3-preview",
|
|
50
|
+
apiKey: process.env.TENCENT_TOKENHUB_API_KEY,
|
|
51
|
+
headers: {
|
|
52
|
+
"X-Custom-Header": "value"
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
});
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
### Dynamic model selection
|
|
59
|
+
|
|
60
|
+
```typescript
|
|
61
|
+
const agent = new Agent({
|
|
62
|
+
id: "dynamic-agent",
|
|
63
|
+
name: "Dynamic Agent",
|
|
64
|
+
model: ({ requestContext }) => {
|
|
65
|
+
const useAdvanced = requestContext.task === "complex";
|
|
66
|
+
return useAdvanced
|
|
67
|
+
? "tencent-tokenhub/hy3-preview"
|
|
68
|
+
: "tencent-tokenhub/hy3-preview";
|
|
69
|
+
}
|
|
70
|
+
});
|
|
71
|
+
```
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
# Wafer
|
|
2
|
+
|
|
3
|
+
Access 2 Wafer models through Mastra's model router. Authentication is handled automatically using the `WAFER_API_KEY` environment variable.
|
|
4
|
+
|
|
5
|
+
Learn more in the [Wafer documentation](https://docs.wafer.ai/wafer-pass).
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
WAFER_API_KEY=your-api-key
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
```typescript
|
|
12
|
+
import { Agent } from "@mastra/core/agent";
|
|
13
|
+
|
|
14
|
+
const agent = new Agent({
|
|
15
|
+
id: "my-agent",
|
|
16
|
+
name: "My Agent",
|
|
17
|
+
instructions: "You are a helpful assistant",
|
|
18
|
+
model: "wafer.ai/GLM-5.1"
|
|
19
|
+
});
|
|
20
|
+
|
|
21
|
+
// Generate a response
|
|
22
|
+
const response = await agent.generate("Hello!");
|
|
23
|
+
|
|
24
|
+
// Stream a response
|
|
25
|
+
const stream = await agent.stream("Tell me a story");
|
|
26
|
+
for await (const chunk of stream) {
|
|
27
|
+
console.log(chunk);
|
|
28
|
+
}
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
> **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [Wafer documentation](https://docs.wafer.ai/wafer-pass) for details.
|
|
32
|
+
|
|
33
|
+
## Models
|
|
34
|
+
|
|
35
|
+
| Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
|
|
36
|
+
| ---------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
|
|
37
|
+
| `wafer.ai/GLM-5.1` | 203K | | | | | | — | — |
|
|
38
|
+
| `wafer.ai/Qwen3.5-397B-A17B` | 262K | | | | | | — | — |
|
|
39
|
+
|
|
40
|
+
## Advanced configuration
|
|
41
|
+
|
|
42
|
+
### Custom headers
|
|
43
|
+
|
|
44
|
+
```typescript
|
|
45
|
+
const agent = new Agent({
|
|
46
|
+
id: "custom-agent",
|
|
47
|
+
name: "custom-agent",
|
|
48
|
+
model: {
|
|
49
|
+
url: "https://pass.wafer.ai/v1",
|
|
50
|
+
id: "wafer.ai/GLM-5.1",
|
|
51
|
+
apiKey: process.env.WAFER_API_KEY,
|
|
52
|
+
headers: {
|
|
53
|
+
"X-Custom-Header": "value"
|
|
54
|
+
}
|
|
55
|
+
}
|
|
56
|
+
});
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
### Dynamic model selection
|
|
60
|
+
|
|
61
|
+
```typescript
|
|
62
|
+
const agent = new Agent({
|
|
63
|
+
id: "dynamic-agent",
|
|
64
|
+
name: "Dynamic Agent",
|
|
65
|
+
model: ({ requestContext }) => {
|
|
66
|
+
const useAdvanced = requestContext.task === "complex";
|
|
67
|
+
return useAdvanced
|
|
68
|
+
? "wafer.ai/Qwen3.5-397B-A17B"
|
|
69
|
+
: "wafer.ai/GLM-5.1";
|
|
70
|
+
}
|
|
71
|
+
});
|
|
72
|
+
```
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# ZenMux
|
|
2
2
|
|
|
3
|
-
Access
|
|
3
|
+
Access 89 ZenMux models through Mastra's model router. Authentication is handled automatically using the `ZENMUX_API_KEY` environment variable.
|
|
4
4
|
|
|
5
5
|
Learn more in the [ZenMux documentation](https://docs.zenmux.ai).
|
|
6
6
|
|
|
@@ -68,6 +68,7 @@ for await (const chunk of stream) {
|
|
|
68
68
|
| `zenmux/moonshotai/kimi-k2-thinking` | 262K | | | | | | $0.60 | $3 |
|
|
69
69
|
| `zenmux/moonshotai/kimi-k2-thinking-turbo` | 262K | | | | | | $1 | $8 |
|
|
70
70
|
| `zenmux/moonshotai/kimi-k2.5` | 262K | | | | | | $0.58 | $3 |
|
|
71
|
+
| `zenmux/moonshotai/kimi-k2.6` | 262K | | | | | | $0.95 | $4 |
|
|
71
72
|
| `zenmux/openai/gpt-5` | 400K | | | | | | $1 | $10 |
|
|
72
73
|
| `zenmux/openai/gpt-5-codex` | 400K | | | | | | $1 | $10 |
|
|
73
74
|
| `zenmux/openai/gpt-5.1` | 400K | | | | | | $1 | $10 |
|
|
@@ -26,6 +26,7 @@ Direct access to individual AI model providers. Each provider offers unique mode
|
|
|
26
26
|
- [Cortecs](https://mastra.ai/models/providers/cortecs)
|
|
27
27
|
- [D.Run (China)](https://mastra.ai/models/providers/drun)
|
|
28
28
|
- [Deep Infra](https://mastra.ai/models/providers/deepinfra)
|
|
29
|
+
- [DigitalOcean](https://mastra.ai/models/providers/digitalocean)
|
|
29
30
|
- [DInference](https://mastra.ai/models/providers/dinference)
|
|
30
31
|
- [evroc](https://mastra.ai/models/providers/evroc)
|
|
31
32
|
- [FastRouter](https://mastra.ai/models/providers/fastrouter)
|
|
@@ -83,11 +84,14 @@ Direct access to individual AI model providers. Each provider offers unique mode
|
|
|
83
84
|
- [submodel](https://mastra.ai/models/providers/submodel)
|
|
84
85
|
- [Synthetic](https://mastra.ai/models/providers/synthetic)
|
|
85
86
|
- [Tencent Coding Plan (China)](https://mastra.ai/models/providers/tencent-coding-plan)
|
|
87
|
+
- [Tencent Token Plan](https://mastra.ai/models/providers/tencent-token-plan)
|
|
88
|
+
- [Tencent TokenHub](https://mastra.ai/models/providers/tencent-tokenhub)
|
|
86
89
|
- [The Grid AI](https://mastra.ai/models/providers/the-grid-ai)
|
|
87
90
|
- [Together AI](https://mastra.ai/models/providers/togetherai)
|
|
88
91
|
- [Upstage](https://mastra.ai/models/providers/upstage)
|
|
89
92
|
- [Vivgrid](https://mastra.ai/models/providers/vivgrid)
|
|
90
93
|
- [Vultr](https://mastra.ai/models/providers/vultr)
|
|
94
|
+
- [Wafer](https://mastra.ai/models/providers/wafer.ai)
|
|
91
95
|
- [Weights & Biases](https://mastra.ai/models/providers/wandb)
|
|
92
96
|
- [Xiaomi](https://mastra.ai/models/providers/xiaomi)
|
|
93
97
|
- [Xiaomi Token Plan (China)](https://mastra.ai/models/providers/xiaomi-token-plan-cn)
|
|
@@ -298,6 +298,14 @@ const response = await agent.generate('Help me organize my day', {
|
|
|
298
298
|
|
|
299
299
|
**options.tracingOptions.tags** (`string[]`): Tags to apply to this trace. String labels for categorizing and filtering traces.
|
|
300
300
|
|
|
301
|
+
**options.versions** (`VersionOverrides`): Per-invocation version overrides for sub-agent delegation. Merged on top of Mastra instance-level versions and propagated automatically through sub-agent calls via requestContext. Requires the editor package. See [Sub-agent versioning](/docs/editor/overview#sub-agent-versioning).
|
|
302
|
+
|
|
303
|
+
**options.versions.agents** (`Record<string, VersionSelector>`): A map of agent IDs to their version selectors.
|
|
304
|
+
|
|
305
|
+
**options.versions.agents.versionId** (`string`): Target a specific version by ID.
|
|
306
|
+
|
|
307
|
+
**options.versions.agents.status** (`'draft' | 'published'`): Target the latest version with this publication status.
|
|
308
|
+
|
|
301
309
|
**options.includeRawChunks** (`boolean`): Whether to include raw chunks in the stream output. Not available on all model providers.
|
|
302
310
|
|
|
303
311
|
## Response structure
|
|
@@ -140,6 +140,18 @@ await run.resume({
|
|
|
140
140
|
})
|
|
141
141
|
```
|
|
142
142
|
|
|
143
|
+
When a [`.foreach()`](https://mastra.ai/reference/workflows/workflow-methods/foreach) step suspends across multiple iterations, pass `forEachIndex` (zero-based; `0` targets the first iteration) to resume one iteration at a time. Iterations you don't target remain suspended.
|
|
144
|
+
|
|
145
|
+
```typescript
|
|
146
|
+
await run.resume({
|
|
147
|
+
step: 'approve',
|
|
148
|
+
resumeData: { ok: true },
|
|
149
|
+
forEachIndex: 1, // resumes the second iteration
|
|
150
|
+
})
|
|
151
|
+
```
|
|
152
|
+
|
|
153
|
+
`forEachIndex` is also supported by `resumeAsync()` and `resumeStream()`.
|
|
154
|
+
|
|
143
155
|
### `cancel()`
|
|
144
156
|
|
|
145
157
|
Cancel a running workflow:
|
|
@@ -63,4 +63,12 @@ Visit the [Configuration reference](https://mastra.ai/reference/configuration) f
|
|
|
63
63
|
|
|
64
64
|
**gateways** (`Record<string, MastraModelGateway>`): Custom model gateways to register for accessing AI models through alternative providers or private deployments. Structured as a key-value pair, with keys being the registry key (used for getGateway()) and values being gateway instances. (Default: `{}`)
|
|
65
65
|
|
|
66
|
-
**memory** (`Record<string, MastraMemory>`): Memory instances to register. These can be referenced by stored agents and resolved at runtime. Structured as a key-value pair, with keys being the registry key and values being memory instances. (Default: `{}`)
|
|
66
|
+
**memory** (`Record<string, MastraMemory>`): Memory instances to register. These can be referenced by stored agents and resolved at runtime. Structured as a key-value pair, with keys being the registry key and values being memory instances. (Default: `{}`)
|
|
67
|
+
|
|
68
|
+
**versions** (`VersionOverrides`): Global version overrides for sub-agent delegation. When a supervisor agent delegates to a sub-agent, these overrides determine which stored version of that sub-agent to use instead of the code-defined default. Requires the editor package to be configured. See [Sub-agent versioning](/docs/editor/overview#sub-agent-versioning) for details.
|
|
69
|
+
|
|
70
|
+
**versions.agents** (`Record<string, VersionSelector>`): A map of agent IDs to their version selectors. Each selector can target a specific version by ID or by publication status.
|
|
71
|
+
|
|
72
|
+
**versions.agents.versionId** (`string`): The ID of a specific version to use.
|
|
73
|
+
|
|
74
|
+
**versions.agents.status** (`'draft' | 'published'`): Select the latest version with this publication status.
|
|
@@ -117,4 +117,17 @@ export const mastra = new Mastra({
|
|
|
117
117
|
],
|
|
118
118
|
},
|
|
119
119
|
})
|
|
120
|
-
```
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
> **Keep bindings inline inside new Mastra({...}):** Mastra's Babel plugin defers `new Mastra(...)` so that `env` is available when it runs. Code outside that call evaluates at module load before `env` is populated.
|
|
123
|
+
>
|
|
124
|
+
> ```typescript
|
|
125
|
+
> // ✅ Works — binding is inside new Mastra(), so it's deferred
|
|
126
|
+
> export const mastra = new Mastra({
|
|
127
|
+
> storage: new D1Store({ binding: env.DB }),
|
|
128
|
+
> })
|
|
129
|
+
>
|
|
130
|
+
> // ❌ Breaks — env.DB is undefined at module load
|
|
131
|
+
> const storage = new D1Store({ binding: env.DB })
|
|
132
|
+
> export const mastra = new Mastra({ storage })
|
|
133
|
+
> ```
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# NetlifyDeployer
|
|
2
2
|
|
|
3
|
-
The `NetlifyDeployer` class handles packaging, configuration, and deployment by adapting Mastra's output to create an optimized version of your server. It extends the base [`Deployer`](https://mastra.ai/reference/deployer) class with Netlify
|
|
3
|
+
The `NetlifyDeployer` class handles packaging, configuration, and deployment by adapting Mastra's output to create an optimized version of your server. It extends the base [`Deployer`](https://mastra.ai/reference/deployer) class with Netlify-specific functionality. It enables you to run Mastra within Netlify serverless functions or edge functions.
|
|
4
4
|
|
|
5
5
|
## Installation
|
|
6
6
|
|
|
@@ -43,9 +43,31 @@ export const mastra = new Mastra({
|
|
|
43
43
|
})
|
|
44
44
|
```
|
|
45
45
|
|
|
46
|
+
## Constructor options
|
|
47
|
+
|
|
48
|
+
- `target?: 'serverless' | 'edge'` — Deploy target for Netlify. Defaults to `'serverless'`.
|
|
49
|
+
|
|
50
|
+
- `'serverless'` — Standard [Netlify Functions](https://docs.netlify.com/functions/overview/) (Node.js runtime, 60s default timeout).
|
|
51
|
+
- `'edge'` — [Netlify Edge Functions](https://docs.netlify.com/build/edge-functions/overview/) (Deno-based runtime, runs at the edge closest to users, no hard timeout).
|
|
52
|
+
|
|
53
|
+
### Edge functions example
|
|
54
|
+
|
|
55
|
+
```typescript
|
|
56
|
+
import { Mastra } from '@mastra/core'
|
|
57
|
+
import { NetlifyDeployer } from '@mastra/deployer-netlify'
|
|
58
|
+
|
|
59
|
+
export const mastra = new Mastra({
|
|
60
|
+
deployer: new NetlifyDeployer({
|
|
61
|
+
target: 'edge',
|
|
62
|
+
}),
|
|
63
|
+
})
|
|
64
|
+
```
|
|
65
|
+
|
|
46
66
|
## Output
|
|
47
67
|
|
|
48
|
-
After running `mastra build`, the deployer generates a `.netlify` folder. The build output includes all agents, tools, and workflows of your project, alongside a
|
|
68
|
+
After running `mastra build`, the deployer generates a `.netlify` folder. The build output includes all agents, tools, and workflows of your project, alongside a `config.json` file that configures the [Netlify Frameworks API](https://docs.netlify.com/build/frameworks/frameworks-api/).
|
|
69
|
+
|
|
70
|
+
### Serverless output (default)
|
|
49
71
|
|
|
50
72
|
```bash
|
|
51
73
|
your-project/
|
|
@@ -77,4 +99,30 @@ The `config.json` file contains:
|
|
|
77
99
|
}
|
|
78
100
|
]
|
|
79
101
|
}
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
### Edge output
|
|
105
|
+
|
|
106
|
+
```bash
|
|
107
|
+
your-project/
|
|
108
|
+
└── .netlify/
|
|
109
|
+
└── v1/
|
|
110
|
+
├── config.json
|
|
111
|
+
└── edge-functions/
|
|
112
|
+
├── index.mjs
|
|
113
|
+
├── package.json
|
|
114
|
+
└── node_modules/
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
The `config.json` file contains:
|
|
118
|
+
|
|
119
|
+
```json
|
|
120
|
+
{
|
|
121
|
+
"edge_functions": [
|
|
122
|
+
{
|
|
123
|
+
"function": "index",
|
|
124
|
+
"path": "/*"
|
|
125
|
+
}
|
|
126
|
+
]
|
|
127
|
+
}
|
|
80
128
|
```
|
|
@@ -458,6 +458,12 @@ Respond to a pending question from the `ask_user` built-in tool.
|
|
|
458
458
|
harness.respondToQuestion({ questionId: 'q-123', answer: 'Yes, proceed with the refactor' })
|
|
459
459
|
```
|
|
460
460
|
|
|
461
|
+
For multi-select questions, pass the selected option labels as a string array.
|
|
462
|
+
|
|
463
|
+
```typescript
|
|
464
|
+
harness.respondToQuestion({ questionId: 'q-123', answer: ['Add tests', 'Update docs'] })
|
|
465
|
+
```
|
|
466
|
+
|
|
461
467
|
#### `respondToPlanApproval({ planId, response })`
|
|
462
468
|
|
|
463
469
|
Respond to a pending plan approval from the `submit_plan` built-in tool. The `response` object contains `action` (`'approved'` or `'rejected'`) and an optional `feedback` string.
|
|
@@ -700,57 +706,74 @@ unsubscribe()
|
|
|
700
706
|
|
|
701
707
|
The harness emits events through registered listeners. The following table lists the available event types:
|
|
702
708
|
|
|
703
|
-
| Event type | Description
|
|
704
|
-
| -------------------------- |
|
|
705
|
-
| `mode_changed` | The active mode changed.
|
|
706
|
-
| `model_changed` | The active model changed.
|
|
707
|
-
| `thread_changed` | The active thread changed.
|
|
708
|
-
| `thread_created` | A new thread was created.
|
|
709
|
-
| `thread_deleted` | A thread was deleted.
|
|
710
|
-
| `state_changed` | Harness state was updated.
|
|
711
|
-
| `agent_start` | The agent started processing.
|
|
712
|
-
| `agent_end` | The agent finished processing.
|
|
713
|
-
| `message_start` | A new message started streaming.
|
|
714
|
-
| `message_update` | A message was updated with new content.
|
|
715
|
-
| `message_end` | A message finished streaming.
|
|
716
|
-
| `tool_start` | A tool call started.
|
|
717
|
-
| `tool_approval_required` | A tool call requires user approval.
|
|
718
|
-
| `tool_update` | A tool call was updated with progress.
|
|
719
|
-
| `tool_end` | A tool call finished.
|
|
720
|
-
| `tool_input_start` | Tool input started streaming.
|
|
721
|
-
| `tool_input_delta` | Tool input received a streaming delta.
|
|
722
|
-
| `tool_input_end` | Tool input finished streaming.
|
|
723
|
-
| `usage_update` | Token usage was updated.
|
|
724
|
-
| `error` | An error occurred.
|
|
725
|
-
| `info` | An informational message was emitted.
|
|
726
|
-
| `follow_up_queued` | A follow-up message was queued.
|
|
727
|
-
| `workspace_status_changed` | The workspace status changed.
|
|
728
|
-
| `workspace_ready` | The workspace finished initializing.
|
|
729
|
-
| `workspace_error` | The workspace encountered an error.
|
|
730
|
-
| `om_status` | Observational Memory status update.
|
|
731
|
-
| `om_observation_start` | An observation started.
|
|
732
|
-
| `om_observation_end` | An observation completed.
|
|
733
|
-
| `om_reflection_start` | A reflection started.
|
|
734
|
-
| `om_reflection_end` | A reflection completed.
|
|
735
|
-
| `ask_question` | The agent asked a question via the `ask_user` tool.
|
|
736
|
-
| `plan_approval_required` | The agent submitted a plan for approval via the `submit_plan` tool.
|
|
737
|
-
| `plan_approved` | A plan was approved.
|
|
738
|
-
| `subagent_start` | A subagent started processing.
|
|
739
|
-
| `subagent_text_delta` | A subagent emitted a text delta.
|
|
740
|
-
| `subagent_tool_start` | A subagent started a tool call.
|
|
741
|
-
| `subagent_tool_end` | A subagent finished a tool call.
|
|
742
|
-
| `subagent_end` | A subagent finished processing.
|
|
743
|
-
| `subagent_model_changed` | A subagent's model changed.
|
|
744
|
-
| `task_updated` | A task list was updated.
|
|
709
|
+
| Event type | Description |
|
|
710
|
+
| -------------------------- | --------------------------------------------------------------------------------------------------- |
|
|
711
|
+
| `mode_changed` | The active mode changed. |
|
|
712
|
+
| `model_changed` | The active model changed. |
|
|
713
|
+
| `thread_changed` | The active thread changed. |
|
|
714
|
+
| `thread_created` | A new thread was created. |
|
|
715
|
+
| `thread_deleted` | A thread was deleted. |
|
|
716
|
+
| `state_changed` | Harness state was updated. |
|
|
717
|
+
| `agent_start` | The agent started processing. |
|
|
718
|
+
| `agent_end` | The agent finished processing. |
|
|
719
|
+
| `message_start` | A new message started streaming. |
|
|
720
|
+
| `message_update` | A message was updated with new content. |
|
|
721
|
+
| `message_end` | A message finished streaming. |
|
|
722
|
+
| `tool_start` | A tool call started. |
|
|
723
|
+
| `tool_approval_required` | A tool call requires user approval. |
|
|
724
|
+
| `tool_update` | A tool call was updated with progress. |
|
|
725
|
+
| `tool_end` | A tool call finished. |
|
|
726
|
+
| `tool_input_start` | Tool input started streaming. |
|
|
727
|
+
| `tool_input_delta` | Tool input received a streaming delta. |
|
|
728
|
+
| `tool_input_end` | Tool input finished streaming. |
|
|
729
|
+
| `usage_update` | Token usage was updated. |
|
|
730
|
+
| `error` | An error occurred. |
|
|
731
|
+
| `info` | An informational message was emitted. |
|
|
732
|
+
| `follow_up_queued` | A follow-up message was queued. |
|
|
733
|
+
| `workspace_status_changed` | The workspace status changed. |
|
|
734
|
+
| `workspace_ready` | The workspace finished initializing. |
|
|
735
|
+
| `workspace_error` | The workspace encountered an error. |
|
|
736
|
+
| `om_status` | Observational Memory status update. |
|
|
737
|
+
| `om_observation_start` | An observation started. |
|
|
738
|
+
| `om_observation_end` | An observation completed. |
|
|
739
|
+
| `om_reflection_start` | A reflection started. |
|
|
740
|
+
| `om_reflection_end` | A reflection completed. |
|
|
741
|
+
| `ask_question` | The agent asked a question via the `ask_user` tool. Includes optional choices and a selection mode. |
|
|
742
|
+
| `plan_approval_required` | The agent submitted a plan for approval via the `submit_plan` tool. |
|
|
743
|
+
| `plan_approved` | A plan was approved. |
|
|
744
|
+
| `subagent_start` | A subagent started processing. |
|
|
745
|
+
| `subagent_text_delta` | A subagent emitted a text delta. |
|
|
746
|
+
| `subagent_tool_start` | A subagent started a tool call. |
|
|
747
|
+
| `subagent_tool_end` | A subagent finished a tool call. |
|
|
748
|
+
| `subagent_end` | A subagent finished processing. |
|
|
749
|
+
| `subagent_model_changed` | A subagent's model changed. |
|
|
750
|
+
| `task_updated` | A task list was updated. |
|
|
745
751
|
|
|
746
752
|
## Built-in tools
|
|
747
753
|
|
|
748
754
|
The harness provides built-in tools to agents in every mode:
|
|
749
755
|
|
|
750
|
-
| Tool | Description
|
|
751
|
-
| ------------- |
|
|
752
|
-
| `ask_user` | Ask the user a question and wait for their response.
|
|
753
|
-
| `submit_plan` | Submit a plan for user review and approval.
|
|
754
|
-
| `task_write` | Create or update a structured task list for tracking progress.
|
|
755
|
-
| `task_check` | Check the completion status of the current task list.
|
|
756
|
-
| `subagent` | Spawn a focused subagent with constrained tools (only available when `subagents` is configured).
|
|
756
|
+
| Tool | Description |
|
|
757
|
+
| ------------- | ------------------------------------------------------------------------------------------------------------------------- |
|
|
758
|
+
| `ask_user` | Ask the user a question and wait for their response. Supports free text, single-select choices, and multi-select choices. |
|
|
759
|
+
| `submit_plan` | Submit a plan for user review and approval. |
|
|
760
|
+
| `task_write` | Create or update a structured task list for tracking progress. |
|
|
761
|
+
| `task_check` | Check the completion status of the current task list. |
|
|
762
|
+
| `subagent` | Spawn a focused subagent with constrained tools (only available when `subagents` is configured). |
|
|
763
|
+
|
|
764
|
+
### `ask_user` selections
|
|
765
|
+
|
|
766
|
+
The `ask_user` tool accepts `options` for choice prompts. Set `selectionMode` to `single_select` to let the user pick one option, or `multi_select` to let the user pick multiple options. When `options` are provided and `selectionMode` is omitted, the prompt defaults to `single_select`. Omit `options` for free-text questions.
|
|
767
|
+
|
|
768
|
+
The following example demonstrates a multi-select response handler. The UI reads `event.selectionMode`, lets the user choose multiple options, then returns a string array with `respondToQuestion()`.
|
|
769
|
+
|
|
770
|
+
```typescript
|
|
771
|
+
harness.subscribe(event => {
|
|
772
|
+
if (event.type === 'ask_question' && event.selectionMode === 'multi_select') {
|
|
773
|
+
harness.respondToQuestion({
|
|
774
|
+
questionId: event.questionId,
|
|
775
|
+
answer: ['Add tests', 'Update docs'],
|
|
776
|
+
})
|
|
777
|
+
}
|
|
778
|
+
})
|
|
779
|
+
```
|
package/.docs/reference/index.md
CHANGED
|
@@ -201,6 +201,7 @@ The Reference section provides documentation of Mastra's API, including paramete
|
|
|
201
201
|
- [MongoDB Storage](https://mastra.ai/reference/storage/mongodb)
|
|
202
202
|
- [MSSQL Storage](https://mastra.ai/reference/storage/mssql)
|
|
203
203
|
- [PostgreSQL Storage](https://mastra.ai/reference/storage/postgresql)
|
|
204
|
+
- [Redis Storage](https://mastra.ai/reference/storage/redis)
|
|
204
205
|
- [Upstash Storage](https://mastra.ai/reference/storage/upstash)
|
|
205
206
|
- [ChunkType](https://mastra.ai/reference/streaming/ChunkType)
|
|
206
207
|
- [MastraModelOutput](https://mastra.ai/reference/streaming/agents/MastraModelOutput)
|
|
@@ -217,6 +218,7 @@ The Reference section provides documentation of Mastra's API, including paramete
|
|
|
217
218
|
- [createVectorQueryTool()](https://mastra.ai/reference/tools/vector-query-tool)
|
|
218
219
|
- [MCPClient](https://mastra.ai/reference/tools/mcp-client)
|
|
219
220
|
- [MCPServer](https://mastra.ai/reference/tools/mcp-server)
|
|
221
|
+
- [Tavily Tools](https://mastra.ai/reference/tools/tavily)
|
|
220
222
|
- [Amazon S3 Vector Store](https://mastra.ai/reference/vectors/s3vectors)
|
|
221
223
|
- [Astra Vector Store](https://mastra.ai/reference/vectors/astra)
|
|
222
224
|
- [Chroma Vector Store](https://mastra.ai/reference/vectors/chroma)
|
|
@@ -1,14 +1,14 @@
|
|
|
1
1
|
# PrefillErrorHandler
|
|
2
2
|
|
|
3
|
-
The `PrefillErrorHandler` is an **error processor** that handles
|
|
3
|
+
The `PrefillErrorHandler` is an **error processor** that handles assistant-response prefill errors. This error occurs when a conversation ends with an assistant message and the model rejects the request because it interprets it as prefilling the assistant response.
|
|
4
4
|
|
|
5
5
|
When the error is detected, the processor appends a hidden `<system-reminder>continue</system-reminder>` user message to the conversation and signals a retry. The reminder is persisted with `metadata.systemReminder = { type: 'anthropic-prefill-processor-retry' }`, which keeps it available for retry reconstruction and raw history while standard UI-facing message conversions hide it.
|
|
6
6
|
|
|
7
|
-
Add this processor to `errorProcessors` when you want Mastra to recover from Anthropic assistant message prefill errors.
|
|
7
|
+
Add this processor to `errorProcessors` when you want Mastra to recover from assistant prefill rejections (for example Anthropic's "assistant message prefill" and Qwen/llama.cpp's "assistant response prefill is incompatible with `enable_thinking`" errors).
|
|
8
8
|
|
|
9
9
|
## How it works
|
|
10
10
|
|
|
11
|
-
1. The LLM API call fails with a
|
|
11
|
+
1. The LLM API call fails with a known assistant-prefill rejection message
|
|
12
12
|
2. `PrefillErrorHandler` checks that this is the first retry attempt
|
|
13
13
|
3. It appends a hidden `<system-reminder>continue</system-reminder>` user message to the `messageList`
|
|
14
14
|
4. It returns `{ retry: true }` to signal the LLM call should be retried with the modified messages
|
|
@@ -17,7 +17,7 @@ The processor now reacts to the API rejection itself instead of re-checking whet
|
|
|
17
17
|
|
|
18
18
|
## Usage example
|
|
19
19
|
|
|
20
|
-
Add `PrefillErrorHandler` to `errorProcessors` for any agent that should retry
|
|
20
|
+
Add `PrefillErrorHandler` to `errorProcessors` for any agent that should retry assistant-prefill failures:
|
|
21
21
|
|
|
22
22
|
```typescript
|
|
23
23
|
import { Agent } from '@mastra/core/agent'
|
|
@@ -62,7 +62,7 @@ The `PrefillErrorHandler` takes no constructor parameters.
|
|
|
62
62
|
|
|
63
63
|
**name** (`'Prefill Error Handler'`): Processor display name.
|
|
64
64
|
|
|
65
|
-
**processAPIError** (`(args: ProcessAPIErrorArgs) => ProcessAPIErrorResult | void`): Handles
|
|
65
|
+
**processAPIError** (`(args: ProcessAPIErrorArgs) => ProcessAPIErrorResult | void`): Handles known assistant-prefill errors by appending a hidden system reminder continue message and signaling retry. Only triggers on the first retry attempt.
|
|
66
66
|
|
|
67
67
|
## Related
|
|
68
68
|
|