@mastra/mcp-docs-server 1.1.26-alpha.1 → 1.1.26-alpha.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. package/.docs/docs/mastra-platform/overview.md +3 -1
  2. package/.docs/docs/memory/observational-memory.md +27 -7
  3. package/.docs/docs/observability/tracing/exporters/cloud.md +34 -41
  4. package/.docs/docs/observability/tracing/exporters/langfuse.md +31 -0
  5. package/.docs/guides/build-your-ui/ai-sdk-ui.md +19 -6
  6. package/.docs/guides/deployment/netlify.md +16 -1
  7. package/.docs/guides/migrations/mastra-cloud.md +128 -3
  8. package/.docs/models/gateways/netlify.md +2 -1
  9. package/.docs/models/gateways/openrouter.md +2 -1
  10. package/.docs/models/gateways/vercel.md +4 -1
  11. package/.docs/models/index.md +36 -1
  12. package/.docs/models/providers/alibaba-cn.md +2 -1
  13. package/.docs/models/providers/anthropic.md +2 -1
  14. package/.docs/models/providers/cortecs.md +3 -1
  15. package/.docs/models/providers/digitalocean.md +116 -0
  16. package/.docs/models/providers/firmware.md +2 -3
  17. package/.docs/models/providers/helicone.md +1 -2
  18. package/.docs/models/providers/hpc-ai.md +73 -0
  19. package/.docs/models/providers/nvidia.md +1 -1
  20. package/.docs/models/providers/openai.md +1 -2
  21. package/.docs/models/providers/opencode.md +2 -1
  22. package/.docs/models/providers/ovhcloud.md +4 -7
  23. package/.docs/models/providers/poe.md +2 -1
  24. package/.docs/models/providers/zenmux.md +2 -1
  25. package/.docs/models/providers.md +2 -0
  26. package/.docs/reference/client-js/mastra-client.md +23 -0
  27. package/.docs/reference/deployer/netlify.md +50 -2
  28. package/.docs/reference/index.md +1 -0
  29. package/.docs/reference/memory/observational-memory.md +2 -0
  30. package/.docs/reference/observability/tracing/exporters/cloud-exporter.md +4 -2
  31. package/.docs/reference/observability/tracing/exporters/langfuse.md +2 -0
  32. package/.docs/reference/workspace/docker-sandbox.md +196 -0
  33. package/CHANGELOG.md +38 -0
  34. package/package.json +4 -4
@@ -1,6 +1,6 @@
1
1
  # ![Cortecs logo](https://models.dev/logos/cortecs.svg)Cortecs
2
2
 
3
- Access 30 Cortecs models through Mastra's model router. Authentication is handled automatically using the `CORTECS_API_KEY` environment variable.
3
+ Access 32 Cortecs models through Mastra's model router. Authentication is handled automatically using the `CORTECS_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Cortecs documentation](https://cortecs.ai).
6
6
 
@@ -49,6 +49,7 @@ for await (const chunk of stream) {
49
49
  | `cortecs/glm-4.7` | 198K | | | | | | $0.45 | $2 |
50
50
  | `cortecs/glm-4.7-flash` | 203K | | | | | | $0.09 | $0.53 |
51
51
  | `cortecs/glm-5` | 203K | | | | | | $1 | $3 |
52
+ | `cortecs/glm-5.1` | 205K | | | | | | $1 | $4 |
52
53
  | `cortecs/gpt-4.1` | 1.0M | | | | | | $2 | $9 |
53
54
  | `cortecs/gpt-oss-120b` | 128K | | | | | | — | — |
54
55
  | `cortecs/intellect-3` | 128K | | | | | | $0.22 | $1 |
@@ -59,6 +60,7 @@ for await (const chunk of stream) {
59
60
  | `cortecs/minimax-m2` | 400K | | | | | | $0.39 | $2 |
60
61
  | `cortecs/minimax-m2.1` | 196K | | | | | | $0.34 | $1 |
61
62
  | `cortecs/minimax-m2.5` | 197K | | | | | | $0.32 | $1 |
63
+ | `cortecs/minimax-M2.7` | 203K | | | | | | $0.47 | $1 |
62
64
  | `cortecs/nova-pro-v1` | 300K | | | | | | $1 | $4 |
63
65
  | `cortecs/qwen3-32b` | 16K | | | | | | $0.10 | $0.33 |
64
66
  | `cortecs/qwen3-coder-480b-a35b-instruct` | 262K | | | | | | $0.44 | $2 |
@@ -0,0 +1,116 @@
1
+ # ![DigitalOcean logo](https://models.dev/logos/digitalocean.svg)DigitalOcean
2
+
3
+ Access 46 DigitalOcean models through Mastra's model router. Authentication is handled automatically using the `DIGITALOCEAN_ACCESS_TOKEN` environment variable.
4
+
5
+ Learn more in the [DigitalOcean documentation](https://docs.digitalocean.com/products/gradient-ai-platform/details/models/).
6
+
7
+ ```bash
8
+ DIGITALOCEAN_ACCESS_TOKEN=your-api-token
9
+ ```
10
+
11
+ ```typescript
12
+ import { Agent } from "@mastra/core/agent";
13
+
14
+ const agent = new Agent({
15
+ id: "my-agent",
16
+ name: "My Agent",
17
+ instructions: "You are a helpful assistant",
18
+ model: "digitalocean/alibaba-qwen3-32b"
19
+ });
20
+
21
+ // Generate a response
22
+ const response = await agent.generate("Hello!");
23
+
24
+ // Stream a response
25
+ const stream = await agent.stream("Tell me a story");
26
+ for await (const chunk of stream) {
27
+ console.log(chunk);
28
+ }
29
+ ```
30
+
31
+ > **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [DigitalOcean documentation](https://docs.digitalocean.com/products/gradient-ai-platform/details/models/) for details.
32
+
33
+ ## Models
34
+
35
+ | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
+ | ---------------------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
+ | `digitalocean/alibaba-qwen3-32b` | 131K | | | | | | $0.25 | $0.55 |
38
+ | `digitalocean/all-mini-lm-l6-v2` | 256 | | | | | | $0.01 | — |
39
+ | `digitalocean/anthropic-claude-4.1-opus` | 200K | | | | | | $15 | $75 |
40
+ | `digitalocean/anthropic-claude-4.5-sonnet` | 1.0M | | | | | | $3 | $15 |
41
+ | `digitalocean/anthropic-claude-4.6-sonnet` | 1.0M | | | | | | $3 | $15 |
42
+ | `digitalocean/anthropic-claude-haiku-4.5` | 200K | | | | | | $1 | $5 |
43
+ | `digitalocean/anthropic-claude-opus-4` | 200K | | | | | | $15 | $75 |
44
+ | `digitalocean/anthropic-claude-opus-4.5` | 200K | | | | | | $5 | $25 |
45
+ | `digitalocean/anthropic-claude-opus-4.6` | 1.0M | | | | | | $5 | $25 |
46
+ | `digitalocean/anthropic-claude-opus-4.7` | 1.0M | | | | | | $5 | $25 |
47
+ | `digitalocean/anthropic-claude-sonnet-4` | 1.0M | | | | | | $3 | $15 |
48
+ | `digitalocean/arcee-trinity-large-thinking` | 256K | | | | | | $0.25 | $0.90 |
49
+ | `digitalocean/deepseek-r1-distill-llama-70b` | 131K | | | | | | $0.99 | $0.99 |
50
+ | `digitalocean/fal-ai/elevenlabs/tts/multilingual-v2` | — | | | | | | — | — |
51
+ | `digitalocean/fal-ai/fast-sdxl` | — | | | | | | — | — |
52
+ | `digitalocean/fal-ai/flux/schnell` | — | | | | | | — | — |
53
+ | `digitalocean/fal-ai/stable-audio-25/text-to-audio` | — | | | | | | — | — |
54
+ | `digitalocean/glm-5` | 203K | | | | | | $1 | $3 |
55
+ | `digitalocean/gte-large-en-v1.5` | 8K | | | | | | $0.09 | — |
56
+ | `digitalocean/kimi-k2.5` | 262K | | | | | | $0.50 | $3 |
57
+ | `digitalocean/llama3.3-70b-instruct` | 128K | | | | | | $0.65 | $0.65 |
58
+ | `digitalocean/minimax-m2.5` | 205K | | | | | | $0.30 | $1 |
59
+ | `digitalocean/multi-qa-mpnet-base-dot-v1` | 512 | | | | | | $0.01 | — |
60
+ | `digitalocean/nvidia-nemotron-3-super-120b` | 256K | | | | | | $0.30 | $0.65 |
61
+ | `digitalocean/openai-gpt-4.1` | 1.0M | | | | | | $2 | $8 |
62
+ | `digitalocean/openai-gpt-4o` | 128K | | | | | | $3 | $10 |
63
+ | `digitalocean/openai-gpt-4o-mini` | 128K | | | | | | $0.15 | $0.60 |
64
+ | `digitalocean/openai-gpt-5` | 400K | | | | | | $1 | $10 |
65
+ | `digitalocean/openai-gpt-5-2-pro` | 400K | | | | | | $21 | $168 |
66
+ | `digitalocean/openai-gpt-5-mini` | 400K | | | | | | $0.25 | $2 |
67
+ | `digitalocean/openai-gpt-5-nano` | 400K | | | | | | $0.05 | $0.40 |
68
+ | `digitalocean/openai-gpt-5.1-codex-max` | 400K | | | | | | $1 | $10 |
69
+ | `digitalocean/openai-gpt-5.2` | 400K | | | | | | $2 | $14 |
70
+ | `digitalocean/openai-gpt-5.3-codex` | 400K | | | | | | $2 | $14 |
71
+ | `digitalocean/openai-gpt-5.4` | 1.0M | | | | | | $3 | $15 |
72
+ | `digitalocean/openai-gpt-5.4-mini` | 400K | | | | | | $0.75 | $5 |
73
+ | `digitalocean/openai-gpt-5.4-nano` | 400K | | | | | | $0.20 | $1 |
74
+ | `digitalocean/openai-gpt-5.4-pro` | 400K | | | | | | $30 | $180 |
75
+ | `digitalocean/openai-gpt-image-1` | — | | | | | | $5 | $40 |
76
+ | `digitalocean/openai-gpt-image-1.5` | — | | | | | | $5 | $10 |
77
+ | `digitalocean/openai-gpt-oss-120b` | 131K | | | | | | $0.10 | $0.70 |
78
+ | `digitalocean/openai-gpt-oss-20b` | 131K | | | | | | $0.05 | $0.45 |
79
+ | `digitalocean/openai-o1` | 200K | | | | | | $15 | $60 |
80
+ | `digitalocean/openai-o3` | 200K | | | | | | $2 | $8 |
81
+ | `digitalocean/openai-o3-mini` | 200K | | | | | | $1 | $4 |
82
+ | `digitalocean/qwen3-embedding-0.6b` | 8K | | | | | | $0.04 | — |
83
+
84
+ ## Advanced configuration
85
+
86
+ ### Custom headers
87
+
88
+ ```typescript
89
+ const agent = new Agent({
90
+ id: "custom-agent",
91
+ name: "custom-agent",
92
+ model: {
93
+ url: "https://inference.do-ai.run/v1",
94
+ id: "digitalocean/alibaba-qwen3-32b",
95
+ apiKey: process.env.DIGITALOCEAN_ACCESS_TOKEN,
96
+ headers: {
97
+ "X-Custom-Header": "value"
98
+ }
99
+ }
100
+ });
101
+ ```
102
+
103
+ ### Dynamic model selection
104
+
105
+ ```typescript
106
+ const agent = new Agent({
107
+ id: "dynamic-agent",
108
+ name: "Dynamic Agent",
109
+ model: ({ requestContext }) => {
110
+ const useAdvanced = requestContext.task === "complex";
111
+ return useAdvanced
112
+ ? "digitalocean/qwen3-embedding-0.6b"
113
+ : "digitalocean/alibaba-qwen3-32b";
114
+ }
115
+ });
116
+ ```
@@ -1,6 +1,6 @@
1
1
  # ![Firmware logo](https://models.dev/logos/firmware.svg)Firmware
2
2
 
3
- Access 25 Firmware models through Mastra's model router. Authentication is handled automatically using the `FIRMWARE_API_KEY` environment variable.
3
+ Access 24 Firmware models through Mastra's model router. Authentication is handled automatically using the `FIRMWARE_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Firmware documentation](https://docs.frogbot.ai).
6
6
 
@@ -35,9 +35,8 @@ for await (const chunk of stream) {
35
35
  | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
36
  | -------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
37
  | `firmware/claude-haiku-4-5` | 200K | | | | | | $1 | $5 |
38
- | `firmware/claude-opus-4-5` | 200K | | | | | | $5 | $25 |
39
38
  | `firmware/claude-opus-4-6` | 200K | | | | | | $5 | $25 |
40
- | `firmware/claude-sonnet-4-5` | 200K | | | | | | $3 | $15 |
39
+ | `firmware/claude-opus-4-7` | 200K | | | | | | $5 | $25 |
41
40
  | `firmware/claude-sonnet-4-6` | 200K | | | | | | $3 | $15 |
42
41
  | `firmware/deepseek-v3-2` | 128K | | | | | | $0.58 | $2 |
43
42
  | `firmware/gemini-2.5-flash` | 1.0M | | | | | | $0.30 | $3 |
@@ -1,6 +1,6 @@
1
1
  # ![Helicone logo](https://models.dev/logos/helicone.svg)Helicone
2
2
 
3
- Access 91 Helicone models through Mastra's model router. Authentication is handled automatically using the `HELICONE_API_KEY` environment variable.
3
+ Access 90 Helicone models through Mastra's model router. Authentication is handled automatically using the `HELICONE_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Helicone documentation](https://helicone.ai/models).
6
6
 
@@ -48,7 +48,6 @@ for await (const chunk of stream) {
48
48
  | `helicone/claude-opus-4-1-20250805` | 200K | | | | | | $15 | $75 |
49
49
  | `helicone/claude-sonnet-4` | 200K | | | | | | $3 | $15 |
50
50
  | `helicone/claude-sonnet-4-5-20250929` | 200K | | | | | | $3 | $15 |
51
- | `helicone/codex-mini-latest` | 200K | | | | | | $2 | $6 |
52
51
  | `helicone/deepseek-r1-distill-llama-70b` | 128K | | | | | | $0.03 | $0.13 |
53
52
  | `helicone/deepseek-reasoner` | 128K | | | | | | $0.56 | $2 |
54
53
  | `helicone/deepseek-tng-r1t2-chimera` | 130K | | | | | | $0.30 | $1 |
@@ -0,0 +1,73 @@
1
+ # ![HPC-AI logo](https://models.dev/logos/hpc-ai.svg)HPC-AI
2
+
3
+ Access 3 HPC-AI models through Mastra's model router. Authentication is handled automatically using the `HPC_AI_API_KEY` environment variable.
4
+
5
+ Learn more in the [HPC-AI documentation](https://www.hpc-ai.com/doc/docs/quickstart/).
6
+
7
+ ```bash
8
+ HPC_AI_API_KEY=your-api-key
9
+ ```
10
+
11
+ ```typescript
12
+ import { Agent } from "@mastra/core/agent";
13
+
14
+ const agent = new Agent({
15
+ id: "my-agent",
16
+ name: "My Agent",
17
+ instructions: "You are a helpful assistant",
18
+ model: "hpc-ai/minimax/minimax-m2.5"
19
+ });
20
+
21
+ // Generate a response
22
+ const response = await agent.generate("Hello!");
23
+
24
+ // Stream a response
25
+ const stream = await agent.stream("Tell me a story");
26
+ for await (const chunk of stream) {
27
+ console.log(chunk);
28
+ }
29
+ ```
30
+
31
+ > **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [HPC-AI documentation](https://www.hpc-ai.com/doc/docs/quickstart/) for details.
32
+
33
+ ## Models
34
+
35
+ | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
+ | ----------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
+ | `hpc-ai/minimax/minimax-m2.5` | 1.0M | | | | | | $0.14 | $0.56 |
38
+ | `hpc-ai/moonshotai/kimi-k2.5` | 262K | | | | | | $0.21 | $1 |
39
+ | `hpc-ai/zai-org/glm-5.1` | 202K | | | | | | $0.66 | $2 |
40
+
41
+ ## Advanced configuration
42
+
43
+ ### Custom headers
44
+
45
+ ```typescript
46
+ const agent = new Agent({
47
+ id: "custom-agent",
48
+ name: "custom-agent",
49
+ model: {
50
+ url: "https://api.hpc-ai.com/inference/v1",
51
+ id: "hpc-ai/minimax/minimax-m2.5",
52
+ apiKey: process.env.HPC_AI_API_KEY,
53
+ headers: {
54
+ "X-Custom-Header": "value"
55
+ }
56
+ }
57
+ });
58
+ ```
59
+
60
+ ### Dynamic model selection
61
+
62
+ ```typescript
63
+ const agent = new Agent({
64
+ id: "dynamic-agent",
65
+ name: "Dynamic Agent",
66
+ model: ({ requestContext }) => {
67
+ const useAdvanced = requestContext.task === "complex";
68
+ return useAdvanced
69
+ ? "hpc-ai/zai-org/glm-5.1"
70
+ : "hpc-ai/minimax/minimax-m2.5";
71
+ }
72
+ });
73
+ ```
@@ -71,7 +71,7 @@ for await (const chunk of stream) {
71
71
  | `nvidia/microsoft/phi-4-mini-instruct` | 131K | | | | | | — | — |
72
72
  | `nvidia/minimaxai/minimax-m2.1` | 205K | | | | | | — | — |
73
73
  | `nvidia/minimaxai/minimax-m2.5` | 205K | | | | | | — | — |
74
- | `nvidia/minimaxai/minimax-m2.7` | 205K | | | | | | $0.30 | $1 |
74
+ | `nvidia/minimaxai/minimax-m2.7` | 205K | | | | | | — | — |
75
75
  | `nvidia/mistralai/codestral-22b-instruct-v0.1` | 128K | | | | | | — | — |
76
76
  | `nvidia/mistralai/devstral-2-123b-instruct-2512` | 262K | | | | | | — | — |
77
77
  | `nvidia/mistralai/mamba-codestral-7b-v0.1` | 128K | | | | | | — | — |
@@ -1,6 +1,6 @@
1
1
  # ![OpenAI logo](https://models.dev/logos/openai.svg)OpenAI
2
2
 
3
- Access 51 OpenAI models through Mastra's model router. Authentication is handled automatically using the `OPENAI_API_KEY` environment variable.
3
+ Access 50 OpenAI models through Mastra's model router. Authentication is handled automatically using the `OPENAI_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [OpenAI documentation](https://platform.openai.com/docs/models).
6
6
 
@@ -33,7 +33,6 @@ for await (const chunk of stream) {
33
33
  | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
34
34
  | ------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
35
35
  | `openai/chatgpt-image-latest` | — | | | | | | — | — |
36
- | `openai/codex-mini-latest` | 200K | | | | | | $2 | $6 |
37
36
  | `openai/gpt-3.5-turbo` | 16K | | | | | | $0.50 | $2 |
38
37
  | `openai/gpt-4` | 8K | | | | | | $30 | $60 |
39
38
  | `openai/gpt-4-turbo` | 128K | | | | | | $10 | $30 |
@@ -1,6 +1,6 @@
1
1
  # ![OpenCode Zen logo](https://models.dev/logos/opencode.svg)OpenCode Zen
2
2
 
3
- Access 34 OpenCode Zen models through Mastra's model router. Authentication is handled automatically using the `OPENCODE_API_KEY` environment variable.
3
+ Access 35 OpenCode Zen models through Mastra's model router. Authentication is handled automatically using the `OPENCODE_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [OpenCode Zen documentation](https://opencode.ai/docs/zen).
6
6
 
@@ -40,6 +40,7 @@ for await (const chunk of stream) {
40
40
  | `opencode/claude-opus-4-1` | 200K | | | | | | $15 | $75 |
41
41
  | `opencode/claude-opus-4-5` | 200K | | | | | | $5 | $25 |
42
42
  | `opencode/claude-opus-4-6` | 1.0M | | | | | | $5 | $25 |
43
+ | `opencode/claude-opus-4-7` | 1.0M | | | | | | $5 | $25 |
43
44
  | `opencode/claude-sonnet-4` | 1.0M | | | | | | $3 | $15 |
44
45
  | `opencode/claude-sonnet-4-5` | 1.0M | | | | | | $3 | $15 |
45
46
  | `opencode/claude-sonnet-4-6` | 1.0M | | | | | | $3 | $15 |
@@ -1,6 +1,6 @@
1
1
  # ![OVHcloud AI Endpoints logo](https://models.dev/logos/ovhcloud.svg)OVHcloud AI Endpoints
2
2
 
3
- Access 13 OVHcloud AI Endpoints models through Mastra's model router. Authentication is handled automatically using the `OVHCLOUD_API_KEY` environment variable.
3
+ Access 10 OVHcloud AI Endpoints models through Mastra's model router. Authentication is handled automatically using the `OVHCLOUD_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [OVHcloud AI Endpoints documentation](https://www.ovhcloud.com/en/public-cloud/ai-endpoints/catalog/).
6
6
 
@@ -15,7 +15,7 @@ const agent = new Agent({
15
15
  id: "my-agent",
16
16
  name: "My Agent",
17
17
  instructions: "You are a helpful assistant",
18
- model: "ovhcloud/deepseek-r1-distill-llama-70b"
18
+ model: "ovhcloud/gpt-oss-120b"
19
19
  });
20
20
 
21
21
  // Generate a response
@@ -34,7 +34,6 @@ for await (const chunk of stream) {
34
34
 
35
35
  | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
36
  | ---------------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
- | `ovhcloud/deepseek-r1-distill-llama-70b` | 131K | | | | | | $0.74 | $0.74 |
38
37
  | `ovhcloud/gpt-oss-120b` | 131K | | | | | | $0.09 | $0.47 |
39
38
  | `ovhcloud/gpt-oss-20b` | 131K | | | | | | $0.05 | $0.18 |
40
39
  | `ovhcloud/llama-3.1-8b-instruct` | 131K | | | | | | $0.11 | $0.11 |
@@ -42,8 +41,6 @@ for await (const chunk of stream) {
42
41
  | `ovhcloud/mistral-7b-instruct-v0.3` | 66K | | | | | | $0.11 | $0.11 |
43
42
  | `ovhcloud/mistral-nemo-instruct-2407` | 66K | | | | | | $0.14 | $0.14 |
44
43
  | `ovhcloud/mistral-small-3.2-24b-instruct-2506` | 131K | | | | | | $0.10 | $0.31 |
45
- | `ovhcloud/mixtral-8x7b-instruct-v0.1` | 33K | | | | | | $0.70 | $0.70 |
46
- | `ovhcloud/qwen2.5-coder-32b-instruct` | 33K | | | | | | $0.96 | $0.96 |
47
44
  | `ovhcloud/qwen2.5-vl-72b-instruct` | 33K | | | | | | $1 | $1 |
48
45
  | `ovhcloud/qwen3-32b` | 33K | | | | | | $0.09 | $0.25 |
49
46
  | `ovhcloud/qwen3-coder-30b-a3b-instruct` | 262K | | | | | | $0.07 | $0.26 |
@@ -58,7 +55,7 @@ const agent = new Agent({
58
55
  name: "custom-agent",
59
56
  model: {
60
57
  url: "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1",
61
- id: "ovhcloud/deepseek-r1-distill-llama-70b",
58
+ id: "ovhcloud/gpt-oss-120b",
62
59
  apiKey: process.env.OVHCLOUD_API_KEY,
63
60
  headers: {
64
61
  "X-Custom-Header": "value"
@@ -77,7 +74,7 @@ const agent = new Agent({
77
74
  const useAdvanced = requestContext.task === "complex";
78
75
  return useAdvanced
79
76
  ? "ovhcloud/qwen3-coder-30b-a3b-instruct"
80
- : "ovhcloud/deepseek-r1-distill-llama-70b";
77
+ : "ovhcloud/gpt-oss-120b";
81
78
  }
82
79
  });
83
80
  ```
@@ -1,6 +1,6 @@
1
1
  # ![Poe logo](https://models.dev/logos/poe.svg)Poe
2
2
 
3
- Access 117 Poe models through Mastra's model router. Authentication is handled automatically using the `POE_API_KEY` environment variable.
3
+ Access 118 Poe models through Mastra's model router. Authentication is handled automatically using the `POE_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Poe documentation](https://creator.poe.com/docs/external-applications/openai-compatible-api).
6
6
 
@@ -41,6 +41,7 @@ for await (const chunk of stream) {
41
41
  | `poe/anthropic/claude-opus-4.1` | 197K | | | | | | $13 | $64 |
42
42
  | `poe/anthropic/claude-opus-4.5` | 197K | | | | | | $4 | $21 |
43
43
  | `poe/anthropic/claude-opus-4.6` | 983K | | | | | | $4 | $21 |
44
+ | `poe/anthropic/claude-opus-4.7` | 1.0M | | | | | | $4 | $21 |
44
45
  | `poe/anthropic/claude-sonnet-3.7` | 197K | | | | | | $3 | $13 |
45
46
  | `poe/anthropic/claude-sonnet-4` | 983K | | | | | | $3 | $13 |
46
47
  | `poe/anthropic/claude-sonnet-4.5` | 983K | | | | | | $3 | $13 |
@@ -1,6 +1,6 @@
1
1
  # ![ZenMux logo](https://models.dev/logos/zenmux.svg)ZenMux
2
2
 
3
- Access 87 ZenMux models through Mastra's model router. Authentication is handled automatically using the `ZENMUX_API_KEY` environment variable.
3
+ Access 88 ZenMux models through Mastra's model router. Authentication is handled automatically using the `ZENMUX_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [ZenMux documentation](https://docs.zenmux.ai).
6
6
 
@@ -41,6 +41,7 @@ for await (const chunk of stream) {
41
41
  | `zenmux/anthropic/claude-opus-4.1` | 200K | | | | | | $15 | $75 |
42
42
  | `zenmux/anthropic/claude-opus-4.5` | 200K | | | | | | $5 | $25 |
43
43
  | `zenmux/anthropic/claude-opus-4.6` | 1.0M | | | | | | $5 | $25 |
44
+ | `zenmux/anthropic/claude-opus-4.7` | 1.0M | | | | | | $5 | $25 |
44
45
  | `zenmux/anthropic/claude-sonnet-4` | 1.0M | | | | | | $3 | $15 |
45
46
  | `zenmux/anthropic/claude-sonnet-4.5` | 1.0M | | | | | | $3 | $15 |
46
47
  | `zenmux/anthropic/claude-sonnet-4.6` | 1.0M | | | | | | $3 | $15 |
@@ -26,6 +26,7 @@ Direct access to individual AI model providers. Each provider offers unique mode
26
26
  - [Cortecs](https://mastra.ai/models/providers/cortecs)
27
27
  - [D.Run (China)](https://mastra.ai/models/providers/drun)
28
28
  - [Deep Infra](https://mastra.ai/models/providers/deepinfra)
29
+ - [DigitalOcean](https://mastra.ai/models/providers/digitalocean)
29
30
  - [DInference](https://mastra.ai/models/providers/dinference)
30
31
  - [evroc](https://mastra.ai/models/providers/evroc)
31
32
  - [FastRouter](https://mastra.ai/models/providers/fastrouter)
@@ -34,6 +35,7 @@ Direct access to individual AI model providers. Each provider offers unique mode
34
35
  - [Friendli](https://mastra.ai/models/providers/friendli)
35
36
  - [GitHub Models](https://mastra.ai/models/providers/github-models)
36
37
  - [Helicone](https://mastra.ai/models/providers/helicone)
38
+ - [HPC-AI](https://mastra.ai/models/providers/hpc-ai)
37
39
  - [Hugging Face](https://mastra.ai/models/providers/huggingface)
38
40
  - [iFlow](https://mastra.ai/models/providers/iflowcn)
39
41
  - [Inception](https://mastra.ai/models/providers/inception)
@@ -12,6 +12,29 @@ export const mastraClient = new MastraClient({
12
12
  })
13
13
  ```
14
14
 
15
+ ## `RequestContext`
16
+
17
+ When you use `RequestContext` with the client SDK, import it from `@mastra/client-js`.
18
+
19
+ ```typescript
20
+ import { MastraClient, RequestContext } from '@mastra/client-js'
21
+
22
+ const client = new MastraClient({
23
+ baseUrl: 'http://localhost:4111/',
24
+ })
25
+
26
+ const requestContext = new RequestContext()
27
+ requestContext.set('userId', 'user-123')
28
+
29
+ const agent = client.getAgent('support-agent')
30
+
31
+ const response = await agent.generate('Summarize this ticket', {
32
+ requestContext,
33
+ })
34
+ ```
35
+
36
+ You can also pass `requestContext` as a `Record<string, any>`.
37
+
15
38
  ## Parameters
16
39
 
17
40
  **baseUrl** (`string`): The base URL for the Mastra API. All requests will be sent relative to this URL.
@@ -1,6 +1,6 @@
1
1
  # NetlifyDeployer
2
2
 
3
- The `NetlifyDeployer` class handles packaging, configuration, and deployment by adapting Mastra's output to create an optimized version of your server. It extends the base [`Deployer`](https://mastra.ai/reference/deployer) class with Netlify specific functionality. It enables you to run Mastra within Netlify functions.
3
+ The `NetlifyDeployer` class handles packaging, configuration, and deployment by adapting Mastra's output to create an optimized version of your server. It extends the base [`Deployer`](https://mastra.ai/reference/deployer) class with Netlify-specific functionality. It enables you to run Mastra within Netlify serverless functions or edge functions.
4
4
 
5
5
  ## Installation
6
6
 
@@ -43,9 +43,31 @@ export const mastra = new Mastra({
43
43
  })
44
44
  ```
45
45
 
46
+ ## Constructor options
47
+
48
+ - `target?: 'serverless' | 'edge'` — Deploy target for Netlify. Defaults to `'serverless'`.
49
+
50
+ - `'serverless'` — Standard [Netlify Functions](https://docs.netlify.com/functions/overview/) (Node.js runtime, 60s default timeout).
51
+ - `'edge'` — [Netlify Edge Functions](https://docs.netlify.com/build/edge-functions/overview/) (Deno-based runtime, runs at the edge closest to users, no hard timeout).
52
+
53
+ ### Edge functions example
54
+
55
+ ```typescript
56
+ import { Mastra } from '@mastra/core'
57
+ import { NetlifyDeployer } from '@mastra/deployer-netlify'
58
+
59
+ export const mastra = new Mastra({
60
+ deployer: new NetlifyDeployer({
61
+ target: 'edge',
62
+ }),
63
+ })
64
+ ```
65
+
46
66
  ## Output
47
67
 
48
- After running `mastra build`, the deployer generates a `.netlify` folder. The build output includes all agents, tools, and workflows of your project, alongside a special `config.json` file. The `config.json` file configures the behavior of Netlify functions.
68
+ After running `mastra build`, the deployer generates a `.netlify` folder. The build output includes all agents, tools, and workflows of your project, alongside a `config.json` file that configures the [Netlify Frameworks API](https://docs.netlify.com/build/frameworks/frameworks-api/).
69
+
70
+ ### Serverless output (default)
49
71
 
50
72
  ```bash
51
73
  your-project/
@@ -77,4 +99,30 @@ The `config.json` file contains:
77
99
  }
78
100
  ]
79
101
  }
102
+ ```
103
+
104
+ ### Edge output
105
+
106
+ ```bash
107
+ your-project/
108
+ └── .netlify/
109
+ └── v1/
110
+ ├── config.json
111
+ └── edge-functions/
112
+ ├── index.mjs
113
+ ├── package.json
114
+ └── node_modules/
115
+ ```
116
+
117
+ The `config.json` file contains:
118
+
119
+ ```json
120
+ {
121
+ "edge_functions": [
122
+ {
123
+ "function": "index",
124
+ "path": "/*"
125
+ }
126
+ ]
127
+ }
80
128
  ```
@@ -284,6 +284,7 @@ The Reference section provides documentation of Mastra's API, including paramete
284
284
  - [AgentFSFilesystem](https://mastra.ai/reference/workspace/agentfs-filesystem)
285
285
  - [BlaxelSandbox](https://mastra.ai/reference/workspace/blaxel-sandbox)
286
286
  - [DaytonaSandbox](https://mastra.ai/reference/workspace/daytona-sandbox)
287
+ - [DockerSandbox](https://mastra.ai/reference/workspace/docker-sandbox)
287
288
  - [E2BSandbox](https://mastra.ai/reference/workspace/e2b-sandbox)
288
289
  - [GCSFilesystem](https://mastra.ai/reference/workspace/gcs-filesystem)
289
290
  - [LocalFilesystem](https://mastra.ai/reference/workspace/local-filesystem)
@@ -36,6 +36,8 @@ OM performs thresholding with fast local token estimation. Text uses `tokenx`, a
36
36
 
37
37
  **scope** (`'resource' | 'thread'`): Memory scope for observations. \`'thread'\` keeps observations per-thread. \`'resource'\` (experimental) shares observations across all threads for a resource, enabling cross-conversation memory. (Default: `'thread'`)
38
38
 
39
+ **activateAfterIdle** (`number | string`): Time before buffered observations or buffered reflections are forced to activate after inactivity, even if their token thresholds have not been reached yet. Accepts milliseconds or duration strings like \`300\_000\`, \`"5m"\`, or \`"1hr"\`. When the gap between the current time and the last assistant message part timestamp exceeds this value, buffered observational memory activates before the next prompt. Useful for aligning with prompt cache TTLs.
40
+
39
41
  **shareTokenBudget** (`boolean`): Share the token budget between messages and observations. When enabled, the total budget is \`observation.messageTokens + reflection.observationTokens\`. Messages can use more space when observations are small, and vice versa. This maximizes context usage through flexible allocation. \`shareTokenBudget\` is not yet compatible with async buffering. You must set \`observation: { bufferTokens: false }\` when using this option (this is a temporary limitation). (Default: `false`)
40
42
 
41
43
  **retrieval** (`boolean | { vector?: boolean; scope?: 'thread' | 'resource' }`): \*\*Experimental.\*\* Enable retrieval-mode observation groups as durable pointers to raw message history. \`true\` enables cross-thread browsing by default. \`{ vector: true }\` also enables semantic search using Memory's vector store and embedder. \`{ scope: 'thread' }\` restricts the recall tool to the current thread only. Default scope is \`'resource'\`. (Default: `false`)
@@ -1,5 +1,7 @@
1
1
  # CloudExporter
2
2
 
3
+ **Added in:** `@mastra/observability@1.8.0`
4
+
3
5
  Sends tracing spans, logs, metrics, scores, and feedback to the Mastra platform for online visualization and monitoring.
4
6
 
5
7
  ## Constructor
@@ -56,9 +58,9 @@ Extends `BaseExporterConfig`, which includes:
56
58
 
57
59
  The exporter reads these environment variables if not provided in config:
58
60
 
59
- - `MASTRA_CLOUD_ACCESS_TOKEN` - Authentication token. Project-scoped tokens work with the default `/ai/{signal}/publish` routes. Organization API keys require `projectId` or `MASTRA_PROJECT_ID`.
61
+ - `MASTRA_CLOUD_ACCESS_TOKEN` - Authentication token for `CloudExporter` requests
60
62
  - `MASTRA_PROJECT_ID` - Project ID to use when deriving project-scoped collector routes such as `/projects/:projectId/ai/spans/publish`
61
- - `MASTRA_CLOUD_TRACES_ENDPOINT` - Traces endpoint override. Pass either a base origin or a full traces publish URL. Defaults to `https://api.mastra.ai`
63
+ - `MASTRA_CLOUD_TRACES_ENDPOINT` - Traces endpoint override. Pass either a base origin or a full traces publish URL. Defaults to `https://observability.mastra.ai` in `@mastra/observability@1.9.2` and later
62
64
 
63
65
  ## Properties
64
66
 
@@ -16,6 +16,8 @@ interface LangfuseExporterConfig extends BaseExporterConfig {
16
16
  secretKey?: string
17
17
  baseUrl?: string
18
18
  realtime?: boolean
19
+ flushAt?: number
20
+ flushInterval?: number
19
21
  environment?: string
20
22
  release?: string
21
23
  }