@mastra/mcp-docs-server 1.1.1 → 1.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/.docs/docs/deployment/cloud-providers.md +1 -1
  2. package/.docs/docs/deployment/overview.md +1 -1
  3. package/.docs/docs/deployment/studio.md +234 -0
  4. package/.docs/docs/memory/observational-memory.md +86 -11
  5. package/.docs/docs/streaming/events.md +23 -0
  6. package/.docs/docs/workspace/filesystem.md +72 -1
  7. package/.docs/docs/workspace/overview.md +95 -12
  8. package/.docs/docs/workspace/sandbox.md +2 -0
  9. package/.docs/guides/agent-frameworks/ai-sdk.md +6 -2
  10. package/.docs/guides/deployment/cloudflare.md +99 -0
  11. package/.docs/models/gateways/openrouter.md +6 -3
  12. package/.docs/models/index.md +1 -1
  13. package/.docs/models/providers/baseten.md +2 -1
  14. package/.docs/models/providers/cerebras.md +2 -1
  15. package/.docs/models/providers/fireworks-ai.md +2 -1
  16. package/.docs/models/providers/friendli.md +3 -2
  17. package/.docs/models/providers/huggingface.md +3 -2
  18. package/.docs/models/providers/jiekou.md +4 -2
  19. package/.docs/models/providers/minimax-cn-coding-plan.md +3 -2
  20. package/.docs/models/providers/minimax-cn.md +3 -2
  21. package/.docs/models/providers/minimax-coding-plan.md +3 -2
  22. package/.docs/models/providers/minimax.md +3 -2
  23. package/.docs/models/providers/nano-gpt.md +12 -4
  24. package/.docs/models/providers/novita-ai.md +4 -2
  25. package/.docs/models/providers/ollama-cloud.md +3 -1
  26. package/.docs/models/providers/openai.md +15 -14
  27. package/.docs/models/providers/opencode.md +31 -32
  28. package/.docs/models/providers/stackit.md +78 -0
  29. package/.docs/models/providers/synthetic.md +1 -1
  30. package/.docs/models/providers/zai-coding-plan.md +3 -2
  31. package/.docs/models/providers/zai.md +3 -2
  32. package/.docs/models/providers/zhipuai-coding-plan.md +3 -2
  33. package/.docs/models/providers/zhipuai.md +3 -2
  34. package/.docs/models/providers.md +1 -0
  35. package/.docs/reference/ai-sdk/with-mastra.md +1 -1
  36. package/.docs/reference/cli/mastra.md +1 -1
  37. package/.docs/reference/deployer/cloudflare.md +35 -12
  38. package/.docs/reference/index.md +3 -0
  39. package/.docs/reference/memory/observational-memory.md +318 -9
  40. package/.docs/reference/streaming/workflows/stream.md +1 -0
  41. package/.docs/reference/workflows/workflow-methods/foreach.md +30 -0
  42. package/.docs/reference/workspace/e2b-sandbox.md +299 -0
  43. package/.docs/reference/workspace/gcs-filesystem.md +170 -0
  44. package/.docs/reference/workspace/s3-filesystem.md +169 -0
  45. package/CHANGELOG.md +14 -0
  46. package/package.json +6 -6
  47. package/.docs/guides/deployment/cloudflare-deployer.md +0 -102
@@ -19,6 +19,7 @@ When you assign a workspace with a sandbox to an agent, Mastra automatically inc
19
19
  Available providers:
20
20
 
21
21
  - [`LocalSandbox`](https://mastra.ai/reference/workspace/local-sandbox) - Executes commands on the local machine
22
+ - [`E2BSandbox`](https://mastra.ai/reference/workspace/e2b-sandbox) - Executes commands in isolated E2B cloud sandboxes
22
23
 
23
24
  ## Basic usage
24
25
 
@@ -57,5 +58,6 @@ When you configure a sandbox on a workspace, agents receive the `execute_command
57
58
  ## Related
58
59
 
59
60
  - [`LocalSandbox` reference](https://mastra.ai/reference/workspace/local-sandbox)
61
+ - [`E2BSandbox` reference](https://mastra.ai/reference/workspace/e2b-sandbox)
60
62
  - [Workspace overview](https://mastra.ai/docs/workspace/overview)
61
63
  - [Filesystem](https://mastra.ai/docs/workspace/filesystem)
@@ -83,9 +83,11 @@ const storage = new LibSQLStore({
83
83
  });
84
84
  await storage.init();
85
85
 
86
+ const memoryStorage = await storage.getStore('memory');
87
+
86
88
  const model = withMastra(openai('gpt-4o'), {
87
89
  memory: {
88
- storage,
90
+ storage: memoryStorage!,
89
91
  threadId: 'user-thread-123',
90
92
  resourceId: 'user-123',
91
93
  lastMessages: 10,
@@ -111,11 +113,13 @@ import { LibSQLStore } from '@mastra/libsql';
111
113
  const storage = new LibSQLStore({ id: 'my-app', url: 'file:./data.db' });
112
114
  await storage.init();
113
115
 
116
+ const memoryStorage = await storage.getStore('memory');
117
+
114
118
  const model = withMastra(openai('gpt-4o'), {
115
119
  inputProcessors: [myGuardProcessor],
116
120
  outputProcessors: [myLoggingProcessor],
117
121
  memory: {
118
- storage,
122
+ storage: memoryStorage!,
119
123
  threadId: 'thread-123',
120
124
  resourceId: 'user-123',
121
125
  lastMessages: 10,
@@ -0,0 +1,99 @@
1
+ # Deploy Mastra to Cloudflare
2
+
3
+ Use `@mastra/deployer-cloudflare` to deploy your Mastra server to Cloudflare Workers. The deployer bundles your code and generates a `wrangler.jsonc` file conforming to Cloudflare's [wrangler configuration](https://developers.cloudflare.com/workers/wrangler/configuration/), ready to deploy with no additional configuration.
4
+
5
+ > **Info:** If you're using a [server adapter](https://mastra.ai/docs/server/server-adapters) or [web framework](https://mastra.ai/docs/deployment/web-framework), deploy the way you normally would for that framework.
6
+
7
+ ## Before you begin
8
+
9
+ You'll need a [Mastra application](https://mastra.ai/guides/getting-started/quickstart) and a [Cloudflare](https://cloudflare.com/) account.
10
+
11
+ Cloudflare Workers use an ephemeral filesystem, so any storage you configure (including observability storage) must be hosted externally. If you're using [LibSQLStore](https://mastra.ai/reference/storage/libsql) with a file URL, switch to a remotely hosted database.
12
+
13
+ ## Installation
14
+
15
+ Add the `@mastra/deployer-cloudflare` package to your project:
16
+
17
+ **npm**:
18
+
19
+ ```bash
20
+ npm install @mastra/deployer-cloudflare@latest
21
+ ```
22
+
23
+ **pnpm**:
24
+
25
+ ```bash
26
+ pnpm add @mastra/deployer-cloudflare@latest
27
+ ```
28
+
29
+ **Yarn**:
30
+
31
+ ```bash
32
+ yarn add @mastra/deployer-cloudflare@latest
33
+ ```
34
+
35
+ **Bun**:
36
+
37
+ ```bash
38
+ bun add @mastra/deployer-cloudflare@latest
39
+ ```
40
+
41
+ Import [`CloudflareDeployer`](https://mastra.ai/reference/deployer/cloudflare) and set it as the deployer in your Mastra configuration:
42
+
43
+ ```typescript
44
+ import { Mastra } from "@mastra/core";
45
+ import { CloudflareDeployer } from "@mastra/deployer-cloudflare";
46
+
47
+ export const mastra = new Mastra({
48
+ deployer: new CloudflareDeployer({
49
+ name: "your-project-name",
50
+ vars: {
51
+ NODE_ENV: "production",
52
+ },
53
+ }),
54
+ });
55
+ ```
56
+
57
+ In order to test your Cloudflare Worker locally, also install the [`wrangler` CLI](https://developers.cloudflare.com/workers/wrangler/install-and-update/):
58
+
59
+ **npm**:
60
+
61
+ ```bash
62
+ npm install -D wrangler
63
+ ```
64
+
65
+ **pnpm**:
66
+
67
+ ```bash
68
+ pnpm add -D wrangler
69
+ ```
70
+
71
+ **Yarn**:
72
+
73
+ ```bash
74
+ yarn add --dev wrangler
75
+ ```
76
+
77
+ **Bun**:
78
+
79
+ ```bash
80
+ bun add --dev wrangler
81
+ ```
82
+
83
+ ## Usage
84
+
85
+ After setting up your project, push it to your remote Git provider of choice (e.g. GitHub).
86
+
87
+ 1. Connect your repository to Cloudflare. On the "Workers & Pages" dashboard, select **Create application** and choose your Git provider in the next step. Continue with the setup process and select the repository you want to deploy.
88
+
89
+ > **Note:** Remember to set your environment variables needed to run your application (e.g. your [model provider](https://mastra.ai/models/providers) API key).
90
+
91
+ 2. Once you're ready, click the **Deploy** button and wait for the first deployment to complete.
92
+
93
+ 3. Try out your newly deployed function by going to `https://<your-project-name>.<slug>.workers.dev/api/agents`. You should get a JSON response listing all available agents.
94
+
95
+ Since the [Mastra server](https://mastra.ai/docs/server/mastra-server) prefixes every API endpoint with `/api`, you have to add it to your URLs when making requests.
96
+
97
+ 4. You can now call your Mastra endpoints over HTTP.
98
+
99
+ > **Note:** Set up [authentication](https://mastra.ai/docs/server/auth) before exposing your endpoints publicly.
@@ -1,6 +1,6 @@
1
1
  # ![OpenRouter logo](https://models.dev/logos/openrouter.svg)OpenRouter
2
2
 
3
- OpenRouter aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 176 models through Mastra's model router.
3
+ OpenRouter aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 179 models through Mastra's model router.
4
4
 
5
5
  Learn more in the [OpenRouter documentation](https://openrouter.ai/models).
6
6
 
@@ -101,6 +101,7 @@ ANTHROPIC_API_KEY=ant-...
101
101
  | `minimax/minimax-m1` |
102
102
  | `minimax/minimax-m2` |
103
103
  | `minimax/minimax-m2.1` |
104
+ | `minimax/minimax-m2.5` |
104
105
  | `mistralai/codestral-2508` |
105
106
  | `mistralai/devstral-2512` |
106
107
  | `mistralai/devstral-2512:free` |
@@ -156,7 +157,6 @@ ANTHROPIC_API_KEY=ant-...
156
157
  | `openai/gpt-oss-20b:free` |
157
158
  | `openai/gpt-oss-safeguard-20b` |
158
159
  | `openai/o4-mini` |
159
- | `openrouter/pony-alpha` |
160
160
  | `openrouter/sherlock-dash-alpha` |
161
161
  | `openrouter/sherlock-think-alpha` |
162
162
  | `qwen/qwen-2.5-coder-32b-instruct` |
@@ -190,6 +190,8 @@ ANTHROPIC_API_KEY=ant-...
190
190
  | `sourceful/riverflow-v2-fast-preview` |
191
191
  | `sourceful/riverflow-v2-max-preview` |
192
192
  | `sourceful/riverflow-v2-standard-preview` |
193
+ | `stepfun/step-3.5-flash` |
194
+ | `stepfun/step-3.5-flash:free` |
193
195
  | `thudm/glm-z1-32b:free` |
194
196
  | `tngtech/deepseek-r1t2-chimera:free` |
195
197
  | `tngtech/tng-r1t-chimera:free` |
@@ -209,4 +211,5 @@ ANTHROPIC_API_KEY=ant-...
209
211
  | `z-ai/glm-4.6` |
210
212
  | `z-ai/glm-4.6:exacto` |
211
213
  | `z-ai/glm-4.7` |
212
- | `z-ai/glm-4.7-flash` |
214
+ | `z-ai/glm-4.7-flash` |
215
+ | `z-ai/glm-5` |
@@ -1,6 +1,6 @@
1
1
  # Model Providers
2
2
 
3
- Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 2173 models from 78 providers through a single API.
3
+ Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 2211 models from 79 providers through a single API.
4
4
 
5
5
  ## Features
6
6
 
@@ -1,6 +1,6 @@
1
1
  # ![Baseten logo](https://models.dev/logos/baseten.svg)Baseten
2
2
 
3
- Access 6 Baseten models through Mastra's model router. Authentication is handled automatically using the `BASETEN_API_KEY` environment variable.
3
+ Access 7 Baseten models through Mastra's model router. Authentication is handled automatically using the `BASETEN_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Baseten documentation](https://docs.baseten.co/development/model-apis/overview).
6
6
 
@@ -37,6 +37,7 @@ for await (const chunk of stream) {
37
37
  | `baseten/deepseek-ai/DeepSeek-V3.2` | 164K | | | | | | $0.30 | $0.45 |
38
38
  | `baseten/moonshotai/Kimi-K2-Instruct-0905` | 262K | | | | | | $0.60 | $3 |
39
39
  | `baseten/moonshotai/Kimi-K2-Thinking` | 262K | | | | | | $0.60 | $3 |
40
+ | `baseten/moonshotai/Kimi-K2.5` | 262K | | | | | | $0.60 | $3 |
40
41
  | `baseten/Qwen/Qwen3-Coder-480B-A35B-Instruct` | 262K | | | | | | $0.38 | $2 |
41
42
  | `baseten/zai-org/GLM-4.6` | 200K | | | | | | $0.60 | $2 |
42
43
  | `baseten/zai-org/GLM-4.7` | 205K | | | | | | $0.60 | $2 |
@@ -1,6 +1,6 @@
1
1
  # ![Cerebras logo](https://models.dev/logos/cerebras.svg)Cerebras
2
2
 
3
- Access 3 Cerebras models through Mastra's model router. Authentication is handled automatically using the `CEREBRAS_API_KEY` environment variable.
3
+ Access 4 Cerebras models through Mastra's model router. Authentication is handled automatically using the `CEREBRAS_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Cerebras documentation](https://inference-docs.cerebras.ai/models/overview).
6
6
 
@@ -33,6 +33,7 @@ for await (const chunk of stream) {
33
33
  | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
34
34
  | ----------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
35
35
  | `cerebras/gpt-oss-120b` | 131K | | | | | | $0.25 | $0.69 |
36
+ | `cerebras/llama3.1-8b` | 32K | | | | | | $0.10 | $0.10 |
36
37
  | `cerebras/qwen-3-235b-a22b-instruct-2507` | 131K | | | | | | $0.60 | $1 |
37
38
  | `cerebras/zai-glm-4.7` | 131K | | | | | | — | — |
38
39
 
@@ -1,6 +1,6 @@
1
1
  # ![Fireworks AI logo](https://models.dev/logos/fireworks-ai.svg)Fireworks AI
2
2
 
3
- Access 17 Fireworks AI models through Mastra's model router. Authentication is handled automatically using the `FIREWORKS_API_KEY` environment variable.
3
+ Access 18 Fireworks AI models through Mastra's model router. Authentication is handled automatically using the `FIREWORKS_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Fireworks AI documentation](https://fireworks.ai/docs/).
6
6
 
@@ -42,6 +42,7 @@ for await (const chunk of stream) {
42
42
  | `fireworks-ai/accounts/fireworks/models/glm-4p5-air` | 131K | | | | | | $0.22 | $0.88 |
43
43
  | `fireworks-ai/accounts/fireworks/models/glm-4p6` | 198K | | | | | | $0.55 | $2 |
44
44
  | `fireworks-ai/accounts/fireworks/models/glm-4p7` | 198K | | | | | | $0.60 | $2 |
45
+ | `fireworks-ai/accounts/fireworks/models/glm-5` | 203K | | | | | | $1 | $3 |
45
46
  | `fireworks-ai/accounts/fireworks/models/gpt-oss-120b` | 131K | | | | | | $0.15 | $0.60 |
46
47
  | `fireworks-ai/accounts/fireworks/models/gpt-oss-20b` | 131K | | | | | | $0.05 | $0.20 |
47
48
  | `fireworks-ai/accounts/fireworks/models/kimi-k2-instruct` | 128K | | | | | | $1 | $3 |
@@ -1,6 +1,6 @@
1
1
  # ![Friendli logo](https://models.dev/logos/friendli.svg)Friendli
2
2
 
3
- Access 7 Friendli models through Mastra's model router. Authentication is handled automatically using the `FRIENDLI_TOKEN` environment variable.
3
+ Access 8 Friendli models through Mastra's model router. Authentication is handled automatically using the `FRIENDLI_TOKEN` environment variable.
4
4
 
5
5
  Learn more in the [Friendli documentation](https://friendli.ai/docs/guides/serverless_endpoints/introduction).
6
6
 
@@ -41,6 +41,7 @@ for await (const chunk of stream) {
41
41
  | `friendli/MiniMaxAI/MiniMax-M2.1` | 197K | | | | | | $0.30 | $1 |
42
42
  | `friendli/Qwen/Qwen3-235B-A22B-Instruct-2507` | 262K | | | | | | $0.20 | $0.80 |
43
43
  | `friendli/zai-org/GLM-4.7` | 203K | | | | | | — | — |
44
+ | `friendli/zai-org/GLM-5` | 203K | | | | | | $1 | $3 |
44
45
 
45
46
  ## Advanced Configuration
46
47
 
@@ -70,7 +71,7 @@ const agent = new Agent({
70
71
  model: ({ requestContext }) => {
71
72
  const useAdvanced = requestContext.task === "complex";
72
73
  return useAdvanced
73
- ? "friendli/zai-org/GLM-4.7"
74
+ ? "friendli/zai-org/GLM-5"
74
75
  : "friendli/LGAI-EXAONE/EXAONE-4.0.1-32B";
75
76
  }
76
77
  });
@@ -1,6 +1,6 @@
1
1
  # ![Hugging Face logo](https://models.dev/logos/huggingface.svg)Hugging Face
2
2
 
3
- Access 16 Hugging Face models through Mastra's model router. Authentication is handled automatically using the `HF_TOKEN` environment variable.
3
+ Access 17 Hugging Face models through Mastra's model router. Authentication is handled automatically using the `HF_TOKEN` environment variable.
4
4
 
5
5
  Learn more in the [Hugging Face documentation](https://huggingface.co).
6
6
 
@@ -50,6 +50,7 @@ for await (const chunk of stream) {
50
50
  | `huggingface/XiaomiMiMo/MiMo-V2-Flash` | 262K | | | | | | $0.10 | $0.30 |
51
51
  | `huggingface/zai-org/GLM-4.7` | 205K | | | | | | $0.60 | $2 |
52
52
  | `huggingface/zai-org/GLM-4.7-Flash` | 200K | | | | | | — | — |
53
+ | `huggingface/zai-org/GLM-5` | 203K | | | | | | $1 | $3 |
53
54
 
54
55
  ## Advanced Configuration
55
56
 
@@ -79,7 +80,7 @@ const agent = new Agent({
79
80
  model: ({ requestContext }) => {
80
81
  const useAdvanced = requestContext.task === "complex";
81
82
  return useAdvanced
82
- ? "huggingface/zai-org/GLM-4.7-Flash"
83
+ ? "huggingface/zai-org/GLM-5"
83
84
  : "huggingface/MiniMaxAI/MiniMax-M2.1";
84
85
  }
85
86
  });
@@ -1,6 +1,6 @@
1
1
  # ![Jiekou.AI logo](https://models.dev/logos/jiekou.svg)Jiekou.AI
2
2
 
3
- Access 59 Jiekou.AI models through Mastra's model router. Authentication is handled automatically using the `JIEKOU_API_KEY` environment variable.
3
+ Access 61 Jiekou.AI models through Mastra's model router. Authentication is handled automatically using the `JIEKOU_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Jiekou.AI documentation](https://docs.jiekou.ai/docs/support/quickstart?utm_source=github_models.dev).
6
6
 
@@ -40,6 +40,7 @@ for await (const chunk of stream) {
40
40
  | `jiekou/claude-opus-4-1-20250805` | 200K | | | | | | $14 | $68 |
41
41
  | `jiekou/claude-opus-4-20250514` | 200K | | | | | | $14 | $68 |
42
42
  | `jiekou/claude-opus-4-5-20251101` | 200K | | | | | | $5 | $23 |
43
+ | `jiekou/claude-opus-4-6` | 1.0M | | | | | | $5 | $25 |
43
44
  | `jiekou/claude-sonnet-4-20250514` | 200K | | | | | | $3 | $14 |
44
45
  | `jiekou/claude-sonnet-4-5-20250929` | 200K | | | | | | $3 | $14 |
45
46
  | `jiekou/deepseek/deepseek-r1-0528` | 164K | | | | | | $0.70 | $3 |
@@ -59,6 +60,7 @@ for await (const chunk of stream) {
59
60
  | `jiekou/gpt-5-mini` | 400K | | | | | | $0.23 | $2 |
60
61
  | `jiekou/gpt-5-nano` | 400K | | | | | | $0.04 | $0.36 |
61
62
  | `jiekou/gpt-5-pro` | 400K | | | | | | $14 | $108 |
63
+ | `jiekou/gpt-5.1` | 400K | | | | | | $1 | $9 |
62
64
  | `jiekou/gpt-5.1-codex` | 400K | | | | | | $1 | $9 |
63
65
  | `jiekou/gpt-5.1-codex-max` | 400K | | | | | | $1 | $9 |
64
66
  | `jiekou/gpt-5.1-codex-mini` | 400K | | | | | | $0.23 | $2 |
@@ -79,13 +81,13 @@ for await (const chunk of stream) {
79
81
  | `jiekou/o3` | 131K | | | | | | $10 | $40 |
80
82
  | `jiekou/o3-mini` | 131K | | | | | | $1 | $4 |
81
83
  | `jiekou/o4-mini` | 200K | | | | | | $1 | $4 |
82
- | `jiekou/qwen/qwen2.5-vl-72b-instruct` | 33K | | | | | | $0.80 | $0.80 |
83
84
  | `jiekou/qwen/qwen3-235b-a22b-fp8` | 41K | | | | | | $0.20 | $0.80 |
84
85
  | `jiekou/qwen/qwen3-235b-a22b-instruct-2507` | 131K | | | | | | $0.15 | $0.80 |
85
86
  | `jiekou/qwen/qwen3-235b-a22b-thinking-2507` | 131K | | | | | | $0.30 | $3 |
86
87
  | `jiekou/qwen/qwen3-30b-a3b-fp8` | 41K | | | | | | $0.09 | $0.45 |
87
88
  | `jiekou/qwen/qwen3-32b-fp8` | 41K | | | | | | $0.10 | $0.45 |
88
89
  | `jiekou/qwen/qwen3-coder-480b-a35b-instruct` | 262K | | | | | | $0.29 | $1 |
90
+ | `jiekou/qwen/qwen3-coder-next` | 262K | | | | | | $0.20 | $2 |
89
91
  | `jiekou/qwen/qwen3-next-80b-a3b-instruct` | 66K | | | | | | $0.15 | $2 |
90
92
  | `jiekou/qwen/qwen3-next-80b-a3b-thinking` | 66K | | | | | | $0.15 | $2 |
91
93
  | `jiekou/xiaomimimo/mimo-v2-flash` | 262K | | | | | | — | — |
@@ -1,6 +1,6 @@
1
1
  # ![MiniMax Coding Plan (minimaxi.com) logo](https://models.dev/logos/minimax-cn-coding-plan.svg)MiniMax Coding Plan (minimaxi.com)
2
2
 
3
- Access 2 MiniMax Coding Plan (minimaxi.com) models through Mastra's model router. Authentication is handled automatically using the `MINIMAX_API_KEY` environment variable.
3
+ Access 3 MiniMax Coding Plan (minimaxi.com) models through Mastra's model router. Authentication is handled automatically using the `MINIMAX_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [MiniMax Coding Plan (minimaxi.com) documentation](https://platform.minimaxi.com/docs/coding-plan/intro).
6
6
 
@@ -36,6 +36,7 @@ for await (const chunk of stream) {
36
36
  | ------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
37
  | `minimax-cn-coding-plan/MiniMax-M2` | 197K | | | | | | — | — |
38
38
  | `minimax-cn-coding-plan/MiniMax-M2.1` | 205K | | | | | | — | — |
39
+ | `minimax-cn-coding-plan/MiniMax-M2.5` | 205K | | | | | | — | — |
39
40
 
40
41
  ## Advanced Configuration
41
42
 
@@ -65,7 +66,7 @@ const agent = new Agent({
65
66
  model: ({ requestContext }) => {
66
67
  const useAdvanced = requestContext.task === "complex";
67
68
  return useAdvanced
68
- ? "minimax-cn-coding-plan/MiniMax-M2.1"
69
+ ? "minimax-cn-coding-plan/MiniMax-M2.5"
69
70
  : "minimax-cn-coding-plan/MiniMax-M2";
70
71
  }
71
72
  });
@@ -1,6 +1,6 @@
1
1
  # ![MiniMax (minimaxi.com) logo](https://models.dev/logos/minimax-cn.svg)MiniMax (minimaxi.com)
2
2
 
3
- Access 2 MiniMax (minimaxi.com) models through Mastra's model router. Authentication is handled automatically using the `MINIMAX_API_KEY` environment variable.
3
+ Access 3 MiniMax (minimaxi.com) models through Mastra's model router. Authentication is handled automatically using the `MINIMAX_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [MiniMax (minimaxi.com) documentation](https://platform.minimaxi.com/docs/guides/quickstart).
6
6
 
@@ -36,6 +36,7 @@ for await (const chunk of stream) {
36
36
  | ------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
37
  | `minimax-cn/MiniMax-M2` | 197K | | | | | | $0.30 | $1 |
38
38
  | `minimax-cn/MiniMax-M2.1` | 205K | | | | | | $0.30 | $1 |
39
+ | `minimax-cn/MiniMax-M2.5` | 205K | | | | | | $0.30 | $1 |
39
40
 
40
41
  ## Advanced Configuration
41
42
 
@@ -65,7 +66,7 @@ const agent = new Agent({
65
66
  model: ({ requestContext }) => {
66
67
  const useAdvanced = requestContext.task === "complex";
67
68
  return useAdvanced
68
- ? "minimax-cn/MiniMax-M2.1"
69
+ ? "minimax-cn/MiniMax-M2.5"
69
70
  : "minimax-cn/MiniMax-M2";
70
71
  }
71
72
  });
@@ -1,6 +1,6 @@
1
1
  # ![MiniMax Coding Plan (minimax.io) logo](https://models.dev/logos/minimax-coding-plan.svg)MiniMax Coding Plan (minimax.io)
2
2
 
3
- Access 2 MiniMax Coding Plan (minimax.io) models through Mastra's model router. Authentication is handled automatically using the `MINIMAX_API_KEY` environment variable.
3
+ Access 3 MiniMax Coding Plan (minimax.io) models through Mastra's model router. Authentication is handled automatically using the `MINIMAX_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [MiniMax Coding Plan (minimax.io) documentation](https://platform.minimax.io/docs/coding-plan/intro).
6
6
 
@@ -36,6 +36,7 @@ for await (const chunk of stream) {
36
36
  | ---------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
37
  | `minimax-coding-plan/MiniMax-M2` | 197K | | | | | | — | — |
38
38
  | `minimax-coding-plan/MiniMax-M2.1` | 205K | | | | | | — | — |
39
+ | `minimax-coding-plan/MiniMax-M2.5` | 205K | | | | | | — | — |
39
40
 
40
41
  ## Advanced Configuration
41
42
 
@@ -65,7 +66,7 @@ const agent = new Agent({
65
66
  model: ({ requestContext }) => {
66
67
  const useAdvanced = requestContext.task === "complex";
67
68
  return useAdvanced
68
- ? "minimax-coding-plan/MiniMax-M2.1"
69
+ ? "minimax-coding-plan/MiniMax-M2.5"
69
70
  : "minimax-coding-plan/MiniMax-M2";
70
71
  }
71
72
  });
@@ -1,6 +1,6 @@
1
1
  # ![MiniMax (minimax.io) logo](https://models.dev/logos/minimax.svg)MiniMax (minimax.io)
2
2
 
3
- Access 2 MiniMax (minimax.io) models through Mastra's model router. Authentication is handled automatically using the `MINIMAX_API_KEY` environment variable.
3
+ Access 3 MiniMax (minimax.io) models through Mastra's model router. Authentication is handled automatically using the `MINIMAX_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [MiniMax (minimax.io) documentation](https://platform.minimax.io/docs/guides/quickstart).
6
6
 
@@ -36,6 +36,7 @@ for await (const chunk of stream) {
36
36
  | ---------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
37
  | `minimax/MiniMax-M2` | 197K | | | | | | $0.30 | $1 |
38
38
  | `minimax/MiniMax-M2.1` | 205K | | | | | | $0.30 | $1 |
39
+ | `minimax/MiniMax-M2.5` | 205K | | | | | | $0.30 | $1 |
39
40
 
40
41
  ## Advanced Configuration
41
42
 
@@ -65,7 +66,7 @@ const agent = new Agent({
65
66
  model: ({ requestContext }) => {
66
67
  const useAdvanced = requestContext.task === "complex";
67
68
  return useAdvanced
68
- ? "minimax/MiniMax-M2.1"
69
+ ? "minimax/MiniMax-M2.5"
69
70
  : "minimax/MiniMax-M2";
70
71
  }
71
72
  });
@@ -1,6 +1,6 @@
1
1
  # ![NanoGPT logo](https://models.dev/logos/nano-gpt.svg)NanoGPT
2
2
 
3
- Access 21 NanoGPT models through Mastra's model router. Authentication is handled automatically using the `NANO_GPT_API_KEY` environment variable.
3
+ Access 29 NanoGPT models through Mastra's model router. Authentication is handled automatically using the `NANO_GPT_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [NanoGPT documentation](https://docs.nano-gpt.com).
6
6
 
@@ -39,22 +39,30 @@ for await (const chunk of stream) {
39
39
  | `nano-gpt/meta-llama/llama-3.3-70b-instruct` | 128K | | | | | | $1 | $2 |
40
40
  | `nano-gpt/meta-llama/llama-4-maverick` | 128K | | | | | | $1 | $2 |
41
41
  | `nano-gpt/minimax/minimax-m2.1` | 128K | | | | | | $1 | $2 |
42
+ | `nano-gpt/minimax/minimax-m2.5` | 205K | | | | | | $0.30 | $1 |
43
+ | `nano-gpt/minimax/minimax-m2.5-official` | 205K | | | | | | $0.30 | $1 |
42
44
  | `nano-gpt/mistralai/devstral-2-123b-instruct-2512` | 131K | | | | | | $1 | $2 |
43
45
  | `nano-gpt/mistralai/ministral-14b-instruct-2512` | 131K | | | | | | $1 | $2 |
44
46
  | `nano-gpt/mistralai/mistral-large-3-675b-instruct-2512` | 131K | | | | | | $1 | $2 |
45
47
  | `nano-gpt/moonshotai/kimi-k2-instruct` | 131K | | | | | | $1 | $2 |
46
48
  | `nano-gpt/moonshotai/kimi-k2-thinking` | 33K | | | | | | $1 | $2 |
49
+ | `nano-gpt/moonshotai/kimi-k2.5` | 256K | | | | | | $0.30 | $2 |
50
+ | `nano-gpt/moonshotai/kimi-k2.5-thinking` | 256K | | | | | | $0.30 | $2 |
47
51
  | `nano-gpt/nousresearch/hermes-4-405b:thinking` | 128K | | | | | | $1 | $2 |
48
52
  | `nano-gpt/nvidia/llama-3_3-nemotron-super-49b-v1_5` | 128K | | | | | | $1 | $2 |
49
53
  | `nano-gpt/openai/gpt-oss-120b` | 128K | | | | | | $1 | $2 |
50
54
  | `nano-gpt/qwen/qwen3-235b-a22b-thinking-2507` | 262K | | | | | | $1 | $2 |
51
55
  | `nano-gpt/qwen/qwen3-coder` | 106K | | | | | | $1 | $2 |
52
- | `nano-gpt/z-ai/glm-4.6` | 200K | | | | | | $1 | $2 |
53
- | `nano-gpt/z-ai/glm-4.6:thinking` | 128K | | | | | | $1 | $2 |
54
56
  | `nano-gpt/zai-org/glm-4.5-air` | 128K | | | | | | $1 | $2 |
55
57
  | `nano-gpt/zai-org/glm-4.5-air:thinking` | 128K | | | | | | $1 | $2 |
58
+ | `nano-gpt/zai-org/glm-4.6` | 200K | | | | | | $1 | $2 |
59
+ | `nano-gpt/zai-org/glm-4.6:thinking` | 128K | | | | | | $1 | $2 |
56
60
  | `nano-gpt/zai-org/glm-4.7` | 205K | | | | | | $1 | $2 |
57
61
  | `nano-gpt/zai-org/glm-4.7:thinking` | 128K | | | | | | $1 | $2 |
62
+ | `nano-gpt/zai-org/glm-5` | 200K | | | | | | $0.80 | $3 |
63
+ | `nano-gpt/zai-org/glm-5-original` | 200K | | | | | | $0.80 | $3 |
64
+ | `nano-gpt/zai-org/glm-5-original:thinking` | 200K | | | | | | $0.80 | $3 |
65
+ | `nano-gpt/zai-org/glm-5:thinking` | 200K | | | | | | $0.80 | $3 |
58
66
 
59
67
  ## Advanced Configuration
60
68
 
@@ -84,7 +92,7 @@ const agent = new Agent({
84
92
  model: ({ requestContext }) => {
85
93
  const useAdvanced = requestContext.task === "complex";
86
94
  return useAdvanced
87
- ? "nano-gpt/zai-org/glm-4.7:thinking"
95
+ ? "nano-gpt/zai-org/glm-5:thinking"
88
96
  : "nano-gpt/deepseek/deepseek-r1";
89
97
  }
90
98
  });
@@ -1,6 +1,6 @@
1
1
  # ![NovitaAI logo](https://models.dev/logos/novita-ai.svg)NovitaAI
2
2
 
3
- Access 81 NovitaAI models through Mastra's model router. Authentication is handled automatically using the `NOVITA_API_KEY` environment variable.
3
+ Access 83 NovitaAI models through Mastra's model router. Authentication is handled automatically using the `NOVITA_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [NovitaAI documentation](https://novita.ai/docs/guides/introduction).
6
6
 
@@ -67,6 +67,7 @@ for await (const chunk of stream) {
67
67
  | `novita-ai/microsoft/wizardlm-2-8x22b` | 66K | | | | | | $0.62 | $0.62 |
68
68
  | `novita-ai/minimax/minimax-m2` | 205K | | | | | | $0.30 | $1 |
69
69
  | `novita-ai/minimax/minimax-m2.1` | 205K | | | | | | $0.30 | $1 |
70
+ | `novita-ai/minimax/minimax-m2.5` | 205K | | | | | | $0.30 | $1 |
70
71
  | `novita-ai/minimaxai/minimax-m1-80k` | 1.0M | | | | | | $0.55 | $2 |
71
72
  | `novita-ai/mistralai/mistral-nemo` | 60K | | | | | | $0.04 | $0.17 |
72
73
  | `novita-ai/moonshotai/kimi-k2-0905` | 262K | | | | | | $0.60 | $3 |
@@ -115,6 +116,7 @@ for await (const chunk of stream) {
115
116
  | `novita-ai/zai-org/glm-4.6v` | 131K | | | | | | $0.30 | $0.90 |
116
117
  | `novita-ai/zai-org/glm-4.7` | 205K | | | | | | $0.60 | $2 |
117
118
  | `novita-ai/zai-org/glm-4.7-flash` | 200K | | | | | | $0.07 | $0.40 |
119
+ | `novita-ai/zai-org/glm-5` | 203K | | | | | | $1 | $3 |
118
120
 
119
121
  ## Advanced Configuration
120
122
 
@@ -144,7 +146,7 @@ const agent = new Agent({
144
146
  model: ({ requestContext }) => {
145
147
  const useAdvanced = requestContext.task === "complex";
146
148
  return useAdvanced
147
- ? "novita-ai/zai-org/glm-4.7-flash"
149
+ ? "novita-ai/zai-org/glm-5"
148
150
  : "novita-ai/baichuan/baichuan-m2-32b";
149
151
  }
150
152
  });
@@ -1,6 +1,6 @@
1
1
  # ![Ollama Cloud logo](https://models.dev/logos/ollama-cloud.svg)Ollama Cloud
2
2
 
3
- Access 30 Ollama Cloud models through Mastra's model router. Authentication is handled automatically using the `OLLAMA_API_KEY` environment variable.
3
+ Access 32 Ollama Cloud models through Mastra's model router. Authentication is handled automatically using the `OLLAMA_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Ollama Cloud documentation](https://docs.ollama.com/cloud).
6
6
 
@@ -46,6 +46,7 @@ for await (const chunk of stream) {
46
46
  | `ollama-cloud/gemma3:4b` | 131K | | | | | | — | — |
47
47
  | `ollama-cloud/glm-4.6` | 203K | | | | | | — | — |
48
48
  | `ollama-cloud/glm-4.7` | 203K | | | | | | — | — |
49
+ | `ollama-cloud/glm-5` | 203K | | | | | | — | — |
49
50
  | `ollama-cloud/gpt-oss:120b` | 131K | | | | | | — | — |
50
51
  | `ollama-cloud/gpt-oss:20b` | 131K | | | | | | — | — |
51
52
  | `ollama-cloud/kimi-k2-thinking` | 262K | | | | | | — | — |
@@ -53,6 +54,7 @@ for await (const chunk of stream) {
53
54
  | `ollama-cloud/kimi-k2.5` | 262K | | | | | | — | — |
54
55
  | `ollama-cloud/minimax-m2` | 205K | | | | | | — | — |
55
56
  | `ollama-cloud/minimax-m2.1` | 205K | | | | | | — | — |
57
+ | `ollama-cloud/minimax-m2.5` | 205K | | | | | | — | — |
56
58
  | `ollama-cloud/ministral-3:14b` | 262K | | | | | | — | — |
57
59
  | `ollama-cloud/ministral-3:3b` | 262K | | | | | | — | — |
58
60
  | `ollama-cloud/ministral-3:8b` | 262K | | | | | | — | — |
@@ -1,6 +1,6 @@
1
1
  # ![OpenAI logo](https://models.dev/logos/openai.svg)OpenAI
2
2
 
3
- Access 41 OpenAI models through Mastra's model router. Authentication is handled automatically using the `OPENAI_API_KEY` environment variable.
3
+ Access 42 OpenAI models through Mastra's model router. Authentication is handled automatically using the `OPENAI_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [OpenAI documentation](https://platform.openai.com/docs/models).
6
6
 
@@ -44,22 +44,23 @@ for await (const chunk of stream) {
44
44
  | `openai/gpt-4o-2024-08-06` | 128K | | | | | | $3 | $10 |
45
45
  | `openai/gpt-4o-2024-11-20` | 128K | | | | | | $3 | $10 |
46
46
  | `openai/gpt-4o-mini` | 128K | | | | | | $0.15 | $0.60 |
47
- | `openai/gpt-5` | 272K | | | | | | $1 | $10 |
48
- | `openai/gpt-5-chat-latest` | 272K | | | | | | $1 | $10 |
49
- | `openai/gpt-5-codex` | 272K | | | | | | $1 | $10 |
50
- | `openai/gpt-5-mini` | 272K | | | | | | $0.25 | $2 |
51
- | `openai/gpt-5-nano` | 272K | | | | | | $0.05 | $0.40 |
47
+ | `openai/gpt-5` | 400K | | | | | | $1 | $10 |
48
+ | `openai/gpt-5-chat-latest` | 400K | | | | | | $1 | $10 |
49
+ | `openai/gpt-5-codex` | 400K | | | | | | $1 | $10 |
50
+ | `openai/gpt-5-mini` | 400K | | | | | | $0.25 | $2 |
51
+ | `openai/gpt-5-nano` | 400K | | | | | | $0.05 | $0.40 |
52
52
  | `openai/gpt-5-pro` | 400K | | | | | | $15 | $120 |
53
- | `openai/gpt-5.1` | 272K | | | | | | $1 | $10 |
53
+ | `openai/gpt-5.1` | 400K | | | | | | $1 | $10 |
54
54
  | `openai/gpt-5.1-chat-latest` | 128K | | | | | | $1 | $10 |
55
- | `openai/gpt-5.1-codex` | 272K | | | | | | $1 | $10 |
56
- | `openai/gpt-5.1-codex-max` | 272K | | | | | | $1 | $10 |
57
- | `openai/gpt-5.1-codex-mini` | 272K | | | | | | $0.25 | $2 |
58
- | `openai/gpt-5.2` | 272K | | | | | | $2 | $14 |
55
+ | `openai/gpt-5.1-codex` | 400K | | | | | | $1 | $10 |
56
+ | `openai/gpt-5.1-codex-max` | 400K | | | | | | $1 | $10 |
57
+ | `openai/gpt-5.1-codex-mini` | 400K | | | | | | $0.25 | $2 |
58
+ | `openai/gpt-5.2` | 400K | | | | | | $2 | $14 |
59
59
  | `openai/gpt-5.2-chat-latest` | 128K | | | | | | $2 | $14 |
60
- | `openai/gpt-5.2-codex` | 272K | | | | | | $2 | $14 |
61
- | `openai/gpt-5.2-pro` | 272K | | | | | | $21 | $168 |
62
- | `openai/gpt-5.3-codex` | 272K | | | | | | $2 | $14 |
60
+ | `openai/gpt-5.2-codex` | 400K | | | | | | $2 | $14 |
61
+ | `openai/gpt-5.2-pro` | 400K | | | | | | $21 | $168 |
62
+ | `openai/gpt-5.3-codex` | 400K | | | | | | $2 | $14 |
63
+ | `openai/gpt-5.3-codex-spark` | 128K | | | | | | $2 | $14 |
63
64
  | `openai/o1` | 200K | | | | | | $15 | $60 |
64
65
  | `openai/o1-mini` | 128K | | | | | | $1 | $4 |
65
66
  | `openai/o1-preview` | 128K | | | | | | $15 | $60 |