@mastra/mcp-docs-server 1.1.17 → 1.1.18-alpha.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. package/.docs/docs/agents/overview.md +4 -4
  2. package/.docs/docs/agents/processors.md +1 -1
  3. package/.docs/docs/community/licensing.md +7 -9
  4. package/.docs/docs/deployment/monorepo.md +0 -6
  5. package/.docs/docs/evals/built-in-scorers.md +1 -1
  6. package/.docs/docs/{observability → evals}/datasets/overview.md +12 -12
  7. package/.docs/docs/{observability → evals}/datasets/running-experiments.md +9 -9
  8. package/.docs/docs/evals/overview.md +8 -9
  9. package/.docs/docs/getting-started/manual-install.md +1 -2
  10. package/.docs/docs/index.md +1 -1
  11. package/.docs/docs/mastra-cloud/deployment.md +2 -2
  12. package/.docs/docs/mastra-cloud/observability.md +2 -2
  13. package/.docs/docs/mastra-cloud/overview.md +1 -1
  14. package/.docs/docs/mastra-cloud/setup.md +3 -3
  15. package/.docs/docs/mcp/publishing-mcp-server.md +20 -0
  16. package/.docs/docs/memory/message-history.md +6 -4
  17. package/.docs/docs/memory/observational-memory.md +20 -11
  18. package/.docs/docs/memory/overview.md +4 -4
  19. package/.docs/docs/memory/semantic-recall.md +28 -19
  20. package/.docs/docs/memory/storage.md +4 -4
  21. package/.docs/docs/observability/metrics/overview.md +114 -0
  22. package/.docs/docs/observability/overview.md +13 -5
  23. package/.docs/docs/observability/tracing/exporters/default.md +2 -4
  24. package/.docs/docs/observability/tracing/exporters/laminar.md +4 -4
  25. package/.docs/docs/observability/tracing/exporters/sentry.md +4 -4
  26. package/.docs/docs/observability/tracing/overview.md +2 -2
  27. package/.docs/docs/rag/chunking-and-embedding.md +2 -2
  28. package/.docs/docs/server/auth/composite-auth.md +1 -7
  29. package/.docs/docs/server/auth/custom-auth-provider.md +2 -4
  30. package/.docs/docs/server/auth/jwt.md +1 -1
  31. package/.docs/docs/server/auth/simple-auth.md +1 -7
  32. package/.docs/docs/server/auth.md +3 -3
  33. package/.docs/docs/server/custom-adapters.md +3 -1
  34. package/.docs/docs/server/custom-api-routes.md +1 -1
  35. package/.docs/docs/server/mastra-client.md +1 -3
  36. package/.docs/docs/server/mastra-server.md +8 -0
  37. package/.docs/docs/server/request-context.md +17 -17
  38. package/.docs/docs/server/server-adapters.md +8 -8
  39. package/.docs/docs/streaming/events.md +1 -90
  40. package/.docs/docs/streaming/overview.md +0 -42
  41. package/.docs/docs/studio/auth.md +142 -0
  42. package/.docs/docs/{deployment/studio.md → studio/deployment.md} +42 -16
  43. package/.docs/docs/studio/observability.md +98 -0
  44. package/.docs/docs/studio/overview.md +127 -0
  45. package/.docs/docs/workflows/agents-and-tools.md +7 -10
  46. package/.docs/docs/workflows/control-flow.md +1 -1
  47. package/.docs/docs/workflows/overview.md +12 -7
  48. package/.docs/docs/workflows/suspend-and-resume.md +1 -1
  49. package/.docs/guides/concepts/multi-agent-systems.md +4 -4
  50. package/.docs/guides/deployment/vercel.md +1 -1
  51. package/.docs/guides/getting-started/next-js.md +1 -1
  52. package/.docs/guides/getting-started/quickstart.md +1 -1
  53. package/.docs/guides/guide/ai-recruiter.md +1 -1
  54. package/.docs/guides/guide/chef-michel.md +1 -1
  55. package/.docs/guides/guide/code-review-bot.md +1 -1
  56. package/.docs/guides/guide/dev-assistant.md +1 -1
  57. package/.docs/guides/guide/docs-manager.md +1 -1
  58. package/.docs/guides/guide/github-actions-pr-description.md +1 -1
  59. package/.docs/guides/guide/notes-mcp-server.md +1 -1
  60. package/.docs/guides/guide/stock-agent.md +1 -1
  61. package/.docs/guides/guide/web-search.md +2 -2
  62. package/.docs/guides/index.md +1 -1
  63. package/.docs/guides/migrations/upgrade-to-v1/client.md +1 -1
  64. package/.docs/guides/migrations/upgrade-to-v1/tracing.md +1 -1
  65. package/.docs/models/gateways/netlify.md +65 -66
  66. package/.docs/models/gateways/openrouter.md +2 -1
  67. package/.docs/models/gateways/vercel.md +3 -1
  68. package/.docs/models/index.md +2 -2
  69. package/.docs/models/providers/aihubmix.md +17 -102
  70. package/.docs/models/providers/opencode.md +3 -2
  71. package/.docs/models/providers/synthetic.md +2 -1
  72. package/.docs/models/providers/vultr.md +11 -16
  73. package/.docs/models/providers/zenmux.md +2 -31
  74. package/.docs/models/providers.md +0 -1
  75. package/.docs/reference/agents/agent.md +1 -1
  76. package/.docs/reference/cli/mastra.md +3 -3
  77. package/.docs/reference/client-js/workflows.md +2 -2
  78. package/.docs/reference/configuration.md +4 -4
  79. package/.docs/reference/deployer/cloudflare.md +1 -1
  80. package/.docs/reference/deployer/vercel.md +1 -1
  81. package/.docs/reference/index.md +16 -14
  82. package/.docs/reference/memory/observational-memory.md +1 -1
  83. package/.docs/reference/observability/metrics/automatic-metrics.md +132 -0
  84. package/.docs/reference/storage/cloudflare-d1.md +1 -1
  85. package/.docs/reference/storage/cloudflare.md +3 -3
  86. package/.docs/reference/storage/convex.md +1 -1
  87. package/.docs/reference/storage/dynamodb.md +1 -1
  88. package/.docs/reference/storage/lance.md +1 -1
  89. package/.docs/reference/storage/upstash.md +1 -1
  90. package/.docs/reference/workspace/vercel.md +118 -0
  91. package/CHANGELOG.md +22 -0
  92. package/package.json +5 -5
  93. package/.docs/docs/getting-started/studio.md +0 -113
  94. package/.docs/docs/mastra-cloud/studio.md +0 -24
@@ -56,7 +56,7 @@ Once setup is complete, follow the instructions in your terminal to start the Ma
56
56
 
57
57
  Try asking about the weather. If your API key is set up correctly, you'll get a response:
58
58
 
59
- [Studio](https://mastra.ai/docs/getting-started/studio) lets you rapidly build and prototype agents without needing to build a UI. Once you're ready, you can integrate your Mastra agent into your app using the guides below.
59
+ [Studio](https://mastra.ai/docs/studio/overview) lets you rapidly build and prototype agents without needing to build a UI. Once you're ready, you can integrate your Mastra agent into your app using the guides below.
60
60
 
61
61
  ## Next steps
62
62
 
@@ -178,7 +178,7 @@ Set up the Workflow, define steps to extract and classify candidate data, and th
178
178
 
179
179
  ## Testing the workflow
180
180
 
181
- You can test your workflow inside [Studio](https://mastra.ai/docs/getting-started/studio) by starting the development server:
181
+ You can test your workflow inside [Studio](https://mastra.ai/docs/studio/overview) by starting the development server:
182
182
 
183
183
  ```bash
184
184
  mastra dev
@@ -179,7 +179,7 @@ Learn how to interact with your agent through Mastra's API.
179
179
  mastra dev
180
180
  ```
181
181
 
182
- This will start a server exposing endpoints to interact with your registered agents. Within [Studio](https://mastra.ai/docs/getting-started/studio) you can test your agent through a UI.
182
+ This will start a server exposing endpoints to interact with your registered agents. Within [Studio](https://mastra.ai/docs/studio/overview) you can test your agent through a UI.
183
183
 
184
184
  2. By default, `mastra dev` runs on `http://localhost:4111`. Your Chef Assistant agent will be available at:
185
185
 
@@ -149,7 +149,7 @@ export const mastra = new Mastra({
149
149
 
150
150
  ## Test the bot
151
151
 
152
- Start Mastra Studio and interact with the code review bot to see it in action.
152
+ Start [Studio](https://mastra.ai/docs/studio/overview) and interact with the code review bot to see it in action.
153
153
 
154
154
  **npm**:
155
155
 
@@ -223,7 +223,7 @@ export const mastra = new Mastra({
223
223
 
224
224
  ## Test the assistant
225
225
 
226
- Start Mastra Studio and interact with the agent to see it in action.
226
+ Start [Studio](https://mastra.ai/docs/studio/overview) and interact with the agent to see it in action.
227
227
 
228
228
  **npm**:
229
229
 
@@ -129,7 +129,7 @@ export const mastra = new Mastra({
129
129
 
130
130
  ## Test the docs manager
131
131
 
132
- Start Mastra Studio and interact with the agent to see it in action.
132
+ Start [Studio](https://mastra.ai/docs/studio/overview) and interact with the agent to see it in action.
133
133
 
134
134
  **npm**:
135
135
 
@@ -103,7 +103,7 @@ Output format:
103
103
 
104
104
  ### Test the agent locally
105
105
 
106
- Before deploying the workflow, you can test the agent in [Mastra Studio](https://mastra.ai/docs/getting-started/studio) to verify it generates descriptions correctly.
106
+ Before deploying the workflow, you can test the agent in [Studio](https://mastra.ai/docs/studio/overview) to verify it generates descriptions correctly.
107
107
 
108
108
  1. Get a diff from any public GitHub PR by appending `.diff` to the PR URL:
109
109
 
@@ -403,7 +403,7 @@ Let's add the MCP server!
403
403
 
404
404
  ## Run the server
405
405
 
406
- Great, you've authored your first MCP server! Now you can try it out by starting the Mastra dev server and opening [Studio](https://mastra.ai/docs/getting-started/studio):
406
+ Great, you've authored your first MCP server! Now you can try it out by starting the Mastra dev server and opening [Studio](https://mastra.ai/docs/studio/overview):
407
407
 
408
408
  ```bash
409
409
  npm run dev
@@ -100,7 +100,7 @@ Learn how to interact with your agent through Mastra's API.
100
100
  mastra dev
101
101
  ```
102
102
 
103
- This will start a server exposing endpoints to interact with your registered agents. Within [Studio](https://mastra.ai/docs/getting-started/studio) you can test your `stockAgent` and `stockPrices` tool through a UI.
103
+ This will start a server exposing endpoints to interact with your registered agents. Within [Studio](https://mastra.ai/docs/studio/overview) you can test your `stockAgent` and `stockPrices` tool through a UI.
104
104
 
105
105
  2. By default, `mastra dev` runs on `http://localhost:4111`. Your Stock agent will be available at:
106
106
 
@@ -194,7 +194,7 @@ Some LLM providers include built-in web search capabilities that can be used dir
194
194
  })
195
195
  ```
196
196
 
197
- 5. You can test your agent with [Studio](https://mastra.ai/docs/getting-started/studio) using the `mastra dev` command:
197
+ 5. You can test your agent with [Studio](https://mastra.ai/docs/studio/overview) using the `mastra dev` command:
198
198
 
199
199
  ```bash
200
200
  mastra dev
@@ -311,7 +311,7 @@ For more control over search behavior, you can integrate external search APIs as
311
311
  })
312
312
  ```
313
313
 
314
- 6. You can test your agent with [Studio](https://mastra.ai/docs/getting-started/studio) using the `mastra dev` command:
314
+ 6. You can test your agent with [Studio](https://mastra.ai/docs/studio/overview) using the `mastra dev` command:
315
315
 
316
316
  ```bash
317
317
  mastra dev
@@ -4,7 +4,7 @@ Mastra offers a variety of guides to help you build and work with Mastra, from b
4
4
 
5
5
  ## New project
6
6
 
7
- The `create mastra` command is the fastest way to build your first agent. It walks you through setup and generates an example agent you can run and adapt in [Studio](https://mastra.ai/docs/getting-started/studio) right away. You can always integrate Mastra with your framework or UI when you’re ready.
7
+ The `create mastra` command is the fastest way to build your first agent. It walks you through setup and generates an example agent you can run and adapt in [Studio](https://mastra.ai/docs/studio/overview) right away. You can always integrate Mastra with your framework or UI when you’re ready.
8
8
 
9
9
  - [Quickstart](https://mastra.ai/guides/getting-started/quickstart)
10
10
 
@@ -34,7 +34,7 @@ const agent = client.getAgent('my-agent');
34
34
  > **Codemod:** You can use Mastra's codemod CLI to update your code automatically:
35
35
  >
36
36
  > ```bash
37
- > npx @mastra/codemod@beta v1/client-msg-function-args .
37
+ > npx @mastra/codemod@latest v1/client-msg-function-args .
38
38
  > ```
39
39
 
40
40
  ### `threadId` and `resourceId` to `memory` option
@@ -45,7 +45,7 @@ export const mastra = new Mastra({
45
45
  default: {
46
46
  serviceName: 'mastra',
47
47
  exporters: [
48
- new DefaultExporter(), // Persists traces to storage for Mastra Studio
48
+ new DefaultExporter(), // Persists traces to storage for Studio
49
49
  new CloudExporter(), // Sends traces to Mastra Cloud (if MASTRA_CLOUD_ACCESS_TOKEN is set)
50
50
  ],
51
51
  spanOutputProcessors: [
@@ -1,6 +1,6 @@
1
1
  # Netlify
2
2
 
3
- Netlify AI Gateway provides unified access to multiple providers with built-in caching and observability. Access 63 models through Mastra's model router.
3
+ Netlify AI Gateway provides unified access to multiple providers with built-in caching and observability. Access 62 models through Mastra's model router.
4
4
 
5
5
  Learn more in the [Netlify documentation](https://docs.netlify.com/build/ai-gateway/overview/).
6
6
 
@@ -33,68 +33,67 @@ ANTHROPIC_API_KEY=ant-...
33
33
 
34
34
  ## Available models
35
35
 
36
- | Model |
37
- | ---------------------------------------------- |
38
- | `anthropic/claude-3-haiku-20240307` |
39
- | `anthropic/claude-haiku-4-5` |
40
- | `anthropic/claude-haiku-4-5-20251001` |
41
- | `anthropic/claude-opus-4-1-20250805` |
42
- | `anthropic/claude-opus-4-20250514` |
43
- | `anthropic/claude-opus-4-5` |
44
- | `anthropic/claude-opus-4-5-20251101` |
45
- | `anthropic/claude-opus-4-6` |
46
- | `anthropic/claude-sonnet-4-0` |
47
- | `anthropic/claude-sonnet-4-20250514` |
48
- | `anthropic/claude-sonnet-4-5` |
49
- | `anthropic/claude-sonnet-4-5-20250929` |
50
- | `anthropic/claude-sonnet-4-6` |
51
- | `gemini/gemini-2.0-flash` |
52
- | `gemini/gemini-2.0-flash-lite` |
53
- | `gemini/gemini-2.5-flash` |
54
- | `gemini/gemini-2.5-flash-image` |
55
- | `gemini/gemini-2.5-flash-lite` |
56
- | `gemini/gemini-2.5-flash-lite-preview-09-2025` |
57
- | `gemini/gemini-2.5-pro` |
58
- | `gemini/gemini-3-flash-preview` |
59
- | `gemini/gemini-3-pro-image-preview` |
60
- | `gemini/gemini-3.1-flash-image-preview` |
61
- | `gemini/gemini-3.1-flash-lite-preview` |
62
- | `gemini/gemini-3.1-pro-preview` |
63
- | `gemini/gemini-3.1-pro-preview-customtools` |
64
- | `gemini/gemini-flash-latest` |
65
- | `gemini/gemini-flash-lite-latest` |
66
- | `openai/gpt-4.1` |
67
- | `openai/gpt-4.1-mini` |
68
- | `openai/gpt-4.1-nano` |
69
- | `openai/gpt-4o` |
70
- | `openai/gpt-4o-mini` |
71
- | `openai/gpt-5` |
72
- | `openai/gpt-5-2025-08-07` |
73
- | `openai/gpt-5-codex` |
74
- | `openai/gpt-5-mini` |
75
- | `openai/gpt-5-mini-2025-08-07` |
76
- | `openai/gpt-5-nano` |
77
- | `openai/gpt-5-pro` |
78
- | `openai/gpt-5.1` |
79
- | `openai/gpt-5.1-2025-11-13` |
80
- | `openai/gpt-5.1-codex` |
81
- | `openai/gpt-5.1-codex-max` |
82
- | `openai/gpt-5.1-codex-mini` |
83
- | `openai/gpt-5.2` |
84
- | `openai/gpt-5.2-2025-12-11` |
85
- | `openai/gpt-5.2-codex` |
86
- | `openai/gpt-5.2-pro` |
87
- | `openai/gpt-5.2-pro-2025-12-11` |
88
- | `openai/gpt-5.3-chat-latest` |
89
- | `openai/gpt-5.3-codex` |
90
- | `openai/gpt-5.4` |
91
- | `openai/gpt-5.4-2026-03-05` |
92
- | `openai/gpt-5.4-mini` |
93
- | `openai/gpt-5.4-mini-2026-03-17` |
94
- | `openai/gpt-5.4-nano` |
95
- | `openai/gpt-5.4-nano-2026-03-17` |
96
- | `openai/gpt-5.4-pro` |
97
- | `openai/gpt-5.4-pro-2026-03-05` |
98
- | `openai/o3` |
99
- | `openai/o3-mini` |
100
- | `openai/o4-mini` |
36
+ | Model |
37
+ | ------------------------------------------- |
38
+ | `anthropic/claude-3-haiku-20240307` |
39
+ | `anthropic/claude-haiku-4-5` |
40
+ | `anthropic/claude-haiku-4-5-20251001` |
41
+ | `anthropic/claude-opus-4-1-20250805` |
42
+ | `anthropic/claude-opus-4-20250514` |
43
+ | `anthropic/claude-opus-4-5` |
44
+ | `anthropic/claude-opus-4-5-20251101` |
45
+ | `anthropic/claude-opus-4-6` |
46
+ | `anthropic/claude-sonnet-4-0` |
47
+ | `anthropic/claude-sonnet-4-20250514` |
48
+ | `anthropic/claude-sonnet-4-5` |
49
+ | `anthropic/claude-sonnet-4-5-20250929` |
50
+ | `anthropic/claude-sonnet-4-6` |
51
+ | `gemini/gemini-2.0-flash` |
52
+ | `gemini/gemini-2.0-flash-lite` |
53
+ | `gemini/gemini-2.5-flash` |
54
+ | `gemini/gemini-2.5-flash-image` |
55
+ | `gemini/gemini-2.5-flash-lite` |
56
+ | `gemini/gemini-2.5-pro` |
57
+ | `gemini/gemini-3-flash-preview` |
58
+ | `gemini/gemini-3-pro-image-preview` |
59
+ | `gemini/gemini-3.1-flash-image-preview` |
60
+ | `gemini/gemini-3.1-flash-lite-preview` |
61
+ | `gemini/gemini-3.1-pro-preview` |
62
+ | `gemini/gemini-3.1-pro-preview-customtools` |
63
+ | `gemini/gemini-flash-latest` |
64
+ | `gemini/gemini-flash-lite-latest` |
65
+ | `openai/gpt-4.1` |
66
+ | `openai/gpt-4.1-mini` |
67
+ | `openai/gpt-4.1-nano` |
68
+ | `openai/gpt-4o` |
69
+ | `openai/gpt-4o-mini` |
70
+ | `openai/gpt-5` |
71
+ | `openai/gpt-5-2025-08-07` |
72
+ | `openai/gpt-5-codex` |
73
+ | `openai/gpt-5-mini` |
74
+ | `openai/gpt-5-mini-2025-08-07` |
75
+ | `openai/gpt-5-nano` |
76
+ | `openai/gpt-5-pro` |
77
+ | `openai/gpt-5.1` |
78
+ | `openai/gpt-5.1-2025-11-13` |
79
+ | `openai/gpt-5.1-codex` |
80
+ | `openai/gpt-5.1-codex-max` |
81
+ | `openai/gpt-5.1-codex-mini` |
82
+ | `openai/gpt-5.2` |
83
+ | `openai/gpt-5.2-2025-12-11` |
84
+ | `openai/gpt-5.2-codex` |
85
+ | `openai/gpt-5.2-pro` |
86
+ | `openai/gpt-5.2-pro-2025-12-11` |
87
+ | `openai/gpt-5.3-chat-latest` |
88
+ | `openai/gpt-5.3-codex` |
89
+ | `openai/gpt-5.4` |
90
+ | `openai/gpt-5.4-2026-03-05` |
91
+ | `openai/gpt-5.4-mini` |
92
+ | `openai/gpt-5.4-mini-2026-03-17` |
93
+ | `openai/gpt-5.4-nano` |
94
+ | `openai/gpt-5.4-nano-2026-03-17` |
95
+ | `openai/gpt-5.4-pro` |
96
+ | `openai/gpt-5.4-pro-2026-03-05` |
97
+ | `openai/o3` |
98
+ | `openai/o3-mini` |
99
+ | `openai/o4-mini` |
@@ -1,6 +1,6 @@
1
1
  # ![OpenRouter logo](https://models.dev/logos/openrouter.svg)OpenRouter
2
2
 
3
- OpenRouter aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 165 models through Mastra's model router.
3
+ OpenRouter aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 166 models through Mastra's model router.
4
4
 
5
5
  Learn more in the [OpenRouter documentation](https://openrouter.ai/models).
6
6
 
@@ -172,6 +172,7 @@ ANTHROPIC_API_KEY=ant-...
172
172
  | `qwen/qwen3-next-80b-a3b-thinking` |
173
173
  | `qwen/qwen3.5-397b-a17b` |
174
174
  | `qwen/qwen3.5-plus-02-15` |
175
+ | `qwen/qwen3.6-plus-preview:free` |
175
176
  | `sourceful/riverflow-v2-fast-preview` |
176
177
  | `sourceful/riverflow-v2-max-preview` |
177
178
  | `sourceful/riverflow-v2-standard-preview` |
@@ -1,6 +1,6 @@
1
1
  # ![Vercel logo](https://models.dev/logos/vercel.svg)Vercel
2
2
 
3
- Vercel aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 224 models through Mastra's model router.
3
+ Vercel aggregates models from multiple providers with enhanced features like rate limiting and failover. Access 226 models through Mastra's model router.
4
4
 
5
5
  Learn more in the [Vercel documentation](https://ai-sdk.dev/providers/ai-sdk-providers).
6
6
 
@@ -117,6 +117,7 @@ ANTHROPIC_API_KEY=ant-...
117
117
  | `inception/mercury-2` |
118
118
  | `inception/mercury-coder-small` |
119
119
  | `kwaipilot/kat-coder-pro-v1` |
120
+ | `kwaipilot/kat-coder-pro-v2` |
120
121
  | `meituan/longcat-flash-chat` |
121
122
  | `meituan/longcat-flash-thinking` |
122
123
  | `meituan/longcat-flash-thinking-2601` |
@@ -162,6 +163,7 @@ ANTHROPIC_API_KEY=ant-...
162
163
  | `morph/morph-v3-fast` |
163
164
  | `morph/morph-v3-large` |
164
165
  | `nvidia/nemotron-3-nano-30b-a3b` |
166
+ | `nvidia/nemotron-3-super-120b-a12b` |
165
167
  | `nvidia/nemotron-nano-12b-v2-vl` |
166
168
  | `nvidia/nemotron-nano-9b-v2` |
167
169
  | `openai/codex-mini` |
@@ -1,6 +1,6 @@
1
1
  # Model Providers
2
2
 
3
- Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 3616 models from 95 providers through a single API.
3
+ Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 3566 models from 94 providers through a single API.
4
4
 
5
5
  ## Features
6
6
 
@@ -92,7 +92,7 @@ Browse the directory of available models using the navigation on the left, or ex
92
92
 
93
93
  You can also discover models directly in your editor. Mastra provides full autocomplete for the `model` field - just start typing, and your IDE will show available options.
94
94
 
95
- Alternatively, browse and test models in [Studio](https://mastra.ai/docs/getting-started/studio) UI.
95
+ Alternatively, browse and test models in [Studio](https://mastra.ai/docs/studio/overview) UI.
96
96
 
97
97
  > **Info:** In development, we auto-refresh your local model list every hour, ensuring your TypeScript autocomplete and Studio stay up-to-date with the latest models. To disable, set `MASTRA_AUTO_REFRESH_PROVIDERS=false`. Auto-refresh is disabled by default in production.
98
98
 
@@ -1,118 +1,33 @@
1
1
  # ![AIHubMix logo](https://models.dev/logos/aihubmix.svg)AIHubMix
2
2
 
3
- Access 48 AIHubMix models through Mastra's model router. Authentication is handled automatically using the `AIHUBMIX_API_KEY` environment variable.
3
+ AIHubMix is available through the AI SDK. Install the provider package to use their models with Mastra.
4
4
 
5
- Learn more in the [AIHubMix documentation](https://docs.aihubmix.com).
5
+ For detailed provider-specific documentation, see the [AI SDK AIHubMix provider docs](https://ai-sdk.dev/providers/community-providers/aihubmix).
6
6
 
7
- ```bash
8
- AIHUBMIX_API_KEY=your-api-key
9
- ```
10
-
11
- ```typescript
12
- import { Agent } from "@mastra/core/agent";
7
+ To use this provider with Mastra agents, see the [Agent Overview documentation](https://mastra.ai/docs/agents/overview).
13
8
 
14
- const agent = new Agent({
15
- id: "my-agent",
16
- name: "My Agent",
17
- instructions: "You are a helpful assistant",
18
- model: "aihubmix/Kimi-K2-0905"
19
- });
9
+ ## Installation
20
10
 
21
- // Generate a response
22
- const response = await agent.generate("Hello!");
11
+ **npm**:
23
12
 
24
- // Stream a response
25
- const stream = await agent.stream("Tell me a story");
26
- for await (const chunk of stream) {
27
- console.log(chunk);
28
- }
13
+ ```bash
14
+ npm install @aihubmix/ai-sdk-provider
29
15
  ```
30
16
 
31
- > **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [AIHubMix documentation](https://docs.aihubmix.com) for details.
32
-
33
- ## Models
17
+ **pnpm**:
34
18
 
35
- | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
- | ----------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
- | `aihubmix/claude-haiku-4-5` | 200K | | | | | | $1 | $6 |
38
- | `aihubmix/claude-opus-4-1` | 200K | | | | | | $17 | $83 |
39
- | `aihubmix/claude-opus-4-5` | 200K | | | | | | $5 | $25 |
40
- | `aihubmix/claude-opus-4-6` | 200K | | | | | | $5 | $25 |
41
- | `aihubmix/claude-opus-4-6-think` | 200K | | | | | | $5 | $25 |
42
- | `aihubmix/claude-sonnet-4-5` | 200K | | | | | | $3 | $17 |
43
- | `aihubmix/claude-sonnet-4-6` | 200K | | | | | | $3 | $15 |
44
- | `aihubmix/claude-sonnet-4-6-think` | 200K | | | | | | $3 | $15 |
45
- | `aihubmix/coding-glm-4.7` | 205K | | | | | | $0.27 | $1 |
46
- | `aihubmix/coding-glm-4.7-free` | 205K | | | | | | — | — |
47
- | `aihubmix/coding-glm-5-free` | 205K | | | | | | — | — |
48
- | `aihubmix/coding-minimax-m2.1-free` | 205K | | | | | | — | — |
49
- | `aihubmix/deepseek-v3.2` | 131K | | | | | | $0.30 | $0.45 |
50
- | `aihubmix/deepseek-v3.2-fast` | 128K | | | | | | $1 | $3 |
51
- | `aihubmix/deepseek-v3.2-think` | 131K | | | | | | $0.30 | $0.45 |
52
- | `aihubmix/gemini-2.5-flash` | 1.0M | | | | | | $0.07 | $0.30 |
53
- | `aihubmix/gemini-2.5-pro` | 2.0M | | | | | | $1 | $5 |
54
- | `aihubmix/gemini-3-pro-preview` | 1.0M | | | | | | $2 | $12 |
55
- | `aihubmix/gemini-3-pro-preview-search` | 1.0M | | | | | | $2 | $12 |
56
- | `aihubmix/glm-4.6v` | 128K | | | | | | $0.14 | $0.41 |
57
- | `aihubmix/glm-4.7` | 205K | | | | | | $0.27 | $1 |
58
- | `aihubmix/glm-5` | 205K | | | | | | $0.88 | $3 |
59
- | `aihubmix/gpt-4.1` | 1.0M | | | | | | $2 | $8 |
60
- | `aihubmix/gpt-4.1-mini` | 1.0M | | | | | | $0.40 | $2 |
61
- | `aihubmix/gpt-4.1-nano` | 1.0M | | | | | | $0.10 | $0.40 |
62
- | `aihubmix/gpt-4o` | 128K | | | | | | $3 | $10 |
63
- | `aihubmix/gpt-5` | 400K | | | | | | $5 | $20 |
64
- | `aihubmix/gpt-5-codex` | 400K | | | | | | $1 | $10 |
65
- | `aihubmix/gpt-5-mini` | 200K | | | | | | $2 | $6 |
66
- | `aihubmix/gpt-5-nano` | 128K | | | | | | $0.50 | $2 |
67
- | `aihubmix/gpt-5-pro` | 400K | | | | | | $7 | $28 |
68
- | `aihubmix/gpt-5.1` | 400K | | | | | | $1 | $10 |
69
- | `aihubmix/gpt-5.1-codex` | 400K | | | | | | $1 | $10 |
70
- | `aihubmix/gpt-5.1-codex-max` | 400K | | | | | | $1 | $10 |
71
- | `aihubmix/gpt-5.1-codex-mini` | 400K | | | | | | $0.25 | $2 |
72
- | `aihubmix/gpt-5.2` | 400K | | | | | | $2 | $14 |
73
- | `aihubmix/gpt-5.2-codex` | 400K | | | | | | $2 | $14 |
74
- | `aihubmix/Kimi-K2-0905` | 262K | | | | | | $0.55 | $2 |
75
- | `aihubmix/kimi-k2.5` | 262K | | | | | | $0.60 | $3 |
76
- | `aihubmix/minimax-m2.1` | 205K | | | | | | $0.29 | $1 |
77
- | `aihubmix/minimax-m2.5` | 205K | | | | | | $0.29 | $1 |
78
- | `aihubmix/o4-mini` | 200K | | | | | | $2 | $6 |
79
- | `aihubmix/qwen3-235b-a22b-instruct-2507` | 262K | | | | | | $0.28 | $1 |
80
- | `aihubmix/qwen3-235b-a22b-thinking-2507` | 262K | | | | | | $0.28 | $3 |
81
- | `aihubmix/qwen3-coder-480b-a35b-instruct` | 262K | | | | | | $0.82 | $3 |
82
- | `aihubmix/qwen3-coder-next` | 262K | | | | | | $0.14 | $0.55 |
83
- | `aihubmix/qwen3-max-2026-01-23` | 262K | | | | | | $0.34 | $1 |
84
- | `aihubmix/qwen3.5-plus` | 1.0M | | | | | | $0.11 | $0.66 |
85
-
86
- ## Advanced configuration
19
+ ```bash
20
+ pnpm add @aihubmix/ai-sdk-provider
21
+ ```
87
22
 
88
- ### Custom headers
23
+ **Yarn**:
89
24
 
90
- ```typescript
91
- const agent = new Agent({
92
- id: "custom-agent",
93
- name: "custom-agent",
94
- model: {
95
- url: "https://aihubmix.com/v1",
96
- id: "aihubmix/Kimi-K2-0905",
97
- apiKey: process.env.AIHUBMIX_API_KEY,
98
- headers: {
99
- "X-Custom-Header": "value"
100
- }
101
- }
102
- });
25
+ ```bash
26
+ yarn add @aihubmix/ai-sdk-provider
103
27
  ```
104
28
 
105
- ### Dynamic model selection
29
+ **Bun**:
106
30
 
107
- ```typescript
108
- const agent = new Agent({
109
- id: "dynamic-agent",
110
- name: "Dynamic Agent",
111
- model: ({ requestContext }) => {
112
- const useAdvanced = requestContext.task === "complex";
113
- return useAdvanced
114
- ? "aihubmix/qwen3.5-plus"
115
- : "aihubmix/Kimi-K2-0905";
116
- }
117
- });
31
+ ```bash
32
+ bun add @aihubmix/ai-sdk-provider
118
33
  ```
@@ -1,6 +1,6 @@
1
1
  # ![OpenCode Zen logo](https://models.dev/logos/opencode.svg)OpenCode Zen
2
2
 
3
- Access 33 OpenCode Zen models through Mastra's model router. Authentication is handled automatically using the `OPENCODE_API_KEY` environment variable.
3
+ Access 34 OpenCode Zen models through Mastra's model router. Authentication is handled automatically using the `OPENCODE_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [OpenCode Zen documentation](https://opencode.ai/docs/zen).
6
6
 
@@ -67,6 +67,7 @@ for await (const chunk of stream) {
67
67
  | `opencode/minimax-m2.5` | 205K | | | | | | $0.30 | $1 |
68
68
  | `opencode/minimax-m2.5-free` | 205K | | | | | | — | — |
69
69
  | `opencode/nemotron-3-super-free` | 1.0M | | | | | | — | — |
70
+ | `opencode/qwen3.6-plus-free` | 1.0M | | | | | | — | — |
70
71
 
71
72
  ## Advanced configuration
72
73
 
@@ -96,7 +97,7 @@ const agent = new Agent({
96
97
  model: ({ requestContext }) => {
97
98
  const useAdvanced = requestContext.task === "complex";
98
99
  return useAdvanced
99
- ? "opencode/nemotron-3-super-free"
100
+ ? "opencode/qwen3.6-plus-free"
100
101
  : "opencode/big-pickle";
101
102
  }
102
103
  });
@@ -1,6 +1,6 @@
1
1
  # ![Synthetic logo](https://models.dev/logos/synthetic.svg)Synthetic
2
2
 
3
- Access 28 Synthetic models through Mastra's model router. Authentication is handled automatically using the `SYNTHETIC_API_KEY` environment variable.
3
+ Access 29 Synthetic models through Mastra's model router. Authentication is handled automatically using the `SYNTHETIC_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Synthetic documentation](https://synthetic.new/pricing).
6
6
 
@@ -59,6 +59,7 @@ for await (const chunk of stream) {
59
59
  | `synthetic/hf:Qwen/Qwen3-235B-A22B-Instruct-2507` | 256K | | | | | | $0.20 | $0.60 |
60
60
  | `synthetic/hf:Qwen/Qwen3-235B-A22B-Thinking-2507` | 256K | | | | | | $0.65 | $3 |
61
61
  | `synthetic/hf:Qwen/Qwen3-Coder-480B-A35B-Instruct` | 256K | | | | | | $2 | $2 |
62
+ | `synthetic/hf:Qwen/Qwen3.5-397B-A17B` | 262K | | | | | | $0.60 | $3 |
62
63
  | `synthetic/hf:zai-org/GLM-4.6` | 200K | | | | | | $0.55 | $2 |
63
64
  | `synthetic/hf:zai-org/GLM-4.7` | 200K | | | | | | $0.55 | $2 |
64
65
  | `synthetic/hf:zai-org/GLM-4.7-Flash` | 197K | | | | | | $0.06 | $0.40 |
@@ -1,6 +1,6 @@
1
1
  # ![Vultr logo](https://models.dev/logos/vultr.svg)Vultr
2
2
 
3
- Access 9 Vultr models through Mastra's model router. Authentication is handled automatically using the `VULTR_API_KEY` environment variable.
3
+ Access 4 Vultr models through Mastra's model router. Authentication is handled automatically using the `VULTR_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Vultr documentation](https://api.vultrinference.com/).
6
6
 
@@ -15,7 +15,7 @@ const agent = new Agent({
15
15
  id: "my-agent",
16
16
  name: "My Agent",
17
17
  instructions: "You are a helpful assistant",
18
- model: "vultr/DeepSeek-R1-Distill-Llama-70B"
18
+ model: "vultr/DeepSeek-V3.2"
19
19
  });
20
20
 
21
21
  // Generate a response
@@ -32,17 +32,12 @@ for await (const chunk of stream) {
32
32
 
33
33
  ## Models
34
34
 
35
- | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
- | ----------------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
- | `vultr/DeepSeek-R1-Distill-Llama-70B` | 130K | | | | | | $2 | $2 |
38
- | `vultr/DeepSeek-R1-Distill-Qwen-32B` | 130K | | | | | | $0.30 | $0.30 |
39
- | `vultr/DeepSeek-V3.2` | 163K | | | | | | $0.55 | $2 |
40
- | `vultr/GLM-5-FP8` | 202K | | | | | | $0.85 | $3 |
41
- | `vultr/gpt-oss-120b` | 130K | | | | | | $0.15 | $0.60 |
42
- | `vultr/Kimi-K2.5` | 261K | | | | | | $0.55 | $3 |
43
- | `vultr/Llama-3_1-Nemotron-Ultra-253B-v1` | 32K | | | | | | $0.55 | $2 |
44
- | `vultr/MiniMax-M2.5` | 196K | | | | | | $0.30 | $1 |
45
- | `vultr/NVIDIA-Nemotron-3-Super-120B-A12B-NVFP4` | 260K | | | | | | $0.20 | $0.80 |
35
+ | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
+ | --------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
+ | `vultr/DeepSeek-V3.2` | 163K | | | | | | $0.55 | $2 |
38
+ | `vultr/GLM-5-FP8` | 202K | | | | | | $0.85 | $3 |
39
+ | `vultr/Kimi-K2.5` | 261K | | | | | | $0.55 | $3 |
40
+ | `vultr/MiniMax-M2.5` | 196K | | | | | | $0.30 | $1 |
46
41
 
47
42
  ## Advanced configuration
48
43
 
@@ -54,7 +49,7 @@ const agent = new Agent({
54
49
  name: "custom-agent",
55
50
  model: {
56
51
  url: "https://api.vultrinference.com/v1",
57
- id: "vultr/DeepSeek-R1-Distill-Llama-70B",
52
+ id: "vultr/DeepSeek-V3.2",
58
53
  apiKey: process.env.VULTR_API_KEY,
59
54
  headers: {
60
55
  "X-Custom-Header": "value"
@@ -72,8 +67,8 @@ const agent = new Agent({
72
67
  model: ({ requestContext }) => {
73
68
  const useAdvanced = requestContext.task === "complex";
74
69
  return useAdvanced
75
- ? "vultr/gpt-oss-120b"
76
- : "vultr/DeepSeek-R1-Distill-Llama-70B";
70
+ ? "vultr/MiniMax-M2.5"
71
+ : "vultr/DeepSeek-V3.2";
77
72
  }
78
73
  });
79
74
  ```