@mastra/mcp-docs-server 1.1.18-alpha.1 → 1.1.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/.docs/docs/agents/overview.md +4 -4
  2. package/.docs/docs/agents/processors.md +1 -1
  3. package/.docs/docs/community/licensing.md +7 -9
  4. package/.docs/docs/deployment/monorepo.md +0 -6
  5. package/.docs/docs/evals/built-in-scorers.md +1 -1
  6. package/.docs/docs/{observability → evals}/datasets/overview.md +12 -12
  7. package/.docs/docs/{observability → evals}/datasets/running-experiments.md +9 -9
  8. package/.docs/docs/evals/overview.md +8 -9
  9. package/.docs/docs/getting-started/manual-install.md +1 -2
  10. package/.docs/docs/index.md +1 -1
  11. package/.docs/docs/mastra-cloud/deployment.md +2 -2
  12. package/.docs/docs/mastra-cloud/observability.md +2 -2
  13. package/.docs/docs/mastra-cloud/overview.md +1 -1
  14. package/.docs/docs/mastra-cloud/setup.md +3 -3
  15. package/.docs/docs/mcp/publishing-mcp-server.md +20 -0
  16. package/.docs/docs/memory/message-history.md +6 -4
  17. package/.docs/docs/memory/observational-memory.md +20 -11
  18. package/.docs/docs/memory/overview.md +4 -4
  19. package/.docs/docs/memory/semantic-recall.md +28 -19
  20. package/.docs/docs/memory/storage.md +4 -4
  21. package/.docs/docs/observability/metrics/overview.md +114 -0
  22. package/.docs/docs/observability/overview.md +13 -5
  23. package/.docs/docs/observability/tracing/exporters/default.md +2 -4
  24. package/.docs/docs/observability/tracing/exporters/laminar.md +4 -4
  25. package/.docs/docs/observability/tracing/exporters/sentry.md +4 -4
  26. package/.docs/docs/observability/tracing/overview.md +2 -2
  27. package/.docs/docs/rag/chunking-and-embedding.md +2 -2
  28. package/.docs/docs/server/auth/composite-auth.md +1 -7
  29. package/.docs/docs/server/auth/custom-auth-provider.md +2 -4
  30. package/.docs/docs/server/auth/jwt.md +1 -1
  31. package/.docs/docs/server/auth/simple-auth.md +1 -7
  32. package/.docs/docs/server/auth.md +3 -3
  33. package/.docs/docs/server/custom-adapters.md +3 -1
  34. package/.docs/docs/server/custom-api-routes.md +1 -1
  35. package/.docs/docs/server/mastra-client.md +1 -3
  36. package/.docs/docs/server/mastra-server.md +8 -0
  37. package/.docs/docs/server/request-context.md +17 -17
  38. package/.docs/docs/server/server-adapters.md +8 -8
  39. package/.docs/docs/streaming/events.md +1 -90
  40. package/.docs/docs/streaming/overview.md +0 -42
  41. package/.docs/docs/studio/auth.md +142 -0
  42. package/.docs/docs/{deployment/studio.md → studio/deployment.md} +42 -16
  43. package/.docs/docs/studio/observability.md +98 -0
  44. package/.docs/docs/studio/overview.md +127 -0
  45. package/.docs/docs/workflows/agents-and-tools.md +7 -10
  46. package/.docs/docs/workflows/control-flow.md +1 -1
  47. package/.docs/docs/workflows/overview.md +12 -7
  48. package/.docs/docs/workflows/suspend-and-resume.md +1 -1
  49. package/.docs/guides/concepts/multi-agent-systems.md +4 -4
  50. package/.docs/guides/deployment/vercel.md +1 -1
  51. package/.docs/guides/getting-started/next-js.md +1 -1
  52. package/.docs/guides/getting-started/quickstart.md +1 -1
  53. package/.docs/guides/guide/ai-recruiter.md +1 -1
  54. package/.docs/guides/guide/chef-michel.md +1 -1
  55. package/.docs/guides/guide/code-review-bot.md +1 -1
  56. package/.docs/guides/guide/dev-assistant.md +1 -1
  57. package/.docs/guides/guide/docs-manager.md +1 -1
  58. package/.docs/guides/guide/github-actions-pr-description.md +1 -1
  59. package/.docs/guides/guide/notes-mcp-server.md +1 -1
  60. package/.docs/guides/guide/stock-agent.md +1 -1
  61. package/.docs/guides/guide/web-search.md +2 -2
  62. package/.docs/guides/index.md +1 -1
  63. package/.docs/guides/migrations/upgrade-to-v1/client.md +1 -1
  64. package/.docs/guides/migrations/upgrade-to-v1/tracing.md +1 -1
  65. package/.docs/models/index.md +2 -2
  66. package/.docs/models/providers/aihubmix.md +17 -102
  67. package/.docs/models/providers/synthetic.md +2 -1
  68. package/.docs/models/providers.md +0 -1
  69. package/.docs/reference/agents/agent.md +1 -1
  70. package/.docs/reference/cli/mastra.md +3 -3
  71. package/.docs/reference/client-js/workflows.md +2 -2
  72. package/.docs/reference/configuration.md +4 -4
  73. package/.docs/reference/deployer/cloudflare.md +1 -1
  74. package/.docs/reference/deployer/vercel.md +1 -1
  75. package/.docs/reference/index.md +16 -14
  76. package/.docs/reference/memory/observational-memory.md +1 -1
  77. package/.docs/reference/observability/metrics/automatic-metrics.md +132 -0
  78. package/.docs/reference/storage/cloudflare-d1.md +1 -1
  79. package/.docs/reference/storage/cloudflare.md +3 -3
  80. package/.docs/reference/storage/convex.md +1 -1
  81. package/.docs/reference/storage/dynamodb.md +1 -1
  82. package/.docs/reference/storage/lance.md +1 -1
  83. package/.docs/reference/storage/upstash.md +1 -1
  84. package/.docs/reference/workspace/vercel.md +118 -0
  85. package/CHANGELOG.md +23 -0
  86. package/package.json +6 -6
  87. package/.docs/docs/getting-started/studio.md +0 -113
  88. package/.docs/docs/mastra-cloud/studio.md +0 -24
@@ -56,7 +56,7 @@ Once setup is complete, follow the instructions in your terminal to start the Ma
56
56
 
57
57
  Try asking about the weather. If your API key is set up correctly, you'll get a response:
58
58
 
59
- [Studio](https://mastra.ai/docs/getting-started/studio) lets you rapidly build and prototype agents without needing to build a UI. Once you're ready, you can integrate your Mastra agent into your app using the guides below.
59
+ [Studio](https://mastra.ai/docs/studio/overview) lets you rapidly build and prototype agents without needing to build a UI. Once you're ready, you can integrate your Mastra agent into your app using the guides below.
60
60
 
61
61
  ## Next steps
62
62
 
@@ -178,7 +178,7 @@ Set up the Workflow, define steps to extract and classify candidate data, and th
178
178
 
179
179
  ## Testing the workflow
180
180
 
181
- You can test your workflow inside [Studio](https://mastra.ai/docs/getting-started/studio) by starting the development server:
181
+ You can test your workflow inside [Studio](https://mastra.ai/docs/studio/overview) by starting the development server:
182
182
 
183
183
  ```bash
184
184
  mastra dev
@@ -179,7 +179,7 @@ Learn how to interact with your agent through Mastra's API.
179
179
  mastra dev
180
180
  ```
181
181
 
182
- This will start a server exposing endpoints to interact with your registered agents. Within [Studio](https://mastra.ai/docs/getting-started/studio) you can test your agent through a UI.
182
+ This will start a server exposing endpoints to interact with your registered agents. Within [Studio](https://mastra.ai/docs/studio/overview) you can test your agent through a UI.
183
183
 
184
184
  2. By default, `mastra dev` runs on `http://localhost:4111`. Your Chef Assistant agent will be available at:
185
185
 
@@ -149,7 +149,7 @@ export const mastra = new Mastra({
149
149
 
150
150
  ## Test the bot
151
151
 
152
- Start Mastra Studio and interact with the code review bot to see it in action.
152
+ Start [Studio](https://mastra.ai/docs/studio/overview) and interact with the code review bot to see it in action.
153
153
 
154
154
  **npm**:
155
155
 
@@ -223,7 +223,7 @@ export const mastra = new Mastra({
223
223
 
224
224
  ## Test the assistant
225
225
 
226
- Start Mastra Studio and interact with the agent to see it in action.
226
+ Start [Studio](https://mastra.ai/docs/studio/overview) and interact with the agent to see it in action.
227
227
 
228
228
  **npm**:
229
229
 
@@ -129,7 +129,7 @@ export const mastra = new Mastra({
129
129
 
130
130
  ## Test the docs manager
131
131
 
132
- Start Mastra Studio and interact with the agent to see it in action.
132
+ Start [Studio](https://mastra.ai/docs/studio/overview) and interact with the agent to see it in action.
133
133
 
134
134
  **npm**:
135
135
 
@@ -103,7 +103,7 @@ Output format:
103
103
 
104
104
  ### Test the agent locally
105
105
 
106
- Before deploying the workflow, you can test the agent in [Mastra Studio](https://mastra.ai/docs/getting-started/studio) to verify it generates descriptions correctly.
106
+ Before deploying the workflow, you can test the agent in [Studio](https://mastra.ai/docs/studio/overview) to verify it generates descriptions correctly.
107
107
 
108
108
  1. Get a diff from any public GitHub PR by appending `.diff` to the PR URL:
109
109
 
@@ -403,7 +403,7 @@ Let's add the MCP server!
403
403
 
404
404
  ## Run the server
405
405
 
406
- Great, you've authored your first MCP server! Now you can try it out by starting the Mastra dev server and opening [Studio](https://mastra.ai/docs/getting-started/studio):
406
+ Great, you've authored your first MCP server! Now you can try it out by starting the Mastra dev server and opening [Studio](https://mastra.ai/docs/studio/overview):
407
407
 
408
408
  ```bash
409
409
  npm run dev
@@ -100,7 +100,7 @@ Learn how to interact with your agent through Mastra's API.
100
100
  mastra dev
101
101
  ```
102
102
 
103
- This will start a server exposing endpoints to interact with your registered agents. Within [Studio](https://mastra.ai/docs/getting-started/studio) you can test your `stockAgent` and `stockPrices` tool through a UI.
103
+ This will start a server exposing endpoints to interact with your registered agents. Within [Studio](https://mastra.ai/docs/studio/overview) you can test your `stockAgent` and `stockPrices` tool through a UI.
104
104
 
105
105
  2. By default, `mastra dev` runs on `http://localhost:4111`. Your Stock agent will be available at:
106
106
 
@@ -194,7 +194,7 @@ Some LLM providers include built-in web search capabilities that can be used dir
194
194
  })
195
195
  ```
196
196
 
197
- 5. You can test your agent with [Studio](https://mastra.ai/docs/getting-started/studio) using the `mastra dev` command:
197
+ 5. You can test your agent with [Studio](https://mastra.ai/docs/studio/overview) using the `mastra dev` command:
198
198
 
199
199
  ```bash
200
200
  mastra dev
@@ -311,7 +311,7 @@ For more control over search behavior, you can integrate external search APIs as
311
311
  })
312
312
  ```
313
313
 
314
- 6. You can test your agent with [Studio](https://mastra.ai/docs/getting-started/studio) using the `mastra dev` command:
314
+ 6. You can test your agent with [Studio](https://mastra.ai/docs/studio/overview) using the `mastra dev` command:
315
315
 
316
316
  ```bash
317
317
  mastra dev
@@ -4,7 +4,7 @@ Mastra offers a variety of guides to help you build and work with Mastra, from b
4
4
 
5
5
  ## New project
6
6
 
7
- The `create mastra` command is the fastest way to build your first agent. It walks you through setup and generates an example agent you can run and adapt in [Studio](https://mastra.ai/docs/getting-started/studio) right away. You can always integrate Mastra with your framework or UI when you’re ready.
7
+ The `create mastra` command is the fastest way to build your first agent. It walks you through setup and generates an example agent you can run and adapt in [Studio](https://mastra.ai/docs/studio/overview) right away. You can always integrate Mastra with your framework or UI when you’re ready.
8
8
 
9
9
  - [Quickstart](https://mastra.ai/guides/getting-started/quickstart)
10
10
 
@@ -34,7 +34,7 @@ const agent = client.getAgent('my-agent');
34
34
  > **Codemod:** You can use Mastra's codemod CLI to update your code automatically:
35
35
  >
36
36
  > ```bash
37
- > npx @mastra/codemod@beta v1/client-msg-function-args .
37
+ > npx @mastra/codemod@latest v1/client-msg-function-args .
38
38
  > ```
39
39
 
40
40
  ### `threadId` and `resourceId` to `memory` option
@@ -45,7 +45,7 @@ export const mastra = new Mastra({
45
45
  default: {
46
46
  serviceName: 'mastra',
47
47
  exporters: [
48
- new DefaultExporter(), // Persists traces to storage for Mastra Studio
48
+ new DefaultExporter(), // Persists traces to storage for Studio
49
49
  new CloudExporter(), // Sends traces to Mastra Cloud (if MASTRA_CLOUD_ACCESS_TOKEN is set)
50
50
  ],
51
51
  spanOutputProcessors: [
@@ -1,6 +1,6 @@
1
1
  # Model Providers
2
2
 
3
- Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 3613 models from 95 providers through a single API.
3
+ Mastra provides a unified interface for working with LLMs across multiple providers, giving you access to 3566 models from 94 providers through a single API.
4
4
 
5
5
  ## Features
6
6
 
@@ -92,7 +92,7 @@ Browse the directory of available models using the navigation on the left, or ex
92
92
 
93
93
  You can also discover models directly in your editor. Mastra provides full autocomplete for the `model` field - just start typing, and your IDE will show available options.
94
94
 
95
- Alternatively, browse and test models in [Studio](https://mastra.ai/docs/getting-started/studio) UI.
95
+ Alternatively, browse and test models in [Studio](https://mastra.ai/docs/studio/overview) UI.
96
96
 
97
97
  > **Info:** In development, we auto-refresh your local model list every hour, ensuring your TypeScript autocomplete and Studio stay up-to-date with the latest models. To disable, set `MASTRA_AUTO_REFRESH_PROVIDERS=false`. Auto-refresh is disabled by default in production.
98
98
 
@@ -1,118 +1,33 @@
1
1
  # ![AIHubMix logo](https://models.dev/logos/aihubmix.svg)AIHubMix
2
2
 
3
- Access 48 AIHubMix models through Mastra's model router. Authentication is handled automatically using the `AIHUBMIX_API_KEY` environment variable.
3
+ AIHubMix is available through the AI SDK. Install the provider package to use their models with Mastra.
4
4
 
5
- Learn more in the [AIHubMix documentation](https://docs.aihubmix.com).
5
+ For detailed provider-specific documentation, see the [AI SDK AIHubMix provider docs](https://ai-sdk.dev/providers/community-providers/aihubmix).
6
6
 
7
- ```bash
8
- AIHUBMIX_API_KEY=your-api-key
9
- ```
10
-
11
- ```typescript
12
- import { Agent } from "@mastra/core/agent";
7
+ To use this provider with Mastra agents, see the [Agent Overview documentation](https://mastra.ai/docs/agents/overview).
13
8
 
14
- const agent = new Agent({
15
- id: "my-agent",
16
- name: "My Agent",
17
- instructions: "You are a helpful assistant",
18
- model: "aihubmix/Kimi-K2-0905"
19
- });
9
+ ## Installation
20
10
 
21
- // Generate a response
22
- const response = await agent.generate("Hello!");
11
+ **npm**:
23
12
 
24
- // Stream a response
25
- const stream = await agent.stream("Tell me a story");
26
- for await (const chunk of stream) {
27
- console.log(chunk);
28
- }
13
+ ```bash
14
+ npm install @aihubmix/ai-sdk-provider
29
15
  ```
30
16
 
31
- > **Info:** Mastra uses the OpenAI-compatible `/chat/completions` endpoint. Some provider-specific features may not be available. Check the [AIHubMix documentation](https://docs.aihubmix.com) for details.
32
-
33
- ## Models
17
+ **pnpm**:
34
18
 
35
- | Model | Context | Tools | Reasoning | Image | Audio | Video | Input $/1M | Output $/1M |
36
- | ----------------------------------------- | ------- | ----- | --------- | ----- | ----- | ----- | ---------- | ----------- |
37
- | `aihubmix/claude-haiku-4-5` | 200K | | | | | | $1 | $6 |
38
- | `aihubmix/claude-opus-4-1` | 200K | | | | | | $17 | $83 |
39
- | `aihubmix/claude-opus-4-5` | 200K | | | | | | $5 | $25 |
40
- | `aihubmix/claude-opus-4-6` | 200K | | | | | | $5 | $25 |
41
- | `aihubmix/claude-opus-4-6-think` | 200K | | | | | | $5 | $25 |
42
- | `aihubmix/claude-sonnet-4-5` | 200K | | | | | | $3 | $17 |
43
- | `aihubmix/claude-sonnet-4-6` | 200K | | | | | | $3 | $15 |
44
- | `aihubmix/claude-sonnet-4-6-think` | 200K | | | | | | $3 | $15 |
45
- | `aihubmix/coding-glm-4.7` | 205K | | | | | | $0.27 | $1 |
46
- | `aihubmix/coding-glm-4.7-free` | 205K | | | | | | — | — |
47
- | `aihubmix/coding-glm-5-free` | 205K | | | | | | — | — |
48
- | `aihubmix/coding-minimax-m2.1-free` | 205K | | | | | | — | — |
49
- | `aihubmix/deepseek-v3.2` | 131K | | | | | | $0.30 | $0.45 |
50
- | `aihubmix/deepseek-v3.2-fast` | 128K | | | | | | $1 | $3 |
51
- | `aihubmix/deepseek-v3.2-think` | 131K | | | | | | $0.30 | $0.45 |
52
- | `aihubmix/gemini-2.5-flash` | 1.0M | | | | | | $0.07 | $0.30 |
53
- | `aihubmix/gemini-2.5-pro` | 2.0M | | | | | | $1 | $5 |
54
- | `aihubmix/gemini-3-pro-preview` | 1.0M | | | | | | $2 | $12 |
55
- | `aihubmix/gemini-3-pro-preview-search` | 1.0M | | | | | | $2 | $12 |
56
- | `aihubmix/glm-4.6v` | 128K | | | | | | $0.14 | $0.41 |
57
- | `aihubmix/glm-4.7` | 205K | | | | | | $0.27 | $1 |
58
- | `aihubmix/glm-5` | 205K | | | | | | $0.88 | $3 |
59
- | `aihubmix/gpt-4.1` | 1.0M | | | | | | $2 | $8 |
60
- | `aihubmix/gpt-4.1-mini` | 1.0M | | | | | | $0.40 | $2 |
61
- | `aihubmix/gpt-4.1-nano` | 1.0M | | | | | | $0.10 | $0.40 |
62
- | `aihubmix/gpt-4o` | 128K | | | | | | $3 | $10 |
63
- | `aihubmix/gpt-5` | 400K | | | | | | $5 | $20 |
64
- | `aihubmix/gpt-5-codex` | 400K | | | | | | $1 | $10 |
65
- | `aihubmix/gpt-5-mini` | 200K | | | | | | $2 | $6 |
66
- | `aihubmix/gpt-5-nano` | 128K | | | | | | $0.50 | $2 |
67
- | `aihubmix/gpt-5-pro` | 400K | | | | | | $7 | $28 |
68
- | `aihubmix/gpt-5.1` | 400K | | | | | | $1 | $10 |
69
- | `aihubmix/gpt-5.1-codex` | 400K | | | | | | $1 | $10 |
70
- | `aihubmix/gpt-5.1-codex-max` | 400K | | | | | | $1 | $10 |
71
- | `aihubmix/gpt-5.1-codex-mini` | 400K | | | | | | $0.25 | $2 |
72
- | `aihubmix/gpt-5.2` | 400K | | | | | | $2 | $14 |
73
- | `aihubmix/gpt-5.2-codex` | 400K | | | | | | $2 | $14 |
74
- | `aihubmix/Kimi-K2-0905` | 262K | | | | | | $0.55 | $2 |
75
- | `aihubmix/kimi-k2.5` | 262K | | | | | | $0.60 | $3 |
76
- | `aihubmix/minimax-m2.1` | 205K | | | | | | $0.29 | $1 |
77
- | `aihubmix/minimax-m2.5` | 205K | | | | | | $0.29 | $1 |
78
- | `aihubmix/o4-mini` | 200K | | | | | | $2 | $6 |
79
- | `aihubmix/qwen3-235b-a22b-instruct-2507` | 262K | | | | | | $0.28 | $1 |
80
- | `aihubmix/qwen3-235b-a22b-thinking-2507` | 262K | | | | | | $0.28 | $3 |
81
- | `aihubmix/qwen3-coder-480b-a35b-instruct` | 262K | | | | | | $0.82 | $3 |
82
- | `aihubmix/qwen3-coder-next` | 262K | | | | | | $0.14 | $0.55 |
83
- | `aihubmix/qwen3-max-2026-01-23` | 262K | | | | | | $0.34 | $1 |
84
- | `aihubmix/qwen3.5-plus` | 1.0M | | | | | | $0.11 | $0.66 |
85
-
86
- ## Advanced configuration
19
+ ```bash
20
+ pnpm add @aihubmix/ai-sdk-provider
21
+ ```
87
22
 
88
- ### Custom headers
23
+ **Yarn**:
89
24
 
90
- ```typescript
91
- const agent = new Agent({
92
- id: "custom-agent",
93
- name: "custom-agent",
94
- model: {
95
- url: "https://aihubmix.com/v1",
96
- id: "aihubmix/Kimi-K2-0905",
97
- apiKey: process.env.AIHUBMIX_API_KEY,
98
- headers: {
99
- "X-Custom-Header": "value"
100
- }
101
- }
102
- });
25
+ ```bash
26
+ yarn add @aihubmix/ai-sdk-provider
103
27
  ```
104
28
 
105
- ### Dynamic model selection
29
+ **Bun**:
106
30
 
107
- ```typescript
108
- const agent = new Agent({
109
- id: "dynamic-agent",
110
- name: "Dynamic Agent",
111
- model: ({ requestContext }) => {
112
- const useAdvanced = requestContext.task === "complex";
113
- return useAdvanced
114
- ? "aihubmix/qwen3.5-plus"
115
- : "aihubmix/Kimi-K2-0905";
116
- }
117
- });
31
+ ```bash
32
+ bun add @aihubmix/ai-sdk-provider
118
33
  ```
@@ -1,6 +1,6 @@
1
1
  # ![Synthetic logo](https://models.dev/logos/synthetic.svg)Synthetic
2
2
 
3
- Access 28 Synthetic models through Mastra's model router. Authentication is handled automatically using the `SYNTHETIC_API_KEY` environment variable.
3
+ Access 29 Synthetic models through Mastra's model router. Authentication is handled automatically using the `SYNTHETIC_API_KEY` environment variable.
4
4
 
5
5
  Learn more in the [Synthetic documentation](https://synthetic.new/pricing).
6
6
 
@@ -59,6 +59,7 @@ for await (const chunk of stream) {
59
59
  | `synthetic/hf:Qwen/Qwen3-235B-A22B-Instruct-2507` | 256K | | | | | | $0.20 | $0.60 |
60
60
  | `synthetic/hf:Qwen/Qwen3-235B-A22B-Thinking-2507` | 256K | | | | | | $0.65 | $3 |
61
61
  | `synthetic/hf:Qwen/Qwen3-Coder-480B-A35B-Instruct` | 256K | | | | | | $2 | $2 |
62
+ | `synthetic/hf:Qwen/Qwen3.5-397B-A17B` | 262K | | | | | | $0.60 | $3 |
62
63
  | `synthetic/hf:zai-org/GLM-4.6` | 200K | | | | | | $0.55 | $2 |
63
64
  | `synthetic/hf:zai-org/GLM-4.7` | 200K | | | | | | $0.55 | $2 |
64
65
  | `synthetic/hf:zai-org/GLM-4.7-Flash` | 197K | | | | | | $0.06 | $0.40 |
@@ -11,7 +11,6 @@ Direct access to individual AI model providers. Each provider offers unique mode
11
11
  - [xAI](https://mastra.ai/models/providers/xai)
12
12
  - [302.AI](https://mastra.ai/models/providers/302ai)
13
13
  - [Abacus](https://mastra.ai/models/providers/abacus)
14
- - [AIHubMix](https://mastra.ai/models/providers/aihubmix)
15
14
  - [Alibaba](https://mastra.ai/models/providers/alibaba)
16
15
  - [Alibaba (China)](https://mastra.ai/models/providers/alibaba-cn)
17
16
  - [Alibaba Coding Plan](https://mastra.ai/models/providers/alibaba-coding-plan)
@@ -6,7 +6,7 @@ The `Agent` class is the foundation for creating AI agents in Mastra. It provide
6
6
 
7
7
  ### Basic string instructions
8
8
 
9
- Passing instructions as a string or array of strings is the simplest way to set up an agent. This is useful for straightforward use cases where you just need to provide a prompt without additional configuration.
9
+ Passing instructions as a string or array of strings is the simplest way to set up an agent. This is useful for straightforward use cases where you need to provide a prompt without additional configuration.
10
10
 
11
11
  ```typescript
12
12
  import { Agent } from '@mastra/core/agent'
@@ -4,9 +4,9 @@ You can use the Command-Line Interface (CLI) provided by Mastra to develop, buil
4
4
 
5
5
  ## `mastra dev`
6
6
 
7
- Starts a server which exposes [Studio](https://mastra.ai/docs/getting-started/studio) and REST endpoints for your agents, tools, and workflows. You can visit <http://localhost:4111/swagger-ui> for an overview of all available endpoints once `mastra dev` is running.
7
+ Starts a server which exposes [Studio](https://mastra.ai/docs/studio/overview) and REST endpoints for your agents, tools, and workflows. You can visit <http://localhost:4111/swagger-ui> for an overview of all available endpoints once `mastra dev` is running.
8
8
 
9
- You can also [configure the server](https://mastra.ai/docs/getting-started/studio).
9
+ You can also [configure the server](https://mastra.ai/reference/configuration).
10
10
 
11
11
  ### Flags
12
12
 
@@ -157,7 +157,7 @@ Comma-separated list of custom arguments to pass to the Node.js process, e.g. `-
157
157
 
158
158
  ## `mastra studio`
159
159
 
160
- Starts [Mastra Studio](https://mastra.ai/docs/getting-started/studio) as a static server. After starting, you can enter your Mastra instance URL (e.g. `http://localhost:4111`) to connect Studio to your Mastra backend. Looks for `.env` and `.env.production` files in the current working directory for configuration.
160
+ Starts [Studio](https://mastra.ai/docs/studio/overview) as a static server. After starting, you can enter your Mastra instance URL (e.g. `http://localhost:4111`) to connect Studio to your Mastra backend. Looks for `.env` and `.env.production` files in the current working directory for configuration.
161
161
 
162
162
  ### Flags
163
163
 
@@ -55,7 +55,7 @@ The `resourceId` parameter associates the workflow run with a specific resource
55
55
 
56
56
  ### `startAsync()`
57
57
 
58
- Start a workflow run and await the full result:
58
+ Start a workflow run and await its completion, returning the full result as the workflow output.
59
59
 
60
60
  ```typescript
61
61
  const run = await workflow.createRun()
@@ -97,7 +97,7 @@ const result = await run.startAsync({
97
97
 
98
98
  ### `start()`
99
99
 
100
- Start a workflow run without waiting for completion:
100
+ Start a workflow run without waiting for completion (fire-and-forget). Returns immediately with a success message. Use `runById()` on the workflow instance to check results later:
101
101
 
102
102
  ```typescript
103
103
  const run = await workflow.createRun()
@@ -749,9 +749,9 @@ Return a `{ status, body }` object to override the default `400` response, or `u
749
749
 
750
750
  The `context` parameter indicates which part of the request failed validation:
751
751
 
752
- - `'query'` query parameters
753
- - `'body'` request body
754
- - `'path'` path parameters
752
+ - `'query'`: query parameters
753
+ - `'body'`: request body
754
+ - `'path'`: path parameters
755
755
 
756
756
  ```typescript
757
757
  import { Mastra } from '@mastra/core'
@@ -797,7 +797,7 @@ export const mastra = new Mastra({
797
797
  **Type:** `string`\
798
798
  **Default:** `/`
799
799
 
800
- Base path for hosting Mastra Studio. Use this to host the Studio on a sub-path of your existing application instead of the root.
800
+ Base path for hosting [Studio](https://mastra.ai/docs/studio/overview). Use this to host the Studio on a sub-path of your existing application instead of the root.
801
801
 
802
802
  This is useful when integrating with existing applications, using authentication tools like Cloudflare Zero Trust that benefit from shared domains, or managing multiple services under a single domain.
803
803
 
@@ -75,7 +75,7 @@ The `CloudflareDeployer` constructor accepts the same configuration options as `
75
75
 
76
76
  ## Secrets
77
77
 
78
- Environment variables from your `.env` file are **not** written to `wrangler.jsonc`. This prevents secrets from being committed to source control.
78
+ Environment variables from your `.env` file aren't written to `wrangler.jsonc`. This prevents secrets from being committed to source control.
79
79
 
80
80
  To make your `.env` secrets available to your Cloudflare Worker, upload them as [Cloudflare Secrets](https://developers.cloudflare.com/workers/configuration/secrets/):
81
81
 
@@ -47,7 +47,7 @@ export const mastra = new Mastra({
47
47
 
48
48
  The deployer accepts the following options:
49
49
 
50
- - `studio?: boolean` — Deploy [Studio](https://mastra.ai/docs/getting-started/studio) alongside your API as static assets served from Vercel's Edge CDN. Defaults to `false`.
50
+ - `studio?: boolean` — Deploy [Studio](https://mastra.ai/docs/studio/overview) alongside your API as static assets served from Vercel's Edge CDN. Defaults to `false`.
51
51
  - `maxDuration?: number` — Function execution timeout (in seconds)
52
52
  - `memory?: number` — Function memory (in MB)
53
53
  - `regions?: string[]` — Regions to deploy the function (e.g. `['sfo1','iad1']`)
@@ -84,6 +84,10 @@ The Reference section provides documentation of Mastra's API, including paramete
84
84
  - [Deployer](https://mastra.ai/reference/deployer)
85
85
  - [Netlify](https://mastra.ai/reference/deployer/netlify)
86
86
  - [Vercel](https://mastra.ai/reference/deployer/vercel)
87
+ - [createScorer()](https://mastra.ai/reference/evals/create-scorer)
88
+ - [MastraScorer](https://mastra.ai/reference/evals/mastra-scorer)
89
+ - [runEvals()](https://mastra.ai/reference/evals/run-evals)
90
+ - [Scorer Utils](https://mastra.ai/reference/evals/scorer-utils)
87
91
  - [Answer Relevancy Scorer](https://mastra.ai/reference/evals/answer-relevancy)
88
92
  - [Answer Similarity Scorer](https://mastra.ai/reference/evals/answer-similarity)
89
93
  - [Bias](https://mastra.ai/reference/evals/bias)
@@ -91,30 +95,16 @@ The Reference section provides documentation of Mastra's API, including paramete
91
95
  - [Content Similarity Scorer](https://mastra.ai/reference/evals/content-similarity)
92
96
  - [Context Precision Scorer](https://mastra.ai/reference/evals/context-precision)
93
97
  - [Context Relevance Scorer](https://mastra.ai/reference/evals/context-relevance)
94
- - [createScorer()](https://mastra.ai/reference/evals/create-scorer)
95
98
  - [Faithfulness](https://mastra.ai/reference/evals/faithfulness)
96
99
  - [Hallucination](https://mastra.ai/reference/evals/hallucination)
97
100
  - [Keyword Coverage Scorer](https://mastra.ai/reference/evals/keyword-coverage)
98
- - [MastraScorer](https://mastra.ai/reference/evals/mastra-scorer)
99
101
  - [Noise Sensitivity Scorer](https://mastra.ai/reference/evals/noise-sensitivity)
100
102
  - [Prompt Alignment Scorer](https://mastra.ai/reference/evals/prompt-alignment)
101
- - [runEvals()](https://mastra.ai/reference/evals/run-evals)
102
- - [Scorer Utils](https://mastra.ai/reference/evals/scorer-utils)
103
103
  - [Textual Difference Scorer](https://mastra.ai/reference/evals/textual-difference)
104
104
  - [Tone Consistency Scorer](https://mastra.ai/reference/evals/tone-consistency)
105
105
  - [Tool Call Accuracy Scorers](https://mastra.ai/reference/evals/tool-call-accuracy)
106
106
  - [Toxicity](https://mastra.ai/reference/evals/toxicity)
107
107
  - [Trajectory Accuracy Scorers](https://mastra.ai/reference/evals/trajectory-accuracy)
108
- - [Harness Class](https://mastra.ai/reference/harness/harness-class)
109
- - [Cloned Thread Utilities](https://mastra.ai/reference/memory/clone-utilities)
110
- - [Memory Class](https://mastra.ai/reference/memory/memory-class)
111
- - [Observational Memory](https://mastra.ai/reference/memory/observational-memory)
112
- - [.cloneThread()](https://mastra.ai/reference/memory/cloneThread)
113
- - [.createThread()](https://mastra.ai/reference/memory/createThread)
114
- - [.deleteMessages()](https://mastra.ai/reference/memory/deleteMessages)
115
- - [.getThreadById()](https://mastra.ai/reference/memory/getThreadById)
116
- - [.listThreads()](https://mastra.ai/reference/memory/listThreads)
117
- - [.recall()](https://mastra.ai/reference/memory/recall)
118
108
  - [Dataset Class](https://mastra.ai/reference/datasets/dataset)
119
109
  - [DatasetsManager Class](https://mastra.ai/reference/datasets/datasets-manager)
120
110
  - [.addItem()](https://mastra.ai/reference/datasets/addItem)
@@ -139,7 +129,18 @@ The Reference section provides documentation of Mastra's API, including paramete
139
129
  - [.startExperimentAsync()](https://mastra.ai/reference/datasets/startExperimentAsync)
140
130
  - [.update()](https://mastra.ai/reference/datasets/update)
141
131
  - [.updateItem()](https://mastra.ai/reference/datasets/updateItem)
132
+ - [Harness Class](https://mastra.ai/reference/harness/harness-class)
133
+ - [Cloned Thread Utilities](https://mastra.ai/reference/memory/clone-utilities)
134
+ - [Memory Class](https://mastra.ai/reference/memory/memory-class)
135
+ - [Observational Memory](https://mastra.ai/reference/memory/observational-memory)
136
+ - [.cloneThread()](https://mastra.ai/reference/memory/cloneThread)
137
+ - [.createThread()](https://mastra.ai/reference/memory/createThread)
138
+ - [.deleteMessages()](https://mastra.ai/reference/memory/deleteMessages)
139
+ - [.getThreadById()](https://mastra.ai/reference/memory/getThreadById)
140
+ - [.listThreads()](https://mastra.ai/reference/memory/listThreads)
141
+ - [.recall()](https://mastra.ai/reference/memory/recall)
142
142
  - [PinoLogger](https://mastra.ai/reference/logging/pino-logger)
143
+ - [Automatic Metrics](https://mastra.ai/reference/observability/metrics/automatic-metrics)
143
144
  - [Configuration](https://mastra.ai/reference/observability/tracing/configuration)
144
145
  - [Instances](https://mastra.ai/reference/observability/tracing/instances)
145
146
  - [Interfaces](https://mastra.ai/reference/observability/tracing/interfaces)
@@ -276,6 +277,7 @@ The Reference section provides documentation of Mastra's API, including paramete
276
277
  - [LocalSandbox](https://mastra.ai/reference/workspace/local-sandbox)
277
278
  - [S3Filesystem](https://mastra.ai/reference/workspace/s3-filesystem)
278
279
  - [SandboxProcessManager](https://mastra.ai/reference/workspace/process-manager)
280
+ - [VercelSandbox](https://mastra.ai/reference/workspace/vercel)
279
281
  - [Workspace Class](https://mastra.ai/reference/workspace/workspace-class)
280
282
  - [WorkspaceFilesystem](https://mastra.ai/reference/workspace/filesystem)
281
283
  - [WorkspaceSandbox](https://mastra.ai/reference/workspace/sandbox)
@@ -666,7 +666,7 @@ const selector = new ModelByInputTokens({
666
666
 
667
667
  #### Behavior
668
668
 
669
- - Thresholds are sorted internally, so the order in the config object does not matter.
669
+ - Thresholds are sorted internally, so the order in the config object doesn't matter.
670
670
  - `inputTokens ≤ smallest threshold` → uses that threshold's model
671
671
  - `inputTokens > largest threshold` → `resolve()` throws an error. If this happens during an OM Observer or Reflector run, OM aborts via TripWire, so callers receive an empty `text` result or streamed `tripwire` instead of a normal assistant response.
672
672
  - OM computes the input token count for the Observer or Reflector call and resolves the matching model tier directly