@mastra/mcp-docs-server 1.1.17 → 1.1.18-alpha.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/docs/agents/overview.md +4 -4
- package/.docs/docs/agents/processors.md +1 -1
- package/.docs/docs/community/licensing.md +7 -9
- package/.docs/docs/deployment/monorepo.md +0 -6
- package/.docs/docs/evals/built-in-scorers.md +1 -1
- package/.docs/docs/{observability → evals}/datasets/overview.md +12 -12
- package/.docs/docs/{observability → evals}/datasets/running-experiments.md +9 -9
- package/.docs/docs/evals/overview.md +8 -9
- package/.docs/docs/getting-started/manual-install.md +1 -2
- package/.docs/docs/index.md +1 -1
- package/.docs/docs/mastra-cloud/deployment.md +2 -2
- package/.docs/docs/mastra-cloud/observability.md +2 -2
- package/.docs/docs/mastra-cloud/overview.md +1 -1
- package/.docs/docs/mastra-cloud/setup.md +3 -3
- package/.docs/docs/mcp/publishing-mcp-server.md +20 -0
- package/.docs/docs/memory/message-history.md +6 -4
- package/.docs/docs/memory/observational-memory.md +20 -11
- package/.docs/docs/memory/overview.md +4 -4
- package/.docs/docs/memory/semantic-recall.md +28 -19
- package/.docs/docs/memory/storage.md +4 -4
- package/.docs/docs/observability/metrics/overview.md +114 -0
- package/.docs/docs/observability/overview.md +13 -5
- package/.docs/docs/observability/tracing/exporters/default.md +2 -4
- package/.docs/docs/observability/tracing/exporters/laminar.md +4 -4
- package/.docs/docs/observability/tracing/exporters/sentry.md +4 -4
- package/.docs/docs/observability/tracing/overview.md +2 -2
- package/.docs/docs/rag/chunking-and-embedding.md +2 -2
- package/.docs/docs/server/auth/composite-auth.md +1 -7
- package/.docs/docs/server/auth/custom-auth-provider.md +2 -4
- package/.docs/docs/server/auth/jwt.md +1 -1
- package/.docs/docs/server/auth/simple-auth.md +1 -7
- package/.docs/docs/server/auth.md +3 -3
- package/.docs/docs/server/custom-adapters.md +3 -1
- package/.docs/docs/server/custom-api-routes.md +1 -1
- package/.docs/docs/server/mastra-client.md +1 -3
- package/.docs/docs/server/mastra-server.md +8 -0
- package/.docs/docs/server/request-context.md +17 -17
- package/.docs/docs/server/server-adapters.md +8 -8
- package/.docs/docs/streaming/events.md +1 -90
- package/.docs/docs/streaming/overview.md +0 -42
- package/.docs/docs/studio/auth.md +142 -0
- package/.docs/docs/{deployment/studio.md → studio/deployment.md} +42 -16
- package/.docs/docs/studio/observability.md +98 -0
- package/.docs/docs/studio/overview.md +127 -0
- package/.docs/docs/workflows/agents-and-tools.md +7 -10
- package/.docs/docs/workflows/control-flow.md +1 -1
- package/.docs/docs/workflows/overview.md +12 -7
- package/.docs/docs/workflows/suspend-and-resume.md +1 -1
- package/.docs/guides/concepts/multi-agent-systems.md +4 -4
- package/.docs/guides/deployment/vercel.md +1 -1
- package/.docs/guides/getting-started/next-js.md +1 -1
- package/.docs/guides/getting-started/quickstart.md +1 -1
- package/.docs/guides/guide/ai-recruiter.md +1 -1
- package/.docs/guides/guide/chef-michel.md +1 -1
- package/.docs/guides/guide/code-review-bot.md +1 -1
- package/.docs/guides/guide/dev-assistant.md +1 -1
- package/.docs/guides/guide/docs-manager.md +1 -1
- package/.docs/guides/guide/github-actions-pr-description.md +1 -1
- package/.docs/guides/guide/notes-mcp-server.md +1 -1
- package/.docs/guides/guide/stock-agent.md +1 -1
- package/.docs/guides/guide/web-search.md +2 -2
- package/.docs/guides/index.md +1 -1
- package/.docs/guides/migrations/upgrade-to-v1/client.md +1 -1
- package/.docs/guides/migrations/upgrade-to-v1/tracing.md +1 -1
- package/.docs/models/gateways/netlify.md +65 -66
- package/.docs/models/gateways/openrouter.md +2 -1
- package/.docs/models/gateways/vercel.md +3 -1
- package/.docs/models/index.md +2 -2
- package/.docs/models/providers/aihubmix.md +17 -102
- package/.docs/models/providers/opencode.md +3 -2
- package/.docs/models/providers/synthetic.md +2 -1
- package/.docs/models/providers/vultr.md +11 -16
- package/.docs/models/providers/zenmux.md +2 -31
- package/.docs/models/providers.md +0 -1
- package/.docs/reference/agents/agent.md +1 -1
- package/.docs/reference/cli/mastra.md +3 -3
- package/.docs/reference/client-js/workflows.md +2 -2
- package/.docs/reference/configuration.md +4 -4
- package/.docs/reference/deployer/cloudflare.md +1 -1
- package/.docs/reference/deployer/vercel.md +1 -1
- package/.docs/reference/index.md +16 -14
- package/.docs/reference/memory/observational-memory.md +1 -1
- package/.docs/reference/observability/metrics/automatic-metrics.md +132 -0
- package/.docs/reference/storage/cloudflare-d1.md +1 -1
- package/.docs/reference/storage/cloudflare.md +3 -3
- package/.docs/reference/storage/convex.md +1 -1
- package/.docs/reference/storage/dynamodb.md +1 -1
- package/.docs/reference/storage/lance.md +1 -1
- package/.docs/reference/storage/upstash.md +1 -1
- package/.docs/reference/workspace/vercel.md +118 -0
- package/CHANGELOG.md +22 -0
- package/package.json +5 -5
- package/.docs/docs/getting-started/studio.md +0 -113
- package/.docs/docs/mastra-cloud/studio.md +0 -24
@@ -38,7 +38,7 @@ export const mastra = new Mastra({
 
 Once registered, it can be called from workflows, tools, or other agents, and has access to shared resources such as memory, logging, and observability features.
 
-> **Tip:** Use [Studio](https://mastra.ai/docs/
+> **Tip:** Use [Studio](https://mastra.ai/docs/studio/overview) to test your agent with different messages, inspect tool calls and responses, and debug agent behavior.
 
 > **Note:** Visit the [agent reference](https://mastra.ai/reference/agents/agent) for more information on available properties and configurations.
 
@@ -48,7 +48,7 @@ After registration, retrieve your agent with [`mastra.getAgentById()`](https://m
 
 When referencing an agent from your Mastra instance, use `mastra.getAgentById()` to ensure it has access to shared services such as instance-level storage, logging, and agent registry. A directly imported agent can still work with its own local configuration, but it won't have access to those shared services.
 
-
+**.generate()**:
 
 Returns the full response after all tool calls and steps complete. The result includes `text`, `toolCalls`, `toolResults`, `steps`, and token `usage` statistics.
 
@@ -58,7 +58,7 @@ const response = await agent.generate('Help me organize my day')
 console.log(response.text)
 ```
 
-
+**.stream()**:
 
 Returns a stream you can consume as tokens arrive. The result exposes `textStream` for incremental output and promises for `toolCalls`, `toolResults`, `steps`, and token `usage` that resolve when the stream finishes.
 
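For context on the `.generate()` and `.stream()` calls described in the two hunks above, a minimal consumption sketch. The agent variable and prompt are illustrative, and the field names (`textStream`, `toolCalls`, `usage`) follow the descriptions in the doc text rather than a verified signature:

```typescript
// Illustrative sketch based on the fields described above.
// `agent` is assumed to be an agent retrieved via mastra.getAgentById().
const result = await agent.stream('Help me organize my day')

// Incremental output as tokens arrive.
for await (const chunk of result.textStream) {
  process.stdout.write(chunk)
}

// Promises that resolve once the stream finishes.
console.log(await result.toolCalls)
console.log(await result.usage)
```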
@@ -90,6 +90,6 @@ Once your agent is running, use this table to find the right page for what you w
 
 ## Multi-agent systems
 
-A multi-agent system uses multiple agents to solve a task that
+A multi-agent system uses multiple agents to solve a task that's too broad or too specialized for a single agent. Instead of building one agent with dozens of tools and a long instruction set, you split responsibilities across focused agents and let a coordinator bring results together.
 
 Read the [conceptual overview of multi-agent systems](https://mastra.ai/guides/concepts/multi-agent-systems) to learn how you can apply different patterns with Mastra.
@@ -493,7 +493,7 @@ After a `.parallel()` step, each branch result is keyed by its processor ID (e.g
 
 If a branch uses a mutating strategy like `redact`, map to that branch so its transformed messages carry forward. If all branches only `block`, any branch works. Pick any one since none of them modify the messages.
 
-When an agent is registered with Mastra, processor workflows are automatically registered as workflows, allowing you to view and debug them in the [Studio](https://mastra.ai/docs/
+When an agent is registered with Mastra, processor workflows are automatically registered as workflows, allowing you to view and debug them in the [Studio](https://mastra.ai/docs/studio/overview).
 
 ### Retry mechanism
 
@@ -1,10 +1,8 @@
 # License
 
-## Apache license 2.0
-
 Mastra is licensed under the Apache License 2.0, a permissive open-source license that provides users with broad rights to use, modify, and distribute the software.
 
-
+## What's Apache License 2.0?
 
 The Apache License 2.0 is a permissive open-source license that grants users extensive rights to use, modify, and distribute the software. It allows:
 
@@ -16,7 +14,7 @@ The Apache License 2.0 is a permissive open-source license that grants users ext
 
 The Apache License 2.0 is one of the most permissive and business-friendly open-source licenses available.
 
-
+## Why We Chose Apache License 2.0
 
 We selected the Apache License 2.0 for several important reasons:
 
@@ -30,11 +28,11 @@ We selected the Apache License 2.0 for several important reasons:
 
 5. **Widely Adopted**: It's one of the most popular and well-understood open-source licenses in the industry.
 
-
+## Building Your Business with Mastra
 
 The Apache License 2.0 provides maximum flexibility for building businesses with Mastra:
 
-
+### Allowed Business Models
 
 - **Building Applications**: Create and sell applications built with Mastra
 - **Offering Consulting Services**: Provide expertise, implementation, and customization services
@@ -44,7 +42,7 @@ The Apache License 2.0 provides maximum flexibility for building businesses with
 - **Hosted Services**: Offer Mastra as a hosted or managed service
 - **SaaS Platforms**: Build SaaS platforms powered by Mastra
 
-
+### Examples of Compliant Usage
 
 - A company builds an AI-powered customer service application using Mastra and sells it to clients
 - A consulting firm offers implementation and customization services for Mastra
@@ -53,7 +51,7 @@ The Apache License 2.0 provides maximum flexibility for building businesses with
 - A company offers Mastra as a hosted service to their customers
 - A SaaS platform integrates Mastra as their AI backend
 
-
+### Compliance Requirements
 
 The Apache License 2.0 has minimal requirements:
 
@@ -61,6 +59,6 @@ The Apache License 2.0 has minimal requirements:
 - **State Changes**: If you modify the software, state that you have made changes
 - **Include License**: Include a copy of the Apache License 2.0 when distributing
 
-
+## Questions About Licensing?
 
 If you have specific questions about how the Apache License 2.0 applies to your use case, please [contact us](https://discord.gg/BTYqqHKUrf) on Discord for clarification. We're committed to supporting all legitimate use cases while maintaining the open-source nature of the project.
@@ -105,12 +105,6 @@ When deploying to cloud providers, ensure the correct package is selected as the
 
 Most providers let you specify the root directory in their dashboard or configuration file.
 
-### Mastra Cloud
-
-The image below shows how to select `apps/api` as the project root when deploying to [Mastra Cloud](https://mastra.ai/docs/mastra-cloud/overview). While the interface may differ between providers, the configuration remains the same.
-
-
-
 ## Dependency management
 
 Keep dependencies consistent to avoid version conflicts and build errors:
@@ -28,7 +28,7 @@ These scorers evaluate the quality and relevance of context used in generating r
 - [`context-precision`](https://mastra.ai/reference/evals/context-precision): Evaluates context relevance and ranking using Mean Average Precision, rewarding early placement of relevant context (`0-1`, higher is better)
 - [`context-relevance`](https://mastra.ai/reference/evals/context-relevance): Measures context utility with nuanced relevance levels, usage tracking, and missing context detection (`0-1`, higher is better)
 
->
+> **Context Scorer Selection:**
 >
 > - Use **Context Precision** when context ordering matters and you need standard IR metrics (ideal for RAG ranking evaluation)
 > - Use **Context Relevance** when you need detailed relevance assessment and want to track context usage and identify gaps
@@ -41,6 +41,16 @@ const { datasets: all } = await datasets.list()
 
 > **Info:** Visit the [`DatasetsManager` reference](https://mastra.ai/reference/datasets/datasets-manager) for the full list of methods.
 
+## Studio
+
+You can also manage datasets in [Studio](https://mastra.ai/docs/studio/overview). After opening Studio, select **Datasets** from the sidebar to see all your available datasets or create a new one.
+
+To get started, select **Create Dataset** and set a name, description, and optional schemas. After confirming, you'll see the dataset details page with two tabs: **Items** and [**Experiments**](https://mastra.ai/docs/evals/datasets/running-experiments).
+
+In the **Items** view you can add, update, and delete items, and view version history. Select **Add Item** to insert a new item with JSON editors for input and ground truth. From this view you can also import items in bulk from a CSV or JSON file. When importing, map each column to the corresponding dataset field.
+
+Select **Versions** to see the full history of changes to the dataset. After selecting **Compare Versions**, choose any two versions and select **Compare** to see a side-by-side diff of all items that were added, changed, or removed between those versions.
+
 ## Creating a dataset
 
 Call [`create()`](https://mastra.ai/reference/datasets/create) with a name and optional description:
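To tie the Studio workflow above back to the programmatic API named in this hunk's context lines (`datasets.list()`, `create()`, and the `listItems({ version })` call shown later in this diff), a rough sketch. The object shape passed to `create()` is an assumption for illustration, not the verified signature, and `datasets` is assumed to be a `DatasetsManager` instance already in scope:

```typescript
// Rough sketch using only the calls named on this page.
// The create() argument shape ({ name, description }) is an assumption.
const dataset = await datasets.create({
  name: 'support-questions',
  description: 'Questions used to evaluate the support agent',
})

// List all datasets, as in this hunk's context line.
const { datasets: all } = await datasets.list()
console.log(all.length)

// Fetch the items as they existed at a past version (shown later in this diff).
const items = await dataset.listItems({ version: 2 })
```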
@@ -176,23 +186,13 @@ Fetch the exact items that existed at a past version:
 const items = await dataset.listItems({ version: 2 })
 ```
 
-You can also pin experiments to a version, see [running experiments](https://mastra.ai/docs/
+You can also pin experiments to a version, see [running experiments](https://mastra.ai/docs/evals/datasets/running-experiments).
 
 > **Info:** Visit the [`Dataset` reference](https://mastra.ai/reference/datasets/dataset) for the full list of methods and parameters.
 
-## Studio
-
-You can also manage datasets in [Mastra Studio](https://mastra.ai/docs/getting-started/studio). After opening Studio, select **Datasets** from the sidebar to see all your available datasets or create a new one.
-
-To get started, select **Create Dataset** and set a name, description, and optional schemas. After confirming, you'll see the dataset details page with two tabs: **Items** and [**Experiments**](https://mastra.ai/docs/observability/datasets/running-experiments).
-
-In the **Items** view you can add, update, and delete items, and view version history. Select **Add Item** to insert a new item with JSON editors for input and ground truth. From this view you can also import items in bulk from a CSV or JSON file. When importing, map each column to the corresponding dataset field.
-
-Select **Versions** to see the full history of changes to the dataset. After selecting **Compare Versions**, choose any two versions and select **Compare** to see a side-by-side diff of all items that were added, changed, or removed between those versions.
-
 ## Related
 
-- [Running experiments](https://mastra.ai/docs/
+- [Running experiments](https://mastra.ai/docs/evals/datasets/running-experiments)
 - [Scorers overview](https://mastra.ai/docs/evals/overview)
 - [DatasetsManager reference](https://mastra.ai/reference/datasets/datasets-manager)
 - [Dataset reference](https://mastra.ai/reference/datasets/dataset)
@@ -27,6 +27,14 @@ console.log(summary.failedCount) // number of items that failed
 
 `startExperiment()` blocks until all items finish. For fire-and-forget execution, see [async experiments](#async-experiments).
 
+## Studio
+
+You can also run experiments in [Studio](https://mastra.ai/docs/studio/overview). After you've added a dataset item, open it and select **Run Experiment** and configure the target, scorers, and options.
+
+After running an experiment, the **Experiments** tab shows all runs for that dataset (with status, counts, and timestamps). Select an experiment to see per-item results, scores, and execution traces.
+
+In the **Experiments** tab, select **Compare** and choose two or more experiments to compare their scores and results side by side.
+
 ## Experiment targets
 
 You can point an experiment at a registered agent, workflow, or scorer.
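To connect the Studio flow above with the programmatic `startExperiment()` call referenced in this hunk, a hypothetical sketch. The option names (`target`, `scorers`) mirror the Studio form described above and are assumptions, not the documented signature; `dataset`, `myAgent`, and `relevancyScorer` are placeholders:

```typescript
// Hypothetical sketch: option names are assumptions based on the Studio
// description ("configure the target, scorers, and options").
const summary = await dataset.startExperiment({
  target: myAgent,            // a registered agent, workflow, or scorer
  scorers: [relevancyScorer], // scorers to run against each item
})

console.log(summary.failedCount) // number of items that failed (as shown above)
```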
@@ -258,17 +266,9 @@ for (const result of results) {
 
 > **Info:** Visit the [`startExperiment` reference](https://mastra.ai/reference/datasets/startExperiment) for the full parameter and return type documentation.
 
-## Studio
-
-You can also run experiments in [Mastra Studio](https://mastra.ai/docs/getting-started/studio). After you've added a dataset item, open it and select **Run Experiment** and configure the target, scorers, and options.
-
-After running an experiment, the **Experiments** tab shows all runs for that dataset (with status, counts, and timestamps). Select an experiment to see per-item results, scores, and execution traces.
-
-In the **Experiments** tab, select **Compare** and choose two or more experiments to compare their scores and results side by side.
-
 ## Related
 
-- [Datasets overview](https://mastra.ai/docs/
+- [Datasets overview](https://mastra.ai/docs/evals/datasets/overview)
 - [Scorers overview](https://mastra.ai/docs/evals/overview)
 - [`startExperiment` reference](https://mastra.ai/reference/datasets/startExperiment)
 - [`listExperimentResults` reference](https://mastra.ai/reference/datasets/listExperimentResults)
@@ -111,9 +111,9 @@ export const contentWorkflow = createWorkflow({ ... })
 
 In addition to live evaluations, you can use scorers to evaluate historical traces from your agent interactions and workflows. This is particularly useful for analyzing past performance, debugging issues, or running batch evaluations.
 
-> **Observability
+> **Observability required:** To score traces, you must first configure observability in your Mastra instance to collect trace data. See [Tracing documentation](https://mastra.ai/docs/observability/tracing/overview) for setup instructions.
 
-
+## Studio
 
 To score traces, you first need to register your scorers with your Mastra instance:
 
@@ -126,16 +126,15 @@ const mastra = new Mastra({
 })
 ```
 
-Once registered, you can score traces interactively within Studio under the Observability section.
+Once registered, you can score traces interactively within Studio under the **Observability** section. Open Studio to manage scorers, review scores, and run experiments.
 
-
-
-
-
-For more details, see [Studio](https://mastra.ai/docs/getting-started/studio) docs.
+- **Scorers list**: Browse all registered scorers with their description, and the number of agents and workflows each scorer is attached to.
+- **Score results**: Select a scorer to see a paginated list of every score it has produced. Click a row to open the detail panel, which shows the score value, reason, input, output, and the prompts used by the judge. From this panel, save any result as a dataset item for future experiments.
+- **Agent Evaluate tab**: Open the Evaluate tab on any agent to attach or detach scorers, create or edit stored scorers inline, manage datasets, and run experiments. Experiment results display per-item scores alongside pass/fail status and version tags.
+- **Trace scoring**: In the Observability section, run a scorer against any historical trace or span to evaluate past interactions. Filter scores by agent or workflow.
 
 ## Next steps
 
 - Learn how to create your own scorers in the [Creating Custom Scorers](https://mastra.ai/docs/evals/custom-scorers) guide
 - Explore built-in scorers in the [Built-in Scorers](https://mastra.ai/docs/evals/built-in-scorers) section
-- Test scorers with [Studio](https://mastra.ai/docs/
+- Test scorers with [Studio](https://mastra.ai/docs/studio/overview)
@@ -71,7 +71,6 @@ If you prefer not to use our automatic CLI tool, you can set up your project you
 ```json
 {
   "scripts": {
-    "test": "echo \"Error: no test specified\" && exit 1",
     "dev": "mastra dev",
     "build": "mastra build"
   }
@@ -199,7 +198,7 @@ If you prefer not to use our automatic CLI tool, you can set up your project you
 })
 ```
 
-7. You can now launch [Studio](https://mastra.ai/docs/
+7. You can now launch [Studio](https://mastra.ai/docs/studio/overview) and test your agent.
 
 **npm**:
 
package/.docs/docs/index.md
CHANGED
@@ -4,7 +4,7 @@ Build AI agents your users actually depend on. Mastra is a TypeScript framework
 
 ## Quickstart
 
-Run this command to create a new project you can test immediately in [Studio](https://mastra.ai/docs/
+Run this command to create a new project you can test immediately in [Studio](https://mastra.ai/docs/studio/overview):
 
 **npm**:
 
@@ -73,5 +73,5 @@ After deployment, interact with your agents using the [Mastra Client](https://ma
 
 ## Next steps
 
-- [Studio](https://mastra.ai/docs/mastra-cloud/
-- [Observability](https://mastra.ai/docs/mastra-cloud/observability)
+- [Studio](https://mastra.ai/docs/mastra-cloud/overview): Test your agents in the cloud
+- [Observability](https://mastra.ai/docs/mastra-cloud/observability): Monitor traces and logs
@@ -10,7 +10,7 @@ Traces are available for both agents and workflows by enabling [observability](h
 
 ### Agents
 
-With observability enabled, you can view detailed outputs from your agents in the **Traces** section in [Studio](https://mastra.ai/docs/mastra-cloud/
+With observability enabled, you can view detailed outputs from your agents in the **Traces** section in [Studio](https://mastra.ai/docs/mastra-cloud/overview).
 
 
 
@@ -18,7 +18,7 @@ Agent traces break a run into clear steps: model calls, tool calls, and intermed
 
 ### Workflows
 
-With observability enabled, you can view detailed outputs from your workflows in the **Traces** section in [Studio](https://mastra.ai/docs/mastra-cloud/
+With observability enabled, you can view detailed outputs from your workflows in the **Traces** section in [Studio](https://mastra.ai/docs/mastra-cloud/overview).
 
 
 
@@ -4,7 +4,7 @@
 
 ## Studio
 
-Run [Studio](https://mastra.ai/docs/
+Run [Studio](https://mastra.ai/docs/studio/overview) in the cloud and share access with your team via a link. Team members can test agents and workflows, tweak system prompts, and give feedback without running the project locally.
 
 ## Deploy
 
@@ -1,6 +1,6 @@
 # Setup
 
-Import your Mastra project to [Mastra Cloud](https://cloud.mastra.ai) to use [Studio](https://mastra.ai/docs/mastra-cloud/
+Import your Mastra project to [Mastra Cloud](https://cloud.mastra.ai) to use [Studio](https://mastra.ai/docs/mastra-cloud/overview) and optionally [deploy](https://mastra.ai/docs/mastra-cloud/deployment) your agent.
 
 ## Before you begin
 
@@ -14,7 +14,7 @@ Import your Mastra project to [Mastra Cloud](https://cloud.mastra.ai) to use [St
 When you create a new project, you can choose from three options:
 
 1. **Create from GitHub** - Import a Mastra project from GitHub
-2. **Create from your server** - Connect a self-hosted Mastra instance to [Studio](https://mastra.ai/docs/mastra-cloud/
+2. **Create from your server** - Connect a self-hosted Mastra instance to [Studio](https://mastra.ai/docs/mastra-cloud/overview)
 3. **Create from template** - Start from a [pre-built template](https://mastra.ai/templates)
 
 To create a project from GitHub, follow these steps:
@@ -37,6 +37,6 @@ To create a project from GitHub, follow these steps:
 
 ## Next steps
 
-Once your project is imported, [Studio](https://mastra.ai/docs/mastra-cloud/
+Once your project is imported, [Studio](https://mastra.ai/docs/mastra-cloud/overview) automatically creates a sandbox where you can interact with your agents and share access with your team.
 
 When you're ready for production, enable [Deployment](https://mastra.ai/docs/mastra-cloud/deployment) settings and hit deploy!
@@ -6,10 +6,30 @@ This example guides you through setting up a basic Mastra MCPServer using the st
 
 Install the necessary packages:
 
+**npm**:
+
+```bash
+npm install @mastra/mcp @mastra/core tsup
+```
+
+**pnpm**:
+
 ```bash
 pnpm add @mastra/mcp @mastra/core tsup
 ```
 
+**Yarn**:
+
+```bash
+yarn add @mastra/mcp @mastra/core tsup
+```
+
+**Bun**:
+
+```bash
+bun add @mastra/mcp @mastra/core tsup
+```
+
 ## Setting up an MCP server
 
 1. Create a file for your stdio server, for example, `/src/mastra/stdio.ts`.
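As a companion to the install commands added in this hunk, a minimal sketch of what the stdio server file mentioned in the next step might look like. The tool import is hypothetical, and the constructor options shown should be checked against the `MCPServer` reference before relying on them:

```typescript
// src/mastra/stdio.ts — minimal sketch; `myTool` is a hypothetical tool import.
import { MCPServer } from '@mastra/mcp'

import { myTool } from './tools'

const server = new MCPServer({
  name: 'my-mcp-server',
  version: '1.0.0',
  tools: { myTool },
})

// Serve the MCP server over stdio so MCP clients can spawn it as a subprocess.
await server.startStdio()
```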
@@ -48,7 +48,7 @@ export const mastra = new Mastra({
 })
 ```
 
-
+Instantiate a [`Memory`](https://mastra.ai/reference/memory/memory-class) instance in your agent:
 
 ```typescript
 import { Memory } from '@mastra/memory'
@@ -66,7 +66,7 @@ export const agent = new Agent({
 
 When you call the agent, messages are automatically saved to the database. You can specify a `threadId`, `resourceId`, and optional `metadata`:
 
-
+**.generate()**:
 
 ```typescript
 await agent.generate('Hello', {
@@ -81,7 +81,7 @@ await agent.generate('Hello', {
 })
 ```
 
-
+**.stream()**:
 
 ```typescript
 await agent.stream('Hello', {
@@ -103,12 +103,14 @@ You can use this history in two ways:
 - **Automatic inclusion** - Mastra automatically fetches and includes recent messages in the context window. By default, it includes the last 10 messages, keeping agents grounded in the conversation. You can adjust this number with `lastMessages`, but in most cases you don't need to think about it.
 - [**Manual querying**](#querying) - For more control, use the `recall()` function to query threads and messages directly. This lets you choose exactly which memories are included in the context window, or fetch messages to render conversation history in your UI.
 
+> **Tip:** When memory is enabled, [Studio](https://mastra.ai/docs/studio/overview) uses message history to display past conversations in the chat sidebar.
+
 ## Accessing memory
 
 To access memory functions for querying, cloning, or deleting threads and messages, call `getMemory()` on an agent:
 
 ```typescript
-const agent = mastra.
+const agent = mastra.getAgentById('test-agent')
 const memory = await agent.getMemory()
 ```
 
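For the manual-querying path mentioned in this hunk, a hypothetical sketch of `recall()`. The function name comes from the doc text, but the argument names (`threadId`, `resourceId`) are borrowed from identifiers used elsewhere on the page, and calling it on the memory instance is also an assumption rather than the verified API:

```typescript
// Hypothetical sketch: argument names and the receiver of recall()
// are assumptions, not the documented API.
const agent = mastra.getAgentById('test-agent')
const memory = await agent.getMemory()

const recalled = await memory.recall({
  threadId: 'thread-123',
  resourceId: 'user-456',
})

// e.g. render conversation history in your UI or hand-pick messages for context
console.log(recalled)
```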
@@ -42,7 +42,7 @@ See [configuration options](https://mastra.ai/reference/memory/observational-mem
 
 ## Benefits
 
-- **Prompt caching**: OM's context is stable
+- **Prompt caching**: OM's context is stable and observations append over time rather than being dynamically retrieved each turn. This keeps the prompt prefix cacheable, which reduces costs.
 - **Compression**: Raw message history and tool results get compressed into a dense observation log. Smaller context means faster responses and longer coherent conversations.
 - **Zero context rot**: The agent sees relevant information instead of noisy tool calls and irrelevant tokens, so the agent stays on task over long sessions.
 
@@ -50,7 +50,7 @@ See [configuration options](https://mastra.ai/reference/memory/observational-mem
 
 You don't remember every word of every conversation you've ever had. You observe what happened subconsciously, then your brain reflects — reorganizing, combining, and condensing into long-term memory. OM works the same way.
 
-Every time an agent responds, it sees a context window containing its system prompt, recent message history, and any injected context. The context window is finite
+Every time an agent responds, it sees a context window containing its system prompt, recent message history, and any injected context. The context window is finite; even models with large token limits perform worse when the window is full. This causes two problems:
 
 - **Context rot**: the more raw message history an agent carries, the worse it performs.
 - **Context waste**: most of that history contains tokens no longer needed to keep the agent on task.
@@ -59,14 +59,15 @@ OM solves both problems by compressing old context into dense observations.
 
 ### Observations
 
-When message history tokens exceed a threshold (default: 30,000), the Observer creates observations
+When message history tokens exceed a threshold (default: 30,000), the Observer creates observations which are concise notes about what happened:
 
 OM uses fast local token estimation for this thresholding work. Text is estimated with `tokenx`, while image parts use provider-aware heuristics so multimodal conversations still trigger observation at the right time. The same applies to image-like `file` parts when a transport normalizes an uploaded image as a file instead of an image part. For example, OpenAI image detail settings can materially change when OM decides to observe.
 
 The Observer can also see attachments in the history it reviews. OM keeps readable placeholders like `[Image #1: reference-board.png]` or `[File #1: floorplan.pdf]` in the transcript for readability, and forwards the actual attachment parts alongside the text. Image-like `file` parts are upgraded to image inputs for the Observer when possible, while non-image attachments are forwarded as file parts with normalized token counting. This applies to both normal thread observation and batched resource-scope observation.
 
-```
+```md
 Date: 2026-01-15
+
 - 🔴 12:10 User is building a Next.js app with Supabase auth, due in 1 week (meaning January 22nd 2026)
 - 🔴 12:10 App uses server components with client-side hydration
 - 🟡 12:12 User asked about middleware configuration for protected routes
@@ -77,11 +78,11 @@ The compression is typically 5–40×. The Observer also tracks a **current task
 
 If you enable `observation.threadTitle`, the Observer can also suggest a short thread title when the conversation topic meaningfully changes. Thread title generation is opt-in and updates the thread metadata, so apps like Mastra Code can show the latest title in thread lists and status UI.
 
-Example:
+Example: An agent using Playwright MCP might see 50,000+ tokens per page snapshot. With OM, the Observer watches the interaction and creates a few hundred tokens of observations about what was on the page and what actions were taken. The agent stays on task without carrying every raw snapshot.
 
 ### Reflections
 
-When observations exceed their threshold (default: 40,000 tokens), the Reflector condenses them
+When observations exceed their threshold (default: 40,000 tokens), the Reflector condenses them, combines related items, and reflects on patterns.
 
 The result is a three-tier system:
 
@@ -93,7 +94,7 @@ The result is a three-tier system:
 
 > **Note:** Retrieval mode is experimental. The API may change in future releases.
 
-Normal OM compresses messages into observations, which is great for staying on task
+Normal OM compresses messages into observations, which is great for staying on task, but the original wording is gone. Retrieval mode fixes this by keeping each observation group linked to the raw messages that produced it. When the agent needs exact wording, tool output, or chronology that the summary compressed away, it can call a `recall` tool to page through the source messages.
 
 #### Browsing only
 
@@ -162,6 +163,16 @@ With retrieval mode enabled, OM:
 
 See the [recall tool reference](https://mastra.ai/reference/memory/observational-memory) for the full API (detail levels, part indexing, pagination, cross-thread browsing, and token limiting).
 
+## Studio
+
+To see how it works in practice, open [Studio](https://mastra.ai/docs/studio/overview) and navigate to an agent with OM enabled. The **Memory** tab displays:
+
+- **Token progress bars**: Current token counts for messages and observations, showing how close each is to its threshold. Hover over the info icon to see the model and threshold for the Observer and Reflector.
+- **Active observations**: The current observation log, rendered inline. When previous observation or reflection records exist, expand "Previous observations" to browse them.
+- **Background processing**: During a conversation, buffered observation chunks and reflection status appear as the agent processes in the background.
+
+The progress bars update live while the agent is observing or reflecting, showing elapsed time and a status badge.
+
 ## Models
 
 The Observer and Reflector run in the background. Any model that works with Mastra's [model routing](https://mastra.ai/models) (`provider/model`) can be used. When using `observationalMemory: true`, the default model is `google/gemini-2.5-flash`. When passing a config object, a `model` must be explicitly set.
@@ -184,6 +195,8 @@ See [model configuration](https://mastra.ai/reference/memory/observational-memor
 
 ### Token-tiered model selection
 
+**Added in:** `@mastra/memory@1.10.0`
+
 You can use `ModelByInputTokens` to specify different Observer or Reflector models based on input token count. OM selects the matching model tier at runtime from the configured `upTo` thresholds.
 
 ```typescript
@@ -373,10 +386,6 @@ No manual migration needed. OM reads existing messages and observes them lazily
 - **Thread scope**: The first time a thread exceeds `observation.messageTokens`, the Observer processes the backlog.
 - **Resource scope**: All unobserved messages across all threads for a resource are processed together. For users with many existing threads, this could take significant time.
 
-## Viewing in Mastra Studio
-
-Mastra Studio shows OM status in real time in the memory tab: token usage, which model is running, current observations, and reflection history.
-
 ## Comparing OM with other memory features
 
 - **[Message history](https://mastra.ai/docs/memory/message-history)**: High-fidelity record of the current conversation
@@ -107,7 +107,7 @@ Use memory when your agent needs to maintain multi-turn conversations that refer
 
 > **Note:** Visit [Memory Class](https://mastra.ai/reference/memory/memory-class) for a full list of configuration options.
 
-5. Call your agent, for example in [
+5. Call your agent, for example in [Studio](https://mastra.ai/docs/studio/overview). Inside Studio, start a new chat with your agent and take a look at the right sidebar. It'll now display various memory-related information.
 
 ## Message history
 
@@ -165,7 +165,7 @@ export const memoryAgent = new Agent({
 
 ## Memory in multi-agent systems
 
-When a [supervisor agent](https://mastra.ai/docs/agents/supervisor-agents) delegates to a subagent, Mastra isolates subagent memory automatically.
+When a [supervisor agent](https://mastra.ai/docs/agents/supervisor-agents) delegates to a subagent, Mastra isolates subagent memory automatically. No flag enables this as it happens on every delegation. Understanding how this scoping works lets you decide what stays private and what to share intentionally.
 
 ### How delegation scopes memory
 
@@ -175,7 +175,7 @@ Each delegation creates a fresh `threadId` and a deterministic `resourceId` for
 - **Resource ID**: Derived as `{parentResourceId}-{agentName}`. Because the resource ID is stable across delegations, resource-scoped memory persists between calls. A subagent remembers facts from previous delegations by the same user.
 - **Memory instance**: If a subagent has no memory configured, it inherits the supervisor's `Memory` instance. If the subagent defines its own, that takes precedence.
 
-The supervisor forwards its conversation context to the subagent so it has enough background to complete the task. Only the delegation prompt and the subagent's response are saved — the full parent conversation
+The supervisor forwards its conversation context to the subagent so it has enough background to complete the task. Only the delegation prompt and the subagent's response are saved — the full parent conversation isn't stored. You can control which messages reach the subagent with the [`messageFilter`](https://mastra.ai/docs/agents/supervisor-agents) callback.
 
 > **Note:** Subagent resource IDs are always suffixed with the agent name (`{parentResourceId}-{agentName}`). Two different subagents under the same supervisor never share a resource ID through delegation.
 
@@ -206,7 +206,7 @@ Because both calls use `resource: 'project-42'`, the writer can access the resea
 
 Enable [Tracing](https://mastra.ai/docs/observability/tracing/overview) to monitor and debug memory in action. Traces show you exactly which messages and observations the agent included in its context for each request, helping you understand agent behavior and verify that memory retrieval is working as expected.
 
-Open [
+Open [Studio](https://mastra.ai/docs/studio/overview) and select the **Observability** tab in the sidebar. Open the trace of a recent agent request, then look for spans of LLM calls.
 
 ## Switch memory per request
 
@@ -18,18 +18,33 @@ After getting a response from the LLM, all new messages (user, assistant, and to
 
 ## Quickstart
 
-Semantic recall is
+Semantic recall is disabled by default. To enable it, set `semanticRecall: true` in `options` and provide a `vector` store and `embedder`:
 
 ```typescript
 import { Agent } from '@mastra/core/agent'
 import { Memory } from '@mastra/memory'
+import { LibSQLStore, LibSQLVector } from '@mastra/libsql'
+import { ModelRouterEmbeddingModel } from '@mastra/core/llm'
 
 const agent = new Agent({
   id: 'support-agent',
   name: 'SupportAgent',
   instructions: 'You are a helpful support agent.',
   model: 'openai/gpt-5.4',
-  memory: new Memory(
+  memory: new Memory({
+    storage: new LibSQLStore({
+      id: 'agent-storage',
+      url: 'file:./local.db',
+    }),
+    vector: new LibSQLVector({
+      id: 'agent-vector',
+      url: 'file:./local.db',
+    }),
+    embedder: new ModelRouterEmbeddingModel('openai/text-embedding-3-small'),
+    options: {
+      semanticRecall: true,
+    },
+  }),
 })
 ```
 
@@ -77,6 +92,9 @@ const agent = new Agent({
       id: 'agent-vector',
       url: 'file:./local.db',
     }),
+    options: {
+      semanticRecall: true,
+    },
   }),
 })
 ```
@@ -139,6 +157,9 @@ import { ModelRouterEmbeddingModel } from '@mastra/core/llm'
 const agent = new Agent({
   memory: new Memory({
     embedder: new ModelRouterEmbeddingModel('openai/text-embedding-3-small'),
+    options: {
+      semanticRecall: true,
+    },
   }),
 })
 ```
@@ -262,26 +283,14 @@ const agent = new Agent({
 
 For detailed information about index configuration options and performance tuning, see the [PgVector configuration guide](https://mastra.ai/reference/vectors/pg).
 
-##
+## Disable semantic recall
 
-Semantic recall
-
-Semantic recall is enabled by default but can be disabled when not needed:
-
-```typescript
-const agent = new Agent({
-  memory: new Memory({
-    options: {
-      semanticRecall: false,
-    },
-  }),
-})
-```
+Semantic recall is disabled by default (`semanticRecall: false`). Each call adds latency because new messages are converted into embeddings and used to query a vector database before the LLM receives them.
 
-
+Keep semantic recall disabled when:
 
--
--
+- Message history provides sufficient context for the current conversation.
+- You're building performance-sensitive applications, like realtime two-way audio, where embedding and vector query latency is noticeable.
 
 ## Viewing recalled messages
 