@mastra/mcp-docs-server 0.13.30-alpha.0 → 0.13.30-alpha.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +15 -0
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +15 -15
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +35 -35
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +24 -24
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +15 -15
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +31 -31
- package/.docs/organized/changelogs/%40mastra%2Freact.md +20 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +15 -15
- package/.docs/organized/changelogs/create-mastra.md +19 -19
- package/.docs/organized/changelogs/mastra.md +27 -27
- package/.docs/organized/code-examples/agent.md +0 -1
- package/.docs/organized/code-examples/agui.md +2 -2
- package/.docs/organized/code-examples/client-side-tools.md +2 -2
- package/.docs/raw/agents/adding-voice.mdx +118 -25
- package/.docs/raw/agents/agent-memory.mdx +73 -89
- package/.docs/raw/agents/guardrails.mdx +1 -1
- package/.docs/raw/agents/overview.mdx +39 -7
- package/.docs/raw/agents/using-tools.mdx +95 -0
- package/.docs/raw/deployment/overview.mdx +9 -11
- package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +1 -1
- package/.docs/raw/frameworks/servers/express.mdx +2 -2
- package/.docs/raw/getting-started/installation.mdx +34 -85
- package/.docs/raw/getting-started/mcp-docs-server.mdx +13 -1
- package/.docs/raw/index.mdx +49 -14
- package/.docs/raw/observability/ai-tracing/exporters/otel.mdx +3 -0
- package/.docs/raw/reference/observability/ai-tracing/exporters/otel.mdx +6 -0
- package/.docs/raw/reference/scorers/answer-relevancy.mdx +105 -7
- package/.docs/raw/reference/scorers/answer-similarity.mdx +266 -16
- package/.docs/raw/reference/scorers/bias.mdx +107 -6
- package/.docs/raw/reference/scorers/completeness.mdx +131 -8
- package/.docs/raw/reference/scorers/content-similarity.mdx +107 -8
- package/.docs/raw/reference/scorers/context-precision.mdx +234 -18
- package/.docs/raw/reference/scorers/context-relevance.mdx +418 -35
- package/.docs/raw/reference/scorers/faithfulness.mdx +122 -8
- package/.docs/raw/reference/scorers/hallucination.mdx +125 -8
- package/.docs/raw/reference/scorers/keyword-coverage.mdx +141 -9
- package/.docs/raw/reference/scorers/noise-sensitivity.mdx +478 -6
- package/.docs/raw/reference/scorers/prompt-alignment.mdx +351 -102
- package/.docs/raw/reference/scorers/textual-difference.mdx +134 -6
- package/.docs/raw/reference/scorers/tone-consistency.mdx +133 -0
- package/.docs/raw/reference/scorers/tool-call-accuracy.mdx +422 -65
- package/.docs/raw/reference/scorers/toxicity.mdx +125 -7
- package/.docs/raw/reference/workflows/workflow.mdx +33 -0
- package/.docs/raw/scorers/custom-scorers.mdx +244 -3
- package/.docs/raw/scorers/overview.mdx +8 -38
- package/.docs/raw/server-db/middleware.mdx +5 -2
- package/.docs/raw/server-db/runtime-context.mdx +178 -0
- package/.docs/raw/streaming/workflow-streaming.mdx +5 -1
- package/.docs/raw/tools-mcp/overview.mdx +25 -7
- package/.docs/raw/workflows/overview.mdx +28 -1
- package/CHANGELOG.md +14 -0
- package/package.json +4 -4
- package/.docs/raw/agents/runtime-context.mdx +0 -106
- package/.docs/raw/agents/using-tools-and-mcp.mdx +0 -241
- package/.docs/raw/getting-started/model-providers.mdx +0 -63
- package/.docs/raw/tools-mcp/runtime-context.mdx +0 -63
- /package/.docs/raw/{evals → scorers/evals-old-api}/custom-eval.mdx +0 -0
- /package/.docs/raw/{evals → scorers/evals-old-api}/overview.mdx +0 -0
- /package/.docs/raw/{evals → scorers/evals-old-api}/running-in-ci.mdx +0 -0
- /package/.docs/raw/{evals → scorers/evals-old-api}/textual-evals.mdx +0 -0
- /package/.docs/raw/{server-db → workflows}/snapshots.mdx +0 -0
````diff
@@ -1,5 +1,31 @@
 # mastra
 
+## 0.16.0-alpha.1
+
+### Minor Changes
+
+- Update peer dependencies to match core package version bump (0.21.0) ([#8795](https://github.com/mastra-ai/mastra/pull/8795))
+
+### Patch Changes
+
+- Create unified Sidebar component to use on Playground and Cloud ([#8655](https://github.com/mastra-ai/mastra/pull/8655))
+
+- Use only zod validation in dynamic form ([#8802](https://github.com/mastra-ai/mastra/pull/8802))
+
+- Add support for --debug options to build/dev commands to add more logging ([#8792](https://github.com/mastra-ai/mastra/pull/8792))
+
+- Add div wrapper around entity tables to fix table vertical position ([#8758](https://github.com/mastra-ai/mastra/pull/8758))
+
+- Customize AITraces type to seamlessly work on Cloud too ([#8759](https://github.com/mastra-ai/mastra/pull/8759))
+
+- Stream finalResult from network loop ([#8795](https://github.com/mastra-ai/mastra/pull/8795))
+
+- Improve README ([#8819](https://github.com/mastra-ai/mastra/pull/8819))
+
+- Updated dependencies [[`1ed9670`](https://github.com/mastra-ai/mastra/commit/1ed9670d3ca50cb60dc2e517738c5eef3968ed27), [`ca5a01f`](https://github.com/mastra-ai/mastra/commit/ca5a01f0dd4dabdbfde3beeaf92c7333e0f9bb39), [`158381d`](https://github.com/mastra-ai/mastra/commit/158381d39335be934b81ef8a1947bccace492c25), [`fb703b9`](https://github.com/mastra-ai/mastra/commit/fb703b9634eeaff1a6eb2b5531ce0f9e8fb04727), [`dfe856f`](https://github.com/mastra-ai/mastra/commit/dfe856f7f60ff4765b75930e7b5d82dd0f0f7d89), [`37a2314`](https://github.com/mastra-ai/mastra/commit/37a23148e0e5a3b40d4f9f098b194671a8a49faf), [`05a9dee`](https://github.com/mastra-ai/mastra/commit/05a9dee3d355694d28847bfffb6289657fcf7dfa), [`e3c1077`](https://github.com/mastra-ai/mastra/commit/e3c107763aedd1643d3def5df450c235da9ff76c), [`1bccdb3`](https://github.com/mastra-ai/mastra/commit/1bccdb33eb90cbeba2dc5ece1c2561fb774b26b6), [`5ef944a`](https://github.com/mastra-ai/mastra/commit/5ef944a3721d93105675cac2b2311432ff8cc393), [`d6b186f`](https://github.com/mastra-ai/mastra/commit/d6b186fb08f1caf1b86f73d3a5ee88fb999ca3be), [`65493b3`](https://github.com/mastra-ai/mastra/commit/65493b31c36f6fdb78f9679f7e1ecf0c250aa5ee), [`a998b8f`](https://github.com/mastra-ai/mastra/commit/a998b8f858091c2ec47683e60766cf12d03001e4), [`8a37bdd`](https://github.com/mastra-ai/mastra/commit/8a37bddb6d8614a32c5b70303d583d80c620ea61), [`e0e1cf1`](https://github.com/mastra-ai/mastra/commit/e0e1cf1e37b9dc61099ab331a6d386e44b816310)]:
+  - @mastra/core@0.21.0-alpha.1
+  - @mastra/deployer@0.21.0-alpha.1
+
 ## 0.15.2-alpha.0
 
 ### Patch Changes
@@ -273,30 +299,4 @@
 
 - Add observe strean to get streans after workflow has been interrupted ([#8318](https://github.com/mastra-ai/mastra/pull/8318))
 
-  - @mastra/core@0.20.0-alpha.0
-  - @mastra/deployer@0.20.0-alpha.0
-  - @mastra/mcp@0.13.3-alpha.0
-  - @mastra/loggers@0.10.15-alpha.0
-
-## 0.13.4
-
-### Patch Changes
-
-- disable network label when memory is not enabled OR the agent has no subagents ([#8341](https://github.com/mastra-ai/mastra/pull/8341))
-
-- Added Mastra model router to Playground UI ([#8332](https://github.com/mastra-ai/mastra/pull/8332))
-
-- Updated dependencies [[`4a70ccc`](https://github.com/mastra-ai/mastra/commit/4a70ccc5cfa12ae9c2b36545a5814cd98e5a0ead), [`0992b8b`](https://github.com/mastra-ai/mastra/commit/0992b8bf0f4f1ba7ad9940883ec4bb8d867d3105), [`283bea0`](https://github.com/mastra-ai/mastra/commit/283bea07adbaf04a27fa3ad2df611095e0825195)]:
-  - @mastra/core@0.19.1
-  - @mastra/deployer@0.19.1
-
-## 0.13.4-alpha.1
-
-### Patch Changes
-
-- disable network label when memory is not enabled OR the agent has no subagents ([#8341](https://github.com/mastra-ai/mastra/pull/8341))
-
-- Updated dependencies [[`4a70ccc`](https://github.com/mastra-ai/mastra/commit/4a70ccc5cfa12ae9c2b36545a5814cd98e5a0ead)]:
-
-... 6120 more lines hidden. See full changelog in package directory.
+... 6146 more lines hidden. See full changelog in package directory.
````
````diff
@@ -703,7 +703,6 @@ import { Agent } from '@mastra/core/agent';
 import { openai, openai as openai_v5 } from '@ai-sdk/openai-v5';
 import { createTool } from '@mastra/core/tools';
 import { z } from 'zod';
-import { cookingTool } from '../tools';
 import { myWorkflow } from '../workflows';
 import { Memory } from '@mastra/memory';
 import { ModerationProcessor } from '@mastra/core/processors';
````
````diff
@@ -23,13 +23,13 @@
     "@types/react-dom": "^19.1.7",
     "@typescript-eslint/eslint-plugin": "^8.38.0",
     "@typescript-eslint/parser": "^8.38.0",
-    "@vitejs/plugin-react": "^
+    "@vitejs/plugin-react": "^5.0.4",
     "eslint": "^9.36.0",
     "eslint-plugin-react-hooks": "^5.2.0",
     "eslint-plugin-react-refresh": "^0.4.22",
     "globals": "^16.0.0",
     "typescript": "^5.8.3",
-    "vite": "^
+    "vite": "^7.1.9"
   }
 }
 ```
````
````diff
@@ -14,14 +14,14 @@
     "@eslint/js": "^9.21.0",
     "@types/react": "^19.1.9",
     "@types/react-dom": "^19.1.7",
-    "@vitejs/plugin-react": "^
+    "@vitejs/plugin-react": "^5.0.4",
     "eslint": "^9.36.0",
     "eslint-plugin-react-hooks": "^5.2.0",
     "eslint-plugin-react-refresh": "^0.4.22",
     "globals": "^15.15.0",
     "typescript": "~5.8.3",
     "typescript-eslint": "^8.38.0",
-    "vite": "^
+    "vite": "^7.1.9"
   }
 }
 ```
````
````diff
@@ -2,7 +2,7 @@
 
 Mastra agents can be enhanced with voice capabilities, allowing them to speak responses and listen to user input. You can configure an agent to use either a single voice provider or combine multiple providers for different operations.
 
-##
+## Basic usage
 
 The simplest way to add voice to an agent is to use a single provider for both speaking and listening:
 
````
````diff
@@ -39,30 +39,6 @@ try {
 }
 ```
 
-## Using Multiple Providers
-
-For more flexibility, you can use different providers for speaking and listening using the CompositeVoice class:
-
-```typescript
-import { Agent } from "@mastra/core/agent";
-import { CompositeVoice } from "@mastra/core/voice";
-import { OpenAIVoice } from "@mastra/voice-openai";
-import { PlayAIVoice } from "@mastra/voice-playai";
-import { openai } from "@ai-sdk/openai";
-
-export const agent = new Agent({
-  name: "Agent",
-  instructions: `You are a helpful assistant with both STT and TTS capabilities.`,
-  model: openai("gpt-4o"),
-
-  // Create a composite voice using OpenAI for listening and PlayAI for speaking
-  voice: new CompositeVoice({
-    input: new OpenAIVoice(),
-    output: new PlayAIVoice(),
-  }),
-});
-```
-
 ## Working with Audio Streams
 
 The `speak()` and `listen()` methods work with Node.js streams. Here's how to save and load audio files:
````
````diff
@@ -176,6 +152,123 @@ agent.voice.on("error", (error) => {
 });
 ```
 
+## Examples
+
+### End-to-end voice interaction
+
+This example demonstrates a voice interaction between two agents. The hybrid voice agent, which uses multiple providers, speaks a question, which is saved as an audio file. The unified voice agent listens to that file, processes the question, generates a response, and speaks it back. Both audio outputs are saved to the `audio` directory.
+
+The following files are created:
+
+- **hybrid-question.mp3** – Hybrid agent's spoken question.
+- **unified-response.mp3** – Unified agent's spoken response.
+
+```typescript filename="src/test-voice-agents.ts" showLineNumbers copy
+import "dotenv/config";
+
+import path from "path";
+import { createReadStream } from "fs";
+import { Agent } from "@mastra/core/agent";
+import { CompositeVoice } from "@mastra/core/voice";
+import { OpenAIVoice } from "@mastra/voice-openai";
+import { Mastra } from "@mastra/core/mastra";
+import { openai } from "@ai-sdk/openai";
+
+// Saves an audio stream to a file in the audio directory, creating the directory if it doesn't exist.
+export const saveAudioToFile = async (audio: NodeJS.ReadableStream, filename: string): Promise<void> => {
+  const audioDir = path.join(process.cwd(), "audio");
+  const filePath = path.join(audioDir, filename);
+
+  await fs.promises.mkdir(audioDir, { recursive: true });
+
+  const writer = createWriteStream(filePath);
+  audio.pipe(writer);
+  return new Promise((resolve, reject) => {
+    writer.on("finish", resolve);
+    writer.on("error", reject);
+  });
+};
+
+// Saves an audio stream to a file in the audio directory, creating the directory if it doesn't exist.
+export const convertToText = async (input: string | NodeJS.ReadableStream): Promise<string> => {
+  if (typeof input === "string") {
+    return input;
+  }
+
+  const chunks: Buffer[] = [];
+  return new Promise((resolve, reject) => {
+    input.on("data", (chunk) => chunks.push(Buffer.from(chunk)));
+    input.on("error", reject);
+    input.on("end", () => resolve(Buffer.concat(chunks).toString("utf-8")));
+  });
+};
+
+export const hybridVoiceAgent = new Agent({
+  name: "hybrid-voice-agent",
+  model: openai("gpt-4o"),
+  instructions: "You can speak and listen using different providers.",
+  voice: new CompositeVoice({
+    input: new OpenAIVoice(),
+    output: new OpenAIVoice()
+  })
+});
+
+export const unifiedVoiceAgent = new Agent({
+  name: "unified-voice-agent",
+  instructions: "You are an agent with both STT and TTS capabilities.",
+  model: openai("gpt-4o"),
+  voice: new OpenAIVoice()
+});
+
+export const mastra = new Mastra({
+  // ...
+  agents: { hybridVoiceAgent, unifiedVoiceAgent }
+});
+
+const hybridVoiceAgent = mastra.getAgent("hybridVoiceAgent");
+const unifiedVoiceAgent = mastra.getAgent("unifiedVoiceAgent");
+
+const question = "What is the meaning of life in one sentence?";
+
+const hybridSpoken = await hybridVoiceAgent.voice.speak(question);
+
+await saveAudioToFile(hybridSpoken!, "hybrid-question.mp3");
+
+const audioStream = createReadStream(path.join(process.cwd(), "audio", "hybrid-question.mp3"));
+const unifiedHeard = await unifiedVoiceAgent.voice.listen(audioStream);
+
+const inputText = await convertToText(unifiedHeard!);
+
+const unifiedResponse = await unifiedVoiceAgent.generate(inputText);
+const unifiedSpoken = await unifiedVoiceAgent.voice.speak(unifiedResponse.text);
+
+await saveAudioToFile(unifiedSpoken!, "unified-response.mp3");
+```
+
+### Using Multiple Providers
+
+For more flexibility, you can use different providers for speaking and listening using the CompositeVoice class:
+
+```typescript
+import { Agent } from "@mastra/core/agent";
+import { CompositeVoice } from "@mastra/core/voice";
+import { OpenAIVoice } from "@mastra/voice-openai";
+import { PlayAIVoice } from "@mastra/voice-playai";
+import { openai } from "@ai-sdk/openai";
+
+export const agent = new Agent({
+  name: "Agent",
+  instructions: `You are a helpful assistant with both STT and TTS capabilities.`,
+  model: openai("gpt-4o"),
+
+  // Create a composite voice using OpenAI for listening and PlayAI for speaking
+  voice: new CompositeVoice({
+    input: new OpenAIVoice(),
+    output: new PlayAIVoice(),
+  }),
+});
+```
+
 ## Supported Voice Providers
 
 Mastra supports multiple voice providers for text-to-speech (TTS) and speech-to-text (STT) capabilities:
````
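A note on the helpers in the added example above: as shown, `saveAudioToFile` relies on `fs.promises.mkdir` and `createWriteStream`, but the snippet only imports `createReadStream` from "fs". The sketch below is a minimal, self-contained version of the two helpers with the imports they appear to need; this is an assumption, since the published page may declare them elsewhere.

```typescript
import fs from "fs";
import path from "path";
import { createWriteStream } from "fs";

// Save a readable audio stream under ./audio, creating the directory first.
export const saveAudioToFile = async (
  audio: NodeJS.ReadableStream,
  filename: string,
): Promise<void> => {
  const audioDir = path.join(process.cwd(), "audio");
  await fs.promises.mkdir(audioDir, { recursive: true });

  const writer = createWriteStream(path.join(audioDir, filename));
  audio.pipe(writer);
  return new Promise((resolve, reject) => {
    writer.on("finish", () => resolve());
    writer.on("error", reject);
  });
};

// Collect a transcription stream into a single UTF-8 string.
export const convertToText = async (
  input: string | NodeJS.ReadableStream,
): Promise<string> => {
  if (typeof input === "string") return input;

  const chunks: Buffer[] = [];
  return new Promise((resolve, reject) => {
    input.on("data", (chunk) => chunks.push(Buffer.from(chunk)));
    input.on("error", reject);
    input.on("end", () => resolve(Buffer.concat(chunks).toString("utf-8")));
  });
};
```

Both helpers wrap Node stream events in a Promise so the surrounding example can simply `await` the file write and the transcription.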
````diff
@@ -1,40 +1,80 @@
 ---
-title: "
-description:
+title: "Agent Memory | Agents | Mastra Docs"
+description: Learn how to add memory to agents to store conversation history and maintain context across interactions.
 ---
 
+import { Callout } from "nextra/components";
+
 # Agent Memory
 
-Agents
+Agents can use memory to store conversation history, recall relevant information, and maintain context across interactions. This enables more natural, stateful conversations throughout a user’s session.
+
+## When to use memory
+
+Use memory when an agent needs to retain information across multiple user interactions. This includes recalling user-specific details, facts, or tool calls and their results. Without memory, the agent handles each request in isolation, with no awareness of previous messages or responses.
+
+For more information about the different ways memory works in Mastra see the following pages.
+
+- [Working Memory](../memory/working-memory.mdx)
+- [Semantic Recall](../memory/semantic-recall.mdx)
+
+## Prerequisites
 
-
+Memory requires a storage provider to persist conversation history, including user messages and agent responses. You can use **shared storage** to define a single provider for all agents, or **dedicated storage** to configure separate providers for individual agents.
 
-
+Install `@mastra/memory` and a storage provider.
 
 ```bash npm2yarn copy
 npm install @mastra/memory@latest @mastra/libsql@latest
 ```
 
-
+### Storage providers
+
+Mastra supports multiple storage providers. Popular options include:
+
+- [LibSQL](../../reference/storage/libsql.mdx)
+- [PostgreSQL](../../reference/storage/postgresql.mdx)
+- [Cloudflare D1](../../reference/storage/cloudflare-d1.mdx)
+
+<Callout type="warning">
+  `LibSQLStore` works well for local development and when deploying to [Mastra Cloud](../mastra-cloud/overview.mdx), but may not be supported by some [serverless platforms](../deployment/serverless-platforms/index.mdx) or [cloud providers](../deployment/cloud-providers/index.mdx).
+</Callout>
+
+### Shared storage
+
+Use shared storage for a simple, centralized setup across agents. Add the storage adapter to your main Mastra instance to make it available to all agents by default.
+
+```typescript {6-8} filename="src/mastra/index.ts" showLineNumbers copy
+import { Mastra } from "@mastra/core/mastra";
+import { LibSQLStore } from "@mastra/libsql";
+
+export const mastra = new Mastra({
+  // ..
+  storage: new LibSQLStore({
+    url: ":memory:"
+  }),
+});
+```
+
+### Dedicated storage
+
+Use dedicated storage when agents need to keep data separate, use different providers, or tailor storage requirements for specific users. Add the storage adapter directly to the agent’s `memory` configuration option.
+
+```typescript {7-10} filename="src/mastra/agents/memory-agent.ts" showLineNumbers copy
 import { Agent } from "@mastra/core/agent";
 import { Memory } from "@mastra/memory";
 import { LibSQLStore } from "@mastra/libsql";
-import { openai } from "@ai-sdk/openai";
 
-export const
-
-  instructions: "You are a helpful assistant with memory.",
-  model: openai("gpt-4o"),
+export const memoryAgent = new Agent({
+  // ...
   memory: new Memory({
     storage: new LibSQLStore({
-      url: "file:../../memory.db"
+      url: "file:../../memory-agent.db"
     })
   })
 });
 ```
 
-This basic setup uses the default settings. Visit the [Memory documentation](../memory/overview.mdx) for more configuration info.
-
 ## Memory in agent calls
 
 When calling `.generate()` or `.stream()`, include a `memory` object with both `resource` and `thread` to enable memory.
````
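For reference, a complete dedicated-storage agent along the lines of the hunk above might look like the sketch below. Only the `memory` block is what the new docs actually show; the `name`, `instructions`, and `model` fields are assumptions carried over from the removed version of the example.

```typescript
import { Agent } from "@mastra/core/agent";
import { Memory } from "@mastra/memory";
import { LibSQLStore } from "@mastra/libsql";
import { openai } from "@ai-sdk/openai";

export const memoryAgent = new Agent({
  name: "memory-agent", // assumed; the new docs elide this with "// ..."
  instructions: "You are a helpful assistant with memory.",
  model: openai("gpt-4o"),
  // Dedicated storage: this agent keeps its own conversation history.
  memory: new Memory({
    storage: new LibSQLStore({
      url: "file:../../memory-agent.db",
    }),
  }),
});
```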
````diff
@@ -45,10 +85,10 @@ When calling `.generate()` or `.stream()`, include a `memory` object with both `
 These fields tell the agent where to store and retrieve context, enabling persistent, thread-aware memory across interactions.
 
 ```typescript {3-4}
-const response = await
+const response = await memoryAgent.generate("Remember my favorite color is blue.", {
   memory: {
-
-
+    thread: "user-123",
+    resource: "test-123"
   }
 });
 ```
````
````diff
@@ -56,25 +96,22 @@ const response = await testAgent.generate("Remember my favorite color is blue.",
 To recall information stored in memory, call the agent with the same `resource` and `thread` values used in the original interaction.
 
 ```typescript {3-4}
-const response = await
+const response = await memoryAgent.generate("What's my favorite color?", {
   memory: {
-
-
+    thread: "user-123",
+    resource: "test-123"
   }
 });
 ```
 
-##
+## Using `RuntimeContext`
 
-
+Use `RuntimeContext` to access request-specific values. This lets you conditionally select different memory or storage configurations based on the context of the request.
 
-
-
-
-
-import { Memory } from "@mastra/memory";
-import { LibSQLStore } from "@mastra/libsql";
-import { openai } from "@ai-sdk/openai";
+```typescript filename="src/mastra/agents/memory-agent.ts" showLineNumbers
+export type UserTier = {
+  "user-tier": "enterprise" | "pro";
+};
 
 const premiumMemory = new Memory({
   // ...
````
````diff
@@ -84,68 +121,14 @@ const standardMemory = new Memory({
   // ...
 });
 
-export const
-  name: "test-agent",
-  instructions: "You are a helpful assistant with tiered memory capabilities.",
-  model: openai("gpt-4o"),
-  memory: ({ runtimeContext }) => {
-    const userTier = runtimeContext.get("userTier");
-    return userTier === "premium" ? premiumMemory : standardMemory;
-  }
-});
-```
-
-### Agent usage
-
-Pass a configured `RuntimeContext` instance to an agent to enable conditional logic during execution. This allows the agent to adapt its behavior based on runtime values.
-
-```typescript {1,4,6, 13} showLineNumbers copy
-import { RuntimeContext } from "@mastra/core/runtime-context";
-
-const testAgent = mastra.getAgent("testAgent");
-const runtimeContext = new RuntimeContext();
-
-runtimeContext.set("userTier", "premium");
-
-const response = await testAgent.generate("Remember my favorite color is blue.", {
-  memory: {
-    resource: "user_alice",
-    thread: { id: "preferences_thread" }
-  },
-  runtimeContext
-});
-```
-
-
-## Async memory configuration
-
-Memory can be configured asynchronously to support use cases like fetching user-specific settings from a database, validating access with Auth, or loading additional data from a remote service.
-
-```typescript {18, 22} filename="src/mastra/agents/test-agent.ts" showLineNumbers copy
-import { Agent } from "@mastra/core/agent";
-import { Memory } from "@mastra/memory";
-import { LibSQLStore } from "@mastra/libsql";
-import { openai } from "@ai-sdk/openai";
-
-const premiumMemory = new Memory({
-  // ...
-});
-
-const standardMemory = new Memory({
+export const memoryAgent = new Agent({
   // ...
-})
-
-export const testAgent = new Agent({
-  name: "test-agent",
-  instructions: "You are a helpful assistant with tiered memory capabilities.",
-  model: openai("gpt-4o"),
-  memory: async ({ runtimeContext }) => {
-    const userId = runtimeContext.get("userId");
-
-    // Example database lookup using `userId`
-    const userTier = await query(`SELECT user_tier FROM users WHERE userId = $1`, [userId]);
+  memory: ({ runtimeContext }) => {
+    const userTier = runtimeContext.get("user-tier") as UserTier["user-tier"];
 
-    return userTier === "
+    return userTier === "enterprise"
+      ? premiumMemory
+      : standardMemory;
   }
 });
 ```
````
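The runtime-context example above leaves `premiumMemory` and `standardMemory` as `// ...` placeholders. A sketch of what those two instances could be, reusing the LibSQL storage shown earlier in this diff; the database file names are illustrative assumptions only.

```typescript
import { Memory } from "@mastra/memory";
import { LibSQLStore } from "@mastra/libsql";

// Two Memory configurations the agent can switch between per request.
const premiumMemory = new Memory({
  storage: new LibSQLStore({ url: "file:../../premium-memory.db" }),
});

const standardMemory = new Memory({
  storage: new LibSQLStore({ url: "file:../../standard-memory.db" }),
});
```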
````diff
@@ -155,3 +138,4 @@ export const testAgent = new Agent({
 - [Working Memory](../memory/working-memory.mdx)
 - [Semantic Recall](../memory/semantic-recall.mdx)
 - [Threads and Resources](../memory/threads-and-resources.mdx)
+- [Runtime Context](../server-db/runtime-context.mdx)
````
````diff
@@ -20,7 +20,7 @@ Use processors for content moderation, prompt injection prevention, response san
 
 ## Adding processors to an agent
 
-Import and instantiate the relevant processor class, and pass it to your agent’s configuration using either the `inputProcessors` or `outputProcessors`
+Import and instantiate the relevant processor class, and pass it to your agent’s configuration using either the `inputProcessors` or `outputProcessors` option:
 
 ```typescript {3,9-17} filename="src/mastra/agents/moderated-agent.ts" showLineNumbers copy
 import { openai } from "@ai-sdk/openai";
````
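The model-router hunk above points at the `/models` list. For context, a minimal agent using the router's `provider/model` string form might look like the sketch below; the specific model ID is an assumption, and it presumes `OPENAI_API_KEY` is set as the docs describe.

```typescript
import { Agent } from "@mastra/core/agent";

// The model router resolves "provider/model" strings using auto-detected
// environment variables, so no provider SDK import is needed here.
export const routedAgent = new Agent({
  name: "routed-agent",
  instructions: "You are a helpful assistant.",
  model: "openai/gpt-4o-mini", // assumed ID; see /models for the full list
});
```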
````diff
@@ -34,7 +34,7 @@ Mastra's model router auto-detects environment variables for your chosen provide
 OPENAI_API_KEY=<your-api-key>
 ```
 
-> Mastra supports more than 600 models. Choose from the full list [here](
+> Mastra supports more than 600 models. Choose from the full list [here](/models).
 
 ### Create an agent
 
````
````diff
@@ -282,10 +282,6 @@ const response = await testAgent.generate([
 console.log(response.text);
 ```
 
-## Multi-step tool use
-
-Agents can be enhanced with tools, functions that extend their capabilities beyond text generation. Tools allow agents to perform calculations, access external systems, and process data. Agents not only decide whether to call tools they're given, they determine the parameters that should be given to that tool.
-
 For a detailed guide to creating and configuring tools, see the [Tools Overview](../tools-mcp/overview.mdx) page.
 
 ### Using `maxSteps`
````
````diff
@@ -314,6 +310,42 @@ const response = await testAgent.generate("Help me organize my day", {
 });
 ```
 
+## Using tools
+
+Agents can use tools to go beyond language generation, enabling structured interactions with external APIs and services. Tools allow agents to access data and perform clearly defined operations in a reliable, repeatable way.
+
+```typescript filename="src/mastra/agents/test-agent.ts" showLineNumbers
+export const testAgent = new Agent({
+  // ...
+  tools: { testTool }
+});
+```
+
+> See [Using Tools](./using-tools.mdx) for more information.
+
+## Using `RuntimeContext`
+
+Use `RuntimeContext` to access request-specific values. This lets you conditionally adjust behavior based on the context of the request.
+
+```typescript filename="src/mastra/agents/test-agent.ts" showLineNumbers
+export type UserTier = {
+  "user-tier": "enterprise" | "pro";
+};
+
+export const testAgent = new Agent({
+  // ...
+  model: ({ runtimeContext }) => {
+    const userTier = runtimeContext.get("user-tier") as UserTier["user-tier"];
+
+    return userTier === "enterprise"
+      ? openai("gpt-4o-mini")
+      : openai("gpt-4.1-nano");
+  }
+});
+```
+
+> See [Runtime Context](../server-db/runtime-context.mdx) for more information.
+
 ## Testing agents locally
 
 There are two ways to run and test agents.
````
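The new "Using `RuntimeContext`" section above shows only the agent side; the caller-side setup was removed from the agent-memory page earlier in this diff. Below is a sketch adapted from those removed lines to the new `"user-tier"` key. It assumes a configured `mastra` instance is in scope, and the agent lookup and prompt are illustrative.

```typescript
import { RuntimeContext } from "@mastra/core/runtime-context";

// Caller side: populate request-specific values, then pass the context
// to generate() so the agent's model/memory callbacks can read them.
const testAgent = mastra.getAgent("testAgent");
const runtimeContext = new RuntimeContext();

runtimeContext.set("user-tier", "enterprise");

const response = await testAgent.generate("Help me organize my day", {
  runtimeContext,
});

console.log(response.text);
```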
````diff
@@ -353,7 +385,7 @@ npx tsx src/test-agent.ts
 
 ## Related
 
+- [Using Tools](./using-tools.mdx)
 - [Agent Memory](./agent-memory.mdx)
-- [
-- [Agent Tools and MCP](./using-tools-and-mcp.mdx)
+- [Runtime Context](../../examples/agents/runtime-context.mdx)
 - [Calling Agents](../../examples/agents/calling-agents.mdx)
````