@mastra/mcp-docs-server 0.13.29 → 0.13.30-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fchangeset-cli.md +2 -0
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +9 -9
- package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +31 -31
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +36 -0
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +28 -28
- package/.docs/organized/changelogs/%40mastra%2Fcloud.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +106 -106
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +37 -37
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +25 -25
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +25 -25
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +25 -25
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +49 -49
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +33 -33
- package/.docs/organized/changelogs/%40mastra%2Flance.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +23 -23
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +36 -36
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +31 -31
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +67 -67
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Frag.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Freact.md +37 -0
- package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +15 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +37 -37
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +18 -18
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +15 -0
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +16 -16
- package/.docs/organized/changelogs/create-mastra.md +35 -35
- package/.docs/organized/changelogs/mastra.md +63 -63
- package/.docs/organized/code-examples/agent.md +26 -7
- package/.docs/organized/code-examples/agui.md +4 -4
- package/.docs/organized/code-examples/ai-elements.md +1 -1
- package/.docs/organized/code-examples/ai-sdk-useChat.md +2 -2
- package/.docs/organized/code-examples/ai-sdk-v5.md +2 -2
- package/.docs/organized/code-examples/assistant-ui.md +2 -2
- package/.docs/organized/code-examples/bird-checker-with-nextjs-and-eval.md +2 -2
- package/.docs/organized/code-examples/bird-checker-with-nextjs.md +2 -2
- package/.docs/organized/code-examples/client-side-tools.md +4 -4
- package/.docs/organized/code-examples/crypto-chatbot.md +2 -2
- package/.docs/organized/code-examples/heads-up-game.md +2 -2
- package/.docs/organized/code-examples/openapi-spec-writer.md +2 -2
- package/.docs/raw/agents/adding-voice.mdx +118 -25
- package/.docs/raw/agents/agent-memory.mdx +73 -89
- package/.docs/raw/agents/guardrails.mdx +1 -1
- package/.docs/raw/agents/networks.mdx +12 -6
- package/.docs/raw/agents/overview.mdx +46 -11
- package/.docs/raw/agents/using-tools.mdx +95 -0
- package/.docs/raw/deployment/overview.mdx +9 -11
- package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +7 -4
- package/.docs/raw/frameworks/servers/express.mdx +2 -2
- package/.docs/raw/getting-started/installation.mdx +34 -132
- package/.docs/raw/getting-started/mcp-docs-server.mdx +13 -1
- package/.docs/raw/index.mdx +49 -14
- package/.docs/raw/observability/ai-tracing/exporters/otel.mdx +3 -0
- package/.docs/raw/reference/agents/generateLegacy.mdx +4 -4
- package/.docs/raw/reference/observability/ai-tracing/exporters/otel.mdx +6 -0
- package/.docs/raw/reference/scorers/answer-relevancy.mdx +105 -7
- package/.docs/raw/reference/scorers/answer-similarity.mdx +266 -16
- package/.docs/raw/reference/scorers/bias.mdx +107 -6
- package/.docs/raw/reference/scorers/completeness.mdx +131 -8
- package/.docs/raw/reference/scorers/content-similarity.mdx +107 -8
- package/.docs/raw/reference/scorers/context-precision.mdx +234 -18
- package/.docs/raw/reference/scorers/context-relevance.mdx +418 -35
- package/.docs/raw/reference/scorers/faithfulness.mdx +122 -8
- package/.docs/raw/reference/scorers/hallucination.mdx +125 -8
- package/.docs/raw/reference/scorers/keyword-coverage.mdx +141 -9
- package/.docs/raw/reference/scorers/noise-sensitivity.mdx +478 -6
- package/.docs/raw/reference/scorers/prompt-alignment.mdx +351 -102
- package/.docs/raw/reference/scorers/textual-difference.mdx +134 -6
- package/.docs/raw/reference/scorers/tone-consistency.mdx +133 -0
- package/.docs/raw/reference/scorers/tool-call-accuracy.mdx +422 -65
- package/.docs/raw/reference/scorers/toxicity.mdx +125 -7
- package/.docs/raw/reference/streaming/agents/MastraModelOutput.mdx +9 -5
- package/.docs/raw/reference/streaming/agents/streamLegacy.mdx +4 -4
- package/.docs/raw/reference/streaming/workflows/observeStream.mdx +49 -0
- package/.docs/raw/reference/streaming/workflows/observeStreamVNext.mdx +47 -0
- package/.docs/raw/reference/streaming/workflows/resumeStreamVNext.mdx +7 -5
- package/.docs/raw/reference/streaming/workflows/stream.mdx +1 -1
- package/.docs/raw/reference/workflows/workflow.mdx +33 -0
- package/.docs/raw/scorers/custom-scorers.mdx +244 -3
- package/.docs/raw/scorers/overview.mdx +8 -38
- package/.docs/raw/server-db/middleware.mdx +5 -2
- package/.docs/raw/server-db/runtime-context.mdx +178 -0
- package/.docs/raw/streaming/workflow-streaming.mdx +28 -1
- package/.docs/raw/tools-mcp/overview.mdx +25 -7
- package/.docs/raw/workflows/overview.mdx +28 -1
- package/CHANGELOG.md +15 -0
- package/package.json +6 -6
- package/.docs/raw/agents/runtime-context.mdx +0 -103
- package/.docs/raw/agents/using-tools-and-mcp.mdx +0 -241
- package/.docs/raw/getting-started/model-providers.mdx +0 -63
- package/.docs/raw/reference/agents/migration-guide.mdx +0 -291
- package/.docs/raw/tools-mcp/runtime-context.mdx +0 -63
- /package/.docs/raw/{evals → scorers/evals-old-api}/custom-eval.mdx +0 -0
- /package/.docs/raw/{evals → scorers/evals-old-api}/overview.mdx +0 -0
- /package/.docs/raw/{evals → scorers/evals-old-api}/running-in-ci.mdx +0 -0
- /package/.docs/raw/{evals → scorers/evals-old-api}/textual-evals.mdx +0 -0
- /package/.docs/raw/{server-db → workflows}/snapshots.mdx +0 -0
package/.docs/raw/agents/adding-voice.mdx

@@ -2,7 +2,7 @@
 
 Mastra agents can be enhanced with voice capabilities, allowing them to speak responses and listen to user input. You can configure an agent to use either a single voice provider or combine multiple providers for different operations.
 
-##
+## Basic usage
 
 The simplest way to add voice to an agent is to use a single provider for both speaking and listening:
 
@@ -39,30 +39,6 @@ try {
 }
 ```
 
-## Using Multiple Providers
-
-For more flexibility, you can use different providers for speaking and listening using the CompositeVoice class:
-
-```typescript
-import { Agent } from "@mastra/core/agent";
-import { CompositeVoice } from "@mastra/core/voice";
-import { OpenAIVoice } from "@mastra/voice-openai";
-import { PlayAIVoice } from "@mastra/voice-playai";
-import { openai } from "@ai-sdk/openai";
-
-export const agent = new Agent({
-  name: "Agent",
-  instructions: `You are a helpful assistant with both STT and TTS capabilities.`,
-  model: openai("gpt-4o"),
-
-  // Create a composite voice using OpenAI for listening and PlayAI for speaking
-  voice: new CompositeVoice({
-    input: new OpenAIVoice(),
-    output: new PlayAIVoice(),
-  }),
-});
-```
-
 ## Working with Audio Streams
 
 The `speak()` and `listen()` methods work with Node.js streams. Here's how to save and load audio files:
@@ -176,6 +152,123 @@ agent.voice.on("error", (error) => {
 });
 ```
 
+## Examples
+
+### End-to-end voice interaction
+
+This example demonstrates a voice interaction between two agents. The hybrid voice agent, which uses multiple providers, speaks a question, which is saved as an audio file. The unified voice agent listens to that file, processes the question, generates a response, and speaks it back. Both audio outputs are saved to the `audio` directory.
+
+The following files are created:
+
+- **hybrid-question.mp3** – Hybrid agent's spoken question.
+- **unified-response.mp3** – Unified agent's spoken response.
+
+```typescript filename="src/test-voice-agents.ts" showLineNumbers copy
+import "dotenv/config";
+
+import path from "path";
+import { createReadStream } from "fs";
+import { Agent } from "@mastra/core/agent";
+import { CompositeVoice } from "@mastra/core/voice";
+import { OpenAIVoice } from "@mastra/voice-openai";
+import { Mastra } from "@mastra/core/mastra";
+import { openai } from "@ai-sdk/openai";
+
+// Saves an audio stream to a file in the audio directory, creating the directory if it doesn't exist.
+export const saveAudioToFile = async (audio: NodeJS.ReadableStream, filename: string): Promise<void> => {
+  const audioDir = path.join(process.cwd(), "audio");
+  const filePath = path.join(audioDir, filename);
+
+  await fs.promises.mkdir(audioDir, { recursive: true });
+
+  const writer = createWriteStream(filePath);
+  audio.pipe(writer);
+  return new Promise((resolve, reject) => {
+    writer.on("finish", resolve);
+    writer.on("error", reject);
+  });
+};
+
+// Saves an audio stream to a file in the audio directory, creating the directory if it doesn't exist.
+export const convertToText = async (input: string | NodeJS.ReadableStream): Promise<string> => {
+  if (typeof input === "string") {
+    return input;
+  }
+
+  const chunks: Buffer[] = [];
+  return new Promise((resolve, reject) => {
+    input.on("data", (chunk) => chunks.push(Buffer.from(chunk)));
+    input.on("error", reject);
+    input.on("end", () => resolve(Buffer.concat(chunks).toString("utf-8")));
+  });
+};
+
+export const hybridVoiceAgent = new Agent({
+  name: "hybrid-voice-agent",
+  model: openai("gpt-4o"),
+  instructions: "You can speak and listen using different providers.",
+  voice: new CompositeVoice({
+    input: new OpenAIVoice(),
+    output: new OpenAIVoice()
+  })
+});
+
+export const unifiedVoiceAgent = new Agent({
+  name: "unified-voice-agent",
+  instructions: "You are an agent with both STT and TTS capabilities.",
+  model: openai("gpt-4o"),
+  voice: new OpenAIVoice()
+});
+
+export const mastra = new Mastra({
+  // ...
+  agents: { hybridVoiceAgent, unifiedVoiceAgent }
+});
+
+const hybridVoiceAgent = mastra.getAgent("hybridVoiceAgent");
+const unifiedVoiceAgent = mastra.getAgent("unifiedVoiceAgent");
+
+const question = "What is the meaning of life in one sentence?";
+
+const hybridSpoken = await hybridVoiceAgent.voice.speak(question);
+
+await saveAudioToFile(hybridSpoken!, "hybrid-question.mp3");
+
+const audioStream = createReadStream(path.join(process.cwd(), "audio", "hybrid-question.mp3"));
+const unifiedHeard = await unifiedVoiceAgent.voice.listen(audioStream);
+
+const inputText = await convertToText(unifiedHeard!);
+
+const unifiedResponse = await unifiedVoiceAgent.generate(inputText);
+const unifiedSpoken = await unifiedVoiceAgent.voice.speak(unifiedResponse.text);
+
+await saveAudioToFile(unifiedSpoken!, "unified-response.mp3");
+```
+
+### Using Multiple Providers
+
+For more flexibility, you can use different providers for speaking and listening using the CompositeVoice class:
+
+```typescript
+import { Agent } from "@mastra/core/agent";
+import { CompositeVoice } from "@mastra/core/voice";
+import { OpenAIVoice } from "@mastra/voice-openai";
+import { PlayAIVoice } from "@mastra/voice-playai";
+import { openai } from "@ai-sdk/openai";
+
+export const agent = new Agent({
+  name: "Agent",
+  instructions: `You are a helpful assistant with both STT and TTS capabilities.`,
+  model: openai("gpt-4o"),
+
+  // Create a composite voice using OpenAI for listening and PlayAI for speaking
+  voice: new CompositeVoice({
+    input: new OpenAIVoice(),
+    output: new PlayAIVoice(),
+  }),
+});
+```
+
 ## Supported Voice Providers
 
 Mastra supports multiple voice providers for text-to-speech (TTS) and speech-to-text (STT) capabilities:
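Note that the `saveAudioToFile` helper added in the example above calls `fs.promises.mkdir` and `createWriteStream`, while the snippet only imports `createReadStream`. A minimal sketch of the import block that example appears to assume (standard Node.js `fs`/`path` APIs; not part of the published diff itself):

```typescript
// Assumed imports for the saveAudioToFile / convertToText helpers shown above:
// fs.promises.mkdir creates the audio directory, createWriteStream writes the MP3 files,
// and createReadStream feeds the saved file back into voice.listen().
import fs, { createReadStream, createWriteStream } from "fs";
import path from "path";
```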
package/.docs/raw/agents/agent-memory.mdx

@@ -1,40 +1,80 @@
 ---
-title: "
-description:
+title: "Agent Memory | Agents | Mastra Docs"
+description: Learn how to add memory to agents to store conversation history and maintain context across interactions.
 ---
 
+import { Callout } from "nextra/components";
+
 # Agent Memory
 
-Agents
+Agents can use memory to store conversation history, recall relevant information, and maintain context across interactions. This enables more natural, stateful conversations throughout a user’s session.
+
+## When to use memory
+
+Use memory when an agent needs to retain information across multiple user interactions. This includes recalling user-specific details, facts, or tool calls and their results. Without memory, the agent handles each request in isolation, with no awareness of previous messages or responses.
+
+For more information about the different ways memory works in Mastra see the following pages.
+
+- [Working Memory](../memory/working-memory.mdx)
+- [Semantic Recall](../memory/semantic-recall.mdx)
+
+## Prerequisites
 
-
+Memory requires a storage provider to persist conversation history, including user messages and agent responses. You can use **shared storage** to define a single provider for all agents, or **dedicated storage** to configure separate providers for individual agents.
 
-
+Install `@mastra/memory` and a storage provider.
 
 ```bash npm2yarn copy
 npm install @mastra/memory@latest @mastra/libsql@latest
 ```
 
-
+### Storage providers
+
+Mastra supports multiple storage providers. Popular options include:
+
+- [LibSQL](../../reference/storage/libsql.mdx)
+- [PostgreSQL](../../reference/storage/postgresql.mdx)
+- [Cloudflare D1](../../reference/storage/cloudflare-d1.mdx)
+
+<Callout type="warning">
+  `LibSQLStore` works well for local development and when deploying to [Mastra Cloud](../mastra-cloud/overview.mdx), but may not be supported by some [serverless platforms](../deployment/serverless-platforms/index.mdx) or [cloud providers](../deployment/cloud-providers/index.mdx).
+</Callout>
+
+### Shared storage
+
+Use shared storage for a simple, centralized setup across agents. Add the storage adapter to your main Mastra instance to make it available to all agents by default.
+
+```typescript {6-8} filename="src/mastra/index.ts" showLineNumbers copy
+import { Mastra } from "@mastra/core/mastra";
+import { LibSQLStore } from "@mastra/libsql";
+
+export const mastra = new Mastra({
+  // ..
+  storage: new LibSQLStore({
+    url: ":memory:"
+  }),
+});
+```
+
+### Dedicated storage
+
+Use dedicated storage when agents need to keep data separate, use different providers, or tailor storage requirements for specific users. Add the storage adapter directly to the agent’s `memory` configuration option.
+
+```typescript {7-10} filename="src/mastra/agents/memory-agent.ts" showLineNumbers copy
 import { Agent } from "@mastra/core/agent";
 import { Memory } from "@mastra/memory";
 import { LibSQLStore } from "@mastra/libsql";
-import { openai } from "@ai-sdk/openai";
 
-export const
-
-  instructions: "You are a helpful assistant with memory.",
-  model: openai("gpt-4o"),
+export const memoryAgent = new Agent({
+  // ...
   memory: new Memory({
     storage: new LibSQLStore({
-      url: "file:../../memory.db"
+      url: "file:../../memory-agent.db"
     })
   })
 });
 ```
 
-This basic setup uses the default settings. Visit the [Memory documentation](../memory/overview.mdx) for more configuration info.
-
 ## Memory in agent calls
 
 When calling `.generate()` or `.stream()`, include a `memory` object with both `resource` and `thread` to enable memory.
@@ -45,10 +85,10 @@ When calling `.generate()` or `.stream()`, include a `memory` object with both `
 These fields tell the agent where to store and retrieve context, enabling persistent, thread-aware memory across interactions.
 
 ```typescript {3-4}
-const response = await
+const response = await memoryAgent.generate("Remember my favorite color is blue.", {
   memory: {
-
-
+    thread: "user-123",
+    resource: "test-123"
   }
 });
 ```
@@ -56,25 +96,22 @@ const response = await testAgent.generate("Remember my favorite color is blue.",
 To recall information stored in memory, call the agent with the same `resource` and `thread` values used in the original interaction.
 
 ```typescript {3-4}
-const response = await
+const response = await memoryAgent.generate("What's my favorite color?", {
   memory: {
-
-
+    thread: "user-123",
+    resource: "test-123"
   }
 });
 ```
 
-##
+## Using `RuntimeContext`
 
-
+Use `RuntimeContext` to access request-specific values. This lets you conditionally select different memory or storage configurations based on the context of the request.
 
-
-
-
-
-import { Memory } from "@mastra/memory";
-import { LibSQLStore } from "@mastra/libsql";
-import { openai } from "@ai-sdk/openai";
+```typescript filename="src/mastra/agents/memory-agent.ts" showLineNumbers
+export type UserTier = {
+  "user-tier": "enterprise" | "pro";
+};
 
 const premiumMemory = new Memory({
   // ...
@@ -84,68 +121,14 @@ const standardMemory = new Memory({
   // ...
 });
 
-export const
-  name: "test-agent",
-  instructions: "You are a helpful assistant with tiered memory capabilities.",
-  model: openai("gpt-4o"),
-  memory: ({ runtimeContext }) => {
-    const userTier = runtimeContext.get("userTier");
-    return userTier === "premium" ? premiumMemory : standardMemory;
-  }
-});
-```
-
-### Agent usage
-
-Pass a configured `RuntimeContext` instance to an agent to enable conditional logic during execution. This allows the agent to adapt its behavior based on runtime values.
-
-```typescript {1,4,6, 13} showLineNumbers copy
-import { RuntimeContext } from "@mastra/core/runtime-context";
-
-const testAgent = mastra.getAgent("testAgent");
-const runtimeContext = new RuntimeContext();
-
-runtimeContext.set("userTier", "premium");
-
-const response = await testAgent.generate("Remember my favorite color is blue.", {
-  memory: {
-    resource: "user_alice",
-    thread: { id: "preferences_thread" }
-  },
-  runtimeContext
-});
-```
-
-
-## Async memory configuration
-
-Memory can be configured asynchronously to support use cases like fetching user-specific settings from a database, validating access with Auth, or loading additional data from a remote service.
-
-```typescript {18, 22} filename="src/mastra/agents/test-agent.ts" showLineNumbers copy
-import { Agent } from "@mastra/core/agent";
-import { Memory } from "@mastra/memory";
-import { LibSQLStore } from "@mastra/libsql";
-import { openai } from "@ai-sdk/openai";
-
-const premiumMemory = new Memory({
-  // ...
-});
-
-const standardMemory = new Memory({
+export const memoryAgent = new Agent({
   // ...
-})
-
-export const testAgent = new Agent({
-  name: "test-agent",
-  instructions: "You are a helpful assistant with tiered memory capabilities.",
-  model: openai("gpt-4o"),
-  memory: async ({ runtimeContext }) => {
-    const userId = runtimeContext.get("userId");
-
-    // Example database lookup using `userId`
-    const userTier = await query(`SELECT user_tier FROM users WHERE userId = $1`, [userId]);
+  memory: ({ runtimeContext }) => {
+    const userTier = runtimeContext.get("user-tier") as UserTier["user-tier"];
 
-    return userTier === "
+    return userTier === "enterprise"
+      ? premiumMemory
+      : standardMemory;
   }
 });
 ```
@@ -155,3 +138,4 @@ export const testAgent = new Agent({
 - [Working Memory](../memory/working-memory.mdx)
 - [Semantic Recall](../memory/semantic-recall.mdx)
 - [Threads and Resources](../memory/threads-and-resources.mdx)
+- [Runtime Context](../server-db/runtime-context.mdx)
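The reworked `memory` selector above reads a `"user-tier"` value from `RuntimeContext`. For context, a minimal sketch of how a caller might supply that value, combining the `RuntimeContext` usage from the removed lines with the new `user-tier` key (the thread/resource values mirror the examples above; treat the exact call shape as an assumption):

```typescript
import { RuntimeContext } from "@mastra/core/runtime-context";
import { mastra } from "./mastra";

const memoryAgent = mastra.getAgent("memoryAgent");

// Set the key the memory() selector reads before invoking the agent.
const runtimeContext = new RuntimeContext();
runtimeContext.set("user-tier", "enterprise");

const response = await memoryAgent.generate("Remember my favorite color is blue.", {
  memory: {
    thread: "user-123",
    resource: "test-123"
  },
  runtimeContext
});
```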
package/.docs/raw/agents/guardrails.mdx

@@ -20,7 +20,7 @@ Use processors for content moderation, prompt injection prevention, response san
 
 ## Adding processors to an agent
 
-Import and instantiate the relevant processor class, and pass it to your agent’s configuration using either the `inputProcessors` or `outputProcessors`
+Import and instantiate the relevant processor class, and pass it to your agent’s configuration using either the `inputProcessors` or `outputProcessors` option:
 
 ```typescript {3,9-17} filename="src/mastra/agents/moderated-agent.ts" showLineNumbers copy
 import { openai } from "@ai-sdk/openai";
package/.docs/raw/agents/networks.mdx

@@ -60,9 +60,12 @@ const agentStep1 = createStep({
   }),
   execute: async ({ inputData }) => {
     const resp = await agent1.generate(inputData.city, {
-
-
-
+      structuredOutput: {
+        schema: z.object({
+          text: z.string(),
+        })
+      },
+      maxSteps: 1
     });
 
     return { text: resp.object.text };
@@ -80,9 +83,12 @@ const agentStep2 = createStep({
   }),
   execute: async ({ inputData }) => {
     const resp = await agent2.generate(inputData.text, {
-
-
-
+      structuredOutput: {
+        schema: z.object({
+          text: z.string(),
+        })
+      },
+      maxSteps: 1
     });
 
     return { text: resp.object.text };
package/.docs/raw/agents/overview.mdx

@@ -34,7 +34,7 @@ Mastra's model router auto-detects environment variables for your chosen provide
 OPENAI_API_KEY=<your-api-key>
 ```
 
-> Mastra supports more than 600 models. Choose from the full list [here](
+> Mastra supports more than 600 models. Choose from the full list [here](/models).
 
 ### Create an agent
 
@@ -245,10 +245,13 @@ const response = await testAgent.generate(
     }
   ],
   {
-
-
-
-
+    structuredOutput: {
+      schema: z.object({
+        summary: z.string(),
+        keywords: z.array(z.string())
+      })
+    },
+    maxSteps: 1
   }
 );
 
@@ -279,10 +282,6 @@ const response = await testAgent.generate([
 console.log(response.text);
 ```
 
-## Multi-step tool use
-
-Agents can be enhanced with tools, functions that extend their capabilities beyond text generation. Tools allow agents to perform calculations, access external systems, and process data. Agents not only decide whether to call tools they're given, they determine the parameters that should be given to that tool.
-
 For a detailed guide to creating and configuring tools, see the [Tools Overview](../tools-mcp/overview.mdx) page.
 
 ### Using `maxSteps`
@@ -311,6 +310,42 @@ const response = await testAgent.generate("Help me organize my day", {
 });
 ```
 
+## Using tools
+
+Agents can use tools to go beyond language generation, enabling structured interactions with external APIs and services. Tools allow agents to access data and perform clearly defined operations in a reliable, repeatable way.
+
+```typescript filename="src/mastra/agents/test-agent.ts" showLineNumbers
+export const testAgent = new Agent({
+  // ...
+  tools: { testTool }
+});
+```
+
+> See [Using Tools](./using-tools.mdx) for more information.
+
+## Using `RuntimeContext`
+
+Use `RuntimeContext` to access request-specific values. This lets you conditionally adjust behavior based on the context of the request.
+
+```typescript filename="src/mastra/agents/test-agent.ts" showLineNumbers
+export type UserTier = {
+  "user-tier": "enterprise" | "pro";
+};
+
+export const testAgent = new Agent({
+  // ...
+  model: ({ runtimeContext }) => {
+    const userTier = runtimeContext.get("user-tier") as UserTier["user-tier"];
+
+    return userTier === "enterprise"
+      ? openai("gpt-4o-mini")
+      : openai("gpt-4.1-nano");
+  }
+});
+```
+
+> See [Runtime Context](../server-db/runtime-context.mdx) for more information.
+
 ## Testing agents locally
 There are two ways to run and test agents.
 
@@ -350,7 +385,7 @@ npx tsx src/test-agent.ts
 
 ## Related
 
+- [Using Tools](./using-tools.mdx)
 - [Agent Memory](./agent-memory.mdx)
-- [
-- [Agent Tools and MCP](./using-tools-and-mcp.mdx)
+- [Runtime Context](../../examples/agents/runtime-context.mdx)
 - [Calling Agents](../../examples/agents/calling-agents.mdx)
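The `generate` call updated above now requests structured data via `structuredOutput`. A minimal sketch of reading that result, assuming the `.object` accessor shown in the networks.mdx hunks applies to this call as well (the prompt text and `testAgent` lookup are illustrative; the agent name comes from the hunks above):

```typescript
import { z } from "zod";
import { mastra } from "./mastra";

const testAgent = mastra.getAgent("testAgent");

// Schema mirrors the one added in the overview.mdx hunk above.
const response = await testAgent.generate("Summarize this article and extract keywords: ...", {
  structuredOutput: {
    schema: z.object({
      summary: z.string(),
      keywords: z.array(z.string())
    })
  },
  maxSteps: 1
});

// The networks.mdx steps read structured results from resp.object;
// here the parsed object carries the summary and keywords fields.
console.log(response.object.summary);
console.log(response.object.keywords);
```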
package/.docs/raw/agents/using-tools.mdx

@@ -0,0 +1,95 @@
+---
+title: "Using Tools | Agents | Mastra Docs"
+description: Learn how to create tools and add them to agents to extend capabilities beyond text generation.
+---
+
+# Using Tools
+
+Agents use tools to call APIs, query databases, or run custom functions from your codebase. [Tools](../tools-mcp/overview.mdx) give agents capabilities beyond language generation by providing structured access to data and performing clearly defined operations. You can also load tools from remote [MCP servers](../tools-mcp/mcp-overview.mdx) to expand an agent’s capabilities.
+
+## When to use tools
+
+Use tools when an agent needs additional context or information from remote resources, or when it needs to run code that performs a specific operation. This includes tasks a model can't reliably handle on its own, such as fetching live data or returning consistent, well defined outputs.
+
+## Creating a tool
+
+This example shows how to create a tool that fetches weather data from an API. When the agent calls the tool, it provides the required input as defined by the tool’s `inputSchema`. The tool accesses this data through its `context` argument, which in this example includes the `location` used in the weather API query.
+
+```typescript {14,16} filename="src/mastra/tools/weather-tool.ts" showLineNumbers copy
+import { createTool } from "@mastra/core/tools";
+import { z } from "zod";
+
+export const weatherTool = createTool({
+  id: "weather-tool",
+  description: "Fetches weather for a location",
+  inputSchema: z.object({
+    location: z.string()
+  }),
+  outputSchema: z.object({
+    weather: z.string()
+  }),
+  execute: async ({ context }) => {
+    const { location } = context;
+
+    const response = await fetch(`https://wttr.in/${location}?format=3`);
+    const weather = await response.text();
+
+    return { weather };
+  }
+});
+```
+
+## Adding tools to an agent
+
+To make a tool available to an agent, add it to the `tools` option and reference it by name in the agent’s instructions.
+
+```typescript {9,11} filename="src/mastra/agents/weather-agent.ts" showLineNumbers copy
+import { openai } from "@ai-sdk/openai";
+import { Agent } from "@mastra/core/agent";
+import { weatherTool } from "../tools/weather-tool";
+
+export const weatherAgent = new Agent({
+  name: "weather-agent",
+  instructions: `
+    You are a helpful weather assistant.
+    Use the weatherTool to fetch current weather data.`,
+  model: openai("gpt-4o-mini"),
+  tools: { weatherTool }
+});
+```
+
+## Calling an agent
+
+The agent uses the tool’s `inputSchema` to infer what data the tool expects. In this case, it extracts `London` as the `location` from the message and makes it available to the tool’s context.
+
+```typescript {5} filename="src/test-tool.ts" showLineNumbers copy
+import { mastra } from "./mastra";
+
+const agent = mastra.getAgent("weatherAgent");
+
+const result = await agent.generate("What's the weather in London?");
+```
+
+## Using multiple tools
+
+An agent can use multiple tools to handle more complex tasks by delegating specific parts to individual tools. The agent decides which tools to use based on the user’s message, the agent’s instructions, and the tool descriptions and schemas.
+
+When multiple tools are available, the agent may choose to use one, several, or none, depending on what’s needed to answer the query.
+
+```typescript {6} filename="src/mastra/agents/weather-agent.ts" showLineNumbers copy
+import { weatherTool } from "../tools/weather-tool";
+import { activitiesTool } from "../tools/activities-tool";
+
+export const weatherAgent = new Agent({
+  // ..
+  tools: { weatherTool, activitiesTool }
+});
+```
+
+
+## Related
+
+- [Tools Overview](../tools-mcp/overview.mdx)
+- [Agent Memory](./agent-memory.mdx)
+- [Runtime Context](./runtime-context.mdx)
+- [Calling Agents](../../examples/agents/calling-agents.mdx)
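The "Calling an agent" example in the new page imports `mastra` from `./mastra`, but the page never shows the agent being registered. A minimal sketch of that registration, following the `new Mastra({ agents: ... })` pattern used elsewhere in this diff (the file location is an assumption):

```typescript
// src/mastra/index.ts (assumed location of the Mastra instance)
import { Mastra } from "@mastra/core/mastra";
import { weatherAgent } from "./agents/weather-agent";

// Registering the agent is what makes mastra.getAgent("weatherAgent") resolve in src/test-tool.ts.
export const mastra = new Mastra({
  agents: { weatherAgent }
});
```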
package/.docs/raw/deployment/overview.mdx

@@ -7,6 +7,15 @@ description: Learn about different deployment options for your Mastra applicatio
 
 Mastra offers multiple deployment options to suit your application's needs, from fully-managed solutions to self-hosted options, and web framework integrations. This guide will help you understand the available deployment paths and choose the right one for your project.
 
+## Choosing a Deployment Option
+
+| Option | Best For | Key Benefits |
+| ------------------------ | ------------------------------------------------------------- | -------------------------------------------------------------- |
+| **Mastra Cloud** | Teams wanting to ship quickly without infrastructure concerns | Fully-managed, automatic scaling, built-in observability |
+| **Framework Deployment** | Teams already using Next.js, Astro etc | Simplify deployment with a unified codebase for frontend and backend |
+| **Server Deployment** | Teams needing maximum control and customization | Full control, custom middleware, integrate with existing apps |
+| **Serverless Platforms** | Teams already using Vercel, Netlify, or Cloudflare | Platform integration, simplified deployment, automatic scaling |
+
 ## Deployment Options
 
 ### Runtime support
@@ -71,14 +80,3 @@ Once your Mastra application is deployed, you'll need to configure your client t
 - Support for streaming responses
 
 [Client configuration guide →](../server-db/mastra-client.mdx)
-
-## Choosing a Deployment Option
-
-| Option | Best For | Key Benefits |
-| ------------------------ | ------------------------------------------------------------- | -------------------------------------------------------------- |
-| **Mastra Cloud** | Teams wanting to ship quickly without infrastructure concerns | Fully-managed, automatic scaling, built-in observability |
-| **Framework Deployment** | Teams already using Next.js, Astro etc | Simplify deployment with a unified codebase for frontend and backend |
-| **Server Deployment** | Teams needing maximum control and customization | Full control, custom middleware, integrate with existing apps |
-| **Serverless Platforms** | Teams already using Vercel, Netlify, or Cloudflare | Platform integration, simplified deployment, automatic scaling |
-
-
package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx

@@ -33,7 +33,7 @@ export const weatherAgent = new Agent({
 });
 ```
 
-> See [Model Providers](/
+> See [Model Providers](/models) and [Model Capabilities](/models) for more information.
 
 ## React Hooks
 
@@ -181,9 +181,12 @@ export async function POST(req: Request) {
   const body = await req.json();
   const myAgent = mastra.getAgent("weatherAgent");
   const stream = await myAgent.stream(body, {
-
-
-
+    structuredOutput: {
+      schema: z.object({
+        weather: z.string()
+      })
+    },
+    maxSteps: 1
  });
 
   return stream.toTextStreamResponse();