@mastra/mcp-docs-server 0.13.34 → 0.13.35-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +12 -12
  2. package/.docs/organized/changelogs/%40mastra%2Fcore.md +31 -31
  3. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +9 -9
  4. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +9 -9
  5. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +9 -9
  6. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +9 -9
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +19 -19
  8. package/.docs/organized/changelogs/%40mastra%2Flance.md +10 -10
  9. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +8 -8
  10. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +11 -11
  11. package/.docs/organized/changelogs/%40mastra%2Fpg.md +10 -10
  12. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +16 -16
  13. package/.docs/organized/changelogs/%40mastra%2Freact.md +10 -10
  14. package/.docs/organized/changelogs/%40mastra%2Fserver.md +14 -14
  15. package/.docs/organized/changelogs/create-mastra.md +3 -3
  16. package/.docs/organized/changelogs/mastra.md +9 -9
  17. package/.docs/organized/code-examples/memory-with-mongodb.md +208 -0
  18. package/.docs/raw/getting-started/project-structure.mdx +1 -1
  19. package/.docs/raw/getting-started/studio.mdx +4 -4
  20. package/.docs/raw/memory/overview.mdx +1 -1
  21. package/.docs/raw/memory/semantic-recall.mdx +4 -3
  22. package/.docs/raw/memory/storage/memory-with-libsql.mdx +141 -0
  23. package/.docs/raw/memory/storage/memory-with-pg.mdx +138 -0
  24. package/.docs/raw/memory/storage/memory-with-upstash.mdx +147 -0
  25. package/.docs/raw/observability/ai-tracing/exporters/arize.mdx +201 -0
  26. package/.docs/raw/observability/ai-tracing/overview.mdx +12 -8
  27. package/.docs/raw/reference/observability/ai-tracing/exporters/arize.mdx +160 -0
  28. package/.docs/raw/reference/observability/ai-tracing/exporters/braintrust.mdx +2 -2
  29. package/.docs/raw/reference/observability/ai-tracing/exporters/langfuse.mdx +1 -1
  30. package/.docs/raw/reference/observability/ai-tracing/exporters/langsmith.mdx +2 -2
  31. package/.docs/raw/reference/observability/ai-tracing/exporters/otel.mdx +1 -1
  32. package/.docs/raw/reference/observability/ai-tracing/interfaces.mdx +48 -21
  33. package/.docs/raw/reference/storage/mongodb.mdx +146 -0
  34. package/.docs/raw/server-db/storage.mdx +1 -0
  35. package/.docs/raw/workflows/agents-and-tools.mdx +15 -1
  36. package/.docs/raw/workflows/human-in-the-loop.mdx +268 -0
  37. package/CHANGELOG.md +7 -0
  38. package/package.json +11 -4
@@ -0,0 +1,208 @@
1
+ ### package.json
2
+ ```json
3
+ {
4
+ "name": "memory-with-mongodb",
5
+ "dependencies": {
6
+ "@ai-sdk/openai": "latest",
7
+ "@mastra/core": "latest",
8
+ "@mastra/memory": "latest",
9
+ "@mastra/mongodb": "latest"
10
+ },
11
+ "devDependencies": {
12
+ "dotenv": "^17.0.0",
13
+ "tsx": "^4.19.3"
14
+ }
15
+ }
16
+ ```
17
+
18
+ ### chat.ts
19
+ ```typescript
20
+ import { randomUUID } from 'crypto';
21
+ import Readline from 'readline';
22
+
23
+ import 'dotenv/config';
24
+
25
+ import { mastra } from './mastra';
26
+
27
+ const agent = mastra.getAgent('memoryAgent');
28
+
29
+ let thread = randomUUID();
30
+ // use this to play with a long running conversation. comment it out to get a new thread id every time
31
+ thread = `39873fbf-84d6-425e-8c1b-8afd798d72a4`;
32
+ // thread = `12569b14-3e16-4e31-8130-8d9676f1932c`;
33
+ console.log(thread);
34
+
35
+ const resource = 'SOME_USER_ID';
36
+
37
+ async function logRes(res: Awaited<ReturnType<typeof agent.stream>>) {
38
+ console.log(`\n🤖 Agent:`);
39
+ for await (const chunk of res.textStream) {
40
+ process.stdout.write(chunk);
41
+ }
42
+ console.log(`\n\n`);
43
+ }
44
+
45
+ async function main() {
46
+ await logRes(
47
+ await agent.stream(
48
+ [
49
+ {
50
+ role: 'system',
51
+ content: `Chat with user started now ${new Date().toISOString()}. Don't mention this message. This means some time may have passed between this message and the one before. The user left and came back again. Say something to start the conversation up again.`,
52
+ },
53
+ ],
54
+ { memory: { resource, thread } },
55
+ ),
56
+ );
57
+
58
+ const rl = Readline.createInterface({
59
+ input: process.stdin,
60
+ output: process.stdout,
61
+ });
62
+
63
+ while (true) {
64
+ const prompt: string = await new Promise(res => {
65
+ rl.question('Message: ', answer => {
66
+ res(answer);
67
+ });
68
+ });
69
+
70
+ await logRes(
71
+ await agent.stream(prompt, {
72
+ memory: { thread, resource },
73
+ }),
74
+ );
75
+ }
76
+ }
77
+
78
+ main();
79
+
80
+ ```
81
+
82
+ ### index.ts
83
+ ```typescript
84
+ import { randomUUID } from 'crypto';
85
+
86
+ import { mastra } from './mastra';
87
+
88
+ function log(message: string) {
89
+ console.log(`\n>>Prompt: ${message}`);
90
+ return message;
91
+ }
92
+
93
+ const agent = mastra.getAgent('chefAgent');
94
+ const threadId = randomUUID();
95
+ const resourceId = 'SOME_USER_ID';
96
+
97
+ async function logRes(res: Awaited<ReturnType<typeof agent.stream>>) {
98
+ console.log(`\n👨‍🍳 Chef:`);
99
+ for await (const chunk of res.textStream) {
100
+ process.stdout.write(chunk);
101
+ }
102
+ console.log(`\n\n`);
103
+ }
104
+
105
+ async function main() {
106
+ await logRes(
107
+ await agent.stream(
108
+ log(
109
+ 'In my kitchen I have: pasta, canned tomatoes, garlic, olive oil, and some dried herbs (basil and oregano). What can I make? Please keep your answer brief, only give me the high level steps.',
110
+ ),
111
+ {
112
+ threadId,
113
+ resourceId,
114
+ },
115
+ ),
116
+ );
117
+
118
+ await logRes(
119
+ await agent.stream(
120
+ log(
121
+ "Now I'm over at my friend's house, and they have: chicken thighs, coconut milk, sweet potatoes, and some curry powder.",
122
+ ),
123
+ {
124
+ threadId,
125
+ resourceId,
126
+ },
127
+ ),
128
+ );
129
+
130
+ await logRes(
131
+ await agent.stream(log('What did we cook before I went to my friends house?'), {
132
+ threadId,
133
+ resourceId,
134
+ memoryOptions: {
135
+ lastMessages: 3,
136
+ },
137
+ }),
138
+ );
139
+
140
+ process.exit(0);
141
+ }
142
+
143
+ main();
144
+
145
+ ```
146
+
147
+ ### mastra/agents/index.ts
148
+ ```typescript
149
+ import { openai } from '@ai-sdk/openai';
150
+ import { Agent } from '@mastra/core/agent';
151
+ import { Memory } from '@mastra/memory';
152
+ import { MongoDBStore, MongoDBVector } from '@mastra/mongodb';
153
+
154
+ // This URI must be an Atlas MongoDB deployment in order to work with vector search
155
+ // in the format mongodb+srv://<username>:<password>@<cluster>.mongodb.net
156
+ const uri = process.env.MONGODB_URI;
157
+ const dbName = process.env.MONGODB_DB_NAME || 'mastra_memory';
158
+
159
+ export const memory = new Memory({
160
+ storage: new MongoDBStore({
161
+ url: uri,
162
+ dbName,
163
+ }),
164
+ vector: new MongoDBVector({
165
+ uri,
166
+ dbName,
167
+ }),
168
+ options: {
169
+ lastMessages: 10,
170
+ semanticRecall: {
171
+ topK: 3,
172
+ messageRange: 2,
173
+ },
174
+ },
175
+ embedder: openai.embedding('text-embedding-3-small'),
176
+ });
177
+
178
+ export const chefAgent = new Agent({
179
+ name: 'chefAgent',
180
+ instructions:
181
+ 'You are Michel, a practical and experienced home chef who helps people cook great meals with whatever ingredients they have available. Your first priority is understanding what ingredients and equipment the user has access to, then suggesting achievable recipes. You explain cooking steps clearly and offer substitutions when needed, maintaining a friendly and encouraging tone throughout.',
182
+ model: openai('gpt-4o'),
183
+ memory,
184
+ });
185
+
186
+ export const memoryAgent = new Agent({
187
+ name: 'Memory Agent',
188
+ instructions:
189
+ "You are an AI agent with the ability to automatically recall memories from previous interactions. You may have conversations that last hours, days, months, or years. If you don't know it already you should ask for the user's name and some info about them.",
190
+ model: openai('gpt-4o'),
191
+ memory,
192
+ });
193
+
194
+ ```
195
+
196
+ ### mastra/index.ts
197
+ ```typescript
198
+ import { Mastra } from '@mastra/core';
199
+
200
+ import 'dotenv/config';
201
+
202
+ import { chefAgent, memoryAgent } from './agents';
203
+
204
+ export const mastra = new Mastra({
205
+ agents: { chefAgent, memoryAgent },
206
+ });
207
+
208
+ ```
@@ -63,7 +63,7 @@ Top-level files define how your Mastra project is configured, built, and connect
63
63
  | File | Description |
64
64
  | --------------------- | ------------ |
65
65
  | `src/mastra/index.ts` | Central entry point where you configure and initialize Mastra. |
66
- | `.env.example` | Template for environment variables copy and rename to `.env` to add your secret [model provider](/models) keys. |
66
+ | `.env.example` | Template for environment variables - copy and rename to `.env` to add your secret [model provider](/models) keys. |
67
67
  | `package.json` | Defines project metadata, dependencies, and available npm scripts. |
68
68
  | `tsconfig.json` | Configures TypeScript options such as path aliases, compiler settings, and build output. |
69
69
 
@@ -10,15 +10,15 @@ import { Callout } from "nextra/components";
10
10
 
11
11
  # Playground
12
12
 
13
- Playground helps you build agents quickly. It provides an interactive UI for testing your agents and workflows, along with a REST API that exposes your Mastra application as a local service. This lets you start building without having to integrate Mastra into your project right away.
13
+ Playground provides an interactive UI for building and testing your agents, along with a REST API that exposes your Mastra application as a local service. This lets you start building without worrying about integration right away.
14
14
 
15
- As your project evolves, Playground enables you to iterate quickly. Features like Observability and Scorers give you visibility into performance at every stage of development.
15
+ As your project evolves, Playground's development environment helps you iterate on your agent quickly. Meanwhile, Observability and Scorer features give you visibility into performance at every stage.
16
16
 
17
- To get started, run Playground locally using the instructions below, or [deploy to Mastra Cloud](/docs/mastra-cloud/setting-up) to access the Playground remotely and collaborate with your team.
17
+ To get started, run Playground locally using the instructions below, or [deploy to Mastra Cloud](https://mastra.ai/docs/mastra-cloud/setting-up) to collaborate with your team.
18
18
 
19
19
  <YouTube id="spGlcTEjuXY" startTime={126}/>
20
20
 
21
- ## Launch Playground
21
+ ## Start Playground
22
22
 
23
23
  If you created your application with `create mastra`, start the local development server using the `dev` script. You can also run it directly with `mastra dev`.
24
24
 
@@ -69,7 +69,7 @@ All memory types are [thread-scoped](./working-memory.mdx#thread-scoped-memory-d
69
69
 
70
70
  To persist and recall information between conversations, memory requires a storage adapter.
71
71
 
72
- Supported options include [LibSQL](/examples/memory/memory-with-libsql), [Postgres](/examples/memory/memory-with-pg), and [Upstash](/examples/memory/memory-with-upstash)
72
+ Supported options include [LibSQL](/docs/memory/storage/memory-with-libsql), [MongoDB](/docs/memory/storage/memory-with-mongodb), [Postgres](/docs/memory/storage/memory-with-pg), and [Upstash](/docs/memory/storage/memory-with-upstash)
73
73
 
74
74
  We use LibSQL out of the box because it is file-based or in-memory, so it is easy to install and works well with the playground.
75
75
 
@@ -92,9 +92,10 @@ const agent = new Agent({
92
92
 
93
93
  **Storage/vector code Examples**:
94
94
 
95
- - [LibSQL](/examples/memory/memory-with-libsql)
96
- - [Postgres](/examples/memory/memory-with-pg)
97
- - [Upstash](/examples/memory/memory-with-upstash)
95
+ - [LibSQL](/docs/memory/storage/memory-with-libsql)
96
+ - [MongoDB](/docs/memory/storage/memory-with-mongodb)
97
+ - [Postgres](/docs/memory/storage/memory-with-pg)
98
+ - [Upstash](/docs/memory/storage/memory-with-upstash)
98
99
 
99
100
  ### Embedder configuration
100
101
 
@@ -0,0 +1,141 @@
1
+ ---
2
+ title: "Example: Memory with LibSQL | Memory | Mastra Docs"
3
+ description: Example for how to use Mastra's memory system with LibSQL storage and vector database backend.
4
+ ---
5
+
6
+ # Memory with LibSQL
7
+
8
+ This example demonstrates how to use Mastra's memory system with LibSQL as the storage backend.
9
+
10
+ ## Prerequisites
11
+
12
+ This example uses the `openai` model. Make sure to add `OPENAI_API_KEY` to your `.env` file.
13
+
14
+ ```bash filename=".env" copy
15
+ OPENAI_API_KEY=<your-api-key>
16
+ ```
17
+
18
+ And install the following package:
19
+
20
+ ```bash copy
21
+ npm install @mastra/libsql
22
+ ```
23
+
24
+ ## Adding memory to an agent
25
+
26
+ To add LibSQL memory to an agent use the `Memory` class and create a new `storage` key using `LibSQLStore`. The `url` can either be a remote location, or a local file system resource.
27
+
28
+ ```typescript filename="src/mastra/agents/example-libsql-agent.ts" showLineNumbers copy
29
+ import { Memory } from "@mastra/memory";
30
+ import { Agent } from "@mastra/core/agent";
31
+ import { openai } from "@ai-sdk/openai";
32
+ import { LibSQLStore } from "@mastra/libsql";
33
+
34
+ export const libsqlAgent = new Agent({
35
+ name: "libsql-agent",
36
+ instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
37
+ model: openai("gpt-4o"),
38
+ memory: new Memory({
39
+ storage: new LibSQLStore({
40
+ url: "file:libsql-agent.db"
41
+ }),
42
+ options: {
43
+ threads: {
44
+ generateTitle: true
45
+ }
46
+ }
47
+ })
48
+ });
49
+ ```
50
+
51
+ ## Local embeddings with fastembed
52
+
53
+ Embeddings are numeric vectors used by memory’s `semanticRecall` to retrieve related messages by meaning (not keywords). This setup uses `@mastra/fastembed` to generate vector embeddings.
54
+
55
+ Install `fastembed` to get started:
56
+
57
+ ```bash copy
58
+ npm install @mastra/fastembed
59
+ ```
60
+
61
+ Add the following to your agent:
62
+
63
+ ```typescript filename="src/mastra/agents/example-libsql-agent.ts" showLineNumbers copy
64
+ import { Memory } from "@mastra/memory";
65
+ import { Agent } from "@mastra/core/agent";
66
+ import { openai } from "@ai-sdk/openai";
67
+ import { LibSQLStore, LibSQLVector } from "@mastra/libsql";
68
+ import { fastembed } from "@mastra/fastembed";
69
+
70
+ export const libsqlAgent = new Agent({
71
+ name: "libsql-agent",
72
+ instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
73
+ model: openai("gpt-4o"),
74
+ memory: new Memory({
75
+ storage: new LibSQLStore({
76
+ url: "file:libsql-agent.db"
77
+ }),
78
+ vector: new LibSQLVector({
79
+ connectionUrl: "file:libsql-agent.db"
80
+ }),
81
+ embedder: fastembed,
82
+ options: {
83
+ lastMessages: 10,
84
+ semanticRecall: {
85
+ topK: 3,
86
+ messageRange: 2
87
+ },
88
+ threads: {
89
+ generateTitle: true
90
+ }
91
+ }
92
+ })
93
+ });
94
+ ```
95
+
96
+ ## Usage example
97
+
98
+ Use `memoryOptions` to scope recall for this request. Set `lastMessages: 5` to limit recency-based recall, and use `semanticRecall` to fetch the `topK: 3` most relevant messages, including `messageRange: 2` neighboring messages for context around each match.
99
+
100
+ ```typescript filename="src/test-libsql-agent.ts" showLineNumbers copy
101
+ import "dotenv/config";
102
+
103
+ import { mastra } from "./mastra";
104
+
105
+ const threadId = "123";
106
+ const resourceId = "user-456";
107
+
108
+ const agent = mastra.getAgent("libsqlAgent");
109
+
110
+ const message = await agent.stream("My name is Mastra", {
111
+ memory: {
112
+ thread: threadId,
113
+ resource: resourceId
114
+ }
115
+ });
116
+
117
+ await message.textStream.pipeTo(new WritableStream());
118
+
119
+ const stream = await agent.stream("What's my name?", {
120
+ memory: {
121
+ thread: threadId,
122
+ resource: resourceId
123
+ },
124
+ memoryOptions: {
125
+ lastMessages: 5,
126
+ semanticRecall: {
127
+ topK: 3,
128
+ messageRange: 2
129
+ }
130
+ }
131
+ });
132
+
133
+ for await (const chunk of stream.textStream) {
134
+ process.stdout.write(chunk);
135
+ }
136
+ ```
137
+
138
+
139
+ ## Related
140
+
141
+ - [Calling Agents](../agents/calling-agents.mdx)
@@ -0,0 +1,138 @@
1
+ ---
2
+ title: "Example: Memory with PostgreSQL | Memory | Mastra Docs"
3
+ description: Example for how to use Mastra's memory system with PostgreSQL storage and vector capabilities.
4
+ ---
5
+
6
+ # Memory with Postgres
7
+
8
+ This example demonstrates how to use Mastra's memory system with PostgreSQL as the storage backend.
9
+
10
+ ## Prerequisites
11
+
12
+ This example uses the `openai` model and requires a PostgreSQL database with the `pgvector` extension. Make sure to add the following to your `.env` file:
13
+
14
+ ```bash filename=".env" copy
15
+ OPENAI_API_KEY=<your-api-key>
16
+ DATABASE_URL=<your-connection-string>
17
+ ```
18
+
19
+ And install the following package:
20
+
21
+ ```bash copy
22
+ npm install @mastra/pg
23
+ ```
24
+
25
+ ## Adding memory to an agent
26
+
27
+ To add PostgreSQL memory to an agent use the `Memory` class and create a new `storage` key using `PostgresStore`. The `connectionString` can either be a remote location, or a local database connection.
28
+
29
+ ```typescript filename="src/mastra/agents/example-pg-agent.ts" showLineNumbers copy
30
+ import { Memory } from "@mastra/memory";
31
+ import { Agent } from "@mastra/core/agent";
32
+ import { openai } from "@ai-sdk/openai";
33
+ import { PostgresStore } from "@mastra/pg";
34
+
35
+ export const pgAgent = new Agent({
36
+ name: "pg-agent",
37
+ instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
38
+ model: openai("gpt-4o"),
39
+ memory: new Memory({
40
+ storage: new PostgresStore({
41
+ connectionString: process.env.DATABASE_URL!
42
+ }),
43
+ options: {
44
+ threads: {
45
+ generateTitle: true
46
+ }
47
+ }
48
+ })
49
+ });
50
+ ```
51
+
52
+ ## Local embeddings with fastembed
53
+
54
+ Embeddings are numeric vectors used by memory’s `semanticRecall` to retrieve related messages by meaning (not keywords). This setup uses `@mastra/fastembed` to generate vector embeddings.
55
+
56
+ Install `fastembed` to get started:
57
+
58
+ ```bash copy
59
+ npm install @mastra/fastembed
60
+ ```
61
+
62
+ Add the following to your agent:
63
+
64
+ ```typescript filename="src/mastra/agents/example-pg-agent.ts" showLineNumbers copy
65
+ import { Memory } from "@mastra/memory";
66
+ import { Agent } from "@mastra/core/agent";
67
+ import { openai } from "@ai-sdk/openai";
68
+ import { PostgresStore, PgVector } from "@mastra/pg";
69
+ import { fastembed } from "@mastra/fastembed";
70
+
71
+ export const pgAgent = new Agent({
72
+ name: "pg-agent",
73
+ instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
74
+ model: openai("gpt-4o"),
75
+ memory: new Memory({
76
+ storage: new PostgresStore({
77
+ connectionString: process.env.DATABASE_URL!
78
+ }),
79
+ vector: new PgVector({
80
+ connectionString: process.env.DATABASE_URL!
81
+ }),
82
+ embedder: fastembed,
83
+ options: {
84
+ lastMessages: 10,
85
+ semanticRecall: {
86
+ topK: 3,
87
+ messageRange: 2
88
+ }
89
+ }
90
+ })
91
+ });
92
+ ```
93
+
94
+ ## Usage example
95
+
96
+ Use `memoryOptions` to scope recall for this request. Set `lastMessages: 5` to limit recency-based recall, and use `semanticRecall` to fetch the `topK: 3` most relevant messages, including `messageRange: 2` neighboring messages for context around each match.
97
+
98
+ ```typescript filename="src/test-pg-agent.ts" showLineNumbers copy
99
+ import "dotenv/config";
100
+
101
+ import { mastra } from "./mastra";
102
+
103
+ const threadId = "123";
104
+ const resourceId = "user-456";
105
+
106
+ const agent = mastra.getAgent("pgAgent");
107
+
108
+ const message = await agent.stream("My name is Mastra", {
109
+ memory: {
110
+ thread: threadId,
111
+ resource: resourceId
112
+ }
113
+ });
114
+
115
+ await message.textStream.pipeTo(new WritableStream());
116
+
117
+ const stream = await agent.stream("What's my name?", {
118
+ memory: {
119
+ thread: threadId,
120
+ resource: resourceId
121
+ },
122
+ memoryOptions: {
123
+ lastMessages: 5,
124
+ semanticRecall: {
125
+ topK: 3,
126
+ messageRange: 2
127
+ }
128
+ }
129
+ });
130
+
131
+ for await (const chunk of stream.textStream) {
132
+ process.stdout.write(chunk);
133
+ }
134
+ ```
135
+
136
+ ## Related
137
+
138
+ - [Calling Agents](../agents/calling-agents.mdx)
@@ -0,0 +1,147 @@
1
+ ---
2
+ title: "Example: Memory with Upstash | Memory | Mastra Docs"
3
+ description: Example for how to use Mastra's memory system with Upstash Redis storage and vector capabilities.
4
+ ---
5
+
6
+ # Memory with Upstash
7
+
8
+ This example demonstrates how to use Mastra's memory system with Upstash as the storage backend.
9
+
10
+ ## Prerequisites
11
+
12
+ This example uses the `openai` model and requires both Upstash Redis and Upstash Vector services. Make sure to add the following to your `.env` file:
13
+
14
+ ```bash filename=".env" copy
15
+ OPENAI_API_KEY=<your-api-key>
16
+ UPSTASH_REDIS_REST_URL=<your-redis-url>
17
+ UPSTASH_REDIS_REST_TOKEN=<your-redis-token>
18
+ UPSTASH_VECTOR_REST_URL=<your-vector-index-url>
19
+ UPSTASH_VECTOR_REST_TOKEN=<your-vector-index-token>
20
+ ```
21
+
22
+ You can get your Upstash credentials by signing up at [upstash.com](https://upstash.com) and creating both Redis and Vector databases.
23
+
24
+ And install the following package:
25
+
26
+ ```bash copy
27
+ npm install @mastra/upstash
28
+ ```
29
+
30
+ ## Adding memory to an agent
31
+
32
+ To add Upstash memory to an agent use the `Memory` class and create a new `storage` key using `UpstashStore` and a new `vector` key using `UpstashVector`. The configuration can point to either a remote service or a local setup.
33
+
34
+ ```typescript filename="src/mastra/agents/example-upstash-agent.ts" showLineNumbers copy
35
+ import { Memory } from "@mastra/memory";
36
+ import { Agent } from "@mastra/core/agent";
37
+ import { openai } from "@ai-sdk/openai";
38
+ import { UpstashStore } from "@mastra/upstash";
39
+
40
+ export const upstashAgent = new Agent({
41
+ name: "upstash-agent",
42
+ instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
43
+ model: openai("gpt-4o"),
44
+ memory: new Memory({
45
+ storage: new UpstashStore({
46
+ url: process.env.UPSTASH_REDIS_REST_URL!,
47
+ token: process.env.UPSTASH_REDIS_REST_TOKEN!
48
+ }),
49
+ options: {
50
+ threads: {
51
+ generateTitle: true
52
+ }
53
+ }
54
+ })
55
+ });
56
+ ```
57
+
58
+
59
+ ## Local embeddings with fastembed
60
+
61
+ Embeddings are numeric vectors used by memory’s `semanticRecall` to retrieve related messages by meaning (not keywords). This setup uses `@mastra/fastembed` to generate vector embeddings.
62
+
63
+ Install `fastembed` to get started:
64
+
65
+ ```bash copy
66
+ npm install @mastra/fastembed
67
+ ```
68
+
69
+ Add the following to your agent:
70
+
71
+ ```typescript filename="src/mastra/agents/example-upstash-agent.ts" showLineNumbers copy
72
+ import { Memory } from "@mastra/memory";
73
+ import { Agent } from "@mastra/core/agent";
74
+ import { openai } from "@ai-sdk/openai";
75
+ import { UpstashStore, UpstashVector } from "@mastra/upstash";
76
+ import { fastembed } from "@mastra/fastembed";
77
+
78
+ export const upstashAgent = new Agent({
79
+ name: "upstash-agent",
80
+ instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
81
+ model: openai("gpt-4o"),
82
+ memory: new Memory({
83
+ storage: new UpstashStore({
84
+ url: process.env.UPSTASH_REDIS_REST_URL!,
85
+ token: process.env.UPSTASH_REDIS_REST_TOKEN!
86
+ }),
87
+ vector: new UpstashVector({
88
+ url: process.env.UPSTASH_VECTOR_REST_URL!,
89
+ token: process.env.UPSTASH_VECTOR_REST_TOKEN!
90
+ }),
91
+ embedder: fastembed,
92
+ options: {
93
+ lastMessages: 10,
94
+ semanticRecall: {
95
+ topK: 3,
96
+ messageRange: 2
97
+ }
98
+ }
99
+ })
100
+ });
101
+ ```
102
+
103
+ ## Usage example
104
+
105
+ Use `memoryOptions` to scope recall for this request. Set `lastMessages: 5` to limit recency-based recall, and use `semanticRecall` to fetch the `topK: 3` most relevant messages, including `messageRange: 2` neighboring messages for context around each match.
106
+
107
+ ```typescript filename="src/test-upstash-agent.ts" showLineNumbers copy
108
+ import "dotenv/config";
109
+
110
+ import { mastra } from "./mastra";
111
+
112
+ const threadId = "123";
113
+ const resourceId = "user-456";
114
+
115
+ const agent = mastra.getAgent("upstashAgent");
116
+
117
+ const message = await agent.stream("My name is Mastra", {
118
+ memory: {
119
+ thread: threadId,
120
+ resource: resourceId
121
+ }
122
+ });
123
+
124
+ await message.textStream.pipeTo(new WritableStream());
125
+
126
+ const stream = await agent.stream("What's my name?", {
127
+ memory: {
128
+ thread: threadId,
129
+ resource: resourceId
130
+ },
131
+ memoryOptions: {
132
+ lastMessages: 5,
133
+ semanticRecall: {
134
+ topK: 3,
135
+ messageRange: 2
136
+ }
137
+ }
138
+ });
139
+
140
+ for await (const chunk of stream.textStream) {
141
+ process.stdout.write(chunk);
142
+ }
143
+ ```
144
+
145
+ ## Related
146
+
147
+ - [Calling Agents](../agents/calling-agents.mdx)