@mastra/memory 1.0.0 → 1.0.1-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +31 -0
- package/dist/{chunk-SG3GRV3O.cjs → chunk-23EXJLET.cjs} +3 -3
- package/dist/chunk-23EXJLET.cjs.map +1 -0
- package/dist/{chunk-KMQS2YEC.js → chunk-BSDWQEU3.js} +3 -3
- package/dist/chunk-BSDWQEU3.js.map +1 -0
- package/dist/{chunk-WC4XBMZT.js → chunk-HJYHDIOC.js} +5 -5
- package/dist/chunk-HJYHDIOC.js.map +1 -0
- package/dist/{chunk-YMNW6DEN.cjs → chunk-LIBOSOHM.cjs} +14 -14
- package/dist/chunk-LIBOSOHM.cjs.map +1 -0
- package/dist/{chunk-ZUQPUTTO.cjs → chunk-O3CS4UGX.cjs} +3 -3
- package/dist/chunk-O3CS4UGX.cjs.map +1 -0
- package/dist/{chunk-QY6BZOPJ.js → chunk-WM6IIUQW.js} +5 -5
- package/dist/chunk-WM6IIUQW.js.map +1 -0
- package/dist/{chunk-MMUHFOCG.js → chunk-YF4R74L2.js} +3 -3
- package/dist/chunk-YF4R74L2.js.map +1 -0
- package/dist/{chunk-W72AYUIF.cjs → chunk-ZSBBXHNM.cjs} +14 -14
- package/dist/chunk-ZSBBXHNM.cjs.map +1 -0
- package/dist/docs/README.md +2 -2
- package/dist/docs/SKILL.md +2 -2
- package/dist/docs/SOURCE_MAP.json +1 -1
- package/dist/docs/agents/01-agent-memory.md +8 -8
- package/dist/docs/agents/02-networks.md +1 -1
- package/dist/docs/agents/03-agent-approval.md +2 -2
- package/dist/docs/agents/04-network-approval.md +2 -2
- package/dist/docs/core/01-reference.md +6 -6
- package/dist/docs/memory/01-overview.md +22 -53
- package/dist/docs/memory/02-storage.md +115 -87
- package/dist/docs/memory/03-message-history.md +249 -0
- package/dist/docs/memory/{03-working-memory.md → 04-working-memory.md} +22 -1
- package/dist/docs/memory/{04-semantic-recall.md → 05-semantic-recall.md} +45 -22
- package/dist/docs/memory/{05-memory-processors.md → 06-memory-processors.md} +4 -4
- package/dist/docs/memory/{06-reference.md → 07-reference.md} +33 -33
- package/dist/docs/processors/01-reference.md +1 -1
- package/dist/docs/storage/01-reference.md +114 -35
- package/dist/docs/vectors/01-reference.md +12 -12
- package/dist/index.cjs +56 -28
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +9 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +44 -16
- package/dist/index.js.map +1 -1
- package/dist/{token-6GSAFR2W-JV3TZR4M.cjs → token-6GSAFR2W-2B4WM6AQ.cjs} +8 -8
- package/dist/token-6GSAFR2W-2B4WM6AQ.cjs.map +1 -0
- package/dist/{token-6GSAFR2W-VLY2XUPA.js → token-6GSAFR2W-ABXTQD64.js} +5 -5
- package/dist/token-6GSAFR2W-ABXTQD64.js.map +1 -0
- package/dist/{token-6GSAFR2W-YCB5SK2Z.cjs → token-6GSAFR2W-TW2P7HCS.cjs} +8 -8
- package/dist/token-6GSAFR2W-TW2P7HCS.cjs.map +1 -0
- package/dist/{token-6GSAFR2W-K2BTU23I.js → token-6GSAFR2W-WGTMOPEU.js} +5 -5
- package/dist/token-6GSAFR2W-WGTMOPEU.js.map +1 -0
- package/dist/token-util-NEHG7TUY-GYFEVMWP.cjs +10 -0
- package/dist/{token-util-NEHG7TUY-7IL6JUVY.cjs.map → token-util-NEHG7TUY-GYFEVMWP.cjs.map} +1 -1
- package/dist/token-util-NEHG7TUY-TV2H7N56.js +8 -0
- package/dist/{token-util-NEHG7TUY-KSXDO2NO.js.map → token-util-NEHG7TUY-TV2H7N56.js.map} +1 -1
- package/dist/token-util-NEHG7TUY-WJZIPNNX.cjs +10 -0
- package/dist/{token-util-NEHG7TUY-HF7KBP2H.cjs.map → token-util-NEHG7TUY-WJZIPNNX.cjs.map} +1 -1
- package/dist/token-util-NEHG7TUY-XQP3QSPX.js +8 -0
- package/dist/{token-util-NEHG7TUY-TIJ3LMSH.js.map → token-util-NEHG7TUY-XQP3QSPX.js.map} +1 -1
- package/dist/tools/working-memory.d.ts +2 -2
- package/dist/tools/working-memory.d.ts.map +1 -1
- package/package.json +9 -9
- package/dist/chunk-KMQS2YEC.js.map +0 -1
- package/dist/chunk-MMUHFOCG.js.map +0 -1
- package/dist/chunk-QY6BZOPJ.js.map +0 -1
- package/dist/chunk-SG3GRV3O.cjs.map +0 -1
- package/dist/chunk-W72AYUIF.cjs.map +0 -1
- package/dist/chunk-WC4XBMZT.js.map +0 -1
- package/dist/chunk-YMNW6DEN.cjs.map +0 -1
- package/dist/chunk-ZUQPUTTO.cjs.map +0 -1
- package/dist/token-6GSAFR2W-JV3TZR4M.cjs.map +0 -1
- package/dist/token-6GSAFR2W-K2BTU23I.js.map +0 -1
- package/dist/token-6GSAFR2W-VLY2XUPA.js.map +0 -1
- package/dist/token-6GSAFR2W-YCB5SK2Z.cjs.map +0 -1
- package/dist/token-util-NEHG7TUY-7IL6JUVY.cjs +0 -10
- package/dist/token-util-NEHG7TUY-HF7KBP2H.cjs +0 -10
- package/dist/token-util-NEHG7TUY-KSXDO2NO.js +0 -8
- package/dist/token-util-NEHG7TUY-TIJ3LMSH.js +0 -8
|
@@ -0,0 +1,249 @@
|
|
|
1
|
+
> Learn how to configure message history in Mastra to store recent messages from the current conversation.
|
|
2
|
+
|
|
3
|
+
# Message History
|
|
4
|
+
|
|
5
|
+
Message history is the most basic and important form of memory. It gives the LLM a view of recent messages in the context window, enabling your agent to reference earlier exchanges and respond coherently.
|
|
6
|
+
|
|
7
|
+
You can also retrieve message history to display past conversations in your UI.
|
|
8
|
+
|
|
9
|
+
> **Note:**
|
|
10
|
+
Each message belongs to a thread (the conversation) and a resource (the user or entity it's associated with). See [Threads and resources](https://mastra.ai/docs/memory/storage#threads-and-resources) for more detail.
|
|
11
|
+
|
|
12
|
+
## Getting started
|
|
13
|
+
|
|
14
|
+
Install the Mastra memory module along with a [storage adapter](https://mastra.ai/docs/memory/storage#supported-providers) for your database. The examples below use `@mastra/libsql`, which stores data locally in a `mastra.db` file.
|
|
15
|
+
|
|
16
|
+
```bash npm2yarn
|
|
17
|
+
npm install @mastra/memory@latest @mastra/libsql@latest
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
Message history requires a storage adapter to persist conversations. Configure storage on your Mastra instance if you haven't already:
|
|
21
|
+
|
|
22
|
+
```typescript title="src/mastra/index.ts"
|
|
23
|
+
import { Mastra } from "@mastra/core";
|
|
24
|
+
import { LibSQLStore } from "@mastra/libsql";
|
|
25
|
+
|
|
26
|
+
export const mastra = new Mastra({
|
|
27
|
+
storage: new LibSQLStore({
|
|
28
|
+
id: 'mastra-storage',
|
|
29
|
+
url: "file:./mastra.db",
|
|
30
|
+
}),
|
|
31
|
+
});
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
Give your agent a `Memory`:
|
|
35
|
+
|
|
36
|
+
```typescript title="src/mastra/agents/your-agent.ts"
|
|
37
|
+
import { Memory } from "@mastra/memory";
|
|
38
|
+
import { Agent } from "@mastra/core/agent";
|
|
39
|
+
|
|
40
|
+
export const agent = new Agent({
|
|
41
|
+
id: "test-agent",
|
|
42
|
+
memory: new Memory({
|
|
43
|
+
options: {
|
|
44
|
+
lastMessages: 10,
|
|
45
|
+
},
|
|
46
|
+
}),
|
|
47
|
+
});
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
When you call the agent, messages are automatically saved to the database. You can specify a `threadId`, `resourceId`, and optional `metadata`:
|
|
51
|
+
|
|
52
|
+
**generate:**
|
|
53
|
+
|
|
54
|
+
```typescript
|
|
55
|
+
await agent.generate("Hello", {
|
|
56
|
+
memory: {
|
|
57
|
+
thread: {
|
|
58
|
+
id: "thread-123",
|
|
59
|
+
title: "Support conversation",
|
|
60
|
+
metadata: { category: "billing" },
|
|
61
|
+
},
|
|
62
|
+
resource: "user-456",
|
|
63
|
+
},
|
|
64
|
+
});
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
**stream:**
|
|
69
|
+
|
|
70
|
+
```typescript
|
|
71
|
+
await agent.stream("Hello", {
|
|
72
|
+
memory: {
|
|
73
|
+
thread: {
|
|
74
|
+
id: "thread-123",
|
|
75
|
+
title: "Support conversation",
|
|
76
|
+
metadata: { category: "billing" },
|
|
77
|
+
},
|
|
78
|
+
resource: "user-456",
|
|
79
|
+
},
|
|
80
|
+
});
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
> **Note:**
|
|
86
|
+
|
|
87
|
+
Threads and messages are created automatically when you call `agent.generate()` or `agent.stream()`, but you can also create them manually with [`createThread()`](https://mastra.ai/reference/memory/createThread) and [`saveMessages()`](https://mastra.ai/reference/memory/memory-class).
|
|
88
|
+
|
|
89
|
+
There are two ways to use this history:
|
|
90
|
+
|
|
91
|
+
- **Automatic inclusion** - Mastra automatically fetches and includes recent messages in the context window. By default, it includes the last 10 messages, keeping agents grounded in the conversation. You can adjust this number with `lastMessages`, but in most cases you don't need to think about it.
|
|
92
|
+
- [**Manual querying**](#querying) - For more control, use the `recall()` function to query threads and messages directly. This lets you choose exactly which memories are included in the context window, or fetch messages to render conversation history in your UI.
|
|
93
|
+
|
|
94
|
+
## Accessing Memory
|
|
95
|
+
|
|
96
|
+
To access memory functions for querying, cloning, or deleting threads and messages, call `getMemory()` on an agent:
|
|
97
|
+
|
|
98
|
+
```typescript
|
|
99
|
+
const agent = mastra.getAgent("weatherAgent");
|
|
100
|
+
const memory = await agent.getMemory();
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
The `Memory` instance gives you access to functions for listing threads, recalling messages, cloning conversations, and more.
|
|
104
|
+
|
|
105
|
+
## Querying
|
|
106
|
+
|
|
107
|
+
Use these methods to fetch threads and messages for displaying conversation history in your UI or for custom memory retrieval logic.
|
|
108
|
+
|
|
109
|
+
> **Note:**
|
|
110
|
+
The memory system does not enforce access control. Before running any query, verify in your application logic that the current user is authorized to access the `resourceId` being queried.
|
|
111
|
+
|
|
112
|
+
### Threads
|
|
113
|
+
|
|
114
|
+
Use [`listThreads()`](https://mastra.ai/reference/memory/listThreads) to retrieve threads for a resource:
|
|
115
|
+
|
|
116
|
+
```typescript
|
|
117
|
+
const result = await memory.listThreads({
|
|
118
|
+
filter: { resourceId: "user-123" },
|
|
119
|
+
perPage: false,
|
|
120
|
+
});
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
Paginate through threads:
|
|
124
|
+
|
|
125
|
+
```typescript
|
|
126
|
+
const result = await memory.listThreads({
|
|
127
|
+
filter: { resourceId: "user-123" },
|
|
128
|
+
page: 0,
|
|
129
|
+
perPage: 10,
|
|
130
|
+
});
|
|
131
|
+
|
|
132
|
+
console.log(result.threads); // thread objects
|
|
133
|
+
console.log(result.hasMore); // more pages available?
|
|
134
|
+
```
|
|
135
|
+
|
|
136
|
+
You can also filter by metadata and control sort order:
|
|
137
|
+
|
|
138
|
+
```typescript
|
|
139
|
+
const result = await memory.listThreads({
|
|
140
|
+
filter: {
|
|
141
|
+
resourceId: "user-123",
|
|
142
|
+
metadata: { status: "active" },
|
|
143
|
+
},
|
|
144
|
+
orderBy: { field: "createdAt", direction: "DESC" },
|
|
145
|
+
});
|
|
146
|
+
```
|
|
147
|
+
|
|
148
|
+
To fetch a single thread by ID, use [`getThreadById()`](https://mastra.ai/reference/memory/getThreadById):
|
|
149
|
+
|
|
150
|
+
```typescript
|
|
151
|
+
const thread = await memory.getThreadById({ threadId: "thread-123" });
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
### Messages
|
|
155
|
+
|
|
156
|
+
Once you have a thread, use [`recall()`](https://mastra.ai/reference/memory/recall) to retrieve its messages. It supports pagination, date filtering, and [semantic search](https://mastra.ai/docs/memory/semantic-recall).
|
|
157
|
+
|
|
158
|
+
Basic recall returns all messages from a thread:
|
|
159
|
+
|
|
160
|
+
```typescript
|
|
161
|
+
const { messages } = await memory.recall({
|
|
162
|
+
threadId: "thread-123",
|
|
163
|
+
perPage: false,
|
|
164
|
+
});
|
|
165
|
+
```
|
|
166
|
+
|
|
167
|
+
Paginate through messages:
|
|
168
|
+
|
|
169
|
+
```typescript
|
|
170
|
+
const { messages } = await memory.recall({
|
|
171
|
+
threadId: "thread-123",
|
|
172
|
+
page: 0,
|
|
173
|
+
perPage: 50,
|
|
174
|
+
});
|
|
175
|
+
```
|
|
176
|
+
|
|
177
|
+
Filter by date range:
|
|
178
|
+
|
|
179
|
+
```typescript
|
|
180
|
+
const { messages } = await memory.recall({
|
|
181
|
+
threadId: "thread-123",
|
|
182
|
+
filter: {
|
|
183
|
+
dateRange: {
|
|
184
|
+
start: new Date("2025-01-01"),
|
|
185
|
+
end: new Date("2025-06-01"),
|
|
186
|
+
},
|
|
187
|
+
},
|
|
188
|
+
});
|
|
189
|
+
```
|
|
190
|
+
|
|
191
|
+
Fetch a single message by ID:
|
|
192
|
+
|
|
193
|
+
```typescript
|
|
194
|
+
const { messages } = await memory.recall({
|
|
195
|
+
threadId: "thread-123",
|
|
196
|
+
include: [{ id: "msg-123" }],
|
|
197
|
+
});
|
|
198
|
+
```
|
|
199
|
+
|
|
200
|
+
Fetch multiple messages by ID with surrounding context:
|
|
201
|
+
|
|
202
|
+
```typescript
|
|
203
|
+
const { messages } = await memory.recall({
|
|
204
|
+
threadId: "thread-123",
|
|
205
|
+
include: [
|
|
206
|
+
{ id: "msg-123" },
|
|
207
|
+
{
|
|
208
|
+
id: "msg-456",
|
|
209
|
+
withPreviousMessages: 3,
|
|
210
|
+
withNextMessages: 1,
|
|
211
|
+
},
|
|
212
|
+
],
|
|
213
|
+
});
|
|
214
|
+
```
|
|
215
|
+
|
|
216
|
+
Search by meaning (see [Semantic recall](https://mastra.ai/docs/memory/semantic-recall) for setup):
|
|
217
|
+
|
|
218
|
+
```typescript
|
|
219
|
+
const { messages } = await memory.recall({
|
|
220
|
+
threadId: "thread-123",
|
|
221
|
+
vectorSearchString: "project deadline discussion",
|
|
222
|
+
threadConfig: {
|
|
223
|
+
semanticRecall: true,
|
|
224
|
+
},
|
|
225
|
+
});
|
|
226
|
+
```
|
|
227
|
+
|
|
228
|
+
### UI format
|
|
229
|
+
|
|
230
|
+
Message queries return `MastraDBMessage[]` format. To display messages in a frontend, you may need to convert them to a format your UI library expects. For example, [`toAISdkV5Messages`](https://mastra.ai/reference/ai-sdk/to-ai-sdk-v5-messages) converts messages to AI SDK UI format.
|
|
231
|
+
|
|
232
|
+
## Thread cloning
|
|
233
|
+
|
|
234
|
+
Thread cloning creates a copy of an existing thread with its messages. This is useful for branching conversations, creating checkpoints before a potentially destructive operation, or testing variations of a conversation.
|
|
235
|
+
|
|
236
|
+
```typescript
|
|
237
|
+
const { thread, clonedMessages } = await memory.cloneThread({
|
|
238
|
+
sourceThreadId: "thread-123",
|
|
239
|
+
title: "Branched conversation",
|
|
240
|
+
});
|
|
241
|
+
```
|
|
242
|
+
|
|
243
|
+
You can filter which messages get cloned (by count or date range), specify custom thread IDs, and use utility methods to inspect clone relationships.
|
|
244
|
+
|
|
245
|
+
See [`cloneThread()`](https://mastra.ai/reference/memory/cloneThread) and [clone utilities](https://mastra.ai/reference/memory/clone-utilities) for the full API.
|
|
246
|
+
|
|
247
|
+
## Deleting messages
|
|
248
|
+
|
|
249
|
+
To remove messages from a thread, use [`deleteMessages()`](https://mastra.ai/reference/memory/deleteMessages). You can delete by message ID or clear all messages from a thread.
|
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
|
|
3
3
|
# Working Memory
|
|
4
4
|
|
|
5
|
-
While [message history](https://mastra.ai/docs/
|
|
5
|
+
While [message history](https://mastra.ai/docs/memory/message-history) and [semantic recall](https://mastra.ai/docs/memory/semantic-recall) help agents remember conversations, working memory allows them to maintain persistent information about users across interactions.
|
|
6
6
|
|
|
7
7
|
Think of it as the agent's active thoughts or scratchpad – the key information they keep available about the user or task. It's similar to how a person would naturally remember someone's name, preferences, or important details during a conversation.
|
|
8
8
|
|
|
@@ -383,6 +383,27 @@ await memory.updateWorkingMemory({
|
|
|
383
383
|
});
|
|
384
384
|
```
|
|
385
385
|
|
|
386
|
+
## Read-Only Working Memory
|
|
387
|
+
|
|
388
|
+
In some scenarios, you may want an agent to have access to working memory data without the ability to modify it. This is useful for:
|
|
389
|
+
|
|
390
|
+
- **Routing agents** that need context but shouldn't update user profiles
|
|
391
|
+
- **Sub agents** in a multi-agent system that should reference but not own the memory
|
|
392
|
+
|
|
393
|
+
To enable read-only mode, set `readOnly: true` in the memory options:
|
|
394
|
+
|
|
395
|
+
```typescript
|
|
396
|
+
const response = await agent.generate("What do you know about me?", {
|
|
397
|
+
memory: {
|
|
398
|
+
thread: "conversation-123",
|
|
399
|
+
resource: "user-alice-456",
|
|
400
|
+
options: {
|
|
401
|
+
readOnly: true, // Working memory is provided but cannot be updated
|
|
402
|
+
},
|
|
403
|
+
},
|
|
404
|
+
});
|
|
405
|
+
```
|
|
406
|
+
|
|
386
407
|
## Examples
|
|
387
408
|
|
|
388
409
|
- [Working memory with template](https://github.com/mastra-ai/mastra/tree/main/examples/memory-with-template)
|
|
@@ -37,9 +37,32 @@ const agent = new Agent({
|
|
|
37
37
|
});
|
|
38
38
|
```
|
|
39
39
|
|
|
40
|
+
## Using the recall() Method
|
|
41
|
+
|
|
42
|
+
While `listMessages` retrieves messages by thread ID with basic pagination, [`recall()`](https://mastra.ai/reference/memory/recall) adds support for **semantic search**. When you need to find messages by meaning rather than just recency, use `recall()` with a `vectorSearchString`:
|
|
43
|
+
|
|
44
|
+
```typescript
|
|
45
|
+
const memory = await agent.getMemory();
|
|
46
|
+
|
|
47
|
+
// Basic recall - similar to listMessages
|
|
48
|
+
const { messages } = await memory!.recall({
|
|
49
|
+
threadId: "thread-123",
|
|
50
|
+
perPage: 50,
|
|
51
|
+
});
|
|
52
|
+
|
|
53
|
+
// Semantic recall - find messages by meaning
|
|
54
|
+
const { messages: relevantMessages } = await memory!.recall({
|
|
55
|
+
threadId: "thread-123",
|
|
56
|
+
vectorSearchString: "What did we discuss about the project deadline?",
|
|
57
|
+
threadConfig: {
|
|
58
|
+
semanticRecall: true,
|
|
59
|
+
},
|
|
60
|
+
});
|
|
61
|
+
```
|
|
62
|
+
|
|
40
63
|
## Storage configuration
|
|
41
64
|
|
|
42
|
-
Semantic recall relies on a [storage and vector db](https://mastra.ai/reference/
|
|
65
|
+
Semantic recall relies on a [storage and vector db](https://mastra.ai/reference/memory/memory-class) to store messages and their embeddings.
|
|
43
66
|
|
|
44
67
|
```ts {8-16}
|
|
45
68
|
import { Memory } from "@mastra/memory";
|
|
@@ -64,23 +87,23 @@ const agent = new Agent({
|
|
|
64
87
|
|
|
65
88
|
Each vector store page below includes installation instructions, configuration parameters, and usage examples:
|
|
66
89
|
|
|
67
|
-
- [Astra](https://mastra.ai/reference/
|
|
68
|
-
- [Chroma](https://mastra.ai/reference/
|
|
69
|
-
- [Cloudflare Vectorize](https://mastra.ai/reference/
|
|
70
|
-
- [Convex](https://mastra.ai/reference/
|
|
71
|
-
- [Couchbase](https://mastra.ai/reference/
|
|
72
|
-
- [DuckDB](https://mastra.ai/reference/
|
|
73
|
-
- [Elasticsearch](https://mastra.ai/reference/
|
|
74
|
-
- [LanceDB](https://mastra.ai/reference/
|
|
75
|
-
- [libSQL](https://mastra.ai/reference/
|
|
76
|
-
- [MongoDB](https://mastra.ai/reference/
|
|
77
|
-
- [OpenSearch](https://mastra.ai/reference/
|
|
78
|
-
- [Pinecone](https://mastra.ai/reference/
|
|
79
|
-
- [PostgreSQL](https://mastra.ai/reference/
|
|
80
|
-
- [Qdrant](https://mastra.ai/reference/
|
|
81
|
-
- [S3 Vectors](https://mastra.ai/reference/
|
|
82
|
-
- [Turbopuffer](https://mastra.ai/reference/
|
|
83
|
-
- [Upstash](https://mastra.ai/reference/
|
|
90
|
+
- [Astra](https://mastra.ai/reference/vectors/astra)
|
|
91
|
+
- [Chroma](https://mastra.ai/reference/vectors/chroma)
|
|
92
|
+
- [Cloudflare Vectorize](https://mastra.ai/reference/vectors/vectorize)
|
|
93
|
+
- [Convex](https://mastra.ai/reference/vectors/convex)
|
|
94
|
+
- [Couchbase](https://mastra.ai/reference/vectors/couchbase)
|
|
95
|
+
- [DuckDB](https://mastra.ai/reference/vectors/duckdb)
|
|
96
|
+
- [Elasticsearch](https://mastra.ai/reference/vectors/elasticsearch)
|
|
97
|
+
- [LanceDB](https://mastra.ai/reference/vectors/lance)
|
|
98
|
+
- [libSQL](https://mastra.ai/reference/vectors/libsql)
|
|
99
|
+
- [MongoDB](https://mastra.ai/reference/vectors/mongodb)
|
|
100
|
+
- [OpenSearch](https://mastra.ai/reference/vectors/opensearch)
|
|
101
|
+
- [Pinecone](https://mastra.ai/reference/vectors/pinecone)
|
|
102
|
+
- [PostgreSQL](https://mastra.ai/reference/vectors/pg)
|
|
103
|
+
- [Qdrant](https://mastra.ai/reference/vectors/qdrant)
|
|
104
|
+
- [S3 Vectors](https://mastra.ai/reference/vectors/s3vectors)
|
|
105
|
+
- [Turbopuffer](https://mastra.ai/reference/vectors/turbopuffer)
|
|
106
|
+
- [Upstash](https://mastra.ai/reference/vectors/upstash)
|
|
84
107
|
|
|
85
108
|
## Recall configuration
|
|
86
109
|
|
|
@@ -106,7 +129,7 @@ const agent = new Agent({
|
|
|
106
129
|
|
|
107
130
|
## Embedder configuration
|
|
108
131
|
|
|
109
|
-
Semantic recall relies on an [embedding model](https://mastra.ai/reference/
|
|
132
|
+
Semantic recall relies on an [embedding model](https://mastra.ai/reference/memory/memory-class) to convert messages into embeddings. Mastra supports embedding models through the model router using `provider/model` strings, or you can use any [embedding model](https://sdk.vercel.ai/docs/ai-sdk-core/embeddings) compatible with the AI SDK.
|
|
110
133
|
|
|
111
134
|
#### Using the Model Router (Recommended)
|
|
112
135
|
|
|
@@ -127,7 +150,7 @@ const agent = new Agent({
|
|
|
127
150
|
Supported embedding models:
|
|
128
151
|
|
|
129
152
|
- **OpenAI**: `text-embedding-3-small`, `text-embedding-3-large`, `text-embedding-ada-002`
|
|
130
|
-
- **Google**: `gemini-embedding-001
|
|
153
|
+
- **Google**: `gemini-embedding-001`
|
|
131
154
|
|
|
132
155
|
The model router automatically handles API key detection from environment variables (`OPENAI_API_KEY`, `GOOGLE_GENERATIVE_AI_API_KEY`).
|
|
133
156
|
|
|
@@ -152,7 +175,7 @@ const agent = new Agent({
|
|
|
152
175
|
To use FastEmbed (a local embedding model), install `@mastra/fastembed`:
|
|
153
176
|
|
|
154
177
|
```bash npm2yarn
|
|
155
|
-
npm install @mastra/fastembed@
|
|
178
|
+
npm install @mastra/fastembed@latest
|
|
156
179
|
```
|
|
157
180
|
|
|
158
181
|
Then configure it in your memory:
|
|
@@ -205,7 +228,7 @@ const agent = new Agent({
|
|
|
205
228
|
});
|
|
206
229
|
```
|
|
207
230
|
|
|
208
|
-
For detailed information about index configuration options and performance tuning, see the [PgVector configuration guide](https://mastra.ai/reference/
|
|
231
|
+
For detailed information about index configuration options and performance tuning, see the [PgVector configuration guide](https://mastra.ai/reference/vectors/pg#index-configuration-guide).
|
|
209
232
|
|
|
210
233
|
## Disabling
|
|
211
234
|
|
|
@@ -6,7 +6,7 @@ Memory processors transform and filter messages as they pass through an agent wi
|
|
|
6
6
|
|
|
7
7
|
When memory is enabled on an agent, Mastra adds memory processors to the agent's processor pipeline. These processors retrieve message history, working memory, and semantically relevant messages, then persist new messages after the model responds.
|
|
8
8
|
|
|
9
|
-
Memory processors are [processors](https://mastra.ai/docs/
|
|
9
|
+
Memory processors are [processors](https://mastra.ai/docs/agents/processors) that operate specifically on memory-related messages and state.
|
|
10
10
|
|
|
11
11
|
## Built-in Memory Processors
|
|
12
12
|
|
|
@@ -311,8 +311,8 @@ Both scenarios are safe - guardrails prevent inappropriate content from being pe
|
|
|
311
311
|
|
|
312
312
|
## Related documentation
|
|
313
313
|
|
|
314
|
-
- [Processors](https://mastra.ai/docs/
|
|
315
|
-
- [Guardrails](https://mastra.ai/docs/
|
|
316
|
-
- [Memory Overview](https://mastra.ai/docs/
|
|
314
|
+
- [Processors](https://mastra.ai/docs/agents/processors) - General processor concepts and custom processor creation
|
|
315
|
+
- [Guardrails](https://mastra.ai/docs/agents/guardrails) - Security and validation processors
|
|
316
|
+
- [Memory Overview](https://mastra.ai/docs/memory/overview) - Memory types and configuration
|
|
317
317
|
|
|
318
318
|
When creating custom processors avoid mutating the input `messages` array or its objects directly.
|
|
@@ -221,10 +221,10 @@ async function manageClones() {
|
|
|
221
221
|
|
|
222
222
|
### Related
|
|
223
223
|
|
|
224
|
-
- [cloneThread](https://mastra.ai/reference/
|
|
225
|
-
- [Memory Class Reference](https://mastra.ai/reference/
|
|
226
|
-
- [getThreadById](https://mastra.ai/reference/
|
|
227
|
-
- [listThreads](https://mastra.ai/reference/
|
|
224
|
+
- [cloneThread](https://mastra.ai/reference/memory/cloneThread)
|
|
225
|
+
- [Memory Class Reference](https://mastra.ai/reference/memory/memory-class)
|
|
226
|
+
- [getThreadById](https://mastra.ai/reference/memory/getThreadById)
|
|
227
|
+
- [listThreads](https://mastra.ai/reference/memory/listThreads)
|
|
228
228
|
|
|
229
229
|
---
|
|
230
230
|
|
|
@@ -331,11 +331,11 @@ const results = await memory.recall({
|
|
|
331
331
|
|
|
332
332
|
### Related
|
|
333
333
|
|
|
334
|
-
- [Memory Class Reference](https://mastra.ai/reference/
|
|
335
|
-
- [createThread](https://mastra.ai/reference/
|
|
336
|
-
- [Clone Utility Methods](https://mastra.ai/reference/
|
|
337
|
-
- [recall](https://mastra.ai/reference/
|
|
338
|
-
- [Semantic Recall](https://mastra.ai/docs/
|
|
334
|
+
- [Memory Class Reference](https://mastra.ai/reference/memory/memory-class)
|
|
335
|
+
- [createThread](https://mastra.ai/reference/memory/createThread)
|
|
336
|
+
- [Clone Utility Methods](https://mastra.ai/reference/memory/clone-utilities)
|
|
337
|
+
- [recall](https://mastra.ai/reference/memory/recall)
|
|
338
|
+
- [Semantic Recall](https://mastra.ai/docs/memory/semantic-recall)
|
|
339
339
|
|
|
340
340
|
---
|
|
341
341
|
|
|
@@ -384,11 +384,11 @@ console.log(response.text);
|
|
|
384
384
|
|
|
385
385
|
### Related
|
|
386
386
|
|
|
387
|
-
- [Memory Class Reference](https://mastra.ai/reference/
|
|
388
|
-
- [Getting Started with Memory](https://mastra.ai/docs/
|
|
389
|
-
- [getThreadById](https://mastra.ai/reference/
|
|
390
|
-
- [listThreads](https://mastra.ai/reference/
|
|
391
|
-
- [recall](https://mastra.ai/reference/
|
|
387
|
+
- [Memory Class Reference](https://mastra.ai/reference/memory/memory-class)
|
|
388
|
+
- [Getting Started with Memory](https://mastra.ai/docs/memory/overview) (Covers threads concept)
|
|
389
|
+
- [getThreadById](https://mastra.ai/reference/memory/getThreadById)
|
|
390
|
+
- [listThreads](https://mastra.ai/reference/memory/listThreads)
|
|
391
|
+
- [recall](https://mastra.ai/reference/memory/recall)
|
|
392
392
|
|
|
393
393
|
---
|
|
394
394
|
|
|
@@ -410,10 +410,10 @@ await memory?.getThreadById({ threadId: "thread-123" });
|
|
|
410
410
|
|
|
411
411
|
### Related
|
|
412
412
|
|
|
413
|
-
- [Memory Class Reference](https://mastra.ai/reference/
|
|
414
|
-
- [Getting Started with Memory](https://mastra.ai/docs/
|
|
415
|
-
- [createThread](https://mastra.ai/reference/
|
|
416
|
-
- [listThreads](https://mastra.ai/reference/
|
|
413
|
+
- [Memory Class Reference](https://mastra.ai/reference/memory/memory-class)
|
|
414
|
+
- [Getting Started with Memory](https://mastra.ai/docs/memory/overview) (Covers threads concept)
|
|
415
|
+
- [createThread](https://mastra.ai/reference/memory/createThread)
|
|
416
|
+
- [listThreads](https://mastra.ai/reference/memory/listThreads)
|
|
417
417
|
|
|
418
418
|
---
|
|
419
419
|
|
|
@@ -552,10 +552,10 @@ await memory.listThreads({
|
|
|
552
552
|
|
|
553
553
|
## Related
|
|
554
554
|
|
|
555
|
-
- [Memory Class Reference](https://mastra.ai/reference/
|
|
556
|
-
- [Getting Started with Memory](https://mastra.ai/docs/
|
|
557
|
-
- [createThread](https://mastra.ai/reference/
|
|
558
|
-
- [getThreadById](https://mastra.ai/reference/
|
|
555
|
+
- [Memory Class Reference](https://mastra.ai/reference/memory/memory-class)
|
|
556
|
+
- [Getting Started with Memory](https://mastra.ai/docs/memory/overview)
|
|
557
|
+
- [createThread](https://mastra.ai/reference/memory/createThread)
|
|
558
|
+
- [getThreadById](https://mastra.ai/reference/memory/getThreadById)
|
|
559
559
|
|
|
560
560
|
---
|
|
561
561
|
|
|
@@ -674,14 +674,14 @@ export const agent = new Agent({
|
|
|
674
674
|
|
|
675
675
|
### Related
|
|
676
676
|
|
|
677
|
-
- [Getting Started with Memory](https://mastra.ai/docs/
|
|
678
|
-
- [Semantic Recall](https://mastra.ai/docs/
|
|
679
|
-
- [Working Memory](https://mastra.ai/docs/
|
|
680
|
-
- [Memory Processors](https://mastra.ai/docs/
|
|
681
|
-
- [createThread](https://mastra.ai/reference/
|
|
682
|
-
- [recall](https://mastra.ai/reference/
|
|
683
|
-
- [getThreadById](https://mastra.ai/reference/
|
|
684
|
-
- [listThreads](https://mastra.ai/reference/
|
|
685
|
-
- [deleteMessages](https://mastra.ai/reference/
|
|
686
|
-
- [cloneThread](https://mastra.ai/reference/
|
|
687
|
-
- [Clone Utility Methods](https://mastra.ai/reference/
|
|
677
|
+
- [Getting Started with Memory](https://mastra.ai/docs/memory/overview)
|
|
678
|
+
- [Semantic Recall](https://mastra.ai/docs/memory/semantic-recall)
|
|
679
|
+
- [Working Memory](https://mastra.ai/docs/memory/working-memory)
|
|
680
|
+
- [Memory Processors](https://mastra.ai/docs/memory/memory-processors)
|
|
681
|
+
- [createThread](https://mastra.ai/reference/memory/createThread)
|
|
682
|
+
- [recall](https://mastra.ai/reference/memory/recall)
|
|
683
|
+
- [getThreadById](https://mastra.ai/reference/memory/getThreadById)
|
|
684
|
+
- [listThreads](https://mastra.ai/reference/memory/listThreads)
|
|
685
|
+
- [deleteMessages](https://mastra.ai/reference/memory/deleteMessages)
|
|
686
|
+
- [cloneThread](https://mastra.ai/reference/memory/cloneThread)
|
|
687
|
+
- [Clone Utility Methods](https://mastra.ai/reference/memory/clone-utilities)
|