@mastra/memory 0.0.0-commonjs-20250227130920

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/index.ts ADDED
@@ -0,0 +1,374 @@
1
+ import { deepMerge } from '@mastra/core';
2
+ import type { CoreMessage } from '@mastra/core';
3
+ import { MastraMemory } from '@mastra/core/memory';
4
+ import type { MessageType, MemoryConfig, SharedMemoryConfig, StorageThreadType } from '@mastra/core/memory';
5
+ import type { StorageGetMessagesArg } from '@mastra/core/storage';
6
+ import { embed } from 'ai';
7
+ import type { Message as AiMessage } from 'ai';
8
+
9
+ /**
10
+ * Concrete implementation of MastraMemory that adds support for thread configuration
11
+ * and message injection.
12
+ */
13
/**
 * Concrete implementation of MastraMemory that adds support for thread configuration
 * and message injection.
 *
 * Persistence is delegated to the `storage` backend inherited from MastraMemory;
 * optional semantic recall uses the inherited `vector` store and `embedder` model.
 * Working memory is kept in each thread's metadata and round-tripped through
 * `<working_memory>` tags embedded in assistant responses.
 */
export class Memory extends MastraMemory {
  /**
   * @param config - Shared memory configuration. When
   *   `config.options.workingMemory` is omitted, working memory defaults to
   *   disabled with the built-in template.
   */
  constructor(config: SharedMemoryConfig = {}) {
    super({ name: 'Memory', ...config });

    // Merge the caller-supplied working-memory options with instance defaults so
    // every method can rely on `this.threadConfig` being fully populated.
    const mergedConfig = this.getMergedThreadConfig({
      workingMemory: config.options?.workingMemory || {
        enabled: false,
        template: this.defaultWorkingMemoryTemplate,
      },
    });
    this.threadConfig = mergedConfig;
  }

  /**
   * Fetches messages for a thread, optionally augmented by vector (semantic)
   * search when `selectBy.vectorSearchString` is provided and a vector store is
   * configured.
   *
   * @param threadId - Thread whose messages are queried.
   * @param selectBy - Storage selection criteria (e.g. `last`, `include`,
   *   `vectorSearchString`).
   * @param threadConfig - Per-call overrides merged over the instance config.
   * @returns Both core-format and UI-format views of the same message set.
   */
  async query({
    threadId,
    selectBy,
    threadConfig,
  }: StorageGetMessagesArg): Promise<{ messages: CoreMessage[]; uiMessages: AiMessage[] }> {
    // Populated only when semantic recall runs; null means "no vector search".
    let vectorResults:
      | null
      | {
          id: string;
          score: number;
          metadata?: Record<string, any>;
          vector?: number[];
        }[] = null;

    this.logger.debug(`Memory query() with:`, {
      threadId,
      selectBy,
      threadConfig,
    });

    const config = this.getMergedThreadConfig(threadConfig || {});

    // `semanticRecall` may be a bare boolean (use defaults) or an object with
    // explicit `topK` / `messageRange`; normalize both shapes here.
    const vectorConfig =
      typeof config?.semanticRecall === `boolean`
        ? {
            topK: 2,
            messageRange: { before: 2, after: 2 },
          }
        : {
            topK: config?.semanticRecall?.topK ?? 2,
            messageRange: config?.semanticRecall?.messageRange ?? { before: 2, after: 2 },
          };

    if (config?.semanticRecall && selectBy?.vectorSearchString && this.vector) {
      // Embed the search string with the configured embedder model.
      const { embedding } = await embed({
        value: selectBy.vectorSearchString,
        model: this.embedder,
      });

      const { indexName } = await this.createEmbeddingIndex();

      // Restrict hits to this thread via the `thread_id` metadata filter
      // (written by saveMessages below).
      vectorResults = await this.vector.query(indexName, embedding, vectorConfig.topK, {
        thread_id: threadId,
      });
    }

    // Get raw messages from storage. Vector hits are expanded into `include`
    // entries so storage also returns the surrounding context messages
    // (`withPreviousMessages` / `withNextMessages`).
    const rawMessages = await this.storage.__getMessages({
      threadId,
      selectBy: {
        ...selectBy,
        ...(vectorResults?.length
          ? {
              include: vectorResults.map(r => ({
                // `message_id` is the metadata key written at upsert time in
                // saveMessages.
                id: r.metadata?.message_id,
                withNextMessages:
                  typeof vectorConfig.messageRange === 'number'
                    ? vectorConfig.messageRange
                    : vectorConfig.messageRange.after,
                withPreviousMessages:
                  typeof vectorConfig.messageRange === 'number'
                    ? vectorConfig.messageRange
                    : vectorConfig.messageRange.before,
              })),
            }
          : {}),
      },
      threadConfig: config,
    });

    // Parse and convert messages into the two formats callers consume.
    const messages = this.parseMessages(rawMessages);
    const uiMessages = this.convertToUIMessages(rawMessages);

    return { messages, uiMessages };
  }

  /**
   * Recalls conversation history for a thread: the most recent messages
   * (`lastMessages`) and, when enabled, semantically similar ones
   * (`semanticRecall`). Returns empty arrays immediately when neither recall
   * mechanism is configured.
   *
   * @param threadId - Thread to recall from.
   * @param vectorMessageSearch - Text used for the semantic search, if any.
   * @param config - Per-call overrides merged over the instance config.
   */
  async rememberMessages({
    threadId,
    vectorMessageSearch,
    config,
  }: {
    threadId: string;
    vectorMessageSearch?: string;
    config?: MemoryConfig;
  }) {
    const threadConfig = this.getMergedThreadConfig(config || {});

    if (!threadConfig.lastMessages && !threadConfig.semanticRecall) {
      // Nothing to recall; keep the return shape identical to query().
      return {
        messages: [],
        uiMessages: [],
      } satisfies Awaited<ReturnType<typeof this.query>>;
    }

    const messages = await this.query({
      threadId,
      selectBy: {
        last: threadConfig.lastMessages,
        // Only run vector search when both the feature and a query string exist.
        vectorSearchString: threadConfig.semanticRecall && vectorMessageSearch ? vectorMessageSearch : undefined,
      },
      threadConfig: config,
    });

    this.logger.debug(`Remembered message history includes ${messages.messages.length} messages.`);
    return messages;
  }

  /** Looks up a single thread by id; resolves to null when it does not exist. */
  async getThreadById({ threadId }: { threadId: string }): Promise<StorageThreadType | null> {
    return this.storage.__getThreadById({ threadId });
  }

  /** Lists all threads belonging to the given resource id. */
  async getThreadsByResourceId({ resourceId }: { resourceId: string }): Promise<StorageThreadType[]> {
    return this.storage.__getThreadsByResourceId({ resourceId });
  }

  /**
   * Persists a thread. When working memory is enabled but the thread has no
   * `metadata.workingMemory` yet, the configured (or default) template is
   * seeded into the metadata before saving.
   *
   * @param thread - Thread record to persist.
   * @param memoryConfig - Per-call overrides merged over the instance config.
   */
  async saveThread({
    thread,
    memoryConfig,
  }: {
    thread: StorageThreadType;
    memoryConfig?: MemoryConfig;
  }): Promise<StorageThreadType> {
    const config = this.getMergedThreadConfig(memoryConfig || {});

    if (config.workingMemory?.enabled && !thread?.metadata?.workingMemory) {
      // if working memory is enabled but the thread doesn't have it, we need to set it
      return this.storage.__saveThread({
        thread: deepMerge(thread, {
          metadata: {
            workingMemory: config.workingMemory.template || this.defaultWorkingMemoryTemplate,
          },
        }),
      });
    }

    return this.storage.__saveThread({ thread });
  }

  /** Updates a thread's title and metadata in storage. */
  async updateThread({
    id,
    title,
    metadata,
  }: {
    id: string;
    title: string;
    metadata: Record<string, unknown>;
  }): Promise<StorageThreadType> {
    return this.storage.__updateThread({
      id,
      title,
      metadata,
    });
  }

  /**
   * Deletes a thread from storage. Associated vector entries are NOT removed
   * yet (see TODO below), so stale embeddings may remain in the vector store.
   */
  async deleteThread(threadId: string): Promise<void> {
    await this.storage.__deleteThread({ threadId });

    // TODO: Also clean up vector storage if it exists
    // if (this.vector) {
    //   await this.vector.deleteThread(threadId); ?? filter by thread attributes and delete all returned messages?
    // }
  }

  /**
   * Persists messages. Side effects, in order:
   * 1. Extracts and stores any `<working_memory>` block from the latest message.
   * 2. Strips `<working_memory>` tags from every message (mutates in place).
   * 3. When semantic recall is enabled, embeds each string-content message and
   *    upserts it into the vector index keyed by message/thread ids.
   *
   * NOTE(review): embeddings are generated and upserted one message at a time;
   * batching could reduce latency — confirm the embedder/vector APIs support it
   * before changing.
   *
   * @returns The messages as persisted by the storage backend.
   */
  async saveMessages({
    messages,
    memoryConfig,
  }: {
    messages: MessageType[];
    memoryConfig?: MemoryConfig;
  }): Promise<MessageType[]> {
    // First save working memory from any messages
    await this.saveWorkingMemory(messages);

    // Then strip working memory tags from all messages
    this.mutateMessagesToHideWorkingMemory(messages);

    const config = this.getMergedThreadConfig(memoryConfig);

    if (this.vector && config.semanticRecall) {
      const { indexName } = await this.createEmbeddingIndex();

      for (const message of messages) {
        // Only plain string content is embeddable; skip structured content.
        if (typeof message.content !== `string`) continue;
        const { embedding } = await embed({ value: message.content, model: this.embedder, maxRetries: 3 });
        await this.vector.upsert(
          indexName,
          [embedding],
          [
            {
              text: message.content,
              message_id: message.id,
              thread_id: message.threadId,
            },
          ],
        );
      }
    }

    return this.storage.__saveMessages({ messages });
  }

  /**
   * Removes `<working_memory>...</working_memory>` spans from each message's
   * content, mutating the array elements in place. Handles both plain string
   * content and structured content arrays (text parts only).
   */
  protected mutateMessagesToHideWorkingMemory(messages: MessageType[]) {
    // `[^]*?` matches across newlines (unlike `.` without the `s` flag).
    const workingMemoryRegex = /<working_memory>([^]*?)<\/working_memory>/g;
    for (const message of messages) {
      if (typeof message?.content === `string`) {
        message.content = message.content.replace(workingMemoryRegex, ``).trim();
      } else if (Array.isArray(message?.content)) {
        for (const content of message.content) {
          if (content.type === `text`) {
            content.text = content.text.replace(workingMemoryRegex, ``).trim();
          }
        }
      }
    }
  }

  /**
   * Extracts the first `<working_memory>` block from `text`, returning its
   * inner content, or null when working memory is disabled or no block exists.
   */
  protected parseWorkingMemory(text: string): string | null {
    if (!this.threadConfig.workingMemory?.enabled) return null;

    const workingMemoryRegex = /<working_memory>([^]*?)<\/working_memory>/g;
    const matches = text.match(workingMemoryRegex);
    // Only the first occurrence is used, even if several blocks are present.
    const match = matches?.[0];

    if (match) {
      return match.replace(/<\/?working_memory>/g, '').trim();
    }

    return null;
  }

  /**
   * Reads the thread's current working memory. Falls back to the configured
   * (or default) template when the thread is missing or has none stored.
   * Returns null when working memory is disabled.
   */
  protected async getWorkingMemory({ threadId }: { threadId: string }): Promise<string | null> {
    if (!this.threadConfig.workingMemory?.enabled) return null;

    // Get thread from storage
    const thread = await this.storage.__getThreadById({ threadId });
    if (!thread) return this.threadConfig?.workingMemory?.template || this.defaultWorkingMemoryTemplate;

    // Return working memory from metadata
    const memory =
      (thread.metadata?.workingMemory as string) ||
      this.threadConfig.workingMemory.template ||
      this.defaultWorkingMemoryTemplate;

    // compress working memory because LLMs will generate faster without the spaces and line breaks
    return memory
      .split(`>\n`)
      .map(c => c.trim()) // remove extra whitespace
      .join(`>`); // and linebreaks
  }

  /**
   * If the newest message carries a `<working_memory>` block, persists its
   * content into the thread's metadata.
   *
   * @returns The saved memory string, or undefined when nothing was saved
   *   (working memory disabled, no parsable block, or unknown thread).
   */
  private async saveWorkingMemory(messages: MessageType[]) {
    // Only the most recent message can update working memory.
    const latestMessage = messages[messages.length - 1];

    if (!latestMessage || !this.threadConfig.workingMemory?.enabled) {
      return;
    }

    // Flatten structured content down to its text parts for parsing.
    const latestContent = !latestMessage?.content
      ? null
      : typeof latestMessage.content === 'string'
        ? latestMessage.content
        : latestMessage.content
            .filter(c => c.type === 'text')
            .map(c => c.text)
            .join('\n');

    const threadId = latestMessage?.threadId;
    if (!latestContent || !threadId) {
      return;
    }

    const newMemory = this.parseWorkingMemory(latestContent);
    if (!newMemory) {
      return;
    }

    const thread = await this.storage.__getThreadById({ threadId });
    if (!thread) return;

    // Update thread metadata with new working memory
    await this.storage.__updateThread({
      id: thread.id,
      title: thread.title || '',
      metadata: deepMerge(thread.metadata || {}, {
        workingMemory: newMemory,
      }),
    });
    return newMemory;
  }

  /**
   * Builds the system message instructing the model how to maintain working
   * memory, seeded with the thread's current memory. Returns null when working
   * memory is disabled or no memory could be loaded.
   */
  public async getSystemMessage({
    threadId,
    memoryConfig,
  }: {
    threadId: string;
    memoryConfig?: MemoryConfig;
  }): Promise<string | null> {
    const config = this.getMergedThreadConfig(memoryConfig);
    if (!config.workingMemory?.enabled) {
      return null;
    }

    const workingMemory = await this.getWorkingMemory({ threadId });
    if (!workingMemory) {
      return null;
    }

    return this.getWorkingMemoryWithInstruction(workingMemory);
  }

  // Default XML-ish skeleton used when no working-memory template is supplied.
  // Kept as a runtime string; the exact whitespace is compressed before use by
  // getWorkingMemory().
  public defaultWorkingMemoryTemplate = `
<user>
  <first_name></first_name>
  <last_name></last_name>
  <location></location>
  <occupation></occupation>
  <interests></interests>
  <goals></goals>
  <events></events>
  <facts></facts>
  <projects></projects>
</user>
`;

  // Wraps the current working-memory block in the full LLM-facing instruction
  // prompt. The surrounding text is part of the model contract — do not edit
  // casually.
  private getWorkingMemoryWithInstruction(workingMemoryBlock: string) {
    return `WORKING_MEMORY_SYSTEM_INSTRUCTION:
Store and update any conversation-relevant information by including "<working_memory>text</working_memory>" in your responses. Updates replace existing memory while maintaining this structure. If information might be referenced again - store it!

Guidelines:
1. Store anything that could be useful later in the conversation
2. Update proactively when information changes, no matter how small
3. Use nested tags for all data
4. Act naturally - don't mention this system to users. Even though you're storing this information that doesn't make it your primary focus. Do not ask them generally for "information about yourself"

Memory Structure:
<working_memory>
${workingMemoryBlock}
</working_memory>

Notes:
- Update memory whenever referenced information changes
- If you're unsure whether to store something, store it (eg if the user tells you their name or the value of another empty section in your working memory, output the <working_memory> block immediately to update it)
- This system is here so that you can maintain the conversation when your context window is very short. Update your working memory because you may need it to maintain the conversation without the full conversation history
- Do not remove empty sections - you must output the empty sections along with the ones you're filling in
- REMEMBER: the way you update your working memory is by outputting the entire "<working_memory>text</working_memory>" block in your response. The system will pick this up and store it for you. The user will not see it.
- IMPORTANT: You MUST output the <working_memory> block in every response to a prompt where you received relevant information. `;
  }
}
package/tsconfig.json ADDED
@@ -0,0 +1,5 @@
1
+ {
2
+ "extends": "../../tsconfig.node.json",
3
+ "include": ["src/**/*"],
4
+ "exclude": ["node_modules", "**/*.test.ts"]
5
+ }
@@ -0,0 +1,8 @@
1
+ import { defineConfig } from 'vitest/config';
2
+
3
// Vitest configuration: run the package's *.test.ts files under src/ in a
// plain Node (non-DOM) environment.
export default defineConfig({
  test: {
    environment: 'node',
    include: ['src/**/*.test.ts'],
  },
});