@mastra/memory 0.12.2-alpha.2 → 0.12.3-alpha.0
This diff compares publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/.turbo/turbo-build.log +2 -2
- package/CHANGELOG.md +48 -0
- package/dist/index.cjs +5 -3
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +6 -4
- package/dist/index.js.map +1 -1
- package/integration-tests-v5/.env.test +5 -0
- package/integration-tests-v5/CHANGELOG.md +25 -0
- package/integration-tests-v5/docker-compose.yml +39 -0
- package/integration-tests-v5/node_modules/.bin/next +21 -0
- package/integration-tests-v5/node_modules/.bin/tsc +21 -0
- package/integration-tests-v5/node_modules/.bin/tsserver +21 -0
- package/integration-tests-v5/node_modules/.bin/vitest +21 -0
- package/integration-tests-v5/package.json +43 -0
- package/integration-tests-v5/src/agent-memory.test.ts +621 -0
- package/integration-tests-v5/src/mastra/agents/weather.ts +75 -0
- package/integration-tests-v5/src/mastra/index.ts +13 -0
- package/integration-tests-v5/src/mastra/tools/weather.ts +24 -0
- package/integration-tests-v5/src/processors.test.ts +600 -0
- package/integration-tests-v5/src/streaming-memory.test.ts +367 -0
- package/integration-tests-v5/src/test-utils.ts +146 -0
- package/integration-tests-v5/src/working-memory.test.ts +1064 -0
- package/integration-tests-v5/tsconfig.json +13 -0
- package/integration-tests-v5/vitest.config.ts +18 -0
- package/package.json +7 -6
- package/src/index.ts +4 -2
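
Most of the new surface in this release is the integration-tests-v5 suite, which exercises the AI SDK v5 path (agent.generateVNext) against Memory. As a reading aid before the full file diffs below, here is a minimal sketch of the pattern the tests repeat; the agent name, model id, database path, and thread/resource ids are illustrative, while the call shapes (new Memory({ options }), memory: { thread, resource }) are taken directly from the tests in this diff.

import { openai } from '@ai-sdk/openai';
import { Agent } from '@mastra/core/agent';
import { LibSQLStore } from '@mastra/libsql';
import { Memory } from '@mastra/memory';

// Illustrative names; the call shapes mirror the tests in this diff.
const agent = new Agent({
  name: 'example',
  instructions: 'You are a helpful agent.',
  model: openai('gpt-4o-mini'),
  memory: new Memory({
    options: { lastMessages: 10 }, // recall the last 10 messages per thread
    storage: new LibSQLStore({ url: 'file:example.db' }),
  }),
});

// generateVNext persists the exchange under the given thread/resource pair,
// so a later call with the same pair sees the prior messages.
await agent.generateVNext('Hello!', {
  memory: { thread: 'thread-1', resource: 'user-1' },
});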

package/integration-tests-v5/src/agent-memory.test.ts
@@ -0,0 +1,621 @@
+import { randomUUID } from 'node:crypto';
+import { google } from '@ai-sdk/google';
+import { openai } from '@ai-sdk/openai';
+import { Mastra } from '@mastra/core';
+import type { CoreMessage } from '@mastra/core';
+import { Agent } from '@mastra/core/agent';
+import type { UIMessageWithMetadata } from '@mastra/core/agent';
+import { RuntimeContext } from '@mastra/core/runtime-context';
+import { MockStore } from '@mastra/core/storage';
+import { fastembed } from '@mastra/fastembed';
+import { LibSQLStore, LibSQLVector } from '@mastra/libsql';
+import { Memory } from '@mastra/memory';
+import { describe, expect, it } from 'vitest';
+import { z } from 'zod';
+import { memoryProcessorAgent, weatherAgent } from './mastra/agents/weather';
+import { weatherTool, weatherToolCity } from './mastra/tools/weather';
+
+describe('Agent Memory Tests', () => {
+  const dbFile = 'file:mastra-agent.db';
+
+  it(`inherits storage from Mastra instance`, async () => {
+    const agent = new Agent({
+      name: 'test',
+      instructions: '',
+      model: openai('gpt-4o-mini'),
+      memory: new Memory({
+        options: {
+          lastMessages: 10,
+        },
+      }),
+    });
+    const mastra = new Mastra({
+      agents: {
+        agent,
+      },
+      storage: new LibSQLStore({
+        url: dbFile,
+      }),
+    });
+    const agentMemory = (await mastra.getAgent('agent').getMemory())!;
+    await expect(agentMemory.query({ threadId: '1' })).resolves.not.toThrow();
+    const agentMemory2 = (await agent.getMemory())!;
+    await expect(agentMemory2.query({ threadId: '1' })).resolves.not.toThrow();
+  });
+
+  it('should inherit storage from Mastra instance when workingMemory is enabled', async () => {
+    const mastra = new Mastra({
+      storage: new LibSQLStore({
+        url: dbFile,
+      }),
+      agents: {
+        testAgent: new Agent({
+          name: 'Test Agent',
+          instructions: 'You are a test agent',
+          model: openai('gpt-4o-mini'),
+          memory: new Memory({
+            options: {
+              workingMemory: {
+                enabled: true,
+              },
+            },
+          }),
+        }),
+      },
+    });
+
+    const agent = mastra.getAgent('testAgent');
+    const memory = await agent.getMemory();
+    expect(memory).toBeDefined();
+
+    // Should be able to create a thread and use working memory
+    const thread = await memory!.createThread({
+      resourceId: 'test-resource',
+      title: 'Test Thread',
+    });
+
+    expect(thread).toBeDefined();
+    expect(thread.id).toBeDefined();
+
+    // Should be able to update working memory without error
+    await memory!.updateWorkingMemory({
+      threadId: thread.id,
+      resourceId: 'test-resource',
+      workingMemory: '# Test Working Memory\n- Name: Test User',
+    });
+
+    // Should be able to retrieve working memory
+    const workingMemoryData = await memory!.getWorkingMemory({
+      threadId: thread.id,
+      resourceId: 'test-resource',
+    });
+
+    expect(workingMemoryData).toBe('# Test Working Memory\n- Name: Test User');
+  });
+
+  it('should work with resource-scoped working memory when storage supports it', async () => {
+    const mastra = new Mastra({
+      storage: new LibSQLStore({
+        url: dbFile,
+      }),
+      agents: {
+        testAgent: new Agent({
+          name: 'Test Agent',
+          instructions: 'You are a test agent',
+          model: openai('gpt-4o-mini'),
+          memory: new Memory({
+            options: {
+              workingMemory: {
+                enabled: true,
+                scope: 'resource',
+              },
+            },
+          }),
+        }),
+      },
+    });
+
+    const agent = mastra.getAgent('testAgent');
+    const memory = await agent.getMemory();
+
+    expect(memory).toBeDefined();
+
+    // Create a thread
+    const thread = await memory!.createThread({
+      resourceId: 'test-resource',
+      title: 'Test Thread',
+    });
+
+    // Update resource-scoped working memory
+    await memory!.updateWorkingMemory({
+      threadId: thread.id,
+      resourceId: 'test-resource',
+      workingMemory: '# Resource Memory\n- Shared across threads',
+    });
+
+    const workingMemoryData = await memory!.getWorkingMemory({
+      threadId: thread.id,
+      resourceId: 'test-resource',
+    });
+
+    expect(workingMemoryData).toBe('# Resource Memory\n- Shared across threads');
+  });
+
+  describe('Agent memory message persistence', () => {
+    // making a separate memory for agent to avoid conflicts with other tests
+    const memory = new Memory({
+      options: {
+        lastMessages: 10,
+        semanticRecall: true,
+      },
+      storage: new LibSQLStore({
+        url: dbFile,
+      }),
+      vector: new LibSQLVector({
+        connectionUrl: dbFile,
+      }),
+      embedder: fastembed,
+    });
+    const agent = new Agent({
+      name: 'test',
+      instructions:
+        'You are a weather agent. When asked about weather in any city, use the get_weather tool with the city name as the postal code.',
+      model: openai('gpt-4o'),
+      memory,
+      tools: { get_weather: weatherTool },
+    });
+    it('should save all user messages (not just the most recent)', async () => {
+      const threadId = randomUUID();
+      const resourceId = 'all-user-messages';
+
+      // Send multiple user messages
+      await agent.generateVNext(
+        [
+          { role: 'user', content: 'First message' },
+          { role: 'user', content: 'Second message' },
+        ],
+        {
+          threadId,
+          resourceId,
+        },
+      );
+
+      // Fetch messages from memory
+      const agentMemory = (await agent.getMemory())!;
+      const { messages, uiMessages } = await agentMemory.query({ threadId });
+      const userMessages = messages.filter((m: any) => m.role === 'user').map((m: any) => m.content);
+      const userUiMessages = uiMessages.filter((m: any) => m.role === 'user').map((m: any) => m.content);
+
+      expect(userMessages).toEqual(expect.arrayContaining(['First message', 'Second message']));
+      expect(userUiMessages).toEqual(expect.arrayContaining(['First message', 'Second message']));
+    });
+
+    it('should save assistant responses for both text and object output modes', async () => {
+      const threadId = randomUUID();
+      const resourceId = 'assistant-responses';
+      // 1. Text mode
+      await agent.generateVNext([{ role: 'user', content: 'What is 2+2?' }], {
+        threadId,
+        resourceId,
+        modelSettings: {
+          temperature: 0,
+        },
+      });
+
+      // 2. Object/output mode
+      await agent.generateVNext([{ role: 'user', content: 'Give me JSON' }], {
+        threadId,
+        resourceId,
+        output: z.object({
+          result: z.string(),
+        }),
+        modelSettings: {
+          temperature: 0,
+        },
+      });
+
+      // Fetch messages from memory
+      const agentMemory = (await agent.getMemory())!;
+      const { messages, uiMessages } = await agentMemory.query({ threadId });
+      const userMessages = messages.filter((m: any) => m.role === 'user').map((m: any) => m.content);
+      const userUiMessages = uiMessages.filter((m: any) => m.role === 'user').map((m: any) => m.content);
+      const assistantMessages = messages.filter((m: any) => m.role === 'assistant').map((m: any) => m.content);
+      const assistantUiMessages = uiMessages.filter((m: any) => m.role === 'assistant').map((m: any) => m.content);
+      expect(userMessages).toEqual(expect.arrayContaining(['What is 2+2?', 'Give me JSON']));
+      expect(userUiMessages).toEqual(expect.arrayContaining(['What is 2+2?', 'Give me JSON']));
+      function flattenAssistantMessages(messages: any[]) {
+        return messages.flatMap(msg =>
+          Array.isArray(msg) ? msg.map(part => (typeof part === 'object' && part.text ? part.text : part)) : msg,
+        );
+      }
+
+      expect(flattenAssistantMessages(assistantMessages)).toEqual(
+        expect.arrayContaining([expect.stringContaining('2 + 2'), expect.stringContaining('"result"')]),
+      );
+
+      expect(flattenAssistantMessages(assistantUiMessages)).toEqual(
+        expect.arrayContaining([expect.stringContaining('2 + 2'), expect.stringContaining('"result"')]),
+      );
+    });
+
+    it('should not save messages provided in the context option', async () => {
+      const threadId = randomUUID();
+      const resourceId = 'context-option-messages-not-saved';
+
+      const userMessageContent = 'This is a user message.';
+      const contextMessageContent1 = 'This is the first context message.';
+      const contextMessageContent2 = 'This is the second context message.';
+
+      // Send user messages and context messages
+      await agent.generateVNext(userMessageContent, {
+        threadId,
+        resourceId,
+        context: [
+          { role: 'system', content: contextMessageContent1 },
+          { role: 'user', content: contextMessageContent2 },
+        ],
+      });
+
+      // Fetch messages from memory
+      const agentMemory = (await agent.getMemory())!;
+      const { messages } = await agentMemory.query({ threadId });
+
+      // Assert that the context messages are NOT saved
+      const savedContextMessages = messages.filter(
+        (m: any) => m.content === contextMessageContent1 || m.content === contextMessageContent2,
+      );
+      expect(savedContextMessages.length).toBe(0);
+
+      // Assert that the user message IS saved
+      const savedUserMessages = messages.filter((m: any) => m.role === 'user');
+      expect(savedUserMessages.length).toBe(1);
+      expect(savedUserMessages[0].content).toBe(userMessageContent);
+    });
+
+    it('should persist UIMessageWithMetadata through agent generate and memory', async () => {
+      const threadId = randomUUID();
+      const resourceId = 'ui-message-metadata';
+
+      // Create messages with metadata
+      const messagesWithMetadata: UIMessageWithMetadata[] = [
+        {
+          id: 'msg1',
+          role: 'user',
+          content: 'Hello with metadata',
+          parts: [{ type: 'text', text: 'Hello with metadata' }],
+          metadata: {
+            source: 'web-ui',
+            timestamp: Date.now(),
+            customField: 'custom-value',
+          },
+        },
+        {
+          id: 'msg2',
+          role: 'user',
+          content: 'Another message with different metadata',
+          parts: [{ type: 'text', text: 'Another message with different metadata' }],
+          metadata: {
+            source: 'mobile-app',
+            version: '1.0.0',
+            userId: 'user-123',
+          },
+        },
+      ];
+
+      // Send messages with metadata
+      await agent.generateVNext(messagesWithMetadata, {
+        threadId,
+        resourceId,
+      });
+
+      // Fetch messages from memory
+      const agentMemory = (await agent.getMemory())!;
+      const { uiMessages } = await agentMemory.query({ threadId });
+
+      // Check that all user messages were saved
+      const savedUserMessages = uiMessages.filter((m: any) => m.role === 'user');
+      expect(savedUserMessages.length).toBe(2);
+
+      // Check that metadata was persisted in the stored messages
+      const firstMessage = uiMessages.find((m: any) => m.content === 'Hello with metadata');
+      const secondMessage = uiMessages.find((m: any) => m.content === 'Another message with different metadata');
+
+      expect(firstMessage).toBeDefined();
+      expect(firstMessage!.metadata).toEqual({
+        source: 'web-ui',
+        timestamp: expect.any(Number),
+        customField: 'custom-value',
+      });
+
+      expect(secondMessage).toBeDefined();
+      expect(secondMessage!.metadata).toEqual({
+        source: 'mobile-app',
+        version: '1.0.0',
+        userId: 'user-123',
+      });
+
+      // Check UI messages also preserve metadata
+      const firstUIMessage = uiMessages.find((m: any) => m.content === 'Hello with metadata');
+      const secondUIMessage = uiMessages.find((m: any) => m.content === 'Another message with different metadata');
+
+      expect(firstUIMessage?.metadata).toEqual({
+        source: 'web-ui',
+        timestamp: expect.any(Number),
+        customField: 'custom-value',
+      });
+
+      expect(secondUIMessage?.metadata).toEqual({
+        source: 'mobile-app',
+        version: '1.0.0',
+        userId: 'user-123',
+      });
+    });
+  });
+
+  describe('Agent thread metadata with generateTitle', () => {
+    // Agent with generateTitle: true
+    const memoryWithTitle = new Memory({
+      options: {
+        threads: { generateTitle: true },
+        semanticRecall: true,
+        lastMessages: 10,
+      },
+      storage: new LibSQLStore({ url: dbFile }),
+      vector: new LibSQLVector({ connectionUrl: dbFile }),
+      embedder: fastembed,
+    });
+    const agentWithTitle = new Agent({
+      name: 'title-on',
+      instructions: 'Test agent with generateTitle on.',
+      model: openai('gpt-4o'),
+      memory: memoryWithTitle,
+      tools: { get_weather: weatherTool },
+    });
+
+    const agentWithDynamicModelTitle = new Agent({
+      name: 'title-on',
+      instructions: 'Test agent with generateTitle on.',
+      model: ({ runtimeContext }) => openai(runtimeContext.get('model') as string),
+      memory: memoryWithTitle,
+      tools: { get_weather: weatherTool },
+    });
+
+    // Agent with generateTitle: false
+    const memoryNoTitle = new Memory({
+      options: {
+        threads: { generateTitle: false },
+        semanticRecall: true,
+        lastMessages: 10,
+      },
+      storage: new LibSQLStore({ url: dbFile }),
+      vector: new LibSQLVector({ connectionUrl: dbFile }),
+      embedder: fastembed,
+    });
+    const agentNoTitle = new Agent({
+      name: 'title-off',
+      instructions: 'Test agent with generateTitle off.',
+      model: openai('gpt-4o'),
+      memory: memoryNoTitle,
+      tools: { get_weather: weatherTool },
+    });
+
+    it('should preserve metadata when generateTitle is true', async () => {
+      const threadId = randomUUID();
+      const resourceId = 'gen-title-metadata';
+      const metadata = { foo: 'bar', custom: 123 };
+
+      const thread = await memoryWithTitle.createThread({
+        threadId,
+        resourceId,
+        metadata,
+      });
+
+      expect(thread).toBeDefined();
+      expect(thread?.metadata).toMatchObject(metadata);
+
+      await agentWithTitle.generateVNext([{ role: 'user', content: 'Hello, world!' }], { threadId, resourceId });
+      await agentWithTitle.generateVNext([{ role: 'user', content: 'Hello, world!' }], { threadId, resourceId });
+
+      const existingThread = await memoryWithTitle.getThreadById({ threadId });
+      expect(existingThread).toBeDefined();
+      expect(existingThread?.metadata).toMatchObject(metadata);
+    });
+
+    it('should use generateTitle with runtime context', async () => {
+      const threadId = randomUUID();
+      const resourceId = 'gen-title-metadata';
+      const metadata = { foo: 'bar', custom: 123 };
+
+      const thread = await memoryWithTitle.createThread({
+        threadId,
+        resourceId,
+        metadata,
+      });
+
+      expect(thread).toBeDefined();
+      expect(thread?.metadata).toMatchObject(metadata);
+
+      const runtimeContext = new RuntimeContext();
+      runtimeContext.set('model', 'gpt-4o-mini');
+      await agentWithDynamicModelTitle.generateVNext([{ role: 'user', content: 'Hello, world!' }], {
+        threadId,
+        resourceId,
+        runtimeContext,
+      });
+
+      const existingThread = await memoryWithTitle.getThreadById({ threadId });
+      expect(existingThread).toBeDefined();
+      expect(existingThread?.metadata).toMatchObject(metadata);
+    });
+
+    it('should preserve metadata when generateTitle is false', async () => {
+      const threadId = randomUUID();
+      const resourceId = 'no-gen-title-metadata';
+      const metadata = { foo: 'baz', custom: 456 };
+
+      const thread = await memoryNoTitle.createThread({
+        threadId,
+        resourceId,
+        metadata,
+      });
+
+      expect(thread).toBeDefined();
+      expect(thread?.metadata).toMatchObject(metadata);
+
+      await agentNoTitle.generateVNext([{ role: 'user', content: 'Hello, world!' }], { threadId, resourceId });
+      await agentNoTitle.generateVNext([{ role: 'user', content: 'Hello, world!' }], { threadId, resourceId });
+
+      const existingThread = await memoryNoTitle.getThreadById({ threadId });
+      expect(existingThread).toBeDefined();
+      expect(existingThread?.metadata).toMatchObject(metadata);
+    });
+  });
+});
+
+describe('Agent with message processors', () => {
+  it('should apply processors to filter tool messages from context', async () => {
+    const threadId = randomUUID();
+    const resourceId = 'processor-filter-tool-message';
+
+    // First, ask a question that will trigger a tool call
+    const firstResponse = await memoryProcessorAgent.generateVNext('What is the weather in London?', {
+      threadId,
+      resourceId,
+    });
+
+    // The response should contain the weather.
+    expect(firstResponse.text).toContain('65');
+
+    // Check that tool calls were saved to memory
+    const agentMemory = (await memoryProcessorAgent.getMemory())!;
+    const { messages: messagesFromMemory } = await agentMemory.query({ threadId });
+    const toolMessages = messagesFromMemory.filter(
+      m => m.role === 'tool' || (m.role === 'assistant' && typeof m.content !== 'string'),
+    );
+
+    expect(toolMessages.length).toBeGreaterThan(0);
+
+    // Now, ask a follow-up question. The processor should prevent the tool call history
+    // from being sent to the model.
+    const secondResponse = await memoryProcessorAgent.generateVNext('What was the tool you just used?', {
+      memory: {
+        thread: threadId,
+        resource: resourceId,
+        options: {
+          lastMessages: 10,
+        },
+      },
+    });
+
+    const secondResponseRequestMessages: CoreMessage[] = secondResponse.request.body.input;
+
+    expect(secondResponseRequestMessages.length).toBe(4);
+    // Filter out tool messages and tool results, should be the same as above.
+    expect(
+      secondResponseRequestMessages.filter(m => m.role !== 'tool' || (m as any)?.tool_calls?.[0]?.type !== 'function')
+        .length,
+    ).toBe(4);
+  }, 3000_000);
+});
+
+describe('Agent.fetchMemory', () => {
+  it('should return messages from memory', async () => {
+    const threadId = randomUUID();
+    const resourceId = 'fetch-memory-test';
+
+    const response = await weatherAgent.generateVNext('Just a simple greeting to populate memory.', {
+      threadId,
+      resourceId,
+    });
+
+    const { messages } = await weatherAgent.fetchMemory({ threadId, resourceId });
+
+    expect(messages).toBeDefined();
+    if (!messages) return;
+
+    expect(messages.length).toBe(2); // user message + assistant response
+
+    const userMessage = messages.find(m => m.role === 'user');
+    expect(userMessage).toBeDefined();
+    if (!userMessage) return;
+    expect(userMessage.content[0]).toEqual({ type: 'text', text: 'Just a simple greeting to populate memory.' });
+
+    const assistantMessage = messages.find(m => m.role === 'assistant');
+    expect(assistantMessage).toBeDefined();
+    if (!assistantMessage) return;
+    expect(assistantMessage.content).toEqual([{ type: 'text', text: response.text }]);
+  }, 30_000);
+
+  it('should apply processors when fetching memory', async () => {
+    const threadId = randomUUID();
+    const resourceId = 'fetch-memory-processor-test';
+
+    await memoryProcessorAgent.generateVNext('What is the weather in London?', { threadId, resourceId });
+
+    const { messages } = await memoryProcessorAgent.fetchMemory({ threadId, resourceId });
+
+    expect(messages).toBeDefined();
+    if (!messages) return;
+
+    const hasToolRelatedMessage = messages.some(
+      m => m.role === 'tool' || (Array.isArray(m.content) && m.content.some(c => c.type === 'tool-call')),
+    );
+    expect(hasToolRelatedMessage).toBe(false);
+
+    const userMessage = messages.find(m => m.role === 'user');
+    expect(userMessage).toBeDefined();
+    if (!userMessage) return;
+    expect(userMessage.content[0]).toEqual({ type: 'text', text: 'What is the weather in London?' });
+  }, 30_000);
+
+  it('should return nothing if thread does not exist', async () => {
+    const threadId = randomUUID();
+    const resourceId = 'fetch-memory-no-thread';
+
+    const result = await weatherAgent.fetchMemory({ threadId, resourceId });
+
+    expect(result.messages).toEqual([]);
+    expect(result.threadId).toBe(threadId);
+  });
+});
+
+describe('Agent memory test gemini', () => {
+  const memory = new Memory({
+    storage: new MockStore(),
+    options: {
+      threads: {
+        generateTitle: false,
+      },
+      lastMessages: 2,
+    },
+  });
+
+  const agent = new Agent({
+    name: 'gemini-agent',
+    instructions:
+      'You are a weather agent. When asked about weather in any city, use the get_weather tool with the city name.',
+    model: google.chat('gemini-2.5-flash-preview-05-20'),
+    memory,
+    tools: { get_weather: weatherToolCity },
+  });
+
+  const resource = 'weatherAgent-memory-test';
+  const thread = new Date().getTime().toString();
+
+  it('should not throw error when using gemini', async () => {
+    // generate two messages in the db
+    await agent.generateVNext(`What's the weather in Tokyo?`, {
+      memory: { resource, thread },
+    });
+
+    await new Promise(resolve => setTimeout(resolve, 1000));
+
+    // Will throw if the messages sent to the agent aren't cleaned up because a tool call message will be the first message sent to the agent
+    // Which some providers like gemini will not allow.
+    await expect(
+      agent.generateVNext(`What's the weather in London?`, {
+        memory: { resource, thread },
+      }),
+    ).resolves.not.toThrow();
+  });
+});
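
An aside on the gemini test at the end of the file above: with lastMessages: 2, a naive recall window over a history like [user, assistant tool-call, tool result, assistant text] can begin at a tool message, and Gemini rejects a conversation whose first message is tool traffic. The test asserts that the recalled messages are cleaned up before being sent. A hypothetical sketch of the invariant being checked (illustrative only; the package's internal cleanup logic is not shown in this diff):

// Hypothetical illustration: recalled history must not open with a tool
// call/result, so drop any leading tool traffic before the first user or
// system message.
function trimToValidStart<T extends { role: string }>(messages: T[]): T[] {
  const first = messages.findIndex(m => m.role === 'user' || m.role === 'system');
  return first === -1 ? [] : messages.slice(first);
}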
package/integration-tests-v5/src/mastra/agents/weather.ts
@@ -0,0 +1,75 @@
+import { openai } from '@ai-sdk/openai';
+import { createTool } from '@mastra/core';
+import { Agent } from '@mastra/core/agent';
+import { LibSQLStore, LibSQLVector } from '@mastra/libsql';
+import { Memory } from '@mastra/memory';
+import { ToolCallFilter } from '@mastra/memory/processors';
+import { z } from 'zod';
+import { weatherTool } from '../tools/weather';
+
+export const memory = new Memory({
+  options: {
+    workingMemory: {
+      enabled: true,
+    },
+    lastMessages: 10,
+    semanticRecall: true,
+  },
+  storage: new LibSQLStore({
+    url: 'file:mastra.db', // relative path from bundled .mastra/output dir
+  }),
+  vector: new LibSQLVector({
+    connectionUrl: 'file:mastra.db', // relative path from bundled .mastra/output dir
+  }),
+  embedder: openai.embedding('text-embedding-3-small'),
+});
+
+export const weatherAgent = new Agent({
+  name: 'test',
+  instructions:
+    'You are a weather agent. When asked about weather in any city, use the get_weather tool with the city name as the postal code. When asked for clipboard contents use the clipboard tool to get the clipboard contents.',
+  model: openai('gpt-4o'),
+  memory,
+  tools: {
+    get_weather: weatherTool,
+    clipboard: createTool({
+      id: 'clipboard',
+      description: 'Returns the contents of the users clipboard',
+      inputSchema: z.object({}),
+    }),
+  },
+});
+
+const memoryWithProcessor = new Memory({
+  embedder: openai.embedding('text-embedding-3-small'),
+  storage: new LibSQLStore({
+    url: 'file:mastra.db',
+  }),
+  vector: new LibSQLVector({
+    connectionUrl: 'file:mastra.db',
+  }),
+  options: {
+    semanticRecall: {
+      topK: 20,
+      messageRange: {
+        before: 10,
+        after: 10,
+      },
+    },
+    lastMessages: 20,
+    threads: {
+      generateTitle: true,
+    },
+  },
+  processors: [new ToolCallFilter()],
+});
+
+export const memoryProcessorAgent = new Agent({
+  name: 'test-processor',
+  instructions: 'You are a test agent that uses a memory processor to filter out tool call messages.',
+  model: openai('gpt-4o'),
+  memory: memoryWithProcessor,
+  tools: {
+    get_weather: weatherTool,
+  },
+});
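
For context on the processors line above: new ToolCallFilter() with no arguments removes all tool calls and results from recalled history, which is what the processor tests rely on. A sketch of the narrower form, assuming the exclude option and the TokenLimiter processor from the module's public docs (this diff only exercises the no-argument constructor):

import { LibSQLStore } from '@mastra/libsql';
import { Memory } from '@mastra/memory';
import { TokenLimiter, ToolCallFilter } from '@mastra/memory/processors';

// Assumption: ToolCallFilter accepts an optional `exclude` list of tool names,
// and TokenLimiter caps recalled history at a token budget (per public docs).
const selectiveMemory = new Memory({
  storage: new LibSQLStore({ url: 'file:mastra.db' }),
  processors: [
    new ToolCallFilter({ exclude: ['get_weather'] }), // drop only weather tool traffic
    new TokenLimiter(127000), // then trim whatever remains to the token budget
  ],
});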