@mastra/memory 0.14.3-alpha.0 → 0.14.3-alpha.2

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/CHANGELOG.md +23 -0
  2. package/package.json +18 -5
  3. package/.turbo/turbo-build.log +0 -8
  4. package/eslint.config.js +0 -12
  5. package/integration-tests-v5/.env.test +0 -5
  6. package/integration-tests-v5/CHANGELOG.md +0 -159
  7. package/integration-tests-v5/docker-compose.yml +0 -39
  8. package/integration-tests-v5/node_modules/.bin/next +0 -21
  9. package/integration-tests-v5/node_modules/.bin/tsc +0 -21
  10. package/integration-tests-v5/node_modules/.bin/tsserver +0 -21
  11. package/integration-tests-v5/node_modules/.bin/vitest +0 -21
  12. package/integration-tests-v5/package.json +0 -43
  13. package/integration-tests-v5/src/agent-memory.test.ts +0 -621
  14. package/integration-tests-v5/src/mastra/agents/weather.ts +0 -75
  15. package/integration-tests-v5/src/mastra/index.ts +0 -13
  16. package/integration-tests-v5/src/mastra/tools/weather.ts +0 -24
  17. package/integration-tests-v5/src/processors.test.ts +0 -604
  18. package/integration-tests-v5/src/streaming-memory.test.ts +0 -367
  19. package/integration-tests-v5/src/test-utils.ts +0 -147
  20. package/integration-tests-v5/src/working-memory.test.ts +0 -1064
  21. package/integration-tests-v5/tsconfig.json +0 -13
  22. package/integration-tests-v5/vitest.config.ts +0 -18
  23. package/src/index.ts +0 -1040
  24. package/src/processors/index.test.ts +0 -246
  25. package/src/processors/index.ts +0 -2
  26. package/src/processors/token-limiter.ts +0 -159
  27. package/src/processors/tool-call-filter.ts +0 -77
  28. package/src/tools/working-memory.ts +0 -154
  29. package/tsconfig.build.json +0 -9
  30. package/tsconfig.json +0 -5
  31. package/tsup.config.ts +0 -17
  32. package/vitest.config.ts +0 -11
package/integration-tests-v5/src/mastra/index.ts
@@ -1,13 +0,0 @@
- import { Mastra } from '@mastra/core';
- import { LibSQLStore } from '@mastra/libsql';
- import { memoryProcessorAgent, weatherAgent } from './agents/weather';
-
- export const mastra = new Mastra({
-   agents: {
-     test: weatherAgent,
-     testProcessor: memoryProcessorAgent,
-   },
-   storage: new LibSQLStore({
-     url: 'file:mastra.db',
-   }),
- });
package/integration-tests-v5/src/mastra/tools/weather.ts
@@ -1,24 +0,0 @@
- import { createTool } from '@mastra/core/tools';
- import { z } from 'zod';
-
- export const weatherTool = createTool({
-   id: 'get_weather',
-   description: 'Get the weather for a given location',
-   inputSchema: z.object({
-     postalCode: z.string().describe('The location to get the weather for'),
-   }),
-   execute: async ({ context: { postalCode } }) => {
-     return `The weather in ${postalCode} is sunny. It is currently 70 degrees and feels like 65 degrees.`;
-   },
- });
-
- export const weatherToolCity = createTool({
-   id: 'get_weather_city',
-   description: 'Get the weather for a given location',
-   inputSchema: z.object({
-     city: z.string().describe('The location to get the weather for'),
-   }),
-   execute: async ({ context: { city } }) => {
-     return `The weather in ${city} is sunny. It is currently 70 degrees and feels like 65 degrees.`;
-   },
- });
package/integration-tests-v5/src/processors.test.ts
@@ -1,604 +0,0 @@
- import { mkdtemp } from 'fs/promises';
- import { afterEach } from 'node:test';
- import { tmpdir } from 'os';
- import { join } from 'path';
- import { openai } from '@ai-sdk/openai';
- import type { MastraMessageV2 } from '@mastra/core/agent';
- import { Agent, MessageList } from '@mastra/core/agent';
- import type { CoreMessage } from '@mastra/core/llm';
- import type { MemoryProcessorOpts } from '@mastra/core/memory';
- import { MemoryProcessor } from '@mastra/core/memory';
- import { createTool } from '@mastra/core/tools';
- import { fastembed } from '@mastra/fastembed';
- import { LibSQLVector, LibSQLStore } from '@mastra/libsql';
- import { Memory } from '@mastra/memory';
- import { TokenLimiter, ToolCallFilter } from '@mastra/memory/processors';
- import type { UIMessage } from 'ai';
- import { describe, it, expect, beforeEach } from 'vitest';
- import { z } from 'zod';
- import { filterToolCallsByName, filterToolResultsByName, generateConversationHistory } from './test-utils';
-
- function v2ToCoreMessages(messages: MastraMessageV2[] | UIMessage[]): CoreMessage[] {
-   return new MessageList().add(messages, 'memory').get.all.core();
- }
-
- let memory: Memory;
- let storage: LibSQLStore;
- let vector: LibSQLVector;
- const resourceId = 'processor-test';
-
- beforeEach(async () => {
-   // Create a new unique database file in the temp directory for each test
-   const dbPath = join(await mkdtemp(join(tmpdir(), `memory-processor-test-`)), 'test.db');
-
-   storage = new LibSQLStore({
-     url: `file:${dbPath}`,
-   });
-   vector = new LibSQLVector({
-     connectionUrl: `file:${dbPath}`,
-   });
-
-   // Initialize memory with the in-memory database
-   memory = new Memory({
-     storage,
-     options: {
-       lastMessages: 10,
-       semanticRecall: false,
-       threads: {
-         generateTitle: false,
-       },
-     },
-   });
- });
-
- afterEach(async () => {
-   //@ts-ignore
-   await storage.client.close();
-   //@ts-ignore
-   await vector.turso.close();
- });
-
- describe('Memory with Processors', () => {
-   it('should apply TokenLimiter when retrieving messages', async () => {
-     // Create a thread
-     const thread = await memory.createThread({
-       title: 'TokenLimiter Test Thread',
-       resourceId,
-     });
-
-     // Generate conversation with 10 turn pairs (20 messages total)
-     const { messagesV2 } = generateConversationHistory({
-       threadId: thread.id,
-       resourceId,
-       messageCount: 10,
-       toolFrequency: 3,
-     });
-
-     // Save messages
-     await memory.saveMessages({ messages: messagesV2, format: 'v2' });
-
-     // Get messages with a token limit of 250 (should get ~2.5 messages)
-     const queryResult = await memory.query({
-       threadId: thread.id,
-       selectBy: { last: 20 },
-     });
-     const result = await memory.processMessages({
-       messages: new MessageList({ threadId: thread.id, resourceId })
-         .add(queryResult.uiMessages, 'memory')
-         .get.all.core(),
-       processors: [new TokenLimiter(250)], // Limit to 250 tokens
-     });
-
-     // We should have messages limited by token count
-     expect(result.length).toBeGreaterThan(0);
-     expect(result.length).toBeLessThanOrEqual(4); // Should get a small subset of messages
-
-     expect(result.at(-1)).toEqual({
-       role: 'tool',
-       content: [
-         {
-           type: 'tool-result',
-           toolCallId: 'tool-9',
-           toolName: 'weather',
-           result: 'Pretty hot',
-         },
-       ],
-     });
-
-     // Now query with a very high token limit that should return all messages
-     const allMessagesQuery = await memory.query({
-       threadId: thread.id,
-       selectBy: { last: 20 },
-     });
-     expect(allMessagesQuery.messages.length).toBe(20);
-
-     const allMessagesResult = await memory.processMessages({
-       messages: new MessageList({ threadId: thread.id, resourceId })
-         .add(allMessagesQuery.uiMessages, 'memory')
-         .get.all.core(),
-       processors: [new TokenLimiter(3000)], // High limit that should exceed total tokens
-     });
-
-     // create response message list to add to memory
-     const messages = new MessageList({ threadId: thread.id, resourceId })
-       .add(allMessagesResult, 'response')
-       .get.all.v2();
-
-     const listed = new MessageList({ threadId: thread.id, resourceId }).add(messages, 'memory').get.all.v2();
-
-     // We should get all 20 messages
-     expect(listed.length).toBe(20);
-     // core messages store tool call/result as separate messages, so +3
-     expect(allMessagesResult.length).toBe(23);
-   });
-
-   it('should apply ToolCallFilter when retrieving messages', async () => {
-     // Create a thread
-     const thread = await memory.createThread({
-       title: 'ToolFilter Test Thread',
-       resourceId,
-     });
-
-     // Generate conversation with tool calls
-     const { messagesV2 } = generateConversationHistory({
-       threadId: thread.id,
-       resourceId,
-       messageCount: 5,
-       toolFrequency: 2, // Every other assistant response is a tool call
-       toolNames: ['weather', 'calculator'],
-     });
-
-     // Save messages
-     await memory.saveMessages({ messages: messagesV2, format: 'v2' });
-
-     // filter weather tool calls
-     const queryResult = await memory.query({
-       threadId: thread.id,
-       selectBy: { last: 20 },
-     });
-     const result = await memory.processMessages({
-       messages: v2ToCoreMessages(queryResult.uiMessages),
-       processors: [new ToolCallFilter({ exclude: ['weather'] })],
-     });
-     const messages = new MessageList({ threadId: thread.id, resourceId }).add(result, 'response').get.all.v2();
-     expect(new MessageList().add(messages, 'memory').get.all.v2().length).toBeLessThan(messagesV2.length);
-     expect(filterToolCallsByName(result, 'weather')).toHaveLength(0);
-     expect(filterToolResultsByName(result, 'weather')).toHaveLength(0);
-     expect(filterToolCallsByName(result, 'calculator')).toHaveLength(1);
-     expect(filterToolResultsByName(result, 'calculator')).toHaveLength(1);
-
-     // make another query with no processors to make sure memory messages in DB were not altered and were only filtered from results
-     const queryResult2 = await memory.query({
-       threadId: thread.id,
-       selectBy: { last: 20 },
-     });
-     const result2 = await memory.processMessages({
-       messages: v2ToCoreMessages(queryResult2.uiMessages),
-       processors: [],
-     });
-     const messages2 = new MessageList({ threadId: thread.id, resourceId }).add(result2, 'response').get.all.v2();
-     expect(new MessageList().add(messages2, 'memory').get.all.v2()).toHaveLength(messagesV2.length);
-     expect(filterToolCallsByName(result2, 'weather')).toHaveLength(1);
-     expect(filterToolResultsByName(result2, 'weather')).toHaveLength(1);
-     expect(filterToolCallsByName(result2, 'calculator')).toHaveLength(1);
-     expect(filterToolResultsByName(result2, 'calculator')).toHaveLength(1);
-
-     // filter all by name
-     const queryResult3 = await memory.query({
-       threadId: thread.id,
-       selectBy: { last: 20 },
-     });
-     const result3 = await memory.processMessages({
-       messages: v2ToCoreMessages(queryResult3.uiMessages),
-       processors: [new ToolCallFilter({ exclude: ['weather', 'calculator'] })],
-     });
-     expect(result3.length).toBeLessThan(messagesV2.length);
-     expect(filterToolCallsByName(result3, 'weather')).toHaveLength(0);
-     expect(filterToolResultsByName(result3, 'weather')).toHaveLength(0);
-     expect(filterToolCallsByName(result3, 'calculator')).toHaveLength(0);
-     expect(filterToolResultsByName(result3, 'calculator')).toHaveLength(0);
-
-     // filter all by default
-     const queryResult4 = await memory.query({
-       threadId: thread.id,
-       selectBy: { last: 20 },
-     });
-     const result4 = await memory.processMessages({
-       messages: v2ToCoreMessages(queryResult4.uiMessages),
-       processors: [new ToolCallFilter()],
-     });
-     expect(result4.length).toBeLessThan(messagesV2.length);
-     expect(filterToolCallsByName(result4, 'weather')).toHaveLength(0);
-     expect(filterToolResultsByName(result4, 'weather')).toHaveLength(0);
-     expect(filterToolCallsByName(result4, 'calculator')).toHaveLength(0);
-     expect(filterToolResultsByName(result4, 'calculator')).toHaveLength(0);
-   });
-
-   it('should apply multiple processors in order', async () => {
-     // Create a thread
-     const thread = await memory.createThread({
-       title: 'Multiple Processors Test Thread',
-       resourceId,
-     });
-
-     // Generate conversation with tool calls
-     const { messages } = generateConversationHistory({
-       threadId: thread.id,
-       resourceId,
-       messageCount: 8,
-       toolFrequency: 2, // Every other assistant response is a tool call
-       toolNames: ['weather', 'calculator', 'search'],
-     });
-
-     // Save messages
-     await memory.saveMessages({ messages });
-
-     // Apply multiple processors: first remove weather tool calls, then limit to 250 tokens
-     const queryResult = await memory.query({
-       threadId: thread.id,
-       selectBy: { last: 20 },
-     });
-     const result = await memory.processMessages({
-       messages: v2ToCoreMessages(queryResult.uiMessages),
-       processors: [new ToolCallFilter({ exclude: ['weather'] }), new TokenLimiter(250)],
-     });
-
-     // We should have fewer messages after filtering and token limiting
-     expect(result.length).toBeGreaterThan(0);
-     expect(result.length).toBeLessThan(messages.length);
-     // And they should exclude weather tool messages
-     expect(filterToolResultsByName(result, `weather`)).toHaveLength(0);
-     expect(filterToolCallsByName(result, `weather`)).toHaveLength(0);
-   });
-
-   it('should apply multiple processors without duplicating messages', async () => {
-     class ConversationOnlyFilter extends MemoryProcessor {
-       constructor() {
-         super({ name: 'ConversationOnlyFilter' });
-       }
-
-       process(messages: CoreMessage[], _opts: MemoryProcessorOpts = {}): CoreMessage[] {
-         return messages.filter(msg => msg.role === 'user' || msg.role === 'assistant');
-       }
-     }
-     const memory = new Memory({
-       storage,
-       vector,
-       embedder: fastembed,
-       processors: [new ToolCallFilter(), new ConversationOnlyFilter(), new TokenLimiter(127000)],
-       options: {
-         lastMessages: 10,
-         semanticRecall: true,
-         workingMemory: {
-           enabled: true,
-         },
-       },
-     });
-     const thread = await memory.createThread({
-       title: 'Multiple Processors Test Thread 2',
-       resourceId,
-     });
-     const instructions = 'You are a helpful assistant';
-     const agent = new Agent({
-       name: 'processor-test-agent',
-       instructions,
-       model: openai('gpt-4o'),
-       memory,
-     });
-
-     const userMessage = 'Tell me something interesting about space';
-
-     const res = await agent.generateVNext(
-       [
-         {
-           role: 'user',
-           content: userMessage,
-         },
-       ],
-       {
-         threadId: thread.id,
-         resourceId,
-       },
-     );
-
-     const requestInputMessages = res.request.body?.input;
-     if (!Array.isArray(requestInputMessages)) {
-       throw new Error(`responseMessages should be an array`);
-     }
-
-     const userMessagesByContent = requestInputMessages.filter(m => m.content?.[0]?.text === userMessage);
-     expect(userMessagesByContent).toEqual([
-       expect.objectContaining({ role: 'user', content: [expect.objectContaining({ text: userMessage })] }),
-     ]); // should only be one
-     expect(userMessagesByContent.length).toBe(1); // if there's more than one we have duplicate messages
-
-     const userMessage2 = 'Tell me something else interesting about space';
-
-     const res2 = await agent.generateVNext(
-       [
-         {
-           role: 'user',
-           content: userMessage2,
-         },
-       ],
-       {
-         threadId: thread.id,
-         resourceId,
-       },
-     );
-
-     const requestInputMessages2 = res2.request.body?.input;
-
-     if (!Array.isArray(requestInputMessages2)) {
-       throw new Error(`responseMessages should be an array`);
-     }
-
-     const userMessagesByContent2 = requestInputMessages2.filter((m: any) => m.content?.[0]?.text === userMessage2);
-     expect(userMessagesByContent2).toEqual([
-       expect.objectContaining({ role: 'user', content: [expect.objectContaining({ text: userMessage2 })] }),
-     ]); // should only be one
-     expect(userMessagesByContent2.length).toBe(1); // if there's more than one we have duplicate messages
-
-     // make sure all user messages are there
-     const allUserMessages = requestInputMessages2.filter((m: CoreMessage) => m.role === 'user');
-     expect(allUserMessages.length).toBe(2);
-
-     const remembered = await memory.query({
-       threadId: thread.id,
-       resourceId,
-       selectBy: {
-         last: 20,
-       },
-     });
-     expect(remembered.messages.filter(m => m.role === 'user').length).toBe(2);
-     expect(remembered.messages.length).toBe(4); // 2 user, 2 assistant. These wont be filtered because they come from memory.query() directly
-   });
-
-   it('should apply processors with a real Mastra agent', async () => {
-     // Create a thread
-     const thread = await memory.createThread({
-       title: 'Real Agent Processor Test Thread',
-       resourceId,
-     });
-
-     const threadId = thread.id;
-
-     // Create test tools
-     const weatherTool = createTool({
-       id: 'get_weather',
-       description: 'Get the weather for a given location',
-       inputSchema: z.object({
-         location: z.string().describe('The location to get the weather for'),
-       }),
-       execute: async ({ context: { location } }) => {
-         return `The weather in ${location} is sunny. It is currently 70 degrees and feels like 65 degrees.`;
-       },
-     });
-
-     const calculatorTool = createTool({
-       id: 'calculator',
-       description: 'Perform a simple calculation',
-       inputSchema: z.object({
-         expression: z.string().describe('The mathematical expression to calculate'),
-       }),
-       execute: async ({ context: { expression } }) => {
-         return `The result of ${expression} is ${eval(expression)}`;
-       },
-     });
-
-     const instructions =
-       'You are a helpful assistant with access to weather and calculator tools. Use them when appropriate.';
-     // Create agent with memory and tools
-     const agent = new Agent({
-       name: 'processor-test-agent',
-       instructions,
-       model: openai('gpt-4o'),
-       memory,
-       tools: {
-         get_weather: weatherTool,
-         calculator: calculatorTool,
-       },
-     });
-
-     // First message - use weather tool
-     await agent.generateVNext('What is the weather in Seattle?', {
-       threadId,
-       resourceId,
-     });
-     // Second message - use calculator tool
-     await agent.generateVNext('Calculate 123 * 456', {
-       threadId,
-       resourceId,
-     });
-     // Third message - simple text response
-     await agent.generateVNext('Tell me something interesting about space', {
-       threadId,
-       resourceId,
-     });
-
-     // Query with no processors to verify baseline message count
-     const queryResult = await memory.query({
-       threadId,
-       selectBy: { last: 20 },
-     });
-
-     const list = new MessageList({ threadId }).add(queryResult.messagesV2, 'memory');
-
-     const baselineResult = await memory.processMessages({
-       messages: list.get.remembered.core(),
-       newMessages: list.get.input.core(),
-       processors: [],
-     });
-
-     // There should be at least 6 messages (3 user + 3 assistant responses)
-     expect(baselineResult.length).toBeGreaterThanOrEqual(6);
-
-     // Verify we have tool calls in the baseline
-     const weatherToolCalls = filterToolCallsByName(baselineResult, 'get_weather');
-     const calculatorToolCalls = filterToolCallsByName(baselineResult, 'calculator');
-     expect(weatherToolCalls.length).toBeGreaterThan(0);
-     expect(calculatorToolCalls.length).toBeGreaterThan(0);
-
-     // Test filtering weather tool calls
-     const weatherQueryResult = await memory.query({
-       threadId,
-       selectBy: { last: 20 },
-     });
-     const list2 = new MessageList({ threadId }).add(weatherQueryResult.messagesV2, 'memory');
-     const weatherFilteredResult = await memory.processMessages({
-       messages: list2.get.all.core(),
-       processors: [new ToolCallFilter({ exclude: ['get_weather'] })],
-     });
-
-     // Should have fewer messages after filtering
-     expect(weatherFilteredResult.length).toBeLessThan(baselineResult.length);
-
-     // No weather tool calls should remain
-     expect(filterToolCallsByName(weatherFilteredResult, 'get_weather').length).toBe(0);
-     expect(filterToolResultsByName(weatherFilteredResult, 'get_weather').length).toBe(0);
-
-     // Calculator tool calls should still be present
-     expect(filterToolCallsByName(weatherFilteredResult, 'calculator').length).toBeGreaterThan(0);
-
-     // Test token limiting
-     const tokenLimitQuery = await memory.query({
-       threadId,
-       selectBy: { last: 20 },
-     });
-     const list3 = new MessageList({ threadId }).add(tokenLimitQuery.messages, 'memory');
-     const tokenLimitedResult = await memory.processMessages({
-       messages: list3.get.all.core(),
-       processors: [new TokenLimiter(100)], // Small limit to only get a subset
-     });
-
-     // Should have fewer messages after token limiting
-     expect(tokenLimitedResult.length).toBeLessThan(baselineResult.length);
-
-     // Test combining processors
-     const combinedQuery = await memory.query({
-       threadId,
-       selectBy: { last: 20 },
-     });
-     const list4 = new MessageList({ threadId }).add(combinedQuery.messages, 'memory');
-     const combinedResult = await memory.processMessages({
-       messages: list4.get.all.core(),
-       processors: [new ToolCallFilter({ exclude: ['get_weather', 'calculator'] }), new TokenLimiter(500)],
-     });
-
-     // No tool calls should remain
-     expect(filterToolCallsByName(combinedResult, 'get_weather').length).toBe(0);
-     expect(filterToolCallsByName(combinedResult, 'calculator').length).toBe(0);
-     expect(filterToolResultsByName(combinedResult, 'get_weather').length).toBe(0);
-     expect(filterToolResultsByName(combinedResult, 'calculator').length).toBe(0);
-
-     // The result should still contain some messages
-     expect(combinedResult.length).toBeGreaterThan(0);
-   });
-
-   it('should chunk long text by character count', async () => {
-     // Create a thread
-     const thread = await memory.createThread({
-       title: 'Text Chunking Test Thread',
-       resourceId,
-     });
-
-     // Create a long text with known word boundaries
-     const words = [];
-     for (let i = 0; i < 1000; i++) {
-       words.push(`word${i}`);
-     }
-     const longText = words.join(' ');
-
-     // Save a message with the long text
-     await memory.saveMessages({
-       messages: [
-         {
-           id: 'chunking-test',
-           threadId: thread.id,
-           role: 'user',
-           content: {
-             format: 2,
-             parts: [{ type: 'text', text: longText }],
-           },
-           createdAt: new Date(),
-           resourceId,
-         },
-       ],
-     });
-
-     // Query the message back
-     const queryResult = await memory.query({
-       threadId: thread.id,
-       selectBy: { last: 1 },
-     });
-
-     // Retrieve the message (no TokenLimiter, just get the message back)
-     const result = await memory.processMessages({
-       messages: v2ToCoreMessages(queryResult.uiMessages),
-     });
-
-     // Should have retrieved the message
-     expect(result.length).toBe(1);
-
-     // Each chunk should respect word boundaries
-     for (const msg of result) {
-       // No words should be cut off
-       const content = typeof msg.content === 'string' ? msg.content : (msg.content[0] as { text: string }).text;
-       const words = content.split(/\s+/);
-       for (const word of words) {
-         expect(word).toMatch(/^word\d+$/); // Each word should be complete
-       }
-     }
-
-     // Chunks should maintain original order
-     let prevNum = -1;
-     for (const msg of result) {
-       const content = typeof msg.content === 'string' ? msg.content : (msg.content[0] as { text: string }).text;
-       const firstWord = content.split(/\s+/)[0];
-       const num = parseInt(firstWord.replace('word', ''));
-       expect(num).toBeGreaterThan(prevNum);
-       prevNum = num;
-     }
-   });
- });
-
- // Direct unit test for chunkText
-
- describe('Memory.chunkText', () => {
-   it('should split long text into chunks at word boundaries', () => {
-     const memory = new Memory({
-       storage,
-       vector,
-       embedder: fastembed,
-       options: {
-         semanticRecall: true,
-         lastMessages: 10,
-       },
-     });
-     const words = [];
-     for (let i = 0; i < 1000; i++) {
-       words.push(`word${i}`);
-     }
-     const longText = words.join(' ');
-     // Use a small token size to force chunking
-     const chunks = (memory as any).chunkText(longText, 50);
-     expect(chunks.length).toBeGreaterThan(1);
-     // Each chunk should respect word boundaries
-     for (const chunk of chunks) {
-       const chunkWords = chunk.split(/\s+/);
-       for (const word of chunkWords) {
-         if (word.length === 0) continue;
-         expect(word).toMatch(/^word\d+$/);
-       }
-     }
-     // Chunks should maintain original order
-     let prevNum = -1;
-     for (const chunk of chunks) {
-       const firstWord = chunk.split(/\s+/)[0];
-       if (!firstWord) continue; // skip empty
-       const num = parseInt(firstWord.replace('word', ''));
-       expect(num).toBeGreaterThan(prevNum);
-       prevNum = num;
-     }
-   });
- });
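
The removed processors.test.ts above is the package's main coverage of the @mastra/memory/processors entry point. For reference, here is a minimal standalone sketch of the pattern those tests exercised, using only APIs that appear verbatim in the removed code; names like example.db and recallTrimmed are illustrative, not from the package:

import { MessageList } from '@mastra/core/agent';
import { LibSQLStore } from '@mastra/libsql';
import { Memory } from '@mastra/memory';
import { TokenLimiter, ToolCallFilter } from '@mastra/memory/processors';

const memory = new Memory({
  storage: new LibSQLStore({ url: 'file:example.db' }), // illustrative path, not from the package
  options: { lastMessages: 10, semanticRecall: false },
});

// Recall a thread's history, then shape it before handing it to a model:
// drop every tool call/result (ToolCallFilter with no arguments excludes all),
// then cap what's left at roughly 500 tokens. Per the removed tests,
// processors only filter the recalled copy; the messages stored in the
// database are left untouched.
async function recallTrimmed(threadId: string, resourceId: string) {
  const { uiMessages } = await memory.query({ threadId, selectBy: { last: 20 } });
  return memory.processMessages({
    messages: new MessageList({ threadId, resourceId }).add(uiMessages, 'memory').get.all.core(),
    processors: [new ToolCallFilter(), new TokenLimiter(500)],
  });
}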