memories-lite 0.9.1 → 0.9.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -202,6 +202,9 @@ export class MemoriesLite {
  const newMessageEmbeddings: Record<string, number[]> = {};
  const retrievedOldMemory: Array<{ id: string; text: string; type: string }> = [];

+ //
+ // add the userId to the filters
+ filters.userId = userId;
  // Create embeddings and search for similar memories
  for (const elem of facts) {
  const fact = elem.fact;
@@ -250,7 +253,7 @@ export class MemoriesLite {
  console.log(`-- ⛔ LLM Error: ${action.event}, ${action.type}, "${action.text}"`);
  continue;
  }
- console.log(`-- DBG memory action: ${action.event}, ${action.type}, "${action.text}", why: "${action.reason}"`);
+ console.log(`-- DBG memory "${userId}": ${action.event}, ${action.type}, "${action.text}", why: "${action.reason}"`);
  try {
  switch (action.event) {
  case "ADD": {
@@ -276,7 +279,7 @@ export class MemoriesLite {
  }
  case "UPDATE": {
  const realMemoryId = tempUuidMapping[action.id];
- const type = uniqueOldMemories[action.id].type as MemoryType;
+ const type = metadata.type = uniqueOldMemories[action.id].type || action.type;
  await this.updateMemory(
  realMemoryId,
  action.text,
@@ -455,7 +458,7 @@ export class MemoriesLite {
  }

  const vectorStore = await this.getVectorStore(userId);
-
+ filters.userId = userId;

  // Search vector store
  const queryEmbedding = await this.embedder.embed(query);
@@ -597,6 +600,7 @@ export class MemoriesLite {
  if (agentId) filters.agentId = agentId;
  if (runId) filters.runId = runId;
  if (type) filters.type = type;
+ filters.userId = userId;
  const [memories] = await vectorStore.list(filters, limit);

  const excludedKeys = new Set([
@@ -640,6 +644,7 @@ export class MemoriesLite {
  ...metadata,
  data,
  hash: createHash("md5").update(data).digest("hex"),
+ userId,
  createdAt: new Date().toISOString(),
  };

@@ -676,6 +681,7 @@ export class MemoriesLite {
  ...metadata,
  data,
  hash: createHash("md5").update(data).digest("hex"),
+ type: existingMemory.payload.type,
  createdAt: existingMemory.payload.createdAt,
  updatedAt: new Date().toISOString(),
  ...(existingMemory.payload.agentId && {
@@ -153,15 +153,15 @@ You must strictly extract {Subject, Predicate, Object} triplets by following the
  - Extract triplets that describe facts *about the user* based on their statements, covering areas like preferences, beliefs, actions, experiences, learning, identity, work, or relationships (e.g., "I love working").
  - Apply explicit, precise, and unambiguous predicates (e.g., "owns", "is located at", "is a", "has function", "causes", etc.).
  - Determine the triplet type (e.g., "factual", "episodic", "procedural", "semantic") based on the content and meaning.
- - "episodic" for time-based events (e.g., "I went to the park yesterday").
+ - "episodic" If a fact depends on a temporal, situational, or immediate personal context, then that fact AND ALL OF ITS sub-facts MUST be classified as episodic.
  - "procedural" for business processes (e.g., "Looking for customer John Doe address", "How to create a new contract").
  - "factual" for stable user data (except procedural that prevails).

  - Eliminate introductions, sub-facts, detailed repetitive elements, stylistic fillers, or vague statements. General facts always takes precedence over multiple sub-facts (signal vs noise).
  - The query intention can include specific preferences about how the Assistant should respond (e.g., "answer concisely", "explain in detail").
- - Compress each fact and reason (less than 12 words).
+ - Compress each OUTPUT (fact and reason) with less than 10 words.
  - DO NOT infer personal facts from third-party informations.
- - Treat "Assistant Answer:" as external responses from the Assistant to enrich your reasoning process about the user.
+ - Treat "**ASSISTANT**:" as responses to enrich context of your reasoning process about the USER query.
  2. Use pronoun "I" instead of "The user" in the subject of the triplet.
  3. Do not output any facts already present in section # PRE-EXISTING FACTS.
  - If you find facts already present in section # PRE-EXISTING FACTS, use field "existing" to store them.
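To make the classification and compression rules above concrete, here is a hypothetical example of what one extracted triplet could look like under the 0.9.2 rules. The field names fact, type, and reason mirror identifiers used elsewhere in this diff (elem.fact, action.type, action.reason); the actual LLM output schema is not shown here, so treat this purely as an illustrative sketch.

  // Hypothetical illustration only - not part of the package source.
  const extractedFact = {
    fact: "I want sushi for my lunch break",  // subject uses "I", compressed under 10 words
    type: "episodic",                         // depends on an immediate, situational context
    reason: "Temporary lunch craving",        // reason also compressed under 10 words
  };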
@@ -5,6 +5,7 @@ import sqlite3 from 'sqlite3';
  import { VectorStore } from "./base";
  import { SearchFilters, VectorStoreConfig, VectorStoreResult, MemoryPayload, MemoryScoringConfig, MemoryType } from "../types";
  import { createHash } from 'crypto';
+ import { existsSync } from 'fs';


  // Define interface for database rows
@@ -25,6 +26,7 @@ interface MemoryVector {
  */
  export class LiteVectorStore implements VectorStore {
  private db: sqlite3.Database;
+ private dbPath: string;
  private isSecure: boolean;
  private dimension: number;
  private currentUserId: string;
@@ -39,20 +41,21 @@ export class LiteVectorStore implements VectorStore {
  this.currentUserId = currentUserId;
  this.isSecure = config.secure || false;
  this.scoringConfig = config.scoring;
- this.cleanupThreshold = config.recencyCleanupThreshold; // Store threshold
+ this.cleanupThreshold = config.recencyCleanupThreshold || 0.25; // (default 0.25 means 2 times the half-life )
  config.rootPath = config.rootPath || process.cwd();
  const filename = this.isSecure ? `memories-lite-${currentUserId}.db` : `memories-lite-global.db`;
- const dbPath = (config.rootPath == ':memory:') ? ':memory:' : path.join(config.rootPath, filename);
+ this.dbPath = (config.rootPath == ':memory:') ? ':memory:' : path.join(config.rootPath, filename);

  // Add error handling callback for the database connection
- this.db = new sqlite3.Database(dbPath);
+ this.db = new sqlite3.Database(this.dbPath);
  }


  private async init() {
- await this.run(`
- CREATE TABLE IF NOT EXISTS vectors (
- id TEXT PRIMARY KEY,
+ try{
+ await this.run(`
+ CREATE TABLE IF NOT EXISTS vectors (
+ id TEXT PRIMARY KEY,
  vector BLOB NOT NULL,
  payload TEXT NOT NULL
  )
@@ -64,6 +67,9 @@ export class LiteVectorStore implements VectorStore {
  user_id TEXT NOT NULL UNIQUE
  )
  `);
+ }catch(err){
+ console.log("-- DBG init error:",err);
+ }
  }

  private async run(sql: string, params: any[] = []): Promise<void> {
@@ -151,9 +157,12 @@ export class LiteVectorStore implements VectorStore {
  if (cachedStore) {
  Object.setPrototypeOf(cachedStore, LiteVectorStore.prototype);
  cachedStore.currentUserId = hashedUserId;
- // Ensure scoring config and threshold are updated if config object changed
- cachedStore.scoringConfig = config.scoring;
- cachedStore.cleanupThreshold = config.recencyCleanupThreshold;
+
+ //
+ // if the database file does not exist, we need to reinitialize the store
+ if (cachedStore.dbPath!==':memory:' && !existsSync(cachedStore.dbPath)) {
+ return new LiteVectorStore(config, hashedUserId);
+ }
  return cachedStore;
  }

@@ -176,13 +185,20 @@ export class LiteVectorStore implements VectorStore {
  `Vector dimension mismatch. Expected ${this.dimension}, got ${vectors[i].length}`,
  );
  }
+
+ const payload = {...payloads[i]};
+ //
+ // case of global store (insecure)
+ if(!payload.userId){
+ throw new Error("userId is required in payload");
+ }
  //
  // remove the userId from the payload as sensitive data
- this.isSecure && delete payloads[i].userId;
+ this.isSecure && delete payload.userId;
  const vectorBuffer = Buffer.from(new Float32Array(vectors[i]).buffer);
  await this.run(
  `INSERT OR REPLACE INTO vectors (id, vector, payload) VALUES (?, ?, ?)`,
- [ids[i], vectorBuffer, JSON.stringify(payloads[i])],
+ [ids[i], vectorBuffer, JSON.stringify(payload)],
  );
  }

@@ -199,6 +215,15 @@ export class LiteVectorStore implements VectorStore {
  );
  }

+
+
+ if(!filters || !filters.userId){
+ throw new Error("userId is mandatory in search");
+ }
+ filters = {...filters};
+ this.isSecure && delete filters.userId;
+
+
  const results: VectorStoreResult[] = [];
  const rows = await this.all(`SELECT * FROM vectors`);

@@ -290,18 +315,23 @@ export class LiteVectorStore implements VectorStore {
  const rows = await this.all(`SELECT * FROM vectors`);
  const results: VectorStoreResult[] = [];

+ //
+ // remove the userId from the payload as sensitive data
+ filters = {...filters};
+ this.isSecure && delete filters?.userId;
+
  for (const row of rows) {
  const memoryVector: MemoryVector = {
  id: row.id,
  vector: Array.from(new Float32Array(row.vector.buffer)),
- payload: {},
+ payload: JSON.parse(row.payload),
  };

  if (this.filterVector(memoryVector, filters)) {
  // load payload at the end
  results.push({
  id: memoryVector.id,
- payload: JSON.parse(row.payload),
+ payload:memoryVector.payload,
  });
  }
  }
@@ -356,7 +386,15 @@ export class LiteVectorStore implements VectorStore {
  return Math.max(0, hybridScore);
  }

- // Internal method to clean up vectors based on recency score threshold
+ /**
+ * Internal method to clean up vectors based on recency score threshold.
+ *
+ * @param threshold - The minimum recency score required for a memory to be retained.
+ * - Recency score is calculated using exponential decay: 1.0 means brand new, 0.5 means at half-life, 0.0 means fully decayed.
+ * - Memories with a recency score below this threshold will be deleted (unless their half-life is infinite or zero).
+ * - For example, a threshold of 0.25 will remove all memories whose recency score has decayed 2 times the half-life.
+ * - Use a lower threshold to keep more old memories, or a higher threshold to keep only fresher ones.
+ */
  private async _cleanupByRecency(threshold: number): Promise<number> {
  const rows = await this.all(`SELECT id, payload FROM vectors`);
  let deletedCount = 0;
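The relationship described in the new doc comment can be sketched as follows. This is a minimal illustration assuming the recency score decays as 0.5 raised to (age / half-life); the actual scoring implementation is not part of this hunk, so the function name and formula below are assumptions, not package source.

  // Hypothetical sketch of the decay semantics described above.
  // age = 0             -> recency 1.0 (brand new)
  // age = 1 half-life   -> recency 0.5
  // age = 2 half-lives  -> recency 0.25 -> deleted when cleanupThreshold = 0.25
  function recencyScore(ageMs: number, halfLifeMs: number): number {
    if (!isFinite(halfLifeMs) || halfLifeMs === 0) return 1; // treated as never expiring
    return Math.pow(0.5, ageMs / halfLifeMs);
  }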
@@ -0,0 +1,40 @@
+ /// <reference types="jest" />
+ import { MemoriesLite } from "../src";
+ import dotenv from "dotenv";
+
+ dotenv.config();
+
+ /**
+ * Helper to initialize MemoriesLite instance and generate a random userId.
+ * @param customPrompt Optional prompt to inject into the memory config.
+ */
+ export function createTestMemory({customPrompt, dimension, rootPath, secure}:any) {
+ dimension = dimension || 768;
+ const userId =
+ Math.random().toString(36).substring(2, 15) +
+ Math.random().toString(36).substring(2, 15);
+
+ const memory = new MemoriesLite({
+ version: "v1.1",
+ disableHistory: true,
+ ...(customPrompt ? { customPrompt } : {}),
+ embedder: {
+ provider: "openai",
+ config: { dimension, apiKey: process.env.OPENAI_API_KEY!, model: "text-embedding-3-small" }
+ },
+ vectorStore: {
+ provider: "lite",
+ config: {
+ dimension,
+ rootPath: (rootPath || ":memory:"),
+ secure: secure || false }
+ },
+ llm: {
+ provider: "openai",
+ config: { apiKey: process.env.OPENAI_API_KEY || "", model: "gpt-4.1-mini" }
+ },
+ historyDbPath: ":memory:"
+ });
+
+ return { memory, userId };
+ }
@@ -2,6 +2,7 @@
  import { MemoriesLite } from "../src";
  import { MemoryItem, SearchResult } from "../src/types";
  import dotenv from "dotenv";
+ import { createTestMemory } from "./init.mem";

  dotenv.config();

@@ -9,42 +10,11 @@ jest.setTimeout(30000); // Increase timeout to 30 seconds

  describe("Memory Class facts regression tests", () => {
  let memory: MemoriesLite;
- const userId =
- Math.random().toString(36).substring(2, 15) +
- Math.random().toString(36).substring(2, 15);
-
- const dimension = 768;
+ let userId: string;

  beforeEach(async () => {
- // Initialize with default configuration
- memory = new MemoriesLite({
- version: "v1.1",
- disableHistory: true,
- customPrompt: "L'utilisateur travail pour une régie immobilière!",
- embedder: {
- provider: "openai",
- config: {
- dimension,
- apiKey: process.env.OPENAI_API_KEY || "",
- model: "text-embedding-3-small",
- },
- },
- vectorStore: {
- provider: "lite",
- config: {
- dimension,
- rootPath: ":memory:",
- },
- },
- llm: {
- provider: "openai",
- config: {
- apiKey: process.env.OPENAI_API_KEY || "",
- model: "gpt-4.1-mini",
- },
- },
- historyDbPath: ":memory:", // Use in-memory SQLite for tests
- });
+ // Initialize memory via helper
+ ({ memory, userId } = createTestMemory({customPrompt:"L'utilisateur travail pour une régie immobilière!"}));
  // Reset all memories before each test
  await memory.reset(userId);
  });
@@ -56,48 +26,12 @@ describe("Memory Class facts regression tests", () => {

  describe("Edge cases for Facts", () => {

- it("should not extract personal information as facts from business queries", async () => {
- // type?: "factual" | "episodic" | "semantic"|"procedural" | "assistant_preference";
- // Capture a query that contains a name but is asking for contact information
- const result = (await memory.capture(
- "je cherche le téléphone de mon client Alphonse MAGLOIRE",
- userId,
- {},
- )) as SearchResult;
-
- // Verify no memory was created (business query)
- expect(result).toBeDefined();
- expect(result.results).toBeDefined();
- expect(Array.isArray(result.results)).toBe(true);
- expect(result.results.length).toBe(1);
- const type = result.results[0]?.type;
- expect(["procedural","episodic"].includes(type)).toBe(true);
- // Now search for memories that might contain "Alphonse MAGLOIRE"
- // const searchResult = (await memory.retrieve(
- // "Qui est Alphonse MAGLOIRE?",
- // userId,
- // {},
- // )) as SearchResult;
-
- // // Verify no personal fact like "Je m'appelle Alphonse MAGLOIRE" was created
- // expect(searchResult).toBeDefined();
- // expect(searchResult.results).toBeDefined();
- // expect(Array.isArray(searchResult.results)).toBe(true);
- // expect(searchResult.results.length).toBe(0);
-
- // // Ensure no memory contains the name as a personal fact
- // const allMemories = await memory.getAll(userId, {});
- // const personalFacts = allMemories.results.filter(mem =>
- // mem.memory.toLowerCase().includes("Alphonse MAGLOIRE")
- // );
- // expect(personalFacts.length).toBe(0);
- });

- it("should add a single procedural memory", async () => {
+ it("should not add memory: Qui suis-je ?", async () => {
  const customFacts = "Je suis Olivier Poulain\nIT et je travaille chez Immeuble SA";
  const result = (await memory.capture([
  {role:"user", content:"Qui suis-je ?"},
- {role:"assistant", content:"Vous êtes Olivier Poulain, Chef de Projets au département IT & Gestion de projet, dans l'équipe IT de Immeuble SA. Ces informations proviennent de votre profil collaborateur enregistré dans le système interne de Immeuble SA"}],
+ {role:"assistant", content:"Vous êtes Olivier Poulain, Chef de Projets au département IT & Gestion de projet, dans l'équipe IT de Immeuble SA"}],
  userId,
  {customFacts},
  )) as SearchResult;
@@ -106,18 +40,20 @@ describe("Memory Class facts regression tests", () => {
  expect(result.results.length).toBe(0);
  // expect(result.results[0]?.type).toBe("factual");
  });
- it("episodic memory: Je veux manger des sushis pour ma pause de midi.", async () => {
+ it("episodic: Je veux manger des sushis pour ma pause de midi.", async () => {
  const customFacts = "Je suis Olivier Poulain\nIT et je travaille chez Immeuble SA";
  const result = (await memory.capture([
- {role:"user", content:"Je veux manger des sushis pour ma pause de midi."}],
+ {role:"user", content:"J'ai faim, je veux manger des sushis pour ma pause de midi."},
+ {role:"user", content:"Cherche un restaurant de sushis près de chez moi."}],
  userId,
  {customFacts},
  )) as SearchResult;

  expect(result).toBeDefined();
  expect(result.results).toBeDefined();
- expect(result.results.length).toBeGreaterThan(0);
+ expect(result.results.length).toBeGreaterThan(1);
  expect(result.results[0]?.type).toBe("episodic");
+ expect(result.results[1]?.type).toBe("episodic");
  });


@@ -136,6 +72,24 @@ describe("Memory Class facts regression tests", () => {
  expect(result.results[0]?.type).toBe("assistant_preference");
  });

+ it("business:je cherche le téléphone de mon client Alphonse MAGLOIRE", async () => {
+ // type?: "factual" | "episodic" | "semantic"|"procedural" | "assistant_preference";
+ // Capture a query that contains a name but is asking for contact information
+ const result = (await memory.capture(
+ "je cherche le téléphone de mon client Alphonse MAGLOIRE",
+ userId,
+ {},
+ )) as SearchResult;
+
+ // Verify no memory was created (business query)
+ expect(result).toBeDefined();
+ expect(result.results).toBeDefined();
+ expect(Array.isArray(result.results)).toBe(true);
+ expect(result.results.length).toBe(1);
+ const type = result.results[0]?.type;
+ expect(["procedural","episodic"].includes(type)).toBe(true);
+ });
+
  it("business:Le logement de Alphonse MAGLOIRE au 5ème étage est de combien pièces", async () => {
  const result = (await memory.capture([
  {role:"user", content:"Le logement de Alphonse MAGLOIRE au 5ème étage est de combien pièces.",},
@@ -2,6 +2,7 @@
  import { MemoriesLite } from "../src";
  import { MemoryItem, SearchResult } from "../src/types";
  import dotenv from "dotenv";
+ import { createTestMemory } from "./init.mem";

  dotenv.config();

@@ -9,41 +10,11 @@ jest.setTimeout(30000); // Increase timeout to 30 seconds

  describe("Memory Class", () => {
  let memory: MemoriesLite;
- const userId =
- Math.random().toString(36).substring(2, 15) +
- Math.random().toString(36).substring(2, 15);
-
- const dimension = 768;
+ let userId: string;

  beforeEach(async () => {
- // Initialize with default configuration
- memory = new MemoriesLite({
- version: "v1.1",
- disableHistory: true,
- embedder: {
- provider: "openai",
- config: {
- dimension,
- apiKey: process.env.OPENAI_API_KEY || "",
- model: "text-embedding-3-small",
- },
- },
- vectorStore: {
- provider: "lite",
- config: {
- dimension,
- rootPath: ":memory:",
- },
- },
- llm: {
- provider: "openai",
- config: {
- apiKey: process.env.OPENAI_API_KEY || "",
- model: "gpt-4.1-mini",
- },
- },
- historyDbPath: ":memory:", // Use in-memory SQLite for tests
- });
+ // Initialize memory via helper
+ ({ memory, userId } = createTestMemory({}));
  // Reset all memories before each test
  await memory.reset(userId);
  });
@@ -101,7 +72,7 @@ describe("Memory Class", () => {
  expect(typeof result.memory).toBe("string");
  });

- it("should update a memory", async () => {
+ it("should directly update a memory content", async () => {
  // First add a memory
  const addResult = (await memory.capture(
  "I love speaking foreign languages especially Spanish",
@@ -127,23 +98,19 @@ describe("Memory Class", () => {
  it("should update a memory", async () => {
  // First add a memory
  const init = await memory.capture("I love to drink red wine", userId, {});
- // expect(init.results?.[0]?.id).toBeDefined();
- const initId = init.results?.[0]?.id;
+ expect(init.results?.[0]?.id).toBeDefined();
  const addResult = (await memory.capture("I love to drink red wine with friends", userId, {})) as SearchResult;
  expect(addResult.results?.[0]?.id).toBeDefined();

- const memoryId = addResult.results[0].id;
- expect(memoryId).toBe(initId);
- // Delete the memory
- await memory.delete(memoryId, userId);
+ const updated = (await memory.get(addResult.results[0].id, userId))!;

- // Try to get the deleted memory - should throw or return null
- const result = await memory.get(memoryId, userId);
- expect(result).toBeNull();
+ expect(updated).toBeDefined();
+ expect(updated.id).toBe(init.results[0].id);
+ expect(updated.type).toBe(init.results[0].type);
  });


- it("should get all memories for distinct users", async () => {
+ it.only("should get all memories for distinct users", async () => {
  // Add a few memories
  await memory.capture("I love visiting new places in the winters", userId, {});
  await memory.capture("I like to rule the world", userId, {});
@@ -350,42 +317,17 @@ describe("Memory Class", () => {

  describe("Memory with Custom Configuration", () => {
  let customMemory: MemoriesLite;
-
- beforeEach(() => {
- customMemory = new MemoriesLite({
- version: "v1.1",
- disableHistory: true,
- embedder: {
- provider: "openai",
- config: {
- dimension,
- apiKey: process.env.OPENAI_API_KEY || "",
- model: "text-embedding-3-small",
- },
- },
- vectorStore: {
- provider: "lite",
- config: {
- dimension,
- rootPath: ":memory:",
- },
- },
- llm: {
- provider: "openai",
- config: {
- apiKey: process.env.OPENAI_API_KEY || "",
- model: "gpt-4.1-mini",
- },
- },
- historyDbPath: ":memory:", // Use in-memory SQLite for tests
- });
+ beforeEach(async () => {
+ // Initialize customMemory via helper
+ ({ memory: customMemory, userId } = createTestMemory({}));
+ // Reset before custom tests
+ await customMemory.reset(userId);
  });

  afterEach(async () => {
  await customMemory.reset(userId);
  });

-
  it("should perform semantic search with custom embeddings", async () => {
  // Add test memories
  await customMemory.capture("The weather in London is rainy today", userId, {});