memories-lite 0.9.1 → 0.9.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -153,15 +153,15 @@ You must strictly extract {Subject, Predicate, Object} triplets by following the
153
153
  - Extract triplets that describe facts *about the user* based on their statements, covering areas like preferences, beliefs, actions, experiences, learning, identity, work, or relationships (e.g., "I love working").
154
154
  - Apply explicit, precise, and unambiguous predicates (e.g., "owns", "is located at", "is a", "has function", "causes", etc.).
155
155
  - Determine the triplet type (e.g., "factual", "episodic", "procedural", "semantic") based on the content and meaning.
156
- - "episodic" for time-based events (e.g., "I went to the park yesterday").
156
+ - "episodic" If a fact depends on a temporal, situational, or immediate personal context, then that fact AND ALL OF ITS sub-facts MUST be classified as episodic.
157
157
  - "procedural" for business processes (e.g., "Looking for customer John Doe address", "How to create a new contract").
158
158
  - "factual" for stable user data (except procedural that prevails).
159
159
 
160
160
  - Eliminate introductions, sub-facts, detailed repetitive elements, stylistic fillers, or vague statements. General facts always take precedence over multiple sub-facts (signal vs noise).
161
161
  - The query intention can include specific preferences about how the Assistant should respond (e.g., "answer concisely", "explain in detail").
162
- - Compress each fact and reason (less than 12 words).
162
+ - Compress each OUTPUT (fact and reason) to fewer than 10 words.
163
163
  - DO NOT infer personal facts from third-party information.
164
- - Treat "Assistant Answer:" as external responses from the Assistant to enrich your reasoning process about the user.
164
+ - Treat "**ASSISTANT**:" as responses to enrich context of your reasoning process about the USER query.
165
165
  2. Use pronoun "I" instead of "The user" in the subject of the triplet.
166
166
  3. Do not output any facts already present in section # PRE-EXISTING FACTS.
167
167
  - If you find facts already present in section # PRE-EXISTING FACTS, use field "existing" to store them.
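As an illustration of the extraction rules above, here is a minimal TypeScript sketch of the triplet shape they imply; the field names (subject, predicate, object, type, reason, existing) are assumptions for illustration and are not taken from the package source.

// Hypothetical triplet shape implied by the extraction rules above (field names are assumptions).
type TripletType = "factual" | "episodic" | "procedural" | "semantic";

interface ExtractedTriplet {
  subject: string;      // rule 2: use "I" instead of "The user"
  predicate: string;    // explicit and unambiguous, e.g. "owns", "is located at"
  object: string;
  type: TripletType;    // episodic when the fact depends on temporal or situational context
  reason: string;       // compressed to fewer than 10 words
  existing?: boolean;   // set when the fact already appears in # PRE-EXISTING FACTS
}

// Example: "Je veux manger des sushis pour ma pause de midi" is time-bound, hence episodic.
const example: ExtractedTriplet = {
  subject: "I",
  predicate: "wants to eat",
  object: "sushi for lunch break",
  type: "episodic",
  reason: "time-bound personal craving",
};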
@@ -5,6 +5,7 @@ import sqlite3 from 'sqlite3';
5
5
  import { VectorStore } from "./base";
6
6
  import { SearchFilters, VectorStoreConfig, VectorStoreResult, MemoryPayload, MemoryScoringConfig, MemoryType } from "../types";
7
7
  import { createHash } from 'crypto';
8
+ import { existsSync } from 'fs';
8
9
 
9
10
 
10
11
  // Define interface for database rows
@@ -25,6 +26,7 @@ interface MemoryVector {
25
26
  */
26
27
  export class LiteVectorStore implements VectorStore {
27
28
  private db: sqlite3.Database;
29
+ private dbPath: string;
28
30
  private isSecure: boolean;
29
31
  private dimension: number;
30
32
  private currentUserId: string;
@@ -39,20 +41,21 @@ export class LiteVectorStore implements VectorStore {
39
41
  this.currentUserId = currentUserId;
40
42
  this.isSecure = config.secure || false;
41
43
  this.scoringConfig = config.scoring;
42
- this.cleanupThreshold = config.recencyCleanupThreshold; // Store threshold
44
+ this.cleanupThreshold = config.recencyCleanupThreshold || 0.25; // default 0.25 = recency score after two half-lives
43
45
  config.rootPath = config.rootPath || process.cwd();
44
46
  const filename = this.isSecure ? `memories-lite-${currentUserId}.db` : `memories-lite-global.db`;
45
- const dbPath = (config.rootPath == ':memory:') ? ':memory:' : path.join(config.rootPath, filename);
47
+ this.dbPath = (config.rootPath == ':memory:') ? ':memory:' : path.join(config.rootPath, filename);
46
48
 
47
49
  // Add error handling callback for the database connection
48
- this.db = new sqlite3.Database(dbPath);
50
+ this.db = new sqlite3.Database(this.dbPath);
49
51
  }
50
52
 
51
53
 
52
54
  private async init() {
53
- await this.run(`
54
- CREATE TABLE IF NOT EXISTS vectors (
55
- id TEXT PRIMARY KEY,
55
+ try{
56
+ await this.run(`
57
+ CREATE TABLE IF NOT EXISTS vectors (
58
+ id TEXT PRIMARY KEY,
56
59
  vector BLOB NOT NULL,
57
60
  payload TEXT NOT NULL
58
61
  )
@@ -64,6 +67,9 @@ export class LiteVectorStore implements VectorStore {
64
67
  user_id TEXT NOT NULL UNIQUE
65
68
  )
66
69
  `);
70
+ }catch(err){
71
+ console.log("-- DBG init error:",err);
72
+ }
67
73
  }
68
74
 
69
75
  private async run(sql: string, params: any[] = []): Promise<void> {
@@ -151,9 +157,12 @@ export class LiteVectorStore implements VectorStore {
151
157
  if (cachedStore) {
152
158
  Object.setPrototypeOf(cachedStore, LiteVectorStore.prototype);
153
159
  cachedStore.currentUserId = hashedUserId;
154
- // Ensure scoring config and threshold are updated if config object changed
155
- cachedStore.scoringConfig = config.scoring;
156
- cachedStore.cleanupThreshold = config.recencyCleanupThreshold;
160
+
161
+ //
162
+ // if the database file does not exist, we need to reinitialize the store
163
+ if (cachedStore.dbPath!==':memory:' && !existsSync(cachedStore.dbPath)) {
164
+ return new LiteVectorStore(config, hashedUserId);
165
+ }
157
166
  return cachedStore;
158
167
  }
159
168
 
@@ -176,13 +185,20 @@ export class LiteVectorStore implements VectorStore {
176
185
  `Vector dimension mismatch. Expected ${this.dimension}, got ${vectors[i].length}`,
177
186
  );
178
187
  }
188
+
189
+ const payload = {...payloads[i]};
190
+ //
191
+ // userId must always be provided; for the global (insecure) store it is kept in the payload
192
+ if(!payload.userId){
193
+ throw new Error("userId is required in payload");
194
+ }
179
195
  //
180
196
  // remove the userId from the payload as sensitive data
181
- this.isSecure && delete payloads[i].userId;
197
+ this.isSecure && delete payload.userId;
182
198
  const vectorBuffer = Buffer.from(new Float32Array(vectors[i]).buffer);
183
199
  await this.run(
184
200
  `INSERT OR REPLACE INTO vectors (id, vector, payload) VALUES (?, ?, ?)`,
185
- [ids[i], vectorBuffer, JSON.stringify(payloads[i])],
201
+ [ids[i], vectorBuffer, JSON.stringify(payload)],
186
202
  );
187
203
  }
188
204
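To make the intent of the insert() change above concrete, here is a small sketch under the same assumptions (userId is always required; in secure mode each user has their own database file, so userId is stripped before persisting); the helper name preparePayload is hypothetical.

// Hypothetical helper mirroring the insert() logic shown above.
function preparePayload(
  payload: { userId?: string; [key: string]: unknown },
  isSecure: boolean,
): Record<string, unknown> {
  if (!payload.userId) {
    // both secure and global stores require the caller to pass a userId
    throw new Error("userId is required in payload");
  }
  const copy = { ...payload };  // copy so the caller's object is never mutated
  if (isSecure) {
    delete copy.userId;         // the per-user DB file already scopes the data, so it is not persisted
  }
  return copy;
}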
 
@@ -199,6 +215,15 @@ export class LiteVectorStore implements VectorStore {
199
215
  );
200
216
  }
201
217
 
218
+
219
+
220
+ if(!filters || !filters.userId){
221
+ throw new Error("userId is mandatory in search");
222
+ }
223
+ filters = {...filters};
224
+ this.isSecure && delete filters.userId;
225
+
226
+
202
227
  const results: VectorStoreResult[] = [];
203
228
  const rows = await this.all(`SELECT * FROM vectors`);
204
229
 
@@ -290,18 +315,23 @@ export class LiteVectorStore implements VectorStore {
290
315
  const rows = await this.all(`SELECT * FROM vectors`);
291
316
  const results: VectorStoreResult[] = [];
292
317
 
318
+ //
319
+ // in secure mode the userId is not stored in payloads, so drop it from the filters too
320
+ filters = {...filters};
321
+ this.isSecure && delete filters?.userId;
322
+
293
323
  for (const row of rows) {
294
324
  const memoryVector: MemoryVector = {
295
325
  id: row.id,
296
326
  vector: Array.from(new Float32Array(row.vector.buffer)),
297
- payload: {},
327
+ payload: JSON.parse(row.payload),
298
328
  };
299
329
 
300
330
  if (this.filterVector(memoryVector, filters)) {
301
331
  // load payload at the end
302
332
  results.push({
303
333
  id: memoryVector.id,
304
- payload: JSON.parse(row.payload),
334
+ payload: memoryVector.payload,
305
335
  });
306
336
  }
307
337
  }
@@ -356,7 +386,15 @@ export class LiteVectorStore implements VectorStore {
356
386
  return Math.max(0, hybridScore);
357
387
  }
358
388
 
359
- // Internal method to clean up vectors based on recency score threshold
389
+ /**
390
+ * Internal method to clean up vectors based on recency score threshold.
391
+ *
392
+ * @param threshold - The minimum recency score required for a memory to be retained.
393
+ * - Recency score is calculated using exponential decay: 1.0 means brand new, 0.5 means at half-life, 0.0 means fully decayed.
394
+ * - Memories with a recency score below this threshold will be deleted (unless their half-life is infinite or zero).
395
+ * - For example, a threshold of 0.25 removes all memories older than two half-lives (recency score below 0.25).
396
+ * - Use a lower threshold to keep more old memories, or a higher threshold to keep only fresher ones.
397
+ */
360
398
  private async _cleanupByRecency(threshold: number): Promise<number> {
361
399
  const rows = await this.all(`SELECT id, payload FROM vectors`);
362
400
  let deletedCount = 0;
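The decay relationship described in the doc comment above can be summarised with a short sketch; the function name and the half-life value are illustrative and not the package's actual scoring code.

// Recency under exponential decay: score = 0.5 ^ (age / halfLife).
// age = 0              -> 1.0 (brand new)
// age = halfLife       -> 0.5
// age = 2 * halfLife   -> 0.25, so the default threshold keeps memories up to two half-lives old.
function recencyScore(ageMs: number, halfLifeMs: number): number {
  if (!Number.isFinite(halfLifeMs) || halfLifeMs <= 0) {
    return 1; // infinite or zero half-life: treated as never decaying, so never cleaned up
  }
  return Math.pow(0.5, ageMs / halfLifeMs);
}

const halfLife = 7 * 24 * 3600 * 1000;  // assumed one-week half-life, for illustration only
recencyScore(2 * halfLife, halfLife);   // 0.25, right at the default cleanup threshold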
@@ -0,0 +1,40 @@
1
+ /// <reference types="jest" />
2
+ import { MemoriesLite } from "../src";
3
+ import dotenv from "dotenv";
4
+
5
+ dotenv.config();
6
+
7
+ /**
8
+ * Helper to initialize a MemoriesLite instance and generate a random userId.
9
+ * @param customPrompt Optional prompt to inject into the memory config.
10
+ */
11
+ export function createTestMemory({customPrompt, dimension, rootPath, secure}:any) {
12
+ dimension = dimension || 768;
13
+ const userId =
14
+ Math.random().toString(36).substring(2, 15) +
15
+ Math.random().toString(36).substring(2, 15);
16
+
17
+ const memory = new MemoriesLite({
18
+ version: "v1.1",
19
+ disableHistory: true,
20
+ ...(customPrompt ? { customPrompt } : {}),
21
+ embedder: {
22
+ provider: "openai",
23
+ config: { dimension, apiKey: process.env.OPENAI_API_KEY!, model: "text-embedding-3-small" }
24
+ },
25
+ vectorStore: {
26
+ provider: "lite",
27
+ config: {
28
+ dimension,
29
+ rootPath: (rootPath || ":memory:"),
30
+ secure: secure || false }
31
+ },
32
+ llm: {
33
+ provider: "openai",
34
+ config: { apiKey: process.env.OPENAI_API_KEY || "", model: "gpt-4.1-mini" }
35
+ },
36
+ historyDbPath: ":memory:"
37
+ });
38
+
39
+ return { memory, userId };
40
+ }
@@ -2,6 +2,7 @@
2
2
  import { MemoriesLite } from "../src";
3
3
  import { MemoryItem, SearchResult } from "../src/types";
4
4
  import dotenv from "dotenv";
5
+ import { createTestMemory } from "./init.mem";
5
6
 
6
7
  dotenv.config();
7
8
 
@@ -9,42 +10,11 @@ jest.setTimeout(30000); // Increase timeout to 30 seconds
9
10
 
10
11
  describe("Memory Class facts regression tests", () => {
11
12
  let memory: MemoriesLite;
12
- const userId =
13
- Math.random().toString(36).substring(2, 15) +
14
- Math.random().toString(36).substring(2, 15);
15
-
16
- const dimension = 768;
13
+ let userId: string;
17
14
 
18
15
  beforeEach(async () => {
19
- // Initialize with default configuration
20
- memory = new MemoriesLite({
21
- version: "v1.1",
22
- disableHistory: true,
23
- customPrompt: "L'utilisateur travail pour une régie immobilière!",
24
- embedder: {
25
- provider: "openai",
26
- config: {
27
- dimension,
28
- apiKey: process.env.OPENAI_API_KEY || "",
29
- model: "text-embedding-3-small",
30
- },
31
- },
32
- vectorStore: {
33
- provider: "lite",
34
- config: {
35
- dimension,
36
- rootPath: ":memory:",
37
- },
38
- },
39
- llm: {
40
- provider: "openai",
41
- config: {
42
- apiKey: process.env.OPENAI_API_KEY || "",
43
- model: "gpt-4.1-mini",
44
- },
45
- },
46
- historyDbPath: ":memory:", // Use in-memory SQLite for tests
47
- });
16
+ // Initialize memory via helper
17
+ ({ memory, userId } = createTestMemory({customPrompt:"L'utilisateur travail pour une régie immobilière!"}));
48
18
  // Reset all memories before each test
49
19
  await memory.reset(userId);
50
20
  });
@@ -56,48 +26,12 @@ describe("Memory Class facts regression tests", () => {
56
26
 
57
27
  describe("Edge cases for Facts", () => {
58
28
 
59
- it("should not extract personal information as facts from business queries", async () => {
60
- // type?: "factual" | "episodic" | "semantic"|"procedural" | "assistant_preference";
61
- // Capture a query that contains a name but is asking for contact information
62
- const result = (await memory.capture(
63
- "je cherche le téléphone de mon client Alphonse MAGLOIRE",
64
- userId,
65
- {},
66
- )) as SearchResult;
67
-
68
- // Verify no memory was created (business query)
69
- expect(result).toBeDefined();
70
- expect(result.results).toBeDefined();
71
- expect(Array.isArray(result.results)).toBe(true);
72
- expect(result.results.length).toBe(1);
73
- const type = result.results[0]?.type;
74
- expect(["procedural","episodic"].includes(type)).toBe(true);
75
- // Now search for memories that might contain "Alphonse MAGLOIRE"
76
- // const searchResult = (await memory.retrieve(
77
- // "Qui est Alphonse MAGLOIRE?",
78
- // userId,
79
- // {},
80
- // )) as SearchResult;
81
-
82
- // // Verify no personal fact like "Je m'appelle Alphonse MAGLOIRE" was created
83
- // expect(searchResult).toBeDefined();
84
- // expect(searchResult.results).toBeDefined();
85
- // expect(Array.isArray(searchResult.results)).toBe(true);
86
- // expect(searchResult.results.length).toBe(0);
87
-
88
- // // Ensure no memory contains the name as a personal fact
89
- // const allMemories = await memory.getAll(userId, {});
90
- // const personalFacts = allMemories.results.filter(mem =>
91
- // mem.memory.toLowerCase().includes("Alphonse MAGLOIRE")
92
- // );
93
- // expect(personalFacts.length).toBe(0);
94
- });
95
29
 
96
- it("should add a single procedural memory", async () => {
30
+ it("should not add memory: Qui suis-je ?", async () => {
97
31
  const customFacts = "Je suis Olivier Poulain\nIT et je travaille chez Immeuble SA";
98
32
  const result = (await memory.capture([
99
33
  {role:"user", content:"Qui suis-je ?"},
100
- {role:"assistant", content:"Vous êtes Olivier Poulain, Chef de Projets au département IT & Gestion de projet, dans l'équipe IT de Immeuble SA. Ces informations proviennent de votre profil collaborateur enregistré dans le système interne de Immeuble SA"}],
34
+ {role:"assistant", content:"Vous êtes Olivier Poulain, Chef de Projets au département IT & Gestion de projet, dans l'équipe IT de Immeuble SA"}],
101
35
  userId,
102
36
  {customFacts},
103
37
  )) as SearchResult;
@@ -106,18 +40,20 @@ describe("Memory Class facts regression tests", () => {
106
40
  expect(result.results.length).toBe(0);
107
41
  // expect(result.results[0]?.type).toBe("factual");
108
42
  });
109
- it("episodic memory: Je veux manger des sushis pour ma pause de midi.", async () => {
43
+ it("episodic: Je veux manger des sushis pour ma pause de midi.", async () => {
110
44
  const customFacts = "Je suis Olivier Poulain\nIT et je travaille chez Immeuble SA";
111
45
  const result = (await memory.capture([
112
- {role:"user", content:"Je veux manger des sushis pour ma pause de midi."}],
46
+ {role:"user", content:"J'ai faim, je veux manger des sushis pour ma pause de midi."},
47
+ {role:"user", content:"Cherche un restaurant de sushis près de chez moi."}],
113
48
  userId,
114
49
  {customFacts},
115
50
  )) as SearchResult;
116
51
 
117
52
  expect(result).toBeDefined();
118
53
  expect(result.results).toBeDefined();
119
- expect(result.results.length).toBeGreaterThan(0);
54
+ expect(result.results.length).toBeGreaterThan(1);
120
55
  expect(result.results[0]?.type).toBe("episodic");
56
+ expect(result.results[1]?.type).toBe("episodic");
121
57
  });
122
58
 
123
59
 
@@ -136,6 +72,24 @@ describe("Memory Class facts regression tests", () => {
136
72
  expect(result.results[0]?.type).toBe("assistant_preference");
137
73
  });
138
74
 
75
+ it("business:je cherche le téléphone de mon client Alphonse MAGLOIRE", async () => {
76
+ // type?: "factual" | "episodic" | "semantic"|"procedural" | "assistant_preference";
77
+ // Capture a query that contains a name but is asking for contact information
78
+ const result = (await memory.capture(
79
+ "je cherche le téléphone de mon client Alphonse MAGLOIRE",
80
+ userId,
81
+ {},
82
+ )) as SearchResult;
83
+
84
+ // Verify no memory was created (business query)
85
+ expect(result).toBeDefined();
86
+ expect(result.results).toBeDefined();
87
+ expect(Array.isArray(result.results)).toBe(true);
88
+ expect(result.results.length).toBe(1);
89
+ const type = result.results[0]?.type;
90
+ expect(["procedural","episodic"].includes(type)).toBe(true);
91
+ });
92
+
139
93
  it("business:Le logement de Alphonse MAGLOIRE au 5ème étage est de combien pièces", async () => {
140
94
  const result = (await memory.capture([
141
95
  {role:"user", content:"Le logement de Alphonse MAGLOIRE au 5ème étage est de combien pièces.",},
@@ -2,6 +2,7 @@
2
2
  import { MemoriesLite } from "../src";
3
3
  import { MemoryItem, SearchResult } from "../src/types";
4
4
  import dotenv from "dotenv";
5
+ import { createTestMemory } from "./init.mem";
5
6
 
6
7
  dotenv.config();
7
8
 
@@ -9,41 +10,11 @@ jest.setTimeout(30000); // Increase timeout to 30 seconds
9
10
 
10
11
  describe("Memory Class", () => {
11
12
  let memory: MemoriesLite;
12
- const userId =
13
- Math.random().toString(36).substring(2, 15) +
14
- Math.random().toString(36).substring(2, 15);
15
-
16
- const dimension = 768;
13
+ let userId: string;
17
14
 
18
15
  beforeEach(async () => {
19
- // Initialize with default configuration
20
- memory = new MemoriesLite({
21
- version: "v1.1",
22
- disableHistory: true,
23
- embedder: {
24
- provider: "openai",
25
- config: {
26
- dimension,
27
- apiKey: process.env.OPENAI_API_KEY || "",
28
- model: "text-embedding-3-small",
29
- },
30
- },
31
- vectorStore: {
32
- provider: "lite",
33
- config: {
34
- dimension,
35
- rootPath: ":memory:",
36
- },
37
- },
38
- llm: {
39
- provider: "openai",
40
- config: {
41
- apiKey: process.env.OPENAI_API_KEY || "",
42
- model: "gpt-4.1-mini",
43
- },
44
- },
45
- historyDbPath: ":memory:", // Use in-memory SQLite for tests
46
- });
16
+ // Initialize memory via helper
17
+ ({ memory, userId } = createTestMemory({}));
47
18
  // Reset all memories before each test
48
19
  await memory.reset(userId);
49
20
  });
@@ -101,7 +72,7 @@ describe("Memory Class", () => {
101
72
  expect(typeof result.memory).toBe("string");
102
73
  });
103
74
 
104
- it("should update a memory", async () => {
75
+ it("should directly update a memory content", async () => {
105
76
  // First add a memory
106
77
  const addResult = (await memory.capture(
107
78
  "I love speaking foreign languages especially Spanish",
@@ -127,23 +98,19 @@ describe("Memory Class", () => {
127
98
  it("should update a memory", async () => {
128
99
  // First add a memory
129
100
  const init = await memory.capture("I love to drink red wine", userId, {});
130
- // expect(init.results?.[0]?.id).toBeDefined();
131
- const initId = init.results?.[0]?.id;
101
+ expect(init.results?.[0]?.id).toBeDefined();
132
102
  const addResult = (await memory.capture("I love to drink red wine with friends", userId, {})) as SearchResult;
133
103
  expect(addResult.results?.[0]?.id).toBeDefined();
134
104
 
135
- const memoryId = addResult.results[0].id;
136
- expect(memoryId).toBe(initId);
137
- // Delete the memory
138
- await memory.delete(memoryId, userId);
105
+ const updated = (await memory.get(addResult.results[0].id, userId))!;
139
106
 
140
- // Try to get the deleted memory - should throw or return null
141
- const result = await memory.get(memoryId, userId);
142
- expect(result).toBeNull();
107
+ expect(updated).toBeDefined();
108
+ expect(updated.id).toBe(init.results[0].id);
109
+ expect(updated.type).toBe(init.results[0].type);
143
110
  });
144
111
 
145
112
 
146
- it("should get all memories for distinct users", async () => {
113
+ it.only("should get all memories for distinct users", async () => {
147
114
  // Add a few memories
148
115
  await memory.capture("I love visiting new places in the winters", userId, {});
149
116
  await memory.capture("I like to rule the world", userId, {});
@@ -350,42 +317,17 @@ describe("Memory Class", () => {
350
317
 
351
318
  describe("Memory with Custom Configuration", () => {
352
319
  let customMemory: MemoriesLite;
353
-
354
- beforeEach(() => {
355
- customMemory = new MemoriesLite({
356
- version: "v1.1",
357
- disableHistory: true,
358
- embedder: {
359
- provider: "openai",
360
- config: {
361
- dimension,
362
- apiKey: process.env.OPENAI_API_KEY || "",
363
- model: "text-embedding-3-small",
364
- },
365
- },
366
- vectorStore: {
367
- provider: "lite",
368
- config: {
369
- dimension,
370
- rootPath: ":memory:",
371
- },
372
- },
373
- llm: {
374
- provider: "openai",
375
- config: {
376
- apiKey: process.env.OPENAI_API_KEY || "",
377
- model: "gpt-4.1-mini",
378
- },
379
- },
380
- historyDbPath: ":memory:", // Use in-memory SQLite for tests
381
- });
320
+ beforeEach(async () => {
321
+ // Initialize customMemory via helper
322
+ ({ memory: customMemory, userId } = createTestMemory({}));
323
+ // Reset before custom tests
324
+ await customMemory.reset(userId);
382
325
  });
383
326
 
384
327
  afterEach(async () => {
385
328
  await customMemory.reset(userId);
386
329
  });
387
330
 
388
-
389
331
  it("should perform semantic search with custom embeddings", async () => {
390
332
  // Add test memories
391
333
  await customMemory.capture("The weather in London is rainy today", userId, {});
@@ -0,0 +1,150 @@
1
+ /// <reference types="jest" />
2
+ import { MemoriesLite } from "../src";
3
+ import { MemoryItem, SearchResult } from "../src/types";
4
+ import dotenv from "dotenv";
5
+ import { createTestMemory } from "./init.mem";
6
+
7
+ dotenv.config();
8
+
9
+ jest.setTimeout(30000); // Increase timeout to 30 seconds
10
+
11
+ describe("Memory Updates - Type Preservation", () => {
12
+ let memory: MemoriesLite;
13
+ let userId: string;
14
+
15
+ beforeEach(async () => {
16
+ // Initialize memory via helper
17
+ ({ memory, userId } = createTestMemory({}));
18
+ // Reset all memories before each test
19
+ await memory.reset(userId);
20
+ });
21
+
22
+ afterEach(async () => {
23
+ // Clean up after each test
24
+ await memory.reset(userId);
25
+ });
26
+
27
+ describe("Memory Type Preservation During Updates", () => {
28
+ it("should preserve memory type when updating content", async () => {
29
+ // Add a factual memory
30
+ const factResult = (await memory.capture(
31
+ "I am allergic to peanuts",
32
+ userId,
33
+ {},
34
+ )) as SearchResult;
35
+
36
+ expect(factResult.results.length).toBeGreaterThan(0);
37
+ expect(factResult.results[0]?.type).toBe("factual");
38
+
39
+ const factId = factResult.results[0].id;
40
+
41
+ // Update the factual memory
42
+ await memory.update(factId, "I am severely allergic to peanuts and tree nuts", userId);
43
+
44
+ // Verify type preservation
45
+ const updatedFact = await memory.get(factId, userId);
46
+ expect(updatedFact).not.toBeNull();
47
+ expect(updatedFact?.type).toBe("factual");
48
+ expect(updatedFact?.memory).toBe("I am severely allergic to peanuts and tree nuts");
49
+ });
50
+
51
+ it("should preserve assistant_preference type when updating", async () => {
52
+ // Add a preference memory
53
+ const prefResult = (await memory.capture(
54
+ "please respond to me in French",
55
+ userId,
56
+ {},
57
+ )) as SearchResult;
58
+
59
+ expect(prefResult.results.length).toBeGreaterThan(0);
60
+ expect(prefResult.results[0]?.type).toBe("assistant_preference");
61
+
62
+ const prefId = prefResult.results[0].id;
63
+
64
+ // Update the preference memory
65
+ await memory.update(prefId, "please respond to me in Spanish", userId);
66
+
67
+ // Verify type preservation
68
+ const updatedPref = await memory.get(prefId, userId);
69
+ expect(updatedPref).not.toBeNull();
70
+ expect(updatedPref?.type).toBe("assistant_preference");
71
+ expect(updatedPref?.memory).toBe("please respond to me in Spanish");
72
+ });
73
+
74
+ it("should preserve episodic memory type when updating", async () => {
75
+ // Add an episodic memory
76
+ const episodicResult = (await memory.capture(
77
+ "I have a doctor's appointment tomorrow at 2pm",
78
+ userId,
79
+ {},
80
+ )) as SearchResult;
81
+
82
+ expect(episodicResult.results.length).toBeGreaterThan(0);
83
+ expect(episodicResult.results[0]?.type).toBe("episodic");
84
+
85
+ const episodicId = episodicResult.results[0].id;
86
+
87
+ // Update the episodic memory
88
+ await memory.update(episodicId, "My doctor's appointment was rescheduled to Friday at 3pm", userId);
89
+
90
+ // Verify type preservation
91
+ const updatedEpisodic = await memory.get(episodicId, userId);
92
+ expect(updatedEpisodic).not.toBeNull();
93
+ expect(updatedEpisodic?.type).toBe("episodic");
94
+ expect(updatedEpisodic?.memory).toBe("My doctor's appointment was rescheduled to Friday at 3pm");
95
+ });
96
+
97
+ it.skip("should preserve memory metadata when updating content", async () => {
98
+ // Mock the vector store to verify metadata is preserved
99
+ const mockVectorStore = {
100
+ insert: jest.fn().mockResolvedValue(undefined),
101
+ search: jest.fn().mockResolvedValue([{
102
+ id: 'test-id',
103
+ score: 0.9,
104
+ metadata: {
105
+ type: 'factual',
106
+ userId,
107
+ timestamp: Date.now(),
108
+ custom: 'test-value'
109
+ },
110
+ values: []
111
+ }]),
112
+ get: jest.fn().mockResolvedValue({
113
+ id: 'test-id',
114
+ metadata: {
115
+ type: 'factual',
116
+ userId,
117
+ timestamp: Date.now(),
118
+ custom: 'test-value'
119
+ },
120
+ values: []
121
+ }),
122
+ update: jest.fn().mockResolvedValue(undefined),
123
+ list: jest.fn(),
124
+ delete: jest.fn(),
125
+ deleteCol: jest.fn(),
126
+ ensureCollection: jest.fn(),
127
+ };
128
+
129
+ // Mock the getVectorStore method to return our mock
130
+ jest.spyOn(memory, 'getVectorStore').mockResolvedValue(mockVectorStore as any);
131
+
132
+ // Perform an update
133
+ await memory.update('test-id', 'Updated content', userId);
134
+
135
+ // Verify mockVectorStore.update was called with metadata preserved
136
+ expect(mockVectorStore.update).toHaveBeenCalled();
137
+ const updateArgs = mockVectorStore.update.mock.calls[0];
138
+
139
+ // Verify the ID and content are correct
140
+ expect(updateArgs[0]).toBe('test-id'); // ID
141
+
142
+ // Verify that metadata is passed and preserved in the update call
143
+ const metadataArg = updateArgs[2];
144
+ expect(metadataArg).toBeDefined();
145
+ expect(metadataArg.type).toBe('factual');
146
+ expect(metadataArg.userId).toBe(userId);
147
+ expect(metadataArg.custom).toBe('test-value');
148
+ });
149
+ });
150
+ });