@jeremiaheth/neolata-mem 0.4.4 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,81 @@
1
+ # Repository Analysis (Rerun): `neolata-mem`
2
+
3
+ ## Scope
4
+ This is a refreshed analysis of the **current** repository state after recent updates. It summarizes architecture, strengths, current risks, and recommended next actions.
5
+
6
+ ## Current Baseline (Observed)
7
+
8
+ - Package version: **0.5.0** (`package.json`)
9
+ - Test status: **38/38 tests passing** via Vitest (`npm test`)
10
+ - Main stack: Node.js ESM, no framework lock-in, local JSON default storage
11
+
12
+ ## Architecture Snapshot
13
+
14
+ 1. **Factory-first composition (`createMemory`)**
15
+ - `src/index.mjs` cleanly wires storage, embeddings, extraction, and optional LLM into `MemoryGraph`.
16
+ - This keeps provider choice isolated from graph logic.
17
+
18
+ 2. **Single core engine (`MemoryGraph`)**
19
+ - `src/graph.mjs` owns memory lifecycle behaviors: store/search/linking/decay/evolution, plus graph queries.
20
+ - Event emission is integrated directly in lifecycle methods, enabling observability consumers.
21
+
22
+ 3. **Adapter-style provider modules**
23
+ - `src/storage.mjs`, `src/embeddings.mjs`, `src/extraction.mjs`, and `src/llm.mjs` expose compact, swappable interfaces.
24
+ - Defaults are usable offline (noop embeddings + local JSON).
25
+
26
+ 4. **Operational interface parity**
27
+ - CLI (`cli/index.mjs`) routes into the same runtime APIs as library users, reducing drift between DX paths.
28
+
29
+ ## What’s Working Well
30
+
31
+ 1. **Strong zero-config path**
32
+ - New users can start without API keys or extra infrastructure.
33
+
34
+ 2. **Security/robustness guardrails are present**
35
+ - Storage path traversal checks and atomic write strategy are implemented in JSON storage.
36
+ - Input validation and configurable size/rate limits exist in graph operations.
37
+
38
+ 3. **Good functional breadth for agent memory**
39
+ - Beyond CRUD/search, the project includes graph traversal, clustering, pathing, orphan detection, decay, and conflict evolution.
40
+
41
+ 4. **Tested core behavior**
42
+ - Coverage includes events, decay behavior, reinforce semantics, search fallback, and multiple graph queries.
43
+
44
+ ## Risks and Constraints (Current)
45
+
46
+ 1. **Linking/search scale remains linear-heavy**
47
+ - Auto-linking in `store` compares new embedding against existing memory embeddings, which is straightforward but O(n).
48
+ - Repeated `.find` patterns for id lookups can add overhead as memory volume grows.
49
+
50
+ 2. **JSON backend durability/perf limits**
51
+ - Atomic writes improve safety, but whole-graph read/write behavior can become expensive under frequent updates.
52
+
53
+ 3. **Keyword fallback relevance ceiling**
54
+ - Noop mode currently depends on simple includes matching; precision/recall for natural language queries is limited.
55
+
56
+ 4. **LLM-dependent evolution variability**
57
+ - Conflict resolution quality remains provider/model/prompt dependent and less deterministic than local logic.
58
+
59
+ ## Prioritized Next Steps
60
+
61
+ ### Near-term (small changes, high value)
62
+
63
+ - Add a docs section with **scale guidance** (when JSON backend is ideal vs when to switch adapters).
64
+ - Add tests for **edge constraints** (max lengths, max memories, evolve interval/rate-limit edges).
65
+ - Add optional **query normalization** in keyword mode (tokenization/lowercase handling improvements).
66
+
67
+ ### Mid-term (performance/product hardening)
68
+
69
+ - Introduce an internal **`Map<string, Memory>` index** in `MemoryGraph` to reduce repeated linear id lookups.
70
+ - Add **candidate narrowing** before full similarity scans (simple inverted keyword index or bucketed heuristics).
71
+ - Provide a reference **persistent adapter example** (SQLite/Postgres) using the current storage contract.
72
+
73
+ ### Long-term (scale + operability)
74
+
75
+ - Add **batch APIs** (`storeMany`, `searchMany`) to amortize I/O and embedding calls.
76
+ - Add **metrics hooks** (timings/counts for store/search/decay/evolve) for production tuning.
77
+ - Add **persisted schema versioning** for future migrations and compatibility guarantees.
78
+
79
+ ## Overall Assessment
80
+
81
+ `neolata-mem` is in a healthy state for local and mid-scale agent-memory use: practical defaults, clean module boundaries, and a useful graph-native feature set. The key improvement axis is now predictable scaling and operational durability under heavier workloads, rather than foundational architecture changes.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@jeremiaheth/neolata-mem",
3
- "version": "0.4.4",
3
+ "version": "0.5.0",
4
4
  "description": "Graph-native memory engine for AI agents with Zettelkasten linking, biological decay, and conflict resolution",
5
5
  "type": "module",
6
6
  "main": "src/index.mjs",
package/src/graph.mjs CHANGED
@@ -18,6 +18,31 @@ import { cosineSimilarity } from './embeddings.mjs';
18
18
 
19
19
  /** @typedef {{ id: string, agent: string, memory: string, category: string, importance: number, tags: string[], embedding: number[]|null, links: {id: string, similarity: number}[], created_at: string, updated_at: string, evolution?: object[], accessCount?: number }} Memory */
20
20
 
21
// ── Keyword normalization helpers ──────────────────────────
const STOP_WORDS = new Set([
  'a','an','the','is','are','was','were','be','been','being',
  'have','has','had','do','does','did','will','would','could',
  'should','may','might','shall','can','need','dare','ought',
  'to','of','in','for','on','with','at','by','from','as','into',
  'through','during','before','after','above','below','between',
  'and','but','or','nor','not','so','yet','both','either','neither',
  'it','its','this','that','these','those','i','me','my','we','our',
]);

/**
 * Tokenize text into normalized terms: lowercased, letters/digits only,
 * stop words and single characters removed, deduplicated (insertion order kept).
 *
 * Uses Unicode property escapes (\p{L}/\p{N} with the `u` flag) instead of an
 * ASCII-only character class, so non-English memories tokenize correctly —
 * previously 'café' was split into the broken token 'caf'. Behavior for
 * pure-ASCII input is unchanged.
 *
 * @param {string} text
 * @returns {string[]}
 */
export function tokenize(text) {
  return [...new Set(
    text.toLowerCase()
      .replace(/[^\p{L}\p{N}\s]/gu, ' ')
      .split(/\s+/)
      .filter(w => w.length > 1 && !STOP_WORDS.has(w))
  )];
}
45
+
21
46
  export class MemoryGraph {
22
47
  /**
23
48
  * @param {object} opts
@@ -42,6 +67,11 @@ export class MemoryGraph {
42
67
  this._listeners = {};
43
68
  this._lastEvolveMs = 0;
44
69
 
70
+ /** @type {Map<string, Memory>} id → memory for O(1) lookups */
71
+ this._idIndex = new Map();
72
+ /** @type {Map<string, Set<string>>} token → Set<memory id> for keyword narrowing */
73
+ this._tokenIndex = new Map();
74
+
45
75
  this.config = {
46
76
  linkThreshold: config.linkThreshold ?? 0.5,
47
77
  maxLinksPerMemory: config.maxLinksPerMemory ?? 5,
@@ -82,6 +112,39 @@ export class MemoryGraph {
82
112
  if (this.loaded) return;
83
113
  this.memories = await this.storage.load();
84
114
  this.loaded = true;
115
+ this._rebuildIndexes();
116
+ }
117
+
118
+ /** Rebuild id and token indexes from current memories. */
119
+ _rebuildIndexes() {
120
+ this._idIndex.clear();
121
+ this._tokenIndex.clear();
122
+ for (const mem of this.memories) {
123
+ this._indexMemory(mem);
124
+ }
125
+ }
126
+
127
+ /** Add a single memory to indexes. */
128
+ _indexMemory(mem) {
129
+ this._idIndex.set(mem.id, mem);
130
+ for (const token of tokenize(mem.memory)) {
131
+ if (!this._tokenIndex.has(token)) this._tokenIndex.set(token, new Set());
132
+ this._tokenIndex.get(token).add(mem.id);
133
+ }
134
+ }
135
+
136
+ /** Remove a memory from indexes. */
137
+ _deindexMemory(mem) {
138
+ this._idIndex.delete(mem.id);
139
+ for (const token of tokenize(mem.memory)) {
140
+ const set = this._tokenIndex.get(token);
141
+ if (set) { set.delete(mem.id); if (set.size === 0) this._tokenIndex.delete(token); }
142
+ }
143
+ }
144
+
145
+ /** Look up memory by id in O(1). */
146
+ _byId(id) {
147
+ return this._idIndex.get(id);
85
148
  }
86
149
 
87
150
  /** Persist current memories to storage. */
@@ -148,10 +211,11 @@ export class MemoryGraph {
148
211
  };
149
212
 
150
213
  this.memories.push(newMem);
214
+ this._indexMemory(newMem);
151
215
 
152
216
  // A-MEM: add backlinks to related memories
153
217
  for (const link of topLinks) {
154
- const target = this.memories.find(m => m.id === link.id);
218
+ const target = this._byId(link.id);
155
219
  if (target) {
156
220
  if (!target.links) target.links = [];
157
221
  if (!target.links.find(l => l.id === id)) {
@@ -171,7 +235,7 @@ export class MemoryGraph {
171
235
  }
172
236
  // Update backlinked targets
173
237
  for (const link of topLinks) {
174
- const target = this.memories.find(m => m.id === link.id);
238
+ const target = this._byId(link.id);
175
239
  if (target) await this.storage.upsert(target);
176
240
  }
177
241
  } else {
@@ -217,7 +281,7 @@ export class MemoryGraph {
217
281
  if (serverResults) {
218
282
  // Attach links from in-memory graph
219
283
  for (const r of serverResults) {
220
- const mem = this.memories.find(m => m.id === r.id);
284
+ const mem = this._byId(r.id);
221
285
  r.links = mem?.links || [];
222
286
  }
223
287
  this.emit('search', { agent, query, resultCount: serverResults.length });
@@ -230,15 +294,60 @@ export class MemoryGraph {
230
294
 
231
295
  let results;
232
296
  if (!queryEmb) {
233
- // Keyword fallback when no embeddings
234
- const q = query.toLowerCase();
235
- results = candidates
236
- .filter(m => m.memory.toLowerCase().includes(q))
237
- .slice(0, limit)
238
- .map(m => ({ ...m, score: 1.0, embedding: undefined }));
297
+ // Keyword fallback: tokenized matching with inverted index
298
+ const queryTokens = tokenize(query);
299
+ if (queryTokens.length === 0) {
300
+ // Fall back to simple substring match if all tokens are stop words
301
+ const q = query.toLowerCase();
302
+ results = candidates
303
+ .filter(m => m.memory.toLowerCase().includes(q))
304
+ .slice(0, limit)
305
+ .map(m => ({ ...m, score: 1.0, embedding: undefined }));
306
+ } else {
307
+ // Score by fraction of query tokens matched
308
+ const candidateIds = agent ? new Set(candidates.map(m => m.id)) : null;
309
+ results = [];
310
+ const scored = new Map(); // id → matched token count
311
+ for (const token of queryTokens) {
312
+ const ids = this._tokenIndex.get(token);
313
+ if (!ids) continue;
314
+ for (const id of ids) {
315
+ if (candidateIds && !candidateIds.has(id)) continue;
316
+ scored.set(id, (scored.get(id) || 0) + 1);
317
+ }
318
+ }
319
+ for (const [id, count] of scored) {
320
+ const mem = this._byId(id);
321
+ if (mem) results.push({ ...mem, score: count / queryTokens.length, embedding: undefined });
322
+ }
323
+ results.sort((a, b) => b.score - a.score || b.importance - a.importance);
324
+ results = results.slice(0, limit);
325
+ }
239
326
  } else {
240
- results = candidates
241
- .filter(m => m.embedding)
327
+ // Candidate narrowing: if >500 memories with embeddings, use token index to pre-filter
328
+ let embCandidates = candidates.filter(m => m.embedding);
329
+ if (embCandidates.length > 500 && !this.storage.search) {
330
+ const queryTokens = tokenize(query);
331
+ if (queryTokens.length > 0) {
332
+ const narrowed = new Set();
333
+ for (const token of queryTokens) {
334
+ const ids = this._tokenIndex.get(token);
335
+ if (ids) for (const id of ids) narrowed.add(id);
336
+ }
337
+ if (narrowed.size > 0) {
338
+ // Keep token-matched candidates + random sample of the rest for recall safety
339
+ const matched = embCandidates.filter(m => narrowed.has(m.id));
340
+ const rest = embCandidates.filter(m => !narrowed.has(m.id));
341
+ const sampleSize = Math.min(rest.length, Math.max(100, limit * 5));
342
+ // Deterministic sample: take evenly spaced
343
+ const step = rest.length / sampleSize;
344
+ const sample = [];
345
+ for (let i = 0; i < sampleSize; i++) sample.push(rest[Math.floor(i * step)]);
346
+ embCandidates = [...matched, ...sample];
347
+ }
348
+ }
349
+ }
350
+ results = embCandidates
242
351
  .map(m => ({ ...m, score: cosineSimilarity(queryEmb, m.embedding), embedding: undefined }))
243
352
  .filter(m => m.score >= minSimilarity)
244
353
  .sort((a, b) => b.score - a.score)
@@ -258,6 +367,176 @@ export class MemoryGraph {
258
367
  return this.search(null, query, opts);
259
368
  }
260
369
 
370
+ // ══════════════════════════════════════════════════════════
371
+ // BATCH — Amortized bulk operations
372
+ // ══════════════════════════════════════════════════════════
373
+
374
+ /**
375
+ * Store multiple memories in a single batch. Amortizes embedding calls and I/O.
376
+ * @param {string} agent
377
+ * @param {Array<{text: string, category?: string, importance?: number, tags?: string[]}>} items
378
+ * @param {object} [opts]
379
+ * @param {number} [opts.embeddingBatchSize=64] - Batch size for embedding calls
380
+ * @returns {Promise<{total: number, stored: number, results: Array<{id: string, links: number}>}>}
381
+ */
382
+ async storeMany(agent, items, { embeddingBatchSize = 64 } = {}) {
383
+ if (!agent || typeof agent !== 'string') throw new Error('agent must be a non-empty string');
384
+ if (agent.length > this.config.maxAgentLength) throw new Error(`agent exceeds max length (${this.config.maxAgentLength})`);
385
+ if (!/^[a-zA-Z0-9_\-. ]+$/.test(agent)) throw new Error('agent contains invalid characters');
386
+ if (!Array.isArray(items) || items.length === 0) throw new Error('items must be a non-empty array');
387
+
388
+ await this.init();
389
+
390
+ if (this.memories.length + items.length > this.config.maxMemories) {
391
+ throw new Error(`Batch would exceed memory limit (${this.config.maxMemories}). Run decay() or increase maxMemories.`);
392
+ }
393
+
394
+ // Validate all items first
395
+ const texts = items.map((item, i) => {
396
+ const text = typeof item === 'string' ? item : item.text;
397
+ if (!text || typeof text !== 'string') throw new Error(`items[${i}].text must be a non-empty string`);
398
+ if (text.length > this.config.maxMemoryLength) throw new Error(`items[${i}].text exceeds max length`);
399
+ return text;
400
+ });
401
+
402
+ // Batch embed all texts
403
+ const allEmbeddings = [];
404
+ for (let i = 0; i < texts.length; i += embeddingBatchSize) {
405
+ const batch = texts.slice(i, i + embeddingBatchSize);
406
+ const embeddings = await this.embeddings.embed(...batch);
407
+ allEmbeddings.push(...embeddings);
408
+ }
409
+
410
+ const results = [];
411
+ const now = new Date().toISOString();
412
+
413
+ for (let i = 0; i < items.length; i++) {
414
+ const item = typeof items[i] === 'string' ? { text: items[i] } : items[i];
415
+ const embedding = allEmbeddings[i];
416
+
417
+ // Find related memories for auto-linking
418
+ const related = [];
419
+ if (embedding) {
420
+ for (const existing of this.memories) {
421
+ if (!existing.embedding) continue;
422
+ const sim = cosineSimilarity(embedding, existing.embedding);
423
+ if (sim > this.config.linkThreshold) {
424
+ related.push({ id: existing.id, similarity: sim, agent: existing.agent });
425
+ }
426
+ }
427
+ related.sort((a, b) => b.similarity - a.similarity);
428
+ }
429
+ const topLinks = related.slice(0, this.config.maxLinksPerMemory);
430
+
431
+ const id = this.storage.genId();
432
+ const newMem = {
433
+ id, agent, memory: item.text || items[i],
434
+ category: item.category || 'fact',
435
+ importance: item.importance ?? 0.7,
436
+ tags: item.tags || [],
437
+ embedding,
438
+ links: topLinks.map(l => ({ id: l.id, similarity: l.similarity })),
439
+ created_at: now, updated_at: now,
440
+ };
441
+
442
+ this.memories.push(newMem);
443
+ this._indexMemory(newMem);
444
+
445
+ // Backlinks
446
+ for (const link of topLinks) {
447
+ const target = this._byId(link.id);
448
+ if (target) {
449
+ if (!target.links) target.links = [];
450
+ if (!target.links.find(l => l.id === id)) {
451
+ target.links.push({ id, similarity: link.similarity });
452
+ }
453
+ target.updated_at = now;
454
+ }
455
+ }
456
+
457
+ results.push({ id, links: topLinks.length });
458
+ this.emit('store', { id, agent, content: newMem.memory, category: newMem.category, importance: newMem.importance, links: topLinks.length });
459
+ }
460
+
461
+ // Single save at the end
462
+ if (this.storage.incremental) {
463
+ for (const r of results) {
464
+ const mem = this._byId(r.id);
465
+ if (mem) await this.storage.upsert(mem);
466
+ }
467
+ } else {
468
+ await this.save();
469
+ }
470
+
471
+ return { total: items.length, stored: results.length, results };
472
+ }
473
+
474
+ /**
475
+ * Search for multiple queries in a single batch. Amortizes embedding calls.
476
+ * @param {string|null} agent - Agent filter (null = all)
477
+ * @param {string[]} queries
478
+ * @param {object} [opts]
479
+ * @param {number} [opts.limit=10] - Per-query result limit
480
+ * @param {number} [opts.minSimilarity=0]
481
+ * @returns {Promise<Array<{query: string, results: Array<Memory & {score: number}>}>>}
482
+ */
483
+ async searchMany(agent, queries, { limit = 10, minSimilarity = 0 } = {}) {
484
+ if (!Array.isArray(queries) || queries.length === 0) throw new Error('queries must be a non-empty array');
485
+
486
+ await this.init();
487
+
488
+ // Batch embed all queries
489
+ const embedFn = this.embeddings.embedQuery || this.embeddings.embed;
490
+ const allEmbeddings = await embedFn.call(this.embeddings, ...queries);
491
+
492
+ let candidates = this.memories;
493
+ if (agent) candidates = candidates.filter(m => m.agent === agent);
494
+
495
+ const output = [];
496
+ for (let i = 0; i < queries.length; i++) {
497
+ const queryEmb = allEmbeddings[i];
498
+ let results;
499
+
500
+ if (!queryEmb) {
501
+ const queryTokens = tokenize(queries[i]);
502
+ if (queryTokens.length === 0) {
503
+ const q = queries[i].toLowerCase();
504
+ results = candidates.filter(m => m.memory.toLowerCase().includes(q))
505
+ .slice(0, limit).map(m => ({ ...m, score: 1.0, embedding: undefined }));
506
+ } else {
507
+ const candidateIds = agent ? new Set(candidates.map(m => m.id)) : null;
508
+ const scored = new Map();
509
+ for (const token of queryTokens) {
510
+ const ids = this._tokenIndex.get(token);
511
+ if (!ids) continue;
512
+ for (const id of ids) {
513
+ if (candidateIds && !candidateIds.has(id)) continue;
514
+ scored.set(id, (scored.get(id) || 0) + 1);
515
+ }
516
+ }
517
+ results = [];
518
+ for (const [id, count] of scored) {
519
+ const mem = this._byId(id);
520
+ if (mem) results.push({ ...mem, score: count / queryTokens.length, embedding: undefined });
521
+ }
522
+ results.sort((a, b) => b.score - a.score || b.importance - a.importance);
523
+ results = results.slice(0, limit);
524
+ }
525
+ } else {
526
+ results = candidates.filter(m => m.embedding)
527
+ .map(m => ({ ...m, score: cosineSimilarity(queryEmb, m.embedding), embedding: undefined }))
528
+ .filter(m => m.score >= minSimilarity)
529
+ .sort((a, b) => b.score - a.score)
530
+ .slice(0, limit);
531
+ }
532
+
533
+ this.emit('search', { agent, query: queries[i], resultCount: results.length });
534
+ output.push({ query: queries[i], results });
535
+ }
536
+
537
+ return output;
538
+ }
539
+
261
540
  // ══════════════════════════════════════════════════════════
262
541
  // LINKS — Graph queries
263
542
  // ══════════════════════════════════════════════════════════
@@ -268,11 +547,11 @@ export class MemoryGraph {
268
547
  */
269
548
  async links(memoryId) {
270
549
  await this.init();
271
- const mem = this.memories.find(m => m.id === memoryId);
550
+ const mem = this._byId(memoryId);
272
551
  if (!mem) return null;
273
552
 
274
553
  const linked = (mem.links || []).map(link => {
275
- const target = this.memories.find(m => m.id === link.id);
554
+ const target = this._byId(link.id);
276
555
  return {
277
556
  id: link.id,
278
557
  similarity: link.similarity,
@@ -292,7 +571,7 @@ export class MemoryGraph {
292
571
  */
293
572
  async traverse(startId, maxHops = 2) {
294
573
  await this.init();
295
- const start = this.memories.find(m => m.id === startId);
574
+ const start = this._byId(startId);
296
575
  if (!start) return null;
297
576
 
298
577
  const visited = new Map();
@@ -302,7 +581,7 @@ export class MemoryGraph {
302
581
  const { id, hop, similarity } = queue.shift();
303
582
  if (visited.has(id)) continue;
304
583
 
305
- const mem = this.memories.find(m => m.id === id);
584
+ const mem = this._byId(id);
306
585
  if (!mem) continue;
307
586
 
308
587
  visited.set(id, {
@@ -364,7 +643,7 @@ export class MemoryGraph {
364
643
  const agentCounts = {};
365
644
  for (const c of cluster) {
366
645
  agentCounts[c.agent] = (agentCounts[c.agent] || 0) + 1;
367
- const full = this.memories.find(m => m.id === c.id);
646
+ const full = this._byId(c.id);
368
647
  for (const tag of (full?.tags || [])) tagCounts[tag] = (tagCounts[tag] || 0) + 1;
369
648
  }
370
649
  const topTags = Object.entries(tagCounts).sort((a, b) => b[1] - a[1]).slice(0, 5).map(e => e[0]);
@@ -383,7 +662,7 @@ export class MemoryGraph {
383
662
  */
384
663
  async path(idA, idB) {
385
664
  await this.init();
386
- if (!this.memories.find(m => m.id === idA) || !this.memories.find(m => m.id === idB)) return null;
665
+ if (!this._byId(idA) || !this._byId(idB)) return null;
387
666
 
388
667
  const visited = new Map();
389
668
  const queue = [idA];
@@ -396,14 +675,14 @@ export class MemoryGraph {
396
675
  const path = [];
397
676
  let current = idB;
398
677
  while (current !== null) {
399
- const mem = this.memories.find(m => m.id === current);
678
+ const mem = this._byId(current);
400
679
  path.unshift({ id: current, memory: mem?.memory || '?', agent: mem?.agent || '?', category: mem?.category || '?' });
401
680
  current = visited.get(current);
402
681
  }
403
682
  return { found: true, hops: path.length - 1, path };
404
683
  }
405
684
 
406
- const mem = this.memories.find(m => m.id === id);
685
+ const mem = this._byId(id);
407
686
  if (!mem) continue;
408
687
  for (const link of (mem.links || [])) {
409
688
  if (!visited.has(link.id)) {
@@ -518,6 +797,7 @@ export class MemoryGraph {
518
797
  await this.storage.saveArchive(archived);
519
798
 
520
799
  const removeIds = new Set([...toArchive, ...toDelete].map(m => m.id));
800
+ for (const mem of [...toArchive, ...toDelete]) this._deindexMemory(mem);
521
801
  this.memories = this.memories.filter(m => !removeIds.has(m.id));
522
802
 
523
803
  for (const mem of this.memories) {
@@ -546,7 +826,7 @@ export class MemoryGraph {
546
826
  */
547
827
  async reinforce(memoryId, boost = 0.1) {
548
828
  await this.init();
549
- const mem = this.memories.find(m => m.id === memoryId);
829
+ const mem = this._byId(memoryId);
550
830
  if (!mem) return null;
551
831
 
552
832
  const oldImportance = mem.importance;
@@ -672,7 +952,7 @@ Respond ONLY with a JSON object:
672
952
  // Archive conflicting memories
673
953
  for (const conflict of (conflicts.conflicts || [])) {
674
954
  if (conflict.memoryId) {
675
- const old = this.memories.find(m => m.id === conflict.memoryId);
955
+ const old = this._byId(conflict.memoryId);
676
956
  if (old) {
677
957
  const archived = await this.storage.loadArchive();
678
958
  archived.push({ ...old, embedding: undefined, archived_at: new Date().toISOString(), archived_reason: `Superseded: ${conflict.reason}` });
@@ -680,6 +960,7 @@ Respond ONLY with a JSON object:
680
960
  if (this.storage.incremental) {
681
961
  await this.storage.remove(conflict.memoryId);
682
962
  }
963
+ this._deindexMemory(old);
683
964
  this.memories = this.memories.filter(m => m.id !== conflict.memoryId);
684
965
  actions.push({ type: 'archived', id: conflict.memoryId, reason: conflict.reason, old: old.memory });
685
966
  }
@@ -689,7 +970,7 @@ Respond ONLY with a JSON object:
689
970
  // Update existing memories in-place
690
971
  for (const update of (conflicts.updates || [])) {
691
972
  if (update.memoryId) {
692
- const existing = this.memories.find(m => m.id === update.memoryId);
973
+ const existing = this._byId(update.memoryId);
693
974
  if (existing) {
694
975
  const oldContent = existing.memory;
695
976
  existing.memory = text;
@@ -741,12 +1022,12 @@ Respond ONLY with a JSON object:
741
1022
  seen.add(r.id);
742
1023
  contextMems.push({ ...r, source: 'direct' });
743
1024
 
744
- const mem = this.memories.find(m => m.id === r.id);
1025
+ const mem = this._byId(r.id);
745
1026
  if (mem) {
746
1027
  for (const link of (mem.links || []).slice(0, 3)) {
747
1028
  if (seen.has(link.id)) continue;
748
1029
  seen.add(link.id);
749
- const linked = this.memories.find(m => m.id === link.id);
1030
+ const linked = this._byId(link.id);
750
1031
  if (linked) {
751
1032
  contextMems.push({
752
1033
  id: linked.id, memory: linked.memory, agent: linked.agent,
package/src/index.mjs CHANGED
@@ -156,7 +156,7 @@ export function createMemory(opts = {}) {
156
156
  }
157
157
 
158
158
  // Re-export everything for advanced usage
159
- export { MemoryGraph } from './graph.mjs';
159
+ export { MemoryGraph, tokenize } from './graph.mjs';
160
160
  export { openaiEmbeddings, noopEmbeddings, cosineSimilarity } from './embeddings.mjs';
161
161
  export { jsonStorage, memoryStorage } from './storage.mjs';
162
162
  export { supabaseStorage } from './supabase-storage.mjs';