clawmem 0.2.0 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/AGENTS.md CHANGED
@@ -634,9 +634,20 @@ Symptom: embed --force with new model produces 3 docs stuck as "Unembedded" but
634
634
  → Fix: Delete partial content_vectors + vectors_vec for the stuck hashes, then re-run embed (no --force).
635
635
  The vec0 DELETE try-catch prevents cascading failures during the re-embed.
636
636
 
637
+ Symptom: reindex --force after v0.2.0 upgrade shows no entity extraction
638
+ → `reindex --force` treats existing docs as updates (isNew=false). The A-MEM pipeline
639
+ skips entity extraction, link generation, and evolution for updates to avoid churn.
640
+ → Fix: Use `clawmem reindex --enrich` instead. The `--enrich` flag forces the full
641
+ enrichment pipeline (entity extraction + links + evolution) on all documents.
642
+ → `--force` alone only refreshes A-MEM notes (keywords, tags, context). `--enrich`
643
+ is needed after major upgrades that add new enrichment stages.
644
+
637
645
  Symptom: CLI reindex/update falls back to node-llama-cpp Vulkan (not GPU server)
638
646
  → GPU env vars only in systemd drop-in, not in wrapper script. CLI invocations missed them.
639
647
  → Fixed 2026-02-12: bin/clawmem wrapper exports CLAWMEM_EMBED_URL/LLM_URL/RERANK_URL defaults.
648
+ → Always run ClawMem via the `bin/clawmem` wrapper, not `bun run src/clawmem.ts` directly.
649
+ The wrapper sets CLAWMEM_EMBED_URL/LLM_URL/RERANK_URL defaults. Scripts or inline bun
650
+ commands that bypass the wrapper will fall back to in-process node-llama-cpp (slow, CPU).
640
651
 
641
652
  Symptom: "UserPromptSubmit hook error" on context-surfacing hook (intermittent)
642
653
  → SQLite contention between the watcher and the hook. The watcher processes filesystem events
package/CLAUDE.md CHANGED
@@ -634,9 +634,20 @@ Symptom: embed --force with new model produces 3 docs stuck as "Unembedded" but
634
634
  → Fix: Delete partial content_vectors + vectors_vec for the stuck hashes, then re-run embed (no --force).
635
635
  The vec0 DELETE try-catch prevents cascading failures during the re-embed.
636
636
 
637
+ Symptom: reindex --force after v0.2.0 upgrade shows no entity extraction
638
+ → `reindex --force` treats existing docs as updates (isNew=false). The A-MEM pipeline
639
+ skips entity extraction, link generation, and evolution for updates to avoid churn.
640
+ → Fix: Use `clawmem reindex --enrich` instead. The `--enrich` flag forces the full
641
+ enrichment pipeline (entity extraction + links + evolution) on all documents.
642
+ → `--force` alone only refreshes A-MEM notes (keywords, tags, context). `--enrich`
643
+ is needed after major upgrades that add new enrichment stages.
644
+
637
645
  Symptom: CLI reindex/update falls back to node-llama-cpp Vulkan (not GPU server)
638
646
  → GPU env vars only in systemd drop-in, not in wrapper script. CLI invocations missed them.
639
647
  → Fixed 2026-02-12: bin/clawmem wrapper exports CLAWMEM_EMBED_URL/LLM_URL/RERANK_URL defaults.
648
+ → Always run ClawMem via the `bin/clawmem` wrapper, not `bun run src/clawmem.ts` directly.
649
+ The wrapper sets CLAWMEM_EMBED_URL/LLM_URL/RERANK_URL defaults. Scripts or inline bun
650
+ commands that bypass the wrapper will fall back to in-process node-llama-cpp (slow, CPU).
640
651
 
641
652
  Symptom: "UserPromptSubmit hook error" on context-surfacing hook (intermittent)
642
653
  → SQLite contention between the watcher and the hook. The watcher processes filesystem events
package/README.md CHANGED
@@ -44,8 +44,6 @@ Runs fully local with no API keys and no cloud services. Integrates via Claude C
44
44
 
45
45
  ### v0.2.0 Enhancements
46
46
 
47
- Seven patterns extracted from competitor analysis ([Hindsight](https://github.com/vectorize-io/hindsight), [Hermes Agent](https://github.com/NousResearch/hermes-agent), [claude-mem](https://github.com/thedotmack/claude-mem)):
48
-
49
47
  - **Entity resolution + co-occurrence graph** — LLM entity extraction during A-MEM enrichment, canonical normalization via FTS5 + Levenshtein fuzzy matching, co-occurrence tracking, entity graph traversal for ENTITY intent queries
50
48
  - **MPFP graph retrieval** — Multi-Path Fact Propagation with meta-path patterns per intent, hop-synchronized edge cache, Forward Push with α=0.15 teleport probability. Replaces single-beam traversal for causal/entity/temporal queries.
51
49
  - **Temporal query extraction** — regex-based date range extraction from natural language queries ("last week", "March 2026"), wired as WHERE filters into BM25 and vector search
@@ -149,6 +147,25 @@ clawmem update --embed
149
147
  clawmem doctor
150
148
  ```
151
149
 
150
+ ### Upgrading
151
+
152
+ ```bash
153
+ bun update -g clawmem # or: npm update -g clawmem
154
+ ```
155
+
156
+ Database schema migrates automatically on next startup (new tables and columns are added via `CREATE IF NOT EXISTS` / `ALTER TABLE ADD COLUMN`).
157
+
158
+ After **major version updates** (e.g. 0.1.x → 0.2.0) that add new enrichment pipelines, run a full enrichment pass to backfill existing documents:
159
+
160
+ ```bash
161
+ clawmem reindex --enrich # Full enrichment: entity extraction + links + evolution for all docs
162
+ clawmem embed # Re-embed if upgrading embedding models (not needed for most updates)
163
+ ```
164
+
165
+ `--enrich` forces the complete A-MEM pipeline (entity extraction, link generation, memory evolution) on all documents, not just new ones. Without it, reindex only refreshes metadata for existing docs.
166
+
167
+ Routine patch updates (e.g. 0.2.0 → 0.2.1) do not require reindexing.
168
+
152
169
  ### Integration
153
170
 
154
171
  #### Claude Code
@@ -1027,6 +1044,8 @@ Built on the shoulders of:
1027
1044
  - [Beads](https://github.com/steveyegge/beads) — Dolt-backed issue tracker for AI agents
1028
1045
  - [claude-mem](https://github.com/thedotmack/claude-mem) — Claude Code memory integration reference
1029
1046
  - [Engram](https://github.com/Gentleman-Programming/engram) — observation dedup window, topic-key upsert pattern, temporal timeline navigation, duplicate metadata scoring signals
1047
+ - [Hermes Agent](https://github.com/NousResearch/hermes-agent) — memory nudge system (periodic lifecycle tool prompting)
1048
+ - [Hindsight](https://github.com/vectorize-io/hindsight) — entity resolution, MPFP graph traversal, temporal extraction, 3-tier consolidation, observation invalidation, 4-way parallel retrieval
1030
1049
  - [MAGMA](https://arxiv.org/abs/2501.13956) — multi-graph memory agent
1031
1050
  - [memory-lancedb-pro](https://github.com/CortexReach/memory-lancedb-pro) — retrieval gate, length normalization, MMR diversity, access reinforcement algorithms
1032
1051
  - [OpenViking](https://github.com/volcengine/OpenViking) — query decomposition patterns, collection-scoped retrieval, transaction-safe indexing
package/SKILL.md CHANGED
@@ -272,7 +272,7 @@ Once escalated, route by query type:
272
272
  | `beads_sync` | Sync Beads issues from Dolt backend into memory. |
273
273
  | `index_stats` | Doc counts, embedding coverage, content type distribution. |
274
274
  | `status` | Quick index health. |
275
- | `reindex` | Force re-index (BM25 only, does NOT embed). |
275
+ | `reindex` | Force re-index (BM25 only, does NOT embed). Use `--enrich` after major upgrades to backfill entity extraction + links on existing docs. |
276
276
  | `memory_evolution_status` | Track how a doc's A-MEM metadata evolved over time. |
277
277
  | `timeline` | Temporal neighborhood around a document — what was modified before/after. Progressive disclosure: search → timeline → get. Supports same-collection scoping and session correlation. |
278
278
  | `list_vaults` | Show configured vault names and paths. Empty in single-vault mode. |
@@ -660,6 +660,14 @@ echo "user query" | clawmem surface --context --stdin
660
660
  echo "session-id" | clawmem surface --bootstrap --stdin
661
661
  ```
662
662
 
663
+ ### Enrichment Commands
664
+
665
+ ```bash
666
+ clawmem reindex --enrich # Full A-MEM pipeline on ALL docs (entity extraction,
667
+ # link generation, memory evolution). Use after major upgrades.
668
+ # Without --enrich, reindex only refreshes metadata for changed docs.
669
+ ```
670
+
663
671
  ### Analysis Commands
664
672
 
665
673
  ```bash
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "clawmem",
3
- "version": "0.2.0",
3
+ "version": "0.2.2",
4
4
  "description": "On-device context engine and memory for AI agents. Claude Code and OpenClaw. Hooks + MCP server + hybrid RAG search.",
5
5
  "type": "module",
6
6
  "bin": {
package/src/clawmem.ts CHANGED
@@ -1269,6 +1269,7 @@ async function cmdWatch() {
1269
1269
 
1270
1270
  async function cmdReindex(args: string[]) {
1271
1271
  const force = args.includes("--force") || args.includes("-f");
1272
+ const enrich = args.includes("--enrich");
1272
1273
  const collections = collectionsList();
1273
1274
 
1274
1275
  if (collections.length === 0) {
@@ -1283,9 +1284,13 @@ async function cmdReindex(args: string[]) {
1283
1284
  console.log(`${c.yellow}Force reindex: deactivated all documents${c.reset}`);
1284
1285
  }
1285
1286
 
1287
+ if (enrich) {
1288
+ console.log(`${c.cyan}Full enrichment: entity extraction + links + evolution for all documents${c.reset}`);
1289
+ }
1290
+
1286
1291
  for (const col of collections) {
1287
1292
  console.log(`Indexing ${c.bold}${col.name}${c.reset} (${col.path})...`);
1288
- const stats = await indexCollection(s, col.name, col.path, col.pattern);
1293
+ const stats = await indexCollection(s, col.name, col.path, col.pattern, { forceEnrich: enrich });
1289
1294
  console.log(` +${stats.added} added, ~${stats.updated} updated, =${stats.unchanged} unchanged, -${stats.removed} removed`);
1290
1295
  }
1291
1296
  }
@@ -2202,7 +2207,7 @@ ${c.bold}Setup:${c.reset}
2202
2207
  ${c.bold}Indexing:${c.reset}
2203
2208
  clawmem update [--pull] [--embed] Re-scan collections (--embed auto-embeds)
2204
2209
  clawmem embed [-f] Generate fragment embeddings
2205
- clawmem reindex [--force] Full re-index
2210
+ clawmem reindex [--force] [--enrich] Full re-index (--enrich: run entity extraction + links on all docs)
2206
2211
  clawmem watch File watcher daemon
2207
2212
  clawmem status Show index status
2208
2213
 
package/src/entity.ts CHANGED
@@ -8,6 +8,7 @@
8
8
  */
9
9
 
10
10
  import type { Database } from "bun:sqlite";
11
+ import { createHash } from "crypto";
11
12
  import type { LlamaCpp } from "./llm.ts";
12
13
  import { extractJsonFromLLM } from "./amem.ts";
13
14
 
@@ -299,12 +300,63 @@ export function trackCoOccurrences(
299
300
  // Entity Enrichment Pipeline (called during A-MEM postIndexEnrich)
300
301
  // =============================================================================
301
302
 
303
+ /**
304
+ * Compute extraction input hash from title + body.
305
+ * Captures the actual input to the LLM prompt — changes to either trigger re-extraction.
306
+ */
307
+ function computeInputHash(title: string, body: string): string {
308
+ return createHash('sha256').update(title + '\0' + body).digest('hex');
309
+ }
310
+
311
+ /**
312
+ * Clear all derived entity state for a document:
313
+ * mentions, co-occurrence contributions, entity edges, and mention counts.
314
+ */
315
+ function clearDocEntityState(db: Database, docId: number): void {
316
+ // Get entity IDs this doc mentions (before deletion)
317
+ const oldMentions = db.prepare(
318
+ `SELECT entity_id FROM entity_mentions WHERE doc_id = ?`
319
+ ).all(docId) as { entity_id: string }[];
320
+
321
+ // Delete mentions
322
+ db.prepare(`DELETE FROM entity_mentions WHERE doc_id = ?`).run(docId);
323
+
324
+ // Decrement mention_count for each entity
325
+ for (const m of oldMentions) {
326
+ db.prepare(`
327
+ UPDATE entity_nodes SET mention_count = MAX(0, mention_count - 1) WHERE entity_id = ?
328
+ `).run(m.entity_id);
329
+ }
330
+
331
+ // Remove entity edges involving this doc
332
+ db.prepare(`
333
+ DELETE FROM memory_relations WHERE (source_id = ? OR target_id = ?) AND relation_type = 'entity'
334
+ `).run(docId, docId);
335
+
336
+ // Decrement co-occurrence counts for entity pairs from this doc
337
+ if (oldMentions.length >= 2) {
338
+ const ids = oldMentions.map(m => m.entity_id);
339
+ for (let i = 0; i < ids.length; i++) {
340
+ for (let j = i + 1; j < ids.length; j++) {
341
+ const sorted = [ids[i]!, ids[j]!].sort();
342
+ db.prepare(`
343
+ UPDATE entity_cooccurrences SET count = MAX(0, count - 1)
344
+ WHERE entity_a = ? AND entity_b = ?
345
+ `).run(sorted[0]!, sorted[1]!);
346
+ }
347
+ }
348
+ // Clean up zero-count rows
349
+ db.prepare(`DELETE FROM entity_cooccurrences WHERE count <= 0`).run();
350
+ }
351
+ }
352
+
302
353
  /**
303
354
  * Full entity enrichment for a document:
304
- * 1. Extract entities via LLM
305
- * 2. Resolve each to canonical form
306
- * 3. Record mentions
307
- * 4. Track co-occurrences
355
+ * 1. Check enrichment state (skip if input unchanged)
356
+ * 2. Extract entities via LLM
357
+ * 3. Resolve each to canonical form
358
+ * 4. Record mentions + co-occurrences + entity edges
359
+ * 5. Persist enrichment state for idempotency
308
360
  *
309
361
  * @returns Number of entities resolved
310
362
  */
@@ -315,7 +367,7 @@ export async function enrichDocumentEntities(
315
367
  vault: string = 'default'
316
368
  ): Promise<number> {
317
369
  try {
318
- // Get document content
370
+ // Get document content (snapshot for extraction)
319
371
  const doc = db.prepare(`
320
372
  SELECT d.title, c.doc as body
321
373
  FROM documents d
@@ -328,14 +380,34 @@ export async function enrichDocumentEntities(
328
380
  return 0;
329
381
  }
330
382
 
331
- // Step 1: Extract entities
383
+ // Compute extraction input hash (title + body — the actual LLM prompt input)
384
+ const inputHash = computeInputHash(doc.title, doc.body);
385
+
386
+ // Check enrichment state — skip if already enriched with same input
387
+ const existingState = db.prepare(
388
+ `SELECT input_hash FROM entity_enrichment_state WHERE doc_id = ?`
389
+ ).get(docId) as { input_hash: string } | undefined;
390
+
391
+ if (existingState?.input_hash === inputHash) {
392
+ return 0; // Same input, already enriched — skip
393
+ }
394
+
395
+ // Step 1: Extract entities via LLM
332
396
  const entities = await extractEntities(llm, doc.title, doc.body);
333
- if (entities.length === 0) {
334
- console.log(`[entity] No entities found in docId ${docId}`);
397
+
398
+ // Recheck input hash before writing — abort if content changed during LLM call
399
+ const recheckHash = db.prepare(`
400
+ SELECT d.title, c.doc as body FROM documents d
401
+ JOIN content c ON c.hash = d.hash WHERE d.id = ? AND d.active = 1
402
+ `).get(docId) as { title: string; body: string } | null;
403
+
404
+ if (!recheckHash || computeInputHash(recheckHash.title, recheckHash.body) !== inputHash) {
405
+ console.log(`[entity] Document ${docId} changed during extraction — aborting`);
335
406
  return 0;
336
407
  }
337
408
 
338
- // Step 2-3: Deduplicate entities by name+type, then resolve and record mentions
409
+ // Step 3: Deduplicate entities by surface form, then resolve canonical IDs
410
+ // Done BEFORE transaction to avoid calling upsertEntity (which mutates counters) for dupes
339
411
  const seenKeys = new Set<string>();
340
412
  const uniqueEntities: ExtractedEntity[] = [];
341
413
  for (const entity of entities) {
@@ -346,36 +418,87 @@ export async function enrichDocumentEntities(
346
418
  }
347
419
  }
348
420
 
349
- const resolvedIds: string[] = [];
421
+ // Resolve canonical IDs first (read-only lookups, no counter mutation yet)
422
+ const resolvedPairs: { entity: ExtractedEntity; canonicalId: string }[] = [];
423
+ const seenCanonicalIds = new Set<string>();
350
424
  for (const entity of uniqueEntities) {
351
- const entityId = upsertEntity(db, entity.name, entity.type, vault);
352
- resolvedIds.push(entityId);
353
- recordEntityMention(db, entityId, docId, entity.name);
425
+ const canonicalId = resolveEntityCanonical(db, entity.name, entity.type, vault)
426
+ || makeEntityId(entity.name, entity.type, vault);
427
+ if (!seenCanonicalIds.has(canonicalId)) {
428
+ seenCanonicalIds.add(canonicalId);
429
+ resolvedPairs.push({ entity, canonicalId });
430
+ }
354
431
  }
355
432
 
356
- // Step 4: Track co-occurrences (deduplicated IDs prevent inflated pair counts)
357
- trackCoOccurrences(db, resolvedIds);
433
+ // All writes in a transaction — partial failure rolls back cleanly
434
+ try {
435
+ db.exec("BEGIN");
436
+
437
+ // Re-check enrichment state inside transaction (prevents concurrent overcount)
438
+ const txState = db.prepare(
439
+ `SELECT input_hash FROM entity_enrichment_state WHERE doc_id = ?`
440
+ ).get(docId) as { input_hash: string } | undefined;
358
441
 
359
- // Step 5: Create entity edges in memory_relations
360
- for (const entityId of resolvedIds) {
361
- // Find other documents mentioning this entity
362
- const otherDocs = db.prepare(`
363
- SELECT doc_id FROM entity_mentions
364
- WHERE entity_id = ? AND doc_id != ?
365
- LIMIT 10
366
- `).all(entityId, docId) as { doc_id: number }[];
442
+ if (txState?.input_hash === inputHash) {
443
+ db.exec("ROLLBACK");
444
+ return 0; // Another worker already committed this exact enrichment
445
+ }
446
+
447
+ // Clear old derived state if re-enriching (content changed)
448
+ if (txState || existingState) {
449
+ clearDocEntityState(db, docId);
450
+ }
367
451
 
368
- for (const other of otherDocs) {
369
- // Insert entity relation (unidirectional; graph traversal handles inbound for entity/semantic types)
452
+ if (entities.length === 0) {
370
453
  db.prepare(`
371
- INSERT OR IGNORE INTO memory_relations (source_id, target_id, relation_type, weight, metadata, created_at)
372
- VALUES (?, ?, 'entity', 0.7, ?, datetime('now'))
373
- `).run(docId, other.doc_id, JSON.stringify({ entity: entityId }));
454
+ INSERT OR REPLACE INTO entity_enrichment_state (doc_id, input_hash, enriched_at)
455
+ VALUES (?, ?, datetime('now'))
456
+ `).run(docId, inputHash);
457
+ db.exec("COMMIT");
458
+ console.log(`[entity] No entities found in docId ${docId}`);
459
+ return 0;
374
460
  }
375
- }
376
461
 
377
- console.log(`[entity] Enriched docId ${docId}: ${resolvedIds.length} entities, ${entities.length} extracted`);
378
- return resolvedIds.length;
462
+ // Now mutate counters — one upsert per unique canonical ID (no inflation)
463
+ const resolvedIds: string[] = [];
464
+ for (const { entity, canonicalId } of resolvedPairs) {
465
+ const entityId = upsertEntity(db, entity.name, entity.type, vault);
466
+ resolvedIds.push(entityId);
467
+ recordEntityMention(db, entityId, docId, entity.name);
468
+ }
469
+
470
+ // Step 4: Track co-occurrences (deduplicated by canonical ID)
471
+ trackCoOccurrences(db, resolvedIds);
472
+
473
+ // Step 5: Create entity edges in memory_relations
474
+ for (const entityId of resolvedIds) {
475
+ const otherDocs = db.prepare(`
476
+ SELECT doc_id FROM entity_mentions
477
+ WHERE entity_id = ? AND doc_id != ?
478
+ LIMIT 10
479
+ `).all(entityId, docId) as { doc_id: number }[];
480
+
481
+ for (const other of otherDocs) {
482
+ db.prepare(`
483
+ INSERT OR IGNORE INTO memory_relations (source_id, target_id, relation_type, weight, metadata, created_at)
484
+ VALUES (?, ?, 'entity', 0.7, ?, datetime('now'))
485
+ `).run(docId, other.doc_id, JSON.stringify({ entity: entityId }));
486
+ }
487
+ }
488
+
489
+ // Persist enrichment state LAST — only after all derived data written
490
+ db.prepare(`
491
+ INSERT OR REPLACE INTO entity_enrichment_state (doc_id, input_hash, enriched_at)
492
+ VALUES (?, ?, datetime('now'))
493
+ `).run(docId, inputHash);
494
+
495
+ db.exec("COMMIT");
496
+ console.log(`[entity] Enriched docId ${docId}: ${resolvedIds.length} entities, ${entities.length} extracted`);
497
+ return resolvedIds.length;
498
+ } catch (txErr) {
499
+ try { db.exec("ROLLBACK"); } catch { /* already rolled back */ }
500
+ throw txErr; // re-throw to outer catch
501
+ }
379
502
  } catch (err) {
380
503
  console.log(`[entity] Error enriching docId ${docId}:`, err);
381
504
  return 0;
package/src/indexer.ts CHANGED
@@ -166,7 +166,8 @@ export async function indexCollection(
166
166
  store: Store,
167
167
  collectionName: string,
168
168
  collectionPath: string,
169
- pattern: string = "**/*.md"
169
+ pattern: string = "**/*.md",
170
+ options?: { forceEnrich?: boolean }
170
171
  ): Promise<IndexStats> {
171
172
  const stats: IndexStats = { added: 0, updated: 0, unchanged: 0, removed: 0 };
172
173
  const activePaths = new Set<string>();
@@ -223,6 +224,10 @@ export async function indexCollection(
223
224
 
224
225
  if (existingRow?.content_hash === contentHash) {
225
226
  stats.unchanged++;
227
+ if (options?.forceEnrich) {
228
+ // --enrich: queue unchanged docs for full enrichment (entity extraction, links)
229
+ enrichQueue.push({ docId: existing.id, isNew: true });
230
+ }
226
231
  continue;
227
232
  }
228
233
 
@@ -319,8 +324,9 @@ export async function indexCollection(
319
324
  }
320
325
 
321
326
  // A-MEM enrichment runs after successful commit (LLM calls should not block transaction)
327
+ // forceEnrich overrides isNew to true — triggers full pipeline (entity extraction, links, evolution)
322
328
  for (const { docId, isNew } of enrichQueue) {
323
- await store.postIndexEnrich(llm, docId, isNew);
329
+ await store.postIndexEnrich(llm, docId, options?.forceEnrich ? true : isNew);
324
330
  }
325
331
 
326
332
  return stats;
package/src/store.ts CHANGED
@@ -711,6 +711,16 @@ function initializeDatabase(db: Database): void {
711
711
  // Entity FTS5 for fuzzy name lookup
712
712
  db.exec(`CREATE VIRTUAL TABLE IF NOT EXISTS entities_fts USING fts5(entity_id, name, entity_type)`);
713
713
 
714
+ // Entity enrichment state: tracks what input was used for extraction (idempotent --enrich)
715
+ db.exec(`
716
+ CREATE TABLE IF NOT EXISTS entity_enrichment_state (
717
+ doc_id INTEGER PRIMARY KEY,
718
+ input_hash TEXT NOT NULL,
719
+ enriched_at TEXT NOT NULL,
720
+ FOREIGN KEY (doc_id) REFERENCES documents(id) ON DELETE CASCADE
721
+ )
722
+ `);
723
+
714
724
  // 3-tier consolidation: observations synthesized from clusters of related facts
715
725
  db.exec(`
716
726
  CREATE TABLE IF NOT EXISTS consolidated_observations (