@whenmoon-afk/memory-mcp 2.4.0 → 2.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -100,11 +100,7 @@ After any installation method, **restart Claude Desktop completely** (quit and r
 
  ## Custom Database Location
 
- By default, memories are stored at:
-
- - **macOS**: `~/.claude-memories/memory.db`
- - **Windows**: `%APPDATA%/claude-memories/memory.db`
- - **Linux**: `~/.local/share/claude-memories/memory.db`
+ By default, memories are stored at `~/.memory-mcp/memory.db` on all platforms.
 
  To use a custom location, add the `env` field to your config:
 
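A minimal sketch of what that `env` entry can look like in `claude_desktop_config.json` (the `MEMORY_DB_PATH` variable name is illustrative; this diff does not show which variable the server actually reads):

```json
{
  "mcpServers": {
    "memory": {
      "command": "npx",
      "args": ["-y", "@whenmoon-afk/memory-mcp"],
      "env": {
        "MEMORY_DB_PATH": "/path/to/custom/memory.db"
      }
    }
  }
}
```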
@@ -171,6 +167,32 @@ The `npx github:` method bypasses npm cache. Alternatively:
 
  First run requires downloading and installing dependencies (can take 30+ seconds). Subsequent runs are faster but still fetch from GitHub. For faster startup, use the global install method.
 
+ ## Database Consolidation
+
+ If you have multiple memory database files (e.g., from different Claude Desktop versions or backups), you can merge them.
+
+ **Discover existing databases:**
+ ```bash
+ npx -p @whenmoon-afk/memory-mcp memory-mcp-consolidate --discover
+ ```
+
+ This searches common locations and lists each database it finds, with its memory count, size, and WAL file status.
+
+ **Merge databases:**
+ ```bash
+ npx -p @whenmoon-afk/memory-mcp memory-mcp-consolidate ~/.memory-mcp/merged.db ~/old-db1.db ~/old-db2.db
+ ```
+
+ Features:
+ - Auto-discovers databases in common locations
+ - Shows WAL file status (a WAL file signals data not yet checkpointed into the main file)
+ - Deduplicates by content hash (same content + type = duplicate; see the sketch after this file's diff)
+ - Keeps the most recently accessed version when duplicates are found
+ - Merges access counts from all duplicates
+ - Preserves all provenance/audit records
+ - Checkpoints WAL files before reading
+ - Never deletes or rewrites source data (checkpointing only flushes each source's WAL into its own main file)
+
 
  ## Dependencies
 
  - `@modelcontextprotocol/sdk` - MCP protocol implementation
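Before moving on to the manifest changes: the dedup rule referenced in the feature list above reduces to a single digest. A minimal sketch, mirroring `getContentHash` in the script added at the end of this diff:

```js
import { createHash } from 'crypto';

// Two memories are treated as duplicates when this digest matches:
// identical type plus byte-identical content.
const dedupKey = (content, type) =>
  createHash('sha256').update(`${type}:${content}`).digest('hex');
```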
package/package.json CHANGED
@@ -1,16 +1,18 @@
 {
   "name": "@whenmoon-afk/memory-mcp",
-  "version": "2.4.0",
+  "version": "2.4.1",
   "description": "Brain-inspired memory for AI agents - MCP server",
   "type": "module",
   "main": "./dist/index.js",
   "bin": {
     "memory-mcp": "./dist/index.js",
-    "memory-mcp-install": "./install.js"
+    "memory-mcp-install": "./install.js",
+    "memory-mcp-consolidate": "./scripts/consolidate-memories.js"
   },
   "files": [
     "dist/",
     "install.js",
+    "scripts/",
     "README.md",
     "LICENSE"
   ],
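The new `memory-mcp-consolidate` entry in `bin` is what exposes the consolidation tool as a runnable command. Since it is a secondary bin inside `@whenmoon-afk/memory-mcp` rather than a package of its own, npx needs the `--package`/`-p` form to resolve it:

```bash
# Run the consolidation bin shipped inside @whenmoon-afk/memory-mcp
npx -p @whenmoon-afk/memory-mcp memory-mcp-consolidate --discover
```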
@@ -29,6 +31,7 @@
     "format:check": "prettier --check \"src/**/*.ts\"",
     "release": "npm run test && npm run build",
     "clean": "rm -rf dist",
+    "consolidate": "node scripts/consolidate-memories.js",
     "prepublishOnly": "npm run clean && npm run build"
   },
   "keywords": [
package/scripts/consolidate-memories.js ADDED
@@ -0,0 +1,780 @@
+ #!/usr/bin/env node
+
+ /**
+  * Memory Database Consolidation Tool
+  *
+  * Merges multiple memory database files into one, deduplicating by content hash.
+  *
+  * Usage:
+  *   node scripts/consolidate-memories.js <target.db> <source1.db> [source2.db] ...
+  *
+  * Example:
+  *   node scripts/consolidate-memories.js ~/.memory-mcp/memory.db ~/old-memories/*.db
+  */
+
+ import Database from 'better-sqlite3';
+ import { createHash } from 'crypto';
+ import { existsSync, statSync, readdirSync } from 'fs';
+ import { basename, resolve, join } from 'path';
+ import { homedir, platform } from 'os';
+
+ // ANSI colors for output
+ const colors = {
+   reset: '\x1b[0m',
+   bright: '\x1b[1m',
+   green: '\x1b[32m',
+   yellow: '\x1b[33m',
+   red: '\x1b[31m',
+   cyan: '\x1b[36m',
+ };
+
+ function log(msg) {
+   console.log(msg);
+ }
+
+ function success(msg) {
+   console.log(`${colors.green}✓${colors.reset} ${msg}`);
+ }
+
+ function warn(msg) {
+   console.log(`${colors.yellow}⚠${colors.reset} ${msg}`);
+ }
+
+ function error(msg) {
+   console.error(`${colors.red}✗${colors.reset} ${msg}`);
+ }
+
+ function header(msg) {
+   console.log(`\n${colors.bright}${colors.cyan}${msg}${colors.reset}`);
+ }
+
+ /**
+  * Get common search paths for memory databases
+  */
+ function getSearchPaths() {
+   const home = homedir();
+   const plat = platform();
+
+   const paths = [
+     // New unified path
+     join(home, '.memory-mcp'),
+     // Old paths
+     join(home, '.claude-memories'),
+   ];
+
+   if (plat === 'darwin') {
+     // macOS
+     paths.push(join(home, 'Library', 'Application Support', 'Claude'));
+     paths.push(join(home, 'Library', 'Application Support'));
+   } else if (plat === 'win32') {
+     // Windows
+     const appData = process.env.APPDATA || join(home, 'AppData', 'Roaming');
+     paths.push(join(appData, 'claude-memories'));
+     paths.push(join(appData, 'Claude'));
+     paths.push(appData);
+     // Also check local app data for versioned Claude folders
+     const localAppData = process.env.LOCALAPPDATA || join(home, 'AppData', 'Local');
+     paths.push(localAppData);
+   } else {
+     // Linux
+     const xdgData = process.env.XDG_DATA_HOME || join(home, '.local', 'share');
+     const xdgConfig = process.env.XDG_CONFIG_HOME || join(home, '.config');
+     paths.push(join(xdgData, 'claude-memories'));
+     paths.push(join(xdgConfig, 'Claude'));
+   }
+
+   return paths.filter(p => existsSync(p));
+ }
+
+ /**
+  * Recursively find all .db files that look like memory databases
+  */
+ function findDatabaseFiles(searchPath, maxDepth = 3, currentDepth = 0) {
+   const results = [];
+
+   if (currentDepth > maxDepth || !existsSync(searchPath)) {
+     return results;
+   }
+
+   try {
+     const entries = readdirSync(searchPath, { withFileTypes: true });
+
+     for (const entry of entries) {
+       const fullPath = join(searchPath, entry.name);
+
+       if (entry.isFile() && entry.name.endsWith('.db')) {
+         // Check if it looks like a memory database
+         if (isMemoryDatabase(fullPath)) {
+           results.push(fullPath);
+         }
+       } else if (entry.isDirectory() && !entry.name.startsWith('.') && entry.name !== 'node_modules') {
+         // Recurse into subdirectories
+         results.push(...findDatabaseFiles(fullPath, maxDepth, currentDepth + 1));
+       }
+     }
+   } catch {
+     // Permission denied or unreadable directory; skip it
+   }
+
+   return results;
+ }
+
+ /**
+  * Check if a database file is a memory database by checking for expected tables
+  */
+ function isMemoryDatabase(dbPath) {
+   try {
+     const db = new Database(dbPath, { readonly: true, fileMustExist: true });
+     const tables = db.prepare("SELECT name FROM sqlite_master WHERE type='table'").pluck().all();
+     db.close();
+
+     // Must have a memories table to qualify
+     return tables.includes('memories');
+   } catch {
+     return false;
+   }
+ }
+
+ /**
+  * Get info about a database file
+  */
+ function getDatabaseInfo(dbPath) {
+   const stats = statSync(dbPath);
+   const walPath = dbPath + '-wal';
+   const shmPath = dbPath + '-shm';
+
+   const hasWal = existsSync(walPath);
+   const hasShm = existsSync(shmPath);
+
+   let walSize = 0;
+   if (hasWal) {
+     walSize = statSync(walPath).size;
+   }
+
+   let memoryCount = 0;
+   let schemaVersion = 0;
+   let lastAccessed = 0;
+
+   try {
+     const db = new Database(dbPath, { readonly: true });
+
+     // Get schema version
+     try {
+       schemaVersion = db.prepare('SELECT MAX(version) FROM schema_version').pluck().get() || 0;
+     } catch {}
+
+     // Get memory count (older schemas lack the is_deleted column)
+     try {
+       memoryCount = db.prepare('SELECT COUNT(*) FROM memories WHERE is_deleted = 0').pluck().get() || 0;
+     } catch {
+       try {
+         memoryCount = db.prepare('SELECT COUNT(*) FROM memories').pluck().get() || 0;
+       } catch {}
+     }
+
+     // Get last accessed time
+     try {
+       lastAccessed = db.prepare('SELECT MAX(last_accessed) FROM memories').pluck().get() || 0;
+     } catch {}
+
+     db.close();
+   } catch {}
+
+   return {
+     path: dbPath,
+     size: stats.size,
+     modified: stats.mtime,
+     hasWal,
+     walSize,
+     hasShm,
+     memoryCount,
+     schemaVersion,
+     lastAccessed: lastAccessed ? new Date(lastAccessed) : null,
+   };
+ }
+
+ /**
+  * Discover all memory databases on the system
+  */
+ function discoverDatabases() {
+   header('Memory Database Discovery');
+   log('Searching for memory databases...\n');
+
+   const searchPaths = getSearchPaths();
+   const foundDbs = new Set();
+
+   for (const searchPath of searchPaths) {
+     log(`  Searching: ${searchPath}`);
+     const dbs = findDatabaseFiles(searchPath);
+     dbs.forEach(db => foundDbs.add(db));
+   }
+
+   if (foundDbs.size === 0) {
+     warn('No memory databases found in common locations.');
+     log('\nYou can specify database paths manually:');
+     log('  node scripts/consolidate-memories.js <target.db> <source1.db> [source2.db] ...');
+     return [];
+   }
+
+   header(`Found ${foundDbs.size} database(s):`);
+
+   const dbInfos = [];
+   for (const dbPath of foundDbs) {
+     const info = getDatabaseInfo(dbPath);
+     dbInfos.push(info);
+   }
+
+   // Sort by memory count (most memories first)
+   dbInfos.sort((a, b) => b.memoryCount - a.memoryCount);
+
+   // Display info
+   for (let i = 0; i < dbInfos.length; i++) {
+     const info = dbInfos[i];
+     const sizeKb = (info.size / 1024).toFixed(1);
+     const walIndicator = info.hasWal ? ` ${colors.yellow}[WAL: ${(info.walSize / 1024).toFixed(1)}KB]${colors.reset}` : '';
+     const lastAccessStr = info.lastAccessed ? info.lastAccessed.toLocaleDateString() : 'unknown';
+
+     log(`\n  ${colors.bright}[${i + 1}]${colors.reset} ${info.path}`);
+     log(`      Size: ${sizeKb} KB${walIndicator}`);
+     log(`      Memories: ${info.memoryCount} | Schema: v${info.schemaVersion} | Last used: ${lastAccessStr}`);
+   }
+
+   return dbInfos;
+ }
+
+ /**
+  * Generate content hash for deduplication
+  */
+ function getContentHash(content, type) {
+   return createHash('sha256').update(`${type}:${content}`).digest('hex');
+ }
+
+ /**
+  * Checkpoint WAL to ensure all data is in the main DB file
+  */
+ function checkpointWal(dbPath) {
+   try {
+     const db = new Database(dbPath);
+     db.pragma('wal_checkpoint(TRUNCATE)');
+     db.close();
+     return true;
+   } catch (err) {
+     warn(`Could not checkpoint WAL for ${basename(dbPath)}: ${err.message}`);
+     return false;
+   }
+ }
+
+ /**
+  * Get schema version from a database
+  */
+ function getSchemaVersion(db) {
+   try {
+     const version = db.prepare('SELECT MAX(version) FROM schema_version').pluck().get();
+     return version || 0;
+   } catch {
+     return 0;
+   }
+ }
+
+ /**
+  * Read all data from a source database
+  */
+ function readSourceDatabase(dbPath) {
+   const db = new Database(dbPath, { readonly: true });
+
+   const schemaVersion = getSchemaVersion(db);
+
+   // Read memories
+   let memories = [];
+   try {
+     if (schemaVersion >= 2) {
+       memories = db.prepare(`
+         SELECT id, content, type, importance, embedding, created_at, last_accessed,
+                expires_at, metadata, is_deleted, summary, access_count
+         FROM memories
+       `).all();
+     } else {
+       // Schema v1 doesn't have summary and access_count
+       memories = db.prepare(`
+         SELECT id, content, type, importance, embedding, created_at, last_accessed,
+                expires_at, metadata, is_deleted,
+                NULL as summary, 0 as access_count
+         FROM memories
+       `).all();
+     }
+   } catch (err) {
+     warn(`Could not read memories from ${basename(dbPath)}: ${err.message}`);
+   }
+
+   // Read entities
+   let entities = [];
+   try {
+     entities = db.prepare('SELECT id, name, type, metadata, created_at FROM entities').all();
+   } catch (err) {
+     warn(`Could not read entities from ${basename(dbPath)}: ${err.message}`);
+   }
+
+   // Read memory_entities links
+   let memoryEntities = [];
+   try {
+     memoryEntities = db.prepare('SELECT memory_id, entity_id, created_at FROM memory_entities').all();
+   } catch (err) {
+     warn(`Could not read memory_entities from ${basename(dbPath)}: ${err.message}`);
+   }
+
+   // Read provenance
+   let provenance = [];
+   try {
+     provenance = db.prepare(`
+       SELECT id, memory_id, operation, timestamp, source, context, user_id, changes
+       FROM provenance
+     `).all();
+   } catch (err) {
+     warn(`Could not read provenance from ${basename(dbPath)}: ${err.message}`);
+   }
+
+   db.close();
+
+   return {
+     schemaVersion,
+     memories,
+     entities,
+     memoryEntities,
+     provenance,
+   };
+ }
+
+ /**
+  * Merge memories with deduplication
+  */
+ function mergeMemories(allMemories) {
+   const byHash = new Map();
+   let duplicatesFound = 0;
+
+   for (const memory of allMemories) {
+     const hash = getContentHash(memory.content, memory.type);
+     const existing = byHash.get(hash);
+
+     if (existing) {
+       duplicatesFound++;
+       // Pool access counts and track every source ID so links and
+       // provenance records can be remapped to the surviving row later
+       const accessCount = (memory.access_count || 0) + (existing.access_count || 0);
+       const originalIds = [...existing._originalIds, memory.id];
+
+       if (memory.last_accessed > existing.last_accessed) {
+         // The more recently accessed copy wins; carry the pooled metadata with it
+         byHash.set(hash, { ...memory, access_count: accessCount, _originalIds: originalIds });
+       } else {
+         existing.access_count = accessCount;
+         existing._originalIds = originalIds;
+       }
+     } else {
+       byHash.set(hash, { ...memory, _originalIds: [memory.id] });
+     }
+   }
+
+   return {
+     memories: Array.from(byHash.values()),
+     duplicatesFound,
+   };
+ }
+
+ /**
+  * Merge entities (unique by name)
+  */
+ function mergeEntities(allEntities) {
+   const byName = new Map();
+   const idMap = new Map(); // every source entity ID -> ID of the kept entity
+   let duplicatesFound = 0;
+
+   for (const entity of allEntities) {
+     const kept = byName.get(entity.name);
+     if (kept) {
+       duplicatesFound++;
+       // Keep the first one seen; remember the mapping so links still resolve
+       idMap.set(entity.id, kept.id);
+     } else {
+       byName.set(entity.name, entity);
+       idMap.set(entity.id, entity.id);
+     }
+   }
+
+   return {
+     entities: Array.from(byName.values()),
+     idMap,
+     duplicatesFound,
+   };
+ }
+
+ /**
+  * Initialize target database with schema
+  */
+ function initializeTargetDatabase(db) {
+   db.pragma('foreign_keys = ON');
+   db.pragma('journal_mode = WAL');
+
+   db.exec(`
+     CREATE TABLE IF NOT EXISTS schema_version (
+       version INTEGER PRIMARY KEY,
+       applied_at INTEGER NOT NULL
+     );
+
+     CREATE TABLE IF NOT EXISTS memories (
+       id TEXT PRIMARY KEY,
+       content TEXT NOT NULL,
+       type TEXT NOT NULL CHECK(type IN ('fact', 'entity', 'relationship', 'self')),
+       importance REAL NOT NULL CHECK(importance >= 0 AND importance <= 10),
+       embedding BLOB,
+       created_at INTEGER NOT NULL,
+       last_accessed INTEGER NOT NULL,
+       expires_at INTEGER,
+       metadata TEXT NOT NULL DEFAULT '{}',
+       is_deleted INTEGER NOT NULL DEFAULT 0 CHECK(is_deleted IN (0, 1)),
+       summary TEXT,
+       access_count INTEGER NOT NULL DEFAULT 0
+     );
+
+     CREATE INDEX IF NOT EXISTS idx_memories_type ON memories(type);
+     CREATE INDEX IF NOT EXISTS idx_memories_importance ON memories(importance);
+     CREATE INDEX IF NOT EXISTS idx_memories_last_accessed ON memories(last_accessed);
+     CREATE INDEX IF NOT EXISTS idx_memories_expires_at ON memories(expires_at);
+     CREATE INDEX IF NOT EXISTS idx_memories_is_deleted ON memories(is_deleted);
+
+     CREATE TABLE IF NOT EXISTS entities (
+       id TEXT PRIMARY KEY,
+       name TEXT NOT NULL UNIQUE,
+       type TEXT NOT NULL,
+       metadata TEXT NOT NULL DEFAULT '{}',
+       created_at INTEGER NOT NULL
+     );
+
+     CREATE INDEX IF NOT EXISTS idx_entities_name ON entities(name);
+     CREATE INDEX IF NOT EXISTS idx_entities_type ON entities(type);
+
+     CREATE TABLE IF NOT EXISTS memory_entities (
+       memory_id TEXT NOT NULL,
+       entity_id TEXT NOT NULL,
+       created_at INTEGER NOT NULL,
+       PRIMARY KEY (memory_id, entity_id),
+       FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE,
+       FOREIGN KEY (entity_id) REFERENCES entities(id) ON DELETE CASCADE
+     );
+
+     CREATE INDEX IF NOT EXISTS idx_memory_entities_memory_id ON memory_entities(memory_id);
+     CREATE INDEX IF NOT EXISTS idx_memory_entities_entity_id ON memory_entities(entity_id);
+
+     CREATE TABLE IF NOT EXISTS provenance (
+       id TEXT PRIMARY KEY,
+       memory_id TEXT NOT NULL,
+       operation TEXT NOT NULL CHECK(operation IN ('create', 'update', 'delete', 'access', 'restore')),
+       timestamp INTEGER NOT NULL,
+       source TEXT NOT NULL,
+       context TEXT,
+       user_id TEXT,
+       changes TEXT,
+       FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE
+     );
+
+     CREATE INDEX IF NOT EXISTS idx_provenance_memory_id ON provenance(memory_id);
+     CREATE INDEX IF NOT EXISTS idx_provenance_timestamp ON provenance(timestamp);
+     CREATE INDEX IF NOT EXISTS idx_provenance_operation ON provenance(operation);
+
+     INSERT OR REPLACE INTO schema_version (version, applied_at) VALUES (3, ${Date.now()});
+   `);
+ }
+
+ /**
+  * Build FTS index
+  */
+ function buildFtsIndex(db) {
+   db.exec(`
+     DROP TABLE IF EXISTS memories_fts;
+
+     CREATE VIRTUAL TABLE memories_fts USING fts5(
+       memory_id UNINDEXED,
+       content,
+       summary,
+       tokenize = 'porter unicode61'
+     );
+
+     INSERT INTO memories_fts (memory_id, content, summary)
+     SELECT id, content, summary FROM memories WHERE is_deleted = 0;
+
+     -- Triggers for FTS sync
+     CREATE TRIGGER IF NOT EXISTS memories_fts_insert
+     AFTER INSERT ON memories WHEN NEW.is_deleted = 0
+     BEGIN
+       INSERT INTO memories_fts (memory_id, content, summary)
+       VALUES (NEW.id, NEW.content, NEW.summary);
+     END;
+
+     CREATE TRIGGER IF NOT EXISTS memories_fts_update
+     AFTER UPDATE OF content, summary ON memories WHEN NEW.is_deleted = 0
+     BEGIN
+       DELETE FROM memories_fts WHERE memory_id = NEW.id;
+       INSERT INTO memories_fts (memory_id, content, summary)
+       VALUES (NEW.id, NEW.content, NEW.summary);
+     END;
+
+     CREATE TRIGGER IF NOT EXISTS memories_fts_delete
+     AFTER UPDATE OF is_deleted ON memories WHEN NEW.is_deleted = 1
+     BEGIN
+       DELETE FROM memories_fts WHERE memory_id = NEW.id;
+     END;
+
+     CREATE TRIGGER IF NOT EXISTS memories_fts_restore
+     AFTER UPDATE OF is_deleted ON memories
+     WHEN NEW.is_deleted = 0 AND OLD.is_deleted = 1
+     BEGIN
+       INSERT INTO memories_fts (memory_id, content, summary)
+       VALUES (NEW.id, NEW.content, NEW.summary);
+     END;
+   `);
+ }
+
+ /**
+  * Main consolidation function
+  */
+ function consolidate(targetPath, sourcePaths) {
+   header('Memory Database Consolidation Tool');
+   log(`Target: ${targetPath}`);
+   log(`Sources: ${sourcePaths.length} database(s)\n`);
+
+   // Verify source files exist
+   for (const sourcePath of sourcePaths) {
+     if (!existsSync(sourcePath)) {
+       error(`Source file not found: ${sourcePath}`);
+       process.exit(1);
+     }
+   }
+
+   // Checkpoint WAL files
+   header('Step 1: Checkpoint WAL files');
+   for (const sourcePath of sourcePaths) {
+     checkpointWal(sourcePath);
+     success(`Checkpointed ${basename(sourcePath)}`);
+   }
+
+   // Read all source databases
+   header('Step 2: Read source databases');
+   const allMemories = [];
+   const allEntities = [];
+   const allMemoryEntities = [];
+   const allProvenance = [];
+
+   for (const sourcePath of sourcePaths) {
+     const data = readSourceDatabase(sourcePath);
+     const size = statSync(sourcePath).size;
+
+     log(`  ${basename(sourcePath)} (${(size / 1024).toFixed(1)} KB)`);
+     log(`    Schema v${data.schemaVersion}, ${data.memories.length} memories, ${data.entities.length} entities`);
+
+     allMemories.push(...data.memories);
+     allEntities.push(...data.entities);
+     allMemoryEntities.push(...data.memoryEntities);
+     allProvenance.push(...data.provenance);
+   }
+
+   log(`\n  Total: ${allMemories.length} memories, ${allEntities.length} entities, ${allProvenance.length} provenance records`);
+
+   // Merge and deduplicate
+   header('Step 3: Deduplicate memories');
+   const mergedMemories = mergeMemories(allMemories);
+   success(`${mergedMemories.memories.length} unique memories (${mergedMemories.duplicatesFound} duplicates removed)`);
+
+   header('Step 4: Deduplicate entities');
+   const mergedEntities = mergeEntities(allEntities);
+   success(`${mergedEntities.entities.length} unique entities (${mergedEntities.duplicatesFound} duplicates removed)`);
+
+   // Build ID mappings so memory_entities and provenance rows can be
+   // re-pointed at the rows that survived deduplication
+   const memoryIdMap = new Map(); // old memory ID -> kept memory ID
+   for (const memory of mergedMemories.memories) {
+     for (const oldId of memory._originalIds || [memory.id]) {
+       memoryIdMap.set(oldId, memory.id);
+     }
+   }
+
+   const entityIdMap = mergedEntities.idMap; // old entity ID -> kept entity ID
+
+   // Create target database
+   header('Step 5: Create consolidated database');
+
+   if (existsSync(targetPath)) {
+     warn(`Target file exists, will be overwritten: ${targetPath}`);
+   }
+
+   const targetDb = new Database(targetPath);
+   initializeTargetDatabase(targetDb);
+
+   // Insert memories
+   const insertMemory = targetDb.prepare(`
+     INSERT OR REPLACE INTO memories
+       (id, content, type, importance, embedding, created_at, last_accessed, expires_at, metadata, is_deleted, summary, access_count)
+     VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+   `);
+
+   const insertMemories = targetDb.transaction((memories) => {
+     for (const m of memories) {
+       // Generate a summary if missing: first sentence when short enough,
+       // otherwise a truncated prefix of the content
+       let summary = m.summary;
+       if (!summary) {
+         const firstSentence = m.content.match(/^[^.!?]+[.!?]/);
+         if (firstSentence && firstSentence[0].length <= 100) {
+           summary = firstSentence[0].trim();
+         } else if (m.content.length > 100) {
+           summary = m.content.substring(0, 97) + '...';
+         } else {
+           summary = m.content;
+         }
+       }
+
+       insertMemory.run(
+         m.id, m.content, m.type, m.importance, m.embedding,
+         m.created_at, m.last_accessed, m.expires_at, m.metadata,
+         m.is_deleted, summary, m.access_count || 0
+       );
+     }
+   });
+
+   insertMemories(mergedMemories.memories);
+   success(`Inserted ${mergedMemories.memories.length} memories`);
+
+   // Insert entities
+   const insertEntity = targetDb.prepare(`
+     INSERT OR REPLACE INTO entities (id, name, type, metadata, created_at)
+     VALUES (?, ?, ?, ?, ?)
+   `);
+
+   const insertEntities = targetDb.transaction((entities) => {
+     for (const e of entities) {
+       insertEntity.run(e.id, e.name, e.type, e.metadata, e.created_at);
+     }
+   });
+
+   insertEntities(mergedEntities.entities);
+   success(`Inserted ${mergedEntities.entities.length} entities`);
+
+   // Insert memory_entities (rebuild with mapped IDs; skip links whose
+   // memory or entity no longer exists after deduplication)
+   const insertMemoryEntity = targetDb.prepare(`
+     INSERT OR IGNORE INTO memory_entities (memory_id, entity_id, created_at)
+     VALUES (?, ?, ?)
+   `);
+
+   let memoryEntityCount = 0;
+   const insertMemoryEntities = targetDb.transaction((links) => {
+     for (const link of links) {
+       const newMemoryId = memoryIdMap.get(link.memory_id);
+       const newEntityId = entityIdMap.get(link.entity_id);
+       if (newMemoryId && newEntityId) {
+         insertMemoryEntity.run(newMemoryId, newEntityId, link.created_at);
+         memoryEntityCount++;
+       }
+     }
+   });
+
+   insertMemoryEntities(allMemoryEntities);
+   success(`Inserted ${memoryEntityCount} memory-entity links`);
+
+   // Insert provenance (with mapped memory IDs)
+   const insertProvenance = targetDb.prepare(`
+     INSERT OR IGNORE INTO provenance (id, memory_id, operation, timestamp, source, context, user_id, changes)
+     VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+   `);
+
+   let provenanceCount = 0;
+   const insertProvenanceRecords = targetDb.transaction((records) => {
+     for (const p of records) {
+       const newMemoryId = memoryIdMap.get(p.memory_id);
+       if (newMemoryId) {
+         insertProvenance.run(p.id, newMemoryId, p.operation, p.timestamp, p.source, p.context, p.user_id, p.changes);
+         provenanceCount++;
+       }
+     }
+   });
+
+   insertProvenanceRecords(allProvenance);
+   success(`Inserted ${provenanceCount} provenance records`);
+
+   // Build FTS index
+   header('Step 6: Build FTS index');
+   buildFtsIndex(targetDb);
+   success('FTS index built');
+
+   // Optimize
+   targetDb.pragma('optimize');
+   targetDb.exec('ANALYZE');
+
+   targetDb.close();
+
+   // Summary
+   header('Consolidation Complete!');
+   const targetSize = statSync(targetPath).size;
+   log(`  Target: ${targetPath} (${(targetSize / 1024).toFixed(1)} KB)`);
+   log(`  Memories: ${mergedMemories.memories.length} (${mergedMemories.duplicatesFound} duplicates removed)`);
+   log(`  Entities: ${mergedEntities.entities.length}`);
+   log(`  Provenance: ${provenanceCount}`);
+   log('');
+   success('Source databases were not modified (aside from WAL checkpointing)');
+   log('\nYou can now update your config to use the consolidated database.');
+ }
+
+ // CLI
+ const args = process.argv.slice(2);
+
+ // Check for --discover flag
+ if (args.includes('--discover') || args.includes('-d')) {
+   const dbInfos = discoverDatabases();
+
+   if (dbInfos.length > 0) {
+     log('\n' + colors.cyan + 'To consolidate these databases:' + colors.reset);
+     log('  node scripts/consolidate-memories.js <target.db> <source1.db> [source2.db] ...\n');
+     log('Example (consolidate all found databases):');
+     const allPaths = dbInfos.map(d => `"${d.path}"`).join(' ');
+     log(`  node scripts/consolidate-memories.js ~/.memory-mcp/consolidated.db ${allPaths}\n`);
+   }
+   process.exit(0);
+ }
+
+ // Check for --help flag (or too few arguments)
+ if (args.includes('--help') || args.includes('-h') || args.length < 2) {
+   console.log(`
+ ${colors.bright}Memory Database Consolidation Tool${colors.reset}
+
+ Merges multiple memory database files into one, deduplicating by content hash.
+
+ ${colors.cyan}Usage:${colors.reset}
+   node scripts/consolidate-memories.js [options] <target.db> <source1.db> [source2.db] ...
+
+ ${colors.cyan}Commands:${colors.reset}
+   --discover, -d    Find all memory databases on your system
+   --help, -h        Show this help message
+
+ ${colors.cyan}Example:${colors.reset}
+   # First, discover existing databases
+   node scripts/consolidate-memories.js --discover
+
+   # Then consolidate them
+   node scripts/consolidate-memories.js ~/.memory-mcp/consolidated.db ~/old-db1.db ~/old-db2.db
+
+ ${colors.cyan}Features:${colors.reset}
+   - Discovers memory databases in common locations
+   - Identifies databases with WAL files (uncommitted data)
+   - Deduplicates by content hash (same content + type)
+   - Keeps most recently accessed version of duplicates
+   - Merges access_count from all duplicates
+   - Preserves all provenance records
+   - Checkpoints WAL files before reading
+   - Sources are only WAL-checkpointed, never rewritten
+ `);
+   process.exit(args.length < 2 ? 1 : 0);
+ }
+
+ const [targetPath, ...sourcePaths] = args.map(p => resolve(p));
+
+ try {
+   consolidate(targetPath, sourcePaths);
+ } catch (err) {
+   error(`Consolidation failed: ${err.message}`);
+   console.error(err.stack);
+   process.exit(1);
+ }
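After a merge, the consolidated file can be spot-checked with the same driver the script uses. A quick sketch, assuming `better-sqlite3` is installed and the target path from the examples above:

```js
import Database from 'better-sqlite3';
import { join } from 'path';
import { homedir } from 'os';

// Open read-only and confirm the counts match the tool's summary output.
const db = new Database(join(homedir(), '.memory-mcp', 'consolidated.db'), { readonly: true });
const memories = db.prepare('SELECT COUNT(*) FROM memories WHERE is_deleted = 0').pluck().get();
const entities = db.prepare('SELECT COUNT(*) FROM entities').pluck().get();
console.log(`memories: ${memories}, entities: ${entities}`);
db.close();
```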