mrmd-sync 0.3.0 → 0.3.2

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as published.
Files changed (2)
  1. package/package.json +1 -1
  2. package/src/index.js +320 -27
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "mrmd-sync",
-  "version": "0.3.0",
+  "version": "0.3.2",
   "description": "Production-ready sync server for mrmd - real-time collaboration with file persistence",
   "type": "module",
   "main": "src/index.js",
package/src/index.js CHANGED
@@ -24,6 +24,7 @@ import {
   mkdirSync,
   statSync,
   rmSync,
+  readdirSync,
 } from 'fs';
 import { join, dirname, relative, resolve } from 'path';
 import { createHash } from 'crypto';
@@ -279,6 +280,89 @@ const DEFAULT_CONFIG = {
 // Paths that require dangerouslyAllowSystemPaths: true
 const DANGEROUS_PATHS = ['/', '/etc', '/usr', '/var', '/bin', '/sbin', '/root', '/home'];
 
+// =============================================================================
+// DATA LOSS PREVENTION - Memory Monitoring
+// =============================================================================
+// Added after investigating unexplained data loss on 2026-01-16.
+// The sync server crashed with OOM after ~9 minutes, consuming 4GB for a 2.5KB
+// document. User lost ~2.5 hours of work because the editor gave no warning.
+//
+// These safeguards ensure:
+// 1. Memory usage is monitored and warnings are logged
+// 2. Y.Doc compaction runs periodically to bound memory growth
+// 3. Server fails fast (512MB limit in electron main.js) rather than OOM later
+// =============================================================================
+
+const MEMORY_WARNING_MB = 200; // Warn at 200MB
+// DISABLED: Compaction causes document duplication!
+// When clients reconnect after compaction, Yjs merges their state with the
+// server's fresh state, causing content to double. Need a different approach.
+// Keeping memory monitoring for warnings only.
+const MEMORY_COMPACT_MB = Infinity; // Disabled
+const MEMORY_CHECK_INTERVAL_MS = 30000; // Check every 30 seconds
+const COMPACTION_INTERVAL_MS = Infinity; // Disabled
+
+/**
+ * Get current memory usage in MB
+ */
+function getMemoryUsageMB() {
+  const usage = process.memoryUsage();
+  return {
+    heapUsed: Math.round(usage.heapUsed / 1024 / 1024),
+    heapTotal: Math.round(usage.heapTotal / 1024 / 1024),
+    rss: Math.round(usage.rss / 1024 / 1024),
+    external: Math.round(usage.external / 1024 / 1024),
+  };
+}
+
+/**
+ * Compact a Y.Doc by creating a fresh doc with only current content.
+ * This discards all operation history and tombstones, dramatically reducing memory.
+ *
+ * NOTE: This will disconnect all clients, who will need to reconnect.
+ * The content itself is preserved via the file and snapshot.
+ *
+ * @param {Object} docData - The document data object from getDoc()
+ * @param {Object} log - Logger instance
+ * @returns {Object} - New Y.Doc and Y.Text
+ */
+function compactYDoc(docData, log) {
+  const { ydoc, ytext, docName } = docData;
+
+  // Get current content before compaction
+  const currentContent = ytext.toString();
+  const oldStateSize = Y.encodeStateAsUpdate(ydoc).length;
+
+  log.info('Compacting Y.Doc', {
+    doc: docName,
+    contentLength: currentContent.length,
+    oldStateSize,
+  });
+
+  // Create fresh Y.Doc
+  const newYdoc = new Y.Doc();
+  newYdoc.name = docName;
+  const newYtext = newYdoc.getText('content');
+
+  // Insert current content into fresh doc
+  if (currentContent.length > 0) {
+    newYdoc.transact(() => {
+      newYtext.insert(0, currentContent);
+    });
+  }
+
+  const newStateSize = Y.encodeStateAsUpdate(newYdoc).length;
+
+  log.info('Y.Doc compacted', {
+    doc: docName,
+    oldStateSize,
+    newStateSize,
+    reduction: `${Math.round((1 - newStateSize / oldStateSize) * 100)}%`,
+  });
+
+  return { newYdoc, newYtext };
+}
+
 // =============================================================================
 // UTILITY FUNCTIONS
 // =============================================================================
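Note on the DISABLED compaction constants in the hunk above: the duplication they describe follows from how Yjs merges independent states. Below is a minimal standalone sketch, not package code, using only the public yjs API (Y.Doc, Y.encodeStateAsUpdate, Y.applyUpdate), of a client that still holds pre-compaction state reconnecting to a freshly rebuilt doc:

import * as Y from 'yjs';

// Server doc before compaction, already containing some text.
const server = new Y.Doc();
server.getText('content').insert(0, 'hello');

// A client syncs and keeps its own copy of that state.
const client = new Y.Doc();
Y.applyUpdate(client, Y.encodeStateAsUpdate(server));

// "Compaction": the server rebuilds a fresh doc holding only the current text.
const compacted = new Y.Doc();
compacted.getText('content').insert(0, server.getText('content').toString());

// On reconnect the client's old update is merged into the fresh doc. The fresh
// insertions and the client's original insertions are distinct operations, so
// the text is doubled.
Y.applyUpdate(compacted, Y.encodeStateAsUpdate(client));
console.log(compacted.getText('content').toString()); // content doubled, e.g. 'hellohello'

Keeping MEMORY_COMPACT_MB and COMPACTION_INTERVAL_MS at Infinity leaves only the warning path active, which matches the comment's stated intent.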
@@ -352,6 +436,87 @@ function atomicWriteFile(filePath, content) {
   }
 }
 
+/**
+ * Clean up stale temp files left behind by crashed processes.
+ * Temp files have pattern: {filename}.tmp.{pid}.{timestamp}
+ * A file is stale if:
+ * - The PID no longer exists (process died)
+ * - OR the timestamp is older than maxAgeMs (fallback safety)
+ */
+function cleanupStaleTempFiles(dir, log, maxAgeMs = 3600000) {
+  const tmpPattern = /\.tmp\.(\d+)\.(\d+)$/;
+  let cleaned = 0;
+  let errors = 0;
+
+  function walkDir(currentDir) {
+    let entries;
+    try {
+      entries = readdirSync(currentDir, { withFileTypes: true });
+    } catch {
+      return; // Skip directories we can't read
+    }
+
+    for (const entry of entries) {
+      const fullPath = join(currentDir, entry.name);
+
+      if (entry.isDirectory()) {
+        // Skip hidden directories and node_modules
+        if (!entry.name.startsWith('.') && entry.name !== 'node_modules') {
+          walkDir(fullPath);
+        }
+        continue;
+      }
+
+      // Check if this is a temp file
+      const match = entry.name.match(tmpPattern);
+      if (!match) continue;
+
+      const pid = parseInt(match[1], 10);
+      const timestamp = parseInt(match[2], 10);
+      const age = Date.now() - timestamp;
+
+      // Check if process is dead
+      let processIsDead = false;
+      try {
+        process.kill(pid, 0); // Signal 0 = check if process exists
+      } catch (err) {
+        if (err.code === 'ESRCH') {
+          processIsDead = true; // Process doesn't exist
+        }
+        // EPERM means process exists but we can't signal it
+      }
+
+      // Remove if process is dead OR file is older than maxAgeMs
+      if (processIsDead || age > maxAgeMs) {
+        try {
+          unlinkSync(fullPath);
+          cleaned++;
+          log.info('Removed stale temp file', {
+            path: fullPath,
+            pid,
+            processIsDead,
+            ageMs: age,
+          });
+        } catch (err) {
+          errors++;
+          log.warn('Failed to remove stale temp file', {
+            path: fullPath,
+            error: err.message,
+          });
+        }
+      }
+    }
+  }
+
+  walkDir(dir);
+
+  if (cleaned > 0 || errors > 0) {
+    log.info('Temp file cleanup complete', { cleaned, errors });
+  }
+
+  return { cleaned, errors };
+}
+
 /**
  * Save Yjs document state for crash recovery
  */
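The cleanup above assumes temp files are named {filename}.tmp.{pid}.{timestamp}. atomicWriteFile itself is not part of this diff, so the following is only a hypothetical sketch of a writer that matches that pattern, plus the signal-0 liveness probe the walker relies on:

import { writeFileSync, renameSync } from 'fs';

// Hypothetical writer producing the {filename}.tmp.{pid}.{timestamp} names the
// cleanup regex expects (the real atomicWriteFile is outside this diff).
function atomicWriteSketch(filePath, content) {
  const tmpPath = `${filePath}.tmp.${process.pid}.${Date.now()}`;
  writeFileSync(tmpPath, content);
  renameSync(tmpPath, filePath); // rename is atomic within one filesystem
}

// Liveness probe as used by cleanupStaleTempFiles: signal 0 checks existence
// and permissions without actually delivering a signal.
function pidIsAlive(pid) {
  try {
    process.kill(pid, 0);
    return true; // we could signal it, so it exists
  } catch (err) {
    return err.code !== 'ESRCH'; // EPERM: exists but not ours; ESRCH: gone
  }
}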
@@ -495,6 +660,9 @@ export function createServer(options = {}) {
   // Acquire PID lock to prevent multiple instances on same directory
   const releasePidLock = acquirePidLock(snapshotDir, port, log);
 
+  // Clean up stale temp files from previous crashed processes
+  cleanupStaleTempFiles(resolvedDir, log);
+
   // Document storage
   const docs = new Map();
 
@@ -539,6 +707,21 @@ export function createServer(options = {}) {
       : null;
 
     const ytext = ydoc.getText('content');
+    const docData = {
+      docName,
+      ydoc,
+      ytext,
+      awareness,
+      conns,
+      mutex,
+      filePath,
+      snapshotPath,
+      applyFileChange: null,
+      flushWrite: null,
+      scheduleCleanup: null,
+      cancelCleanup: null,
+      scheduleWrite: null,
+    };
 
     // Track state
     let lastFileHash = null;
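The hunks that follow all make the same mechanical change (ytext becomes docData.ytext, ydoc becomes docData.ydoc, and so on). The point of declaring docData up front with null slots is that every closure reads the document through this one mutable record, so compaction can swap ydoc/ytext in place without re-wiring the closures. A small standalone sketch of the pattern, not package code:

import * as Y from 'yjs';

const docData = { ydoc: new Y.Doc(), ytext: null, flushWrite: null };
docData.ytext = docData.ydoc.getText('content');

// The closure captures docData, not the original ydoc/ytext bindings, and
// resolves docData.ytext at call time.
docData.flushWrite = () => docData.ytext.toString();

// A compaction-style swap: the same closure now reads the fresh doc.
const fresh = new Y.Doc();
docData.ydoc = fresh;
docData.ytext = fresh.getText('content');
docData.ytext.insert(0, 'compacted');
console.log(docData.flushWrite()); // 'compacted'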
@@ -604,7 +787,7 @@ export function createServer(options = {}) {
       if (isShuttingDown) return;
 
       isWritingToFile = true;
-      const content = ytext.toString();
+      const content = docData.ytext.toString();
       const hash = contentHash(content);
 
       // Skip if content unchanged
@@ -614,7 +797,7 @@ export function createServer(options = {}) {
         return;
       }
 
-      const { success, error } = atomicWriteFile(filePath, content);
+      const { success, error } = atomicWriteFile(docData.filePath, content);
       if (error) {
         log.error('Error saving file', { path: filePath, error });
         metrics.errorOccurred();
@@ -630,15 +813,17 @@ export function createServer(options = {}) {
       }, debounceMs);
     };
 
+    docData.scheduleWrite = scheduleWrite;
+
     // Listen for Yjs updates
-    ydoc.on('update', scheduleWrite);
+    docData.ydoc.on('update', scheduleWrite);
 
     // Schedule Yjs snapshot saves
     if (persistYjsState && snapshotPath) {
       const scheduleSnapshot = () => {
         clearTimeout(snapshotTimeout);
         snapshotTimeout = setTimeout(() => {
-          const { success, error } = saveYjsSnapshot(snapshotPath, ydoc);
+          const { success, error } = saveYjsSnapshot(snapshotPath, docData.ydoc);
           if (error) {
             log.warn('Failed to save Yjs snapshot', { doc: docName, error });
           }
@@ -656,7 +841,7 @@ export function createServer(options = {}) {
       const newHash = contentHash(newContent);
       if (newHash === lastFileHash) return;
 
-      const oldContent = ytext.toString();
+      const oldContent = docData.ytext.toString();
      if (oldContent === newContent) {
        lastFileHash = newHash;
        return;
@@ -665,14 +850,14 @@ export function createServer(options = {}) {
       isWritingToYjs = true;
       const changes = diffChars(oldContent, newContent);
 
-      ydoc.transact(() => {
+      docData.ydoc.transact(() => {
         let pos = 0;
         for (const change of changes) {
           if (change.added) {
-            ytext.insert(pos, change.value);
+            docData.ytext.insert(pos, change.value);
             pos += change.value.length;
           } else if (change.removed) {
-            ytext.delete(pos, change.value.length);
+            docData.ytext.delete(pos, change.value.length);
           } else {
             pos += change.value.length;
           }
@@ -689,10 +874,10 @@ export function createServer(options = {}) {
     const flushWrite = async () => {
       clearTimeout(writeTimeout);
       await mutex.withLock(async () => {
-        const content = ytext.toString();
+        const content = docData.ytext.toString();
         const hash = contentHash(content);
         if (hash !== lastFileHash) {
-          const { error } = atomicWriteFile(filePath, content);
+          const { error } = atomicWriteFile(docData.filePath, content);
           if (error) {
             log.error('Error flushing file', { path: filePath, error });
           } else {
@@ -701,7 +886,7 @@ export function createServer(options = {}) {
         }
         // Save final snapshot
         if (snapshotPath) {
-          saveYjsSnapshot(snapshotPath, ydoc);
+          saveYjsSnapshot(snapshotPath, docData.ydoc);
         }
       });
       pendingWrites.delete(docName);
@@ -714,8 +899,8 @@ export function createServer(options = {}) {
       if (conns.size === 0) {
         await flushWrite();
         clearTimeout(snapshotTimeout);
-        awareness.destroy();
-        ydoc.destroy();
+        docData.awareness.destroy();
+        docData.ydoc.destroy();
         docs.delete(docName);
         log.info('Cleaned up document', { doc: docName });
       }
@@ -726,19 +911,10 @@ export function createServer(options = {}) {
       clearTimeout(cleanupTimeout);
     };
 
-    const docData = {
-      ydoc,
-      ytext,
-      awareness,
-      conns,
-      mutex,
-      filePath,
-      snapshotPath,
-      applyFileChange,
-      flushWrite,
-      scheduleCleanup,
-      cancelCleanup,
-    };
+    docData.applyFileChange = applyFileChange;
+    docData.flushWrite = flushWrite;
+    docData.scheduleCleanup = scheduleCleanup;
+    docData.cancelCleanup = cancelCleanup;
 
     docs.set(docName, docData);
     return docData;
@@ -751,6 +927,12 @@ export function createServer(options = {}) {
   const watcher = watch(join(resolvedDir, '**/*.md'), {
     ignoreInitial: true,
     awaitWriteFinish: { stabilityThreshold: 300 },
+    ignored: [
+      '**/node_modules/**',
+      '**/.venv/**',
+      '**/__pycache__/**',
+      '**/.git/**',
+    ],
   });
 
   watcher.on('change', async (filePath) => {
@@ -1045,7 +1227,7 @@ export function createServer(options = {}) {
   // GRACEFUL SHUTDOWN
   // =============================================================================
 
-  const gracefulShutdown = async (signal) => {
+  let gracefulShutdown = async (signal) => {
     if (isShuttingDown) return;
     isShuttingDown = true;
 
@@ -1100,6 +1282,117 @@ export function createServer(options = {}) {
     });
   });
 
+  // =============================================================================
+  // DATA LOSS PREVENTION - Memory Monitoring
+  // =============================================================================
+  // Added after investigating unexplained data loss on 2026-01-16.
+  // Monitors memory usage and triggers compaction if needed.
+  // =============================================================================
+
+  let lastCompactionTime = Date.now();
+  let memoryWarningLogged = false;
+  let compactionInProgress = false;
+
+  const memoryMonitorInterval = setInterval(async () => {
+    if (isShuttingDown) return;
+
+    const mem = getMemoryUsageMB();
+
+    // Log warning if memory is getting high
+    if (mem.heapUsed >= MEMORY_WARNING_MB && !memoryWarningLogged) {
+      log.warn('High memory usage detected', {
+        heapUsed: `${mem.heapUsed}MB`,
+        heapTotal: `${mem.heapTotal}MB`,
+        rss: `${mem.rss}MB`,
+        threshold: `${MEMORY_WARNING_MB}MB`,
+      });
+      memoryWarningLogged = true;
+    } else if (mem.heapUsed < MEMORY_WARNING_MB) {
+      memoryWarningLogged = false;
+    }
+
+    if (compactionInProgress) return;
+
+    // Trigger compaction if memory is critical OR if enough time has passed
+    const timeSinceLastCompaction = Date.now() - lastCompactionTime;
+    const shouldCompact =
+      mem.heapUsed >= MEMORY_COMPACT_MB ||
+      timeSinceLastCompaction >= COMPACTION_INTERVAL_MS;
+
+    if (shouldCompact && docs.size > 0) {
+      compactionInProgress = true;
+      try {
+        log.info('Triggering Y.Doc compaction', {
+          reason: mem.heapUsed >= MEMORY_COMPACT_MB ? 'memory-pressure' : 'periodic',
+          heapUsed: `${mem.heapUsed}MB`,
+          docsCount: docs.size,
+        });
+
+        // Compact all documents
+        for (const [docName, docData] of docs) {
+          try {
+            await docData.flushWrite();
+
+            // Disconnect all clients (they will reconnect and get fresh state)
+            for (const ws of docData.conns) {
+              try {
+                ws.close(4000, 'Document compacted - please reconnect');
+              } catch (e) {
+                // Ignore close errors
+              }
+            }
+            docData.conns.clear();
+
+            // Destroy old doc and create fresh one
+            const oldYdoc = docData.ydoc;
+            const oldAwareness = docData.awareness;
+            const { newYdoc, newYtext } = compactYDoc(docData, log);
+            const newAwareness = new awarenessProtocol.Awareness(newYdoc);
+
+            oldYdoc.off('update', docData.scheduleWrite);
+
+            // Update the document entry
+            docData.ydoc = newYdoc;
+            docData.ytext = newYtext;
+            docData.awareness = newAwareness;
+
+            // Re-register the update listener
+            newYdoc.on('update', docData.scheduleWrite);
+
+            // Clean up old doc
+            oldAwareness.destroy();
+            oldYdoc.destroy();
+
+            log.info('Document compacted successfully', { doc: docName });
+          } catch (e) {
+            log.error('Error compacting document', { doc: docName, error: e.message });
+          }
+        }
+
+        lastCompactionTime = Date.now();
+
+        // Force garbage collection if available (--expose-gc flag)
+        if (global.gc) {
+          global.gc();
+          const afterMem = getMemoryUsageMB();
+          log.info('GC completed', {
+            heapUsed: `${afterMem.heapUsed}MB`,
+            freed: `${mem.heapUsed - afterMem.heapUsed}MB`,
+          });
+        }
+      } finally {
+        compactionInProgress = false;
+      }
+    }
+  }, MEMORY_CHECK_INTERVAL_MS);
+
+  // Clean up interval on shutdown
+  const originalGracefulShutdown = gracefulShutdown;
+  gracefulShutdown = async (signal) => {
+    clearInterval(memoryMonitorInterval);
+    return originalGracefulShutdown(signal);
+  };
+
   // =============================================================================
   // PUBLIC API
   // =============================================================================
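The earlier const to let change on gracefulShutdown exists so the hunk above can wrap it and clear the monitoring interval. How mrmd-sync registers its signal handlers is not visible in this diff, and the wrapper is only reached if those handlers look the variable up at call time. A standalone sketch, not package code, of the difference:

let shutdown = async (signal) => {
  console.log('original shutdown', signal);
};

// Late-bound: reads `shutdown` when the signal fires, so it sees the wrapper.
process.on('SIGTERM', (signal) => shutdown(signal));

// Early-bound: captured the original function object; reassignment won't reach it.
process.on('SIGINT', shutdown);

const original = shutdown;
shutdown = async (signal) => {
  // extra cleanup (e.g. clearing an interval) before delegating
  return original(signal);
};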