@bod.ee/db 0.12.4 → 0.12.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -27,6 +27,12 @@ import { BodDB, increment, serverTimestamp, arrayUnion, arrayRemove, ref } from
27
27
  const db = new BodDB({
28
28
  path: './data.db', // SQLite file (default: ':memory:')
29
29
  port: 4400, // optional — only needed if calling db.serve()
30
+ log: { // optional structured logging
31
+ enabled: true,
32
+ level: 'info', // 'debug' | 'info' | 'warn' | 'error'
33
+ components: '*', // '*' or ['storage', 'transport', 'subs', 'replication', 'stats', 'keyauth']
34
+ logDir: './logs', // writes bod-YYYYMMDD-HHmmss.log per run; omit for console-only
35
+ },
30
36
  rules: { // inline rules, or path to .json/.ts file
31
37
  'users/$uid': {
32
38
  read: true,
@@ -497,7 +503,7 @@ await replica.replication!.start();
497
503
  - Reads served locally on replica, writes forwarded to primary
498
504
  - Replicas bootstrap full state on first connect, then consume live events
499
505
  - Push keys preserved across replicas (deterministic)
500
- - Auto-compaction on `_repl` stream (keepKey: 'path', maxCount: 10000)
506
+ - Auto-compaction on `_repl` stream (keepKey: 'path', maxCount: 500; auto-compacts every 500 writes)
501
507
  - Excluded prefixes: `_repl`, `_streams`, `_mq`, `_auth` (internal data not replicated)
502
508
 
503
509
  ### Per-Path Topology
package/CLAUDE.md CHANGED
@@ -71,7 +71,7 @@ config.ts — demo instance config (open rules, indexes, fts, v
71
71
  - **BodClientCached**: two-tier cache wrapper around BodClient. Memory (Map, LRU eviction) + IndexedDB persistence. Stale-while-revalidate: subscribed paths always fresh, unsubscribed return stale + background refetch. Writes (`set/update/delete`) invalidate path + ancestors. `init()` opens IDB + sweeps expired. `warmup(paths[])` bulk-loads from IDB. Passthrough for `push/batch/query/search/mq/stream/vfs` via `cachedClient.client`.
72
72
  - **MCP**: `MCPAdapter` wraps a `BodClient` as a JSON-RPC MCP server (stdio + HTTP). Connects to a running BodDB instance over WebSocket — no embedded DB. Entry point: `mcp.ts`. Tools: CRUD (6), FTS (2), vectors (2), streams (4), MQ (7) = 21 tools. Use `--stdio` for Claude Code/Desktop, `--http` for remote agents.
73
73
  - **VFS (Virtual File System)**: `VFSEngine` — files stored outside SQLite via pluggable `VFSBackend` interface. `LocalBackend` stores at `<storageRoot>/<fileId>` using `Bun.file`/`Bun.write`. Metadata at `_vfs/<virtualPath>/` (size, mime, mtime, fileId, isDir) — gets subs/rules/replication for free. `fileId = pushId` so move/rename is metadata-only. REST: `POST/GET/DELETE /files/<path>`, `?stat=1`, `?list=1`, `?mkdir=1`, `PUT ?move=<dst>`. WS chunked fallback: base64-encoded `vfs-upload-init/chunk/done`, `vfs-download-init` → `vfs-download-chunk` push messages. Client: `VFSClient` via `client.vfs()` — `upload/download` (REST) + `uploadWS/downloadWS` (WS) + `stat/list/mkdir/delete/move`.
74
- - **Replication**: `ReplicationEngine` — single primary + N read replicas + multi-source feed subscriptions. Star topology. Primary emits write events to `_repl` stream via `onWrite` hooks. Replicas bootstrap via cursor-based `streamMaterialize('_repl', { keepKey: 'path', batchSize: 200 })` pagination (avoids huge single WS frame), then subscribe for ongoing events. Auto-compact on write threshold (`autoCompactThreshold`, default 500) + on startup keeps `_repl` bounded. Write proxy: replica forwards writes to primary via BodClient, primary applies + emits, replica consumes. `_replaying` flag prevents re-emission loops. `_emitting` guard prevents recursion from `db.push('_repl')`. Updates flattened to per-path set events for correct compaction keying. Sweep delete events replicated. Excluded prefixes: `_repl`, `_streams`, `_mq`, `_auth`. **Sources**: `ReplicationSource[]` — subscribe to specific paths from multiple remote DBs. Each source is an independent BodClient that filters `_repl` events by path prefix, with optional `localPrefix` remapping (e.g. remote `users/u1` → local `db-a/users/u1`). Sources connect in parallel; individual failures don't block others. Sources are independent of role — a DB can be primary AND consume sources. **Per-path topology**: `PathTopologyRouter` — when `paths` config is set, each path prefix gets an independent mode: `primary` (local authoritative, emits), `replica` (remote authoritative, proxies writes), `sync` (bidirectional, both emit+apply), `readonly` (pull-only, rejects writes), `writeonly` (push-only, ignores remote). Longest-prefix match resolves mode. `writeProxy: 'proxy'|'reject'` overrides replica write behavior. Bootstrap skips sync paths (ongoing stream only). Auth/rules checked before proxy in all handlers. `shouldProxyPath(path)`/`shouldRejectPath(path)` replace `isReplica` checks. `emitsToRepl`/`pullsFromPrimary` getters for compact/bootstrap decisions. Stable `replicaId` from config hash. 
Falls back to `role` when `paths` absent (backward compat).
74
+ - **Replication**: `ReplicationEngine` — single primary + N read replicas + multi-source feed subscriptions. Star topology. Primary emits write events to `_repl` stream via `onWrite` hooks. Replicas bootstrap via cursor-based `streamMaterialize('_repl', { keepKey: 'path', batchSize: 200 })` pagination (avoids huge single WS frame), then subscribe for ongoing events. Auto-compact on write threshold (`autoCompactThreshold`, default 500) + on startup keeps `_repl` bounded. Write proxy: replica forwards writes to primary via BodClient, primary applies + emits, replica consumes. `_replaying` flag prevents re-emission loops. `_emitting` guard prevents recursion from `db.push('_repl')`. Updates flattened to per-path set events for correct compaction keying. Sweep delete events replicated. Excluded prefixes: `_repl`, `_streams`, `_mq`, `_admin`, `_auth`. **Sources**: `ReplicationSource[]` — subscribe to specific paths from multiple remote DBs. Each source is an independent BodClient that filters `_repl` events by path prefix, with optional `localPrefix` remapping (e.g. remote `users/u1` → local `db-a/users/u1`). Sources connect in parallel; individual failures don't block others. Sources are independent of role — a DB can be primary AND consume sources. **Per-path topology**: `PathTopologyRouter` — when `paths` config is set, each path prefix gets an independent mode: `primary` (local authoritative, emits), `replica` (remote authoritative, proxies writes), `sync` (bidirectional, both emit+apply), `readonly` (pull-only, rejects writes), `writeonly` (push-only, ignores remote). Longest-prefix match resolves mode. `writeProxy: 'proxy'|'reject'` overrides replica write behavior. Bootstrap skips sync paths (ongoing stream only). Auth/rules checked before proxy in all handlers. `shouldProxyPath(path)`/`shouldRejectPath(path)` replace `isReplica` checks. `emitsToRepl`/`pullsFromPrimary` getters for compact/bootstrap decisions. Stable `replicaId` from config hash. 
Falls back to `role` when `paths` absent (backward compat).
75
75
  - **KeyAuth integration guide**: `docs/keyauth-integration.md` — flows for signup, signin, new device, autoAuth, IAM roles, common mistakes.
76
76
  - **Para-chat integration guide**: `docs/para-chat-integration.md` — how para-chat uses BodDB: per-path topology, VFS, KeyAuth, caching, file sync.
77
77
  - **KeyAuth**: `KeyAuthEngine` — portable Ed25519 identity & IAM. Identity hierarchy: Root (server-level, key on filesystem), Account (portable, password-encrypted private key in DB or device-generated), Device (delegate, linked via password unlock). Challenge-response auth: server sends nonce → client signs with Ed25519 → server verifies + creates session. Self-signed tokens (no JWT lib): `base64url(payload).base64url(Ed25519_sign)`. Data model at `_auth/` prefix (protected from external writes). Device reverse-index at `_auth/deviceIndex/{dfp}` for O(1) lookup. Password change is atomic (single `db.update()`). IAM: roles with path-based permissions, account role assignment. `_auth/` excluded from replication. Transport guards: `auth-link-device` and `auth-change-password` require authenticated session; non-root users can only change own password. **Device registration**: `registerDevice(publicKey)` — client-generated keypair, no password, idempotent; `allowOpenRegistration: false` requires authenticated session. **Browser crypto**: `keyAuth.browser.ts` uses `@noble/ed25519` with DER↔raw key bridge for server compatibility. **BodClient autoAuth**: `autoAuth: true` auto-generates keypair (localStorage), registers, authenticates — zero-config device identity. `client.auth.*` convenience methods for all auth ops. **IAM transport ops**: `auth-create-role`, `auth-delete-role`, `auth-update-roles` (root only), `auth-list-accounts`, `auth-list-roles`. Device accounts (no encrypted key) safely reject `linkDevice`/`changePassword`.
package/admin/admin.ts CHANGED
@@ -30,7 +30,7 @@ export function startAdminUI(options?: { port?: number; serverUrl?: string }) {
30
30
  const url = new URL(req.url);
31
31
 
32
32
  // Proxy API calls to the BodDB server
33
- if (url.pathname.startsWith('/db/') || url.pathname.startsWith('/files/') || url.pathname.startsWith('/sse/')) {
33
+ if (url.pathname.startsWith('/db/') || url.pathname.startsWith('/files/') || url.pathname.startsWith('/sse/') || url.pathname.startsWith('/replication')) {
34
34
  const target = httpBase + url.pathname + url.search;
35
35
  return fetch(target, { method: req.method, headers: req.headers, body: req.body });
36
36
  }
package/admin/ui.html CHANGED
@@ -11,7 +11,6 @@
11
11
  #metrics-bar { display: flex; background: #0a0a0a; border-bottom: 1px solid #2a2a2a; flex-shrink: 0; align-items: stretch; width: 100%; }
12
12
  .metric-card { display: flex; flex-direction: column; padding: 5px 10px 4px; border-right: 1px solid #181818; min-width: 140px; flex-shrink: 0; gap: 1px; overflow: hidden; }
13
13
  .metric-card:last-child { border-right: none; width: auto; }
14
- .metric-right { margin-left: auto; }
15
14
  .metric-top { display: flex; justify-content: space-between; align-items: baseline; width: 100%; }
16
15
  .metric-label { font-size: 9px; color: #555; text-transform: uppercase; letter-spacing: 0.5px; }
17
16
  .metric-value { font-size: 13px; color: #4ec9b0; font-weight: bold; min-width: 5ch; text-align: right; font-variant-numeric: tabular-nums; }
@@ -31,15 +30,18 @@
31
30
  #tree-header span { color: #555; font-size: 11px; }
32
31
  #tree-container { flex: 1; overflow-y: auto; padding: 4px; }
33
32
  #tree-container details { margin-left: 12px; }
34
- #tree-container summary { cursor: pointer; padding: 2px 4px; border-radius: 3px; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; color: #4ec9b0; list-style: none; }
35
- #tree-container summary::before { content: '▶'; font-size: 10px; color: #666; margin-right: 5px; display: inline-block; transition: transform 0.15s; }
33
+ #tree-container summary { cursor: pointer; padding: 2px 4px; border-radius: 3px; color: #4ec9b0; list-style: none; display: flex; align-items: center; white-space: nowrap; overflow: hidden; }
34
+ #tree-container summary::before { content: '▶'; font-size: 10px; color: #666; margin-right: 5px; flex-shrink: 0; display: inline-block; transition: transform 0.15s; }
36
35
  details[open] > summary::before { transform: rotate(90deg); color: #aaa; }
36
+ #tree-container summary .tree-label { overflow: hidden; text-overflow: ellipsis; white-space: nowrap; min-width: 0; }
37
+ #tree-container summary .ttl-badge, #tree-container summary .count-badge { flex-shrink: 0; margin-left: 4px; }
37
38
  #tree-container summary:hover { background: #1e1e1e; }
38
39
  .tree-leaf { padding: 2px 4px 2px 16px; cursor: pointer; border-radius: 3px; color: #9cdcfe; display: flex; gap: 4px; align-items: baseline; overflow: hidden; }
39
40
  .tree-leaf:hover { background: #1e1e1e; }
40
41
  .tree-val { color: #ce9178; flex: 1; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; font-size: 11px; }
41
42
  .tree-key { color: #4ec9b0; flex-shrink: 0; }
42
43
  .ttl-badge { font-size: 9px; padding: 0 4px; border-radius: 3px; background: #4d3519; color: #d4a054; flex-shrink: 0; }
44
+ .count-badge { font-size: 9px; padding: 0 4px; border-radius: 3px; background: #1e2d3d; color: #569cd6; flex-shrink: 0; }
43
45
  @keyframes treeFlash { 0%,100% { background: transparent; } 30% { background: rgba(86,156,214,0.25); } }
44
46
  .flash { animation: treeFlash 1.2s ease-out; border-radius: 3px; }
45
47
 
@@ -127,16 +129,18 @@
127
129
  <div class="metric-top"><span class="metric-label">Ping</span><span class="metric-value" id="s-ping">—</span></div>
128
130
  <canvas class="metric-canvas" id="g-ping" width="100" height="28"></canvas>
129
131
  </div>
130
- <div class="metric-card" id="repl-card" style="border-left:1px solid #282828;display:none;width:180px">
131
- <div class="metric-top"><span class="metric-label">Replication</span><span class="metric-value dim" id="s-repl-role">—</span></div>
132
- <div style="margin-top:4px;font-size:10px" id="s-repl-sources"></div>
133
- </div>
134
- <div class="metric-card metric-right" style="border-left:1px solid #282828;justify-content:space-between">
132
+ <div style="margin-left:auto;display:flex;flex-shrink:0">
133
+ <div class="metric-card" id="repl-card" style="border-left:1px solid #282828;display:none;width:180px">
134
+ <div class="metric-top"><span class="metric-label">Replication</span><span class="metric-value dim" id="s-repl-role">—</span></div>
135
+ <div style="margin-top:4px;font-size:10px" id="s-repl-sources"></div>
136
+ </div>
137
+ <div class="metric-card" style="border-left:1px solid #282828;justify-content:space-between">
135
138
  <div class="metric-top"><span class="metric-label">Uptime</span><span class="metric-value dim" id="s-uptime">—</span></div>
136
139
  <div style="font-size:10px;color:#555;display:flex;justify-content:space-between"><span id="s-ts">—</span><span>v<span id="s-version">—</span></span></div>
137
140
  <div><span class="metric-label">WS<span id="ws-dot"></span></span> <span style="font-size:10px;color:#555"><span id="s-clients">0</span> clients · <span id="s-subs">0</span> subs</span></div>
138
141
  <div><button id="stats-toggle" class="sm" onclick="toggleStats()" title="Toggle server stats collection">Stats: ON</button></div>
139
142
  </div>
143
+ </div>
140
144
  </div>
141
145
 
142
146
  <div id="main">
@@ -1455,7 +1459,8 @@ function renderChildren(children, parentPath) {
1455
1459
  html += `<div class="tree-leaf" data-path="${escHtml(path)}" onclick="selectPath('${path.replace(/'/g, "\\'")}')"><span class="tree-key">${escHtml(ch.key)}</span>${ttlBadge}<span class="tree-val">${escHtml(String(display ?? ''))}</span></div>`;
1456
1460
  } else {
1457
1461
  const isOpen = _restoredOpenPaths.has(path);
1458
- html += `<details data-path="${escHtml(path)}"${isOpen ? ' open' : ''}><summary><span onclick="selectPath('${path.replace(/'/g, "\\'")}')">${escHtml(ch.key)}${ttlBadge}</span></summary><div class="tree-children" data-parent="${escHtml(path)}"></div></details>`;
1462
+ const countBadge = ch.count != null ? `<span class="count-badge">${ch.count}</span>` : '';
1463
+ html += `<details data-path="${escHtml(path)}"${isOpen ? ' open' : ''}><summary><span class="tree-label" onclick="selectPath('${path.replace(/'/g, "\\'")}')">${escHtml(ch.key)}</span>${ttlBadge}${countBadge}</summary><div class="tree-children" data-parent="${escHtml(path)}"></div></details>`;
1459
1464
  }
1460
1465
  }
1461
1466
  return html;
@@ -1534,8 +1539,6 @@ async function expandNode(details, isRefresh) {
1534
1539
  }
1535
1540
 
1536
1541
  async function refreshPath(path) {
1537
- // Re-fetch the nearest loaded ancestor and update its children in-place
1538
- // For simplicity, find the <details> or root and re-expand
1539
1542
  const parts = path.split('/');
1540
1543
  let target = '';
1541
1544
  // Walk up to find the deepest loaded ancestor
@@ -1545,8 +1548,17 @@ async function refreshPath(path) {
1545
1548
  }
1546
1549
 
1547
1550
  if (!target) {
1548
- // Refresh root
1549
- return loadTree([path]);
1551
+ // No loaded ancestor — try to update just the root-level item without full reload
1552
+ const topKey = parts[0];
1553
+ const container = document.getElementById('tree-container');
1554
+ const existing = container.querySelector(`[data-path="${CSS.escape(topKey)}"]`);
1555
+ if (!existing) {
1556
+ // Truly new top-level key — must reload root
1557
+ return loadTree([path]);
1558
+ }
1559
+ // Root item exists but children weren't loaded — just flash it
1560
+ flashPaths([path]);
1561
+ return;
1550
1562
  }
1551
1563
 
1552
1564
  // Re-fetch this node and all its children
@@ -1557,6 +1569,9 @@ async function refreshPath(path) {
1557
1569
  if (det && det.open) {
1558
1570
  await expandNode(det, true);
1559
1571
  flashPaths([path]);
1572
+ } else {
1573
+ // Node exists but is collapsed — just flash it
1574
+ flashPaths([path]);
1560
1575
  }
1561
1576
  }
1562
1577
 
@@ -68,12 +68,13 @@ const db = new BodDB({
68
68
  role: 'primary', // fallback for unconfigured paths
69
69
  primaryUrl: repl.remoteUrl, // wss://bod.ee/db
70
70
  paths: [
71
- { path: '_vfs', mode: 'primary' }, // local files are authoritative
72
- { path: '_auth', mode: 'replica' }, // bod.ee is auth authority
73
- { path: 'config', mode: 'sync' }, // bidirectional app config
74
- { path: 'storage', mode: 'primary' }, // local collections (notifications, etc.)
71
+ { path: '_vfs', mode: 'primary' }, // local files are authoritative
72
+ { path: '_auth', mode: 'replica' }, // bod.ee is auth authority
73
+ { path: '_auth/sessions', mode: 'primary' }, // sessions are local (longest-prefix wins)
74
+ { path: '_auth/server', mode: 'primary' }, // server keypair is local
75
+ { path: 'config', mode: 'sync' }, // bidirectional app config
76
+ { path: 'storage', mode: 'primary' }, // local collections (notifications, etc.)
75
77
  ],
76
- excludePrefixes: ['_repl', '_streams', '_mq'],
77
78
  },
78
79
  });
79
80
  ```
@@ -93,7 +94,7 @@ const db = new BodDB({
93
94
  1. **`_auth` writes are proxied** — `createAccount`, `linkDevice` go through bod.ee automatically. No separate HTTP call needed.
94
95
  2. **`_vfs` emits to remote** — file metadata changes push to bod.ee via `_repl` stream. No manual upload sync.
95
96
  3. **Bootstrap is selective** — only `_auth` (replica) pulls from remote on connect. `_vfs` (primary) and `storage` (primary) keep local state.
96
- 4. **`_auth/sessions` and `_auth/server`** — automatically excluded from replication (`_auth` prefix excluded by default; replica mode pulls from remote but these internal paths are local-only).
97
+ 4. **`_auth/sessions` and `_auth/server`** — kept local via explicit sub-path overrides (`mode: 'primary'`). Longest-prefix match ensures `_auth/sessions/x` resolves to `primary` even though `_auth` is `replica`.
97
98
 
98
99
  ---
99
100
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@bod.ee/db",
3
- "version": "0.12.4",
3
+ "version": "0.12.8",
4
4
  "module": "index.ts",
5
5
  "type": "module",
6
6
  "exports": {
@@ -23,6 +23,7 @@
23
23
  "admin:remote": "bun run admin/proxy.ts",
24
24
  "serve": "bun run cli.ts",
25
25
  "start": "bun run cli.ts config.ts",
26
+ "start-admin": "bunx concurrently -n server,admin -c cyan,yellow \"bun run cli.ts config.ts\" \"bun run admin/admin.ts\"",
26
27
  "publish-lib": "bun publish --access public",
27
28
  "mcp": "bun run mcp.ts --stdio",
28
29
  "deploy": "bun run deploy/deploy.ts boddb deploy",
@@ -203,9 +203,11 @@ export class BodClient {
203
203
  this.scheduleReconnect();
204
204
  }
205
205
 
206
- if (!this.connectPromise) return;
207
- this.connectPromise = null;
208
- // Only reject if this was the initial connect
206
+ // Reject initial connect promise so callers don't hang forever
207
+ if (this.connectPromise) {
208
+ this.connectPromise = null;
209
+ reject(new Error(`WebSocket connection failed (${this.options.url})`));
210
+ }
209
211
  };
210
212
 
211
213
  ws.onerror = () => {
@@ -541,6 +541,7 @@ export class BodDB {
541
541
  this.stop();
542
542
  this.subs.clear();
543
543
  this.storage.close();
544
+ this.log.close();
544
545
  }
545
546
 
546
547
  private snapshotExisting(path: string): Set<string> {
@@ -268,6 +268,7 @@ export class ReplicationEngine {
268
268
  sc.client.disconnect();
269
269
  }
270
270
  this.sourceConns = [];
271
+ this._pendingReplEvents = null;
271
272
  }
272
273
 
273
274
  /** Proxy a write operation to the primary (replica mode) */
@@ -296,7 +297,10 @@ export class ReplicationEngine {
296
297
 
297
298
  private startPrimary(): void {
298
299
  this.unsubWrite = this.db.onWrite((ev: WriteEvent) => {
299
- this.emit(ev);
300
+ // Defer emit — onWrite fires inside the SQLite write transaction,
301
+ // and db.push('_repl') inside that transaction gets silently dropped.
302
+ const evCopy = { ...ev };
303
+ setTimeout(() => { if (this._started) this.emit(evCopy); }, 0);
300
304
  });
301
305
 
302
306
  // Compact on startup
@@ -320,22 +324,31 @@ export class ReplicationEngine {
320
324
 
321
325
  /** Buffer replication events during transactions, emit immediately otherwise */
322
326
  private emit(ev: WriteEvent): void {
323
- if (this._emitting) return;
327
+ // _repl writes must never emit to _repl (infinite loop), regardless of config
328
+ if (ev.path.startsWith('_repl')) return;
329
+ if (this._emitting) {
330
+ this.log.debug('emit: skipped (re-entrant)', { path: ev.path });
331
+ return;
332
+ }
324
333
  if (this.router) {
325
- // Single resolve: check exclusion override + shouldEmit together
326
334
  const resolved = this.router.resolve(ev.path);
327
335
  const isConfigured = resolved.path !== '';
328
- // If in excludePrefixes but not explicitly configured, skip
329
- if (!isConfigured && this.options.excludePrefixes.some(p => ev.path.startsWith(p))) return;
336
+ if (!isConfigured && this.options.excludePrefixes.some(p => ev.path.startsWith(p))) {
337
+ this.log.debug('emit: skipped (excluded, not configured)', { path: ev.path });
338
+ return;
339
+ }
330
340
  const mode = resolved.mode;
331
- if (mode !== 'primary' && mode !== 'sync' && mode !== 'writeonly') return;
341
+ if (mode !== 'primary' && mode !== 'sync' && mode !== 'writeonly') {
342
+ this.log.debug('emit: skipped (mode)', { path: ev.path, mode });
343
+ return;
344
+ }
332
345
  } else {
333
346
  if (this.isExcluded(ev.path)) return;
334
347
  }
335
348
 
336
- // If buffering (transaction in progress), collect events
337
349
  if (this._pendingReplEvents) {
338
350
  this._pendingReplEvents.push(ev);
351
+ this.log.debug('emit: buffered (batch)', { path: ev.path });
339
352
  return;
340
353
  }
341
354
 
@@ -349,12 +362,13 @@ export class ReplicationEngine {
349
362
  const seq = this._seq++;
350
363
  const idempotencyKey = `${replEvent.ts}:${seq}:${ev.path}`;
351
364
  this.db.push('_repl', replEvent, { idempotencyKey });
352
-
365
+ this.log.info('_repl emit', { seq, op: ev.op, path: ev.path });
366
+ } catch (e: any) {
367
+ this.log.error('_repl emit failed', { path: ev.path, error: e.message });
353
368
  } finally {
354
369
  this._emitting = false;
355
370
  }
356
371
 
357
- // Auto-compact on write threshold (outside _emitting guard so notifications flow normally)
358
372
  this._emitCount++;
359
373
  const threshold = this.options.autoCompactThreshold;
360
374
  if (threshold > 0 && this._emitCount >= threshold) {
@@ -408,12 +422,16 @@ export class ReplicationEngine {
408
422
 
409
423
  // Subscribe to ongoing events, filter by paths
410
424
  const groupId = this.options.replicaId!;
425
+ this.log.info('Subscribing to _repl stream', { groupId, pathPrefixes });
411
426
  this.unsubStream = this.client.stream('_repl', groupId).on((events) => {
427
+ this.log.info('_repl events received', { count: events.length });
412
428
  this.db.setReplaying(true);
413
429
  try {
414
430
  for (const e of events) {
415
431
  const ev = e.val() as ReplEvent;
416
- if (this.matchesPathPrefixes(ev.path, pathPrefixes)) {
432
+ const matched = this.matchesPathPrefixes(ev.path, pathPrefixes);
433
+ this.log.info('_repl event', { op: ev.op, path: ev.path, matched });
434
+ if (matched) {
417
435
  this.applyEvent(ev);
418
436
  }
419
437
  this.client!.stream('_repl', groupId).ack(e.key).catch(() => {});
@@ -582,7 +600,7 @@ export class ReplicationEngine {
582
600
  private applyEvent(ev: ReplEvent, source?: ReplicationSource): void {
583
601
  const path = source ? this.remapPath(ev.path, source) : ev.path;
584
602
  // Defense-in-depth: skip events for paths we shouldn't apply (primary/writeonly)
585
- if (!source && this.router && !this.router.shouldApply(path)) return;
603
+ if (this.router && !this.router.shouldApply(path)) return;
586
604
  switch (ev.op) {
587
605
  case 'set':
588
606
  this.db.set(path, ev.value, ev.ttl ? { ttl: ev.ttl } : undefined);
@@ -137,7 +137,7 @@ export class StorageEngine {
137
137
  }
138
138
 
139
139
  /** Get immediate children of a path (one level deep). Returns { key, isLeaf, value? }[] */
140
- getShallow(path?: string): Array<{ key: string; isLeaf: boolean; value?: unknown; ttl?: number }> {
140
+ getShallow(path?: string): Array<{ key: string; isLeaf: boolean; value?: unknown; ttl?: number; count?: number }> {
141
141
  const prefix = path ? path + '/' : '';
142
142
  const end = prefix + '\uffff';
143
143
  const rows = (prefix
@@ -145,7 +145,7 @@ export class StorageEngine {
145
145
  : this.db.prepare('SELECT path, value, expires_at FROM nodes ORDER BY path').all()
146
146
  ) as Array<{ path: string; value: string; expires_at: number | null }>;
147
147
 
148
- const children: Array<{ key: string; isLeaf: boolean; value?: unknown; ttl?: number }> = [];
148
+ const children: Array<{ key: string; isLeaf: boolean; value?: unknown; ttl?: number; count?: number }> = [];
149
149
  const seen = new Set<string>();
150
150
  for (const row of rows) {
151
151
  const rest = row.path.slice(prefix.length);
@@ -158,10 +158,18 @@ export class StorageEngine {
158
158
  if (row.expires_at) entry.ttl = Math.max(0, row.expires_at - Math.floor(Date.now() / 1000));
159
159
  children.push(entry);
160
160
  } else {
161
- // Check if any child in this branch has TTL
161
+ // Check if any child in this branch has TTL; count direct children
162
162
  const branchPrefix = prefix + key + '/';
163
163
  const hasTTL = rows.some(r => r.path.startsWith(branchPrefix) && r.expires_at);
164
- children.push(hasTTL ? { key, isLeaf: false, ttl: -1 } : { key, isLeaf: false });
164
+ const directChildren = new Set<string>();
165
+ for (const r of rows) {
166
+ if (!r.path.startsWith(branchPrefix)) continue;
167
+ const seg = r.path.slice(branchPrefix.length).split('/')[0];
168
+ if (seg) directChildren.add(seg);
169
+ }
170
+ const entry: { key: string; isLeaf: boolean; ttl?: number; count?: number } = { key, isLeaf: false, count: directChildren.size };
171
+ if (hasTTL) entry.ttl = -1;
172
+ children.push(entry);
165
173
  }
166
174
  }
167
175
  return children;
@@ -253,6 +253,49 @@ export class Transport {
253
253
  })();
254
254
  }
255
255
 
256
+ // Replication REST routes
257
+ if (url.pathname.startsWith('/replication')) {
258
+ return (async () => {
259
+ const repl = this.db.replication;
260
+ if (!repl) {
261
+ if (req.method === 'GET' && url.pathname === '/replication') {
262
+ return Response.json({ ok: true, role: 'primary', started: false, seq: 0, topology: null, sources: [], synced: {} });
263
+ }
264
+ return Response.json({ ok: false, error: 'Replication not configured' }, { status: 503 });
265
+ }
266
+ if (req.method === 'GET' && url.pathname === '/replication') {
267
+ const s = repl.stats();
268
+ // Build synced snapshot: read local copies of each source's paths
269
+ const synced: Record<string, unknown> = {};
270
+ for (const src of (s.sources ?? [])) {
271
+ for (const p of (src.paths ?? [])) {
272
+ try { synced[p] = this.db.get(p); } catch {}
273
+ }
274
+ }
275
+ return Response.json({ ok: true, ...s, synced });
276
+ }
277
+ if (req.method === 'POST' && url.pathname === '/replication/source-write') {
278
+ try {
279
+ const body = await req.json() as { path: string; value: unknown };
280
+ await repl.proxyWrite({ op: 'set', path: body.path, value: body.value });
281
+ return Response.json({ ok: true });
282
+ } catch (e: any) {
283
+ return Response.json({ ok: false, error: e.message }, { status: 500 });
284
+ }
285
+ }
286
+ if (req.method === 'DELETE' && url.pathname.startsWith('/replication/source-delete/')) {
287
+ const path = url.pathname.slice('/replication/source-delete/'.length);
288
+ try {
289
+ await repl.proxyWrite({ op: 'delete', path });
290
+ return Response.json({ ok: true });
291
+ } catch (e: any) {
292
+ return Response.json({ ok: false, error: e.message }, { status: 500 });
293
+ }
294
+ }
295
+ return Response.json({ ok: false, error: 'Not found' }, { status: 404 });
296
+ })();
297
+ }
298
+
256
299
  // VFS REST routes
257
300
  if (this.db.vfs && url.pathname.startsWith('/files/')) {
258
301
  const vfsPath = normalizePath(url.pathname.slice(7));
@@ -515,10 +558,11 @@ export class Transport {
515
558
  }
516
559
 
517
560
  case 'batch': {
518
- // Upfront rules check before proxy (defense-in-depth)
561
+ // Upfront auth prefix + rules check before proxy (defense-in-depth)
519
562
  for (const batchOp of msg.operations) {
520
563
  const opPaths = batchOp.op === 'update' ? Object.keys(batchOp.updates) : [batchOp.path];
521
564
  for (const p of opPaths) {
565
+ if (guardAuthPrefix(p)) return;
522
566
  if (self.rules && !self.rules.check('write', p, ws.data.auth)) {
523
567
  return error(`Permission denied for ${p}`, Errors.PERMISSION_DENIED);
524
568
  }
@@ -3,6 +3,9 @@
3
3
  * Disabled by default. Enable via `log` option in BodDBOptions.
4
4
  */
5
5
 
6
+ import { createWriteStream, mkdirSync } from 'fs';
7
+ import type { WriteStream } from 'fs';
8
+
6
9
  export type LogLevel = 'debug' | 'info' | 'warn' | 'error';
7
10
 
8
11
  export interface LogConfig {
@@ -12,6 +15,12 @@ export interface LogConfig {
12
15
  level?: LogLevel;
13
16
  /** Enable specific components: ['storage', 'transport', 'subs', 'replication', 'stats', 'keyauth'] or '*' for all */
14
17
  components?: string[] | '*';
18
+ /**
19
+ * Directory to write log files into. Each run creates a new file named
20
+ * `bod-YYYYMMDD-HHmmss.log`. If omitted, logs go to console only.
21
+ * Only active when `enabled: true`.
22
+ */
23
+ logDir?: string;
15
24
  }
16
25
 
17
26
  const LEVELS: Record<LogLevel, number> = { debug: 0, info: 1, warn: 2, error: 3 };
@@ -21,11 +30,31 @@ export class Logger {
21
30
  private minLevel: number;
22
31
  private components: Set<string> | '*';
23
32
  private _cache = new Map<string, ComponentLogger>();
33
+ private _stream: WriteStream | null = null;
34
+ private _streamDead = false;
24
35
 
25
36
  constructor(config?: LogConfig) {
26
37
  this.enabled = config?.enabled ?? false;
27
38
  this.minLevel = LEVELS[config?.level ?? 'info'];
28
39
  this.components = config?.components === '*' ? '*' : new Set(config?.components ?? []);
40
+ if (this.enabled && config?.logDir) {
41
+ try {
42
+ mkdirSync(config.logDir, { recursive: true });
43
+ const d = new Date();
44
+ const ts = `${d.getFullYear()}${pad(d.getMonth() + 1)}${pad(d.getDate())}-${pad(d.getHours())}${pad(d.getMinutes())}${pad(d.getSeconds())}`;
45
+ const file = `${config.logDir}/bod-${ts}.log`;
46
+ this._stream = createWriteStream(file, { flags: 'a' });
47
+ this._stream.on('error', (err) => {
48
+ if (!this._streamDead) {
49
+ console.error(`[BodDB logger] File write error, disabling disk logging: ${err.message}`);
50
+ this._streamDead = true;
51
+ this._stream = null;
52
+ }
53
+ });
54
+ } catch (err: any) {
55
+ console.error(`[BodDB logger] Failed to open log file in "${config.logDir}": ${err.message}`);
56
+ }
57
+ }
29
58
  }
30
59
 
31
60
  forComponent(name: string): ComponentLogger {
@@ -50,9 +79,20 @@ export class Logger {
50
79
  } else {
51
80
  console[level === 'debug' ? 'log' : level](prefix, msg);
52
81
  }
82
+ if (this._stream && !this._streamDead) {
83
+ const dataPart = data !== undefined ? ' ' + JSON.stringify(data) : '';
84
+ this._stream.write(`${prefix} ${msg}${dataPart}\n`);
85
+ }
86
+ }
87
+
88
+ close(): void {
89
+ this._stream?.end();
90
+ this._stream = null;
53
91
  }
54
92
  }
55
93
 
94
+ function pad(n: number): string { return n.toString().padStart(2, '0'); }
95
+
56
96
  export class ComponentLogger {
57
97
  readonly isDebug: boolean;
58
98
  constructor(private logger: Logger, private component: string) {
@@ -325,7 +325,7 @@ describe('B5: Cursor-Based Stream Materialize', () => {
325
325
  });
326
326
 
327
327
  describe('B6: Batched Replication Events', () => {
328
- test('transaction batches repl events', () => {
328
+ test('transaction batches repl events', async () => {
329
329
  const db = new BodDB({ path: ':memory:', replication: { role: 'primary' } });
330
330
  db.replication!['_started'] = true;
331
331
  db.replication!['startPrimary']();
@@ -336,6 +336,7 @@ describe('B6: Batched Replication Events', () => {
336
336
  tx.set('b', 2);
337
337
  tx.set('c', 3);
338
338
  });
339
+ await new Promise(r => setTimeout(r, 50));
339
340
 
340
341
  // All 3 should have been emitted to _repl
341
342
  const replEvents = db.storage.query('_repl');
@@ -344,12 +345,13 @@ describe('B6: Batched Replication Events', () => {
344
345
  db.close();
345
346
  });
346
347
 
347
- test('non-transactional writes emit immediately', () => {
348
+ test('non-transactional writes emit immediately', async () => {
348
349
  const db = new BodDB({ path: ':memory:', replication: { role: 'primary' } });
349
350
  db.replication!['_started'] = true;
350
351
  db.replication!['startPrimary']();
351
352
 
352
353
  db.set('x', 'val');
354
+ await new Promise(r => setTimeout(r, 50));
353
355
  const replEvents = db.storage.query('_repl');
354
356
  expect(replEvents.length).toBe(1);
355
357
 
@@ -119,12 +119,13 @@ describe('repl load test', () => {
119
119
 
120
120
  // ─── 3. Auto-compact under sustained write load ───
121
121
 
122
- it('auto-compact keeps _repl bounded under sustained 10k writes', () => {
122
+ it('auto-compact keeps _repl bounded under sustained 10k writes', async () => {
123
123
  const { db } = primary({ compact: { maxCount: 200, keepKey: 'path' }, autoCompactThreshold: 500 });
124
124
 
125
125
  for (let i = 0; i < 10_000; i++) {
126
126
  db.set(`stream/key${i % 300}`, { round: Math.floor(i / 300), i });
127
127
  }
128
+ await new Promise(r => setTimeout(r, 50));
128
129
 
129
130
  const repl = db.get('_repl') as Record<string, any>;
130
131
  const count = repl ? Object.keys(repl).length : 0;
@@ -215,7 +216,7 @@ describe('repl load test', () => {
215
216
 
216
217
  // ─── 7. Heavy overwrite scenario: same 50 paths written 1000× each ───
217
218
 
218
- it('50 paths × 1000 overwrites: compact deduplicates correctly', () => {
219
+ it('50 paths × 1000 overwrites: compact deduplicates correctly', async () => {
219
220
  const { db } = primary({ compact: { maxCount: 100, keepKey: 'path' }, autoCompactThreshold: 1000 });
220
221
 
221
222
  for (let round = 0; round < 1000; round++) {
@@ -223,6 +224,7 @@ describe('repl load test', () => {
223
224
  db.set(`hot/key${i}`, { round, value: round * 50 + i });
224
225
  }
225
226
  }
227
+ await new Promise(r => setTimeout(r, 50));
226
228
 
227
229
  const repl = db.get('_repl') as Record<string, any>;
228
230
  const count = repl ? Object.keys(repl).length : 0;
@@ -66,9 +66,10 @@ describe('_repl stream bloat', () => {
66
66
 
67
67
  // --- Accumulation ---
68
68
 
69
- it('_repl grows unbounded without compaction', () => {
69
+ it('_repl grows unbounded without compaction', async () => {
70
70
  const { db } = createPrimary();
71
71
  fillRepl(db, 5000, 300);
72
+ await new Promise(r => setTimeout(r, 50));
72
73
  const repl = db.get('_repl') as Record<string, any>;
73
74
  expect(Object.keys(repl).length).toBe(5000);
74
75
  });
@@ -135,13 +136,14 @@ describe('_repl stream bloat', () => {
135
136
 
136
137
  // --- Auto-compact on write threshold ---
137
138
 
138
- it('auto-compact triggers after N writes', () => {
139
+ it('auto-compact triggers after N writes', async () => {
139
140
  const { db } = createPrimary({ compact: { maxCount: 50, keepKey: 'path' }, autoCompactThreshold: 100 });
140
141
 
141
142
  // Write 250 entries — compact triggers at 100, 200; maxCount=50 keeps only 50 each time
142
143
  for (let i = 0; i < 250; i++) {
143
144
  db.set(`data/item${i}`, { value: i });
144
145
  }
146
+ await new Promise(r => setTimeout(r, 50));
145
147
 
146
148
  const repl = db.get('_repl') as Record<string, any>;
147
149
  const count = repl ? Object.keys(repl).length : 0;
@@ -155,6 +157,7 @@ describe('_repl stream bloat', () => {
155
157
  it('short requestTimeout causes streamMaterialize to fail on bloated _repl', async () => {
156
158
  const { db: primary, port: primaryPort } = createPrimary();
157
159
  fillRepl(primary, 10000, 1000); // 10k entries × 1KB = ~10MB response
160
+ await new Promise(r => setTimeout(r, 50));
158
161
 
159
162
  const replCount = Object.keys(primary.get('_repl') as Record<string, any>).length;
160
163
  expect(replCount).toBe(10000);
@@ -227,6 +230,7 @@ describe('_repl stream bloat', () => {
227
230
  it('compaction reduces _repl and speeds up bootstrap', async () => {
228
231
  const { db: primary, port: primaryPort } = createPrimary();
229
232
  fillRepl(primary, 5000, 500);
233
+ await new Promise(r => setTimeout(r, 50));
230
234
 
231
235
  const beforeCount = Object.keys(primary.get('_repl') as Record<string, any>).length;
232
236
  expect(beforeCount).toBe(5000);
@@ -252,7 +256,7 @@ describe('_repl stream bloat', () => {
252
256
 
253
257
  // --- Repeated writes ---
254
258
 
255
- it('repeated writes to same paths bloat _repl with duplicates', () => {
259
+ it('repeated writes to same paths bloat _repl with duplicates', async () => {
256
260
  const { db } = createPrimary();
257
261
 
258
262
  for (let round = 0; round < 50; round++) {
@@ -260,6 +264,7 @@ describe('_repl stream bloat', () => {
260
264
  db.set(`config/setting${i}`, { value: round, updated: Date.now() });
261
265
  }
262
266
  }
267
+ await new Promise(r => setTimeout(r, 50));
263
268
 
264
269
  const repl = db.get('_repl') as Record<string, any>;
265
270
  const totalEntries = Object.keys(repl).length;
@@ -165,7 +165,7 @@ describe('Per-path replication topology', () => {
165
165
 
166
166
  // --- Emit filtering ---
167
167
 
168
- it('primary-mode path emits to _repl', () => {
168
+ it('primary-mode path emits to _repl', async () => {
169
169
  const db = new BodDB({
170
170
  path: ':memory:',
171
171
  sweepInterval: 0,
@@ -175,16 +175,17 @@ describe('Per-path replication topology', () => {
175
175
  },
176
176
  });
177
177
  instances.push(db);
178
- db.replication!.start();
178
+ await db.replication!.start();
179
179
 
180
180
  db.set('_vfs/file1', { data: 'hello' });
181
+ await new Promise(r => setTimeout(r, 50));
181
182
  const repl = db.get('_repl');
182
183
  expect(repl).toBeTruthy();
183
184
  const events = Object.values(repl as Record<string, any>);
184
185
  expect(events.some((e: any) => e.path === '_vfs/file1')).toBe(true);
185
186
  });
186
187
 
187
- it('non-emitting modes do NOT emit', () => {
188
+ it('emitting modes + fallback all emit to _repl', async () => {
188
189
  const db = new BodDB({
189
190
  path: ':memory:',
190
191
  sweepInterval: 0,
@@ -197,12 +198,15 @@ describe('Per-path replication topology', () => {
197
198
  },
198
199
  });
199
200
  instances.push(db);
200
- db.replication!.start();
201
+ await db.replication!.start();
201
202
 
202
203
  db.set('local/data', { v: 1 });
203
204
  db.set('telemetry/t1', { event: 'click' });
204
205
  db.set('other/data', { v: 2 });
205
206
 
207
+ // Wait for deferred setTimeout emits
208
+ await new Promise(r => setTimeout(r, 50));
209
+
206
210
  const repl = db.get('_repl');
207
211
  expect(repl).toBeTruthy();
208
212
  const events = Object.values(repl as Record<string, any>);
@@ -211,7 +215,43 @@ describe('Per-path replication topology', () => {
211
215
  expect(events.some((e: any) => e.path === 'other/data')).toBe(true);
212
216
  });
213
217
 
214
- it('writeonly path emits', () => {
218
+ it('replica and readonly modes do NOT emit to _repl', async () => {
219
+ const pPort = getPort();
220
+ const primary = createNode({ port: pPort, replication: { role: 'primary' } });
221
+ await primary.replication!.start();
222
+
223
+ const db = new BodDB({
224
+ path: ':memory:',
225
+ sweepInterval: 0,
226
+ replication: {
227
+ role: 'primary',
228
+ primaryUrl: `ws://localhost:${pPort}`,
229
+ paths: [
230
+ { path: 'cached', mode: 'replica' },
231
+ { path: 'feeds', mode: 'readonly' },
232
+ { path: 'local', mode: 'primary' },
233
+ ],
234
+ },
235
+ });
236
+ instances.push(db);
237
+ await db.replication!.start();
238
+
239
+ // Write directly (bypassing transport proxy) to test emit filtering
240
+ db.set('local/data', { v: 1 });
241
+ db.set('cached/data', { v: 2 });
242
+ db.set('feeds/data', { v: 3 });
243
+
244
+ // Wait for deferred emits
245
+ await new Promise(r => setTimeout(r, 50));
246
+
247
+ const repl = db.get('_repl');
248
+ const events = repl ? Object.values(repl as Record<string, any>) : [];
249
+ expect(events.some((e: any) => e.path === 'local/data')).toBe(true);
250
+ expect(events.some((e: any) => e.path === 'cached/data')).toBe(false);
251
+ expect(events.some((e: any) => e.path === 'feeds/data')).toBe(false);
252
+ });
253
+
254
+ it('writeonly path emits', async () => {
215
255
  const db = new BodDB({
216
256
  path: ':memory:',
217
257
  sweepInterval: 0,
@@ -221,9 +261,10 @@ describe('Per-path replication topology', () => {
221
261
  },
222
262
  });
223
263
  instances.push(db);
224
- db.replication!.start();
264
+ await db.replication!.start();
225
265
 
226
266
  db.set('telemetry/t1', { event: 'click' });
267
+ await new Promise(r => setTimeout(r, 50));
227
268
  const repl = db.get('_repl');
228
269
  expect(repl).toBeTruthy();
229
270
  const events = Object.values(repl as Record<string, any>);
@@ -796,7 +837,7 @@ describe('Per-path replication topology', () => {
796
837
  it('sync path written locally AND emitted (end-to-end)', async () => {
797
838
  const pPort = getPort();
798
839
  const primary = createNode({ port: pPort, replication: { role: 'primary' } });
799
- primary.replication!.start();
840
+ await primary.replication!.start();
800
841
 
801
842
  const rPort = getPort();
802
843
  const replica = createNode({
@@ -813,6 +854,7 @@ describe('Per-path replication topology', () => {
813
854
  replica.set('config/theme', 'dark');
814
855
  expect(replica.get('config/theme')).toBe('dark');
815
856
 
857
+ await new Promise(r => setTimeout(r, 50));
816
858
  const repl = replica.get('_repl');
817
859
  expect(repl).toBeTruthy();
818
860
  const events = Object.values(repl as Record<string, any>);
@@ -8,6 +8,7 @@ let nextPort = 24400 + Math.floor(Math.random() * 1000);
8
8
  describe('ReplicationEngine', () => {
9
9
  const instances: BodDB[] = [];
10
10
  const clients: BodClient[] = [];
11
+ const tick = () => new Promise(r => setTimeout(r, 50));
11
12
 
12
13
  afterEach(() => {
13
14
  for (const c of clients) c.disconnect();
@@ -48,7 +49,7 @@ describe('ReplicationEngine', () => {
48
49
  }
49
50
 
50
51
  // 1. Primary emits events to _repl on set/delete/update/push
51
- it('primary emits replication events on set/delete/update/push', () => {
52
+ it('primary emits replication events on set/delete/update/push', async () => {
52
53
  const db = new BodDB({
53
54
  path: ':memory:',
54
55
  sweepInterval: 0,
@@ -61,6 +62,7 @@ describe('ReplicationEngine', () => {
61
62
  db.update({ 'users/u2/age': 30 });
62
63
  db.delete('users/u1');
63
64
  db.push('logs', { msg: 'hello' });
65
+ await tick();
64
66
 
65
67
  const replData = db.get('_repl');
66
68
  expect(replData).toBeTruthy();
@@ -76,7 +78,7 @@ describe('ReplicationEngine', () => {
76
78
  });
77
79
 
78
80
  // 2. Loop prevention — _replaying flag prevents re-emission
79
- it('does not emit events when replaying', () => {
81
+ it('does not emit events when replaying', async () => {
80
82
  const db = new BodDB({
81
83
  path: ':memory:',
82
84
  sweepInterval: 0,
@@ -88,13 +90,14 @@ describe('ReplicationEngine', () => {
88
90
  db.setReplaying(true);
89
91
  db.set('users/u1', { name: 'Test' });
90
92
  db.setReplaying(false);
93
+ await tick();
91
94
 
92
95
  const replData = db.get('_repl');
93
96
  expect(replData).toBeNull();
94
97
  });
95
98
 
96
99
  // 3. Excluded prefixes are not replicated
97
- it('skips excluded prefixes', () => {
100
+ it('skips excluded prefixes', async () => {
98
101
  const db = new BodDB({
99
102
  path: ':memory:',
100
103
  sweepInterval: 0,
@@ -105,6 +108,7 @@ describe('ReplicationEngine', () => {
105
108
 
106
109
  db.set('internal/config', { foo: 1 });
107
110
  db.set('users/u1', { name: 'Eli' });
111
+ await tick();
108
112
 
109
113
  const replData = db.get('_repl') as Record<string, any>;
110
114
  const entries = Object.values(replData);
@@ -160,7 +164,7 @@ describe('ReplicationEngine', () => {
160
164
  });
161
165
 
162
166
  // 7. Transaction events are replicated
163
- it('transaction write events are replicated', () => {
167
+ it('transaction write events are replicated', async () => {
164
168
  const db = new BodDB({
165
169
  path: ':memory:',
166
170
  sweepInterval: 0,
@@ -173,6 +177,7 @@ describe('ReplicationEngine', () => {
173
177
  tx.set('a/1', 'one');
174
178
  tx.set('a/2', 'two');
175
179
  });
180
+ await tick();
176
181
 
177
182
  const replData = db.get('_repl') as Record<string, any>;
178
183
  const entries = Object.values(replData);
@@ -211,7 +216,7 @@ describe('ReplicationEngine', () => {
211
216
  });
212
217
 
213
218
  // 10. Multi-path update exclusion check (all paths checked, not just first)
214
- it('update with mixed excluded/non-excluded paths emits only non-excluded', () => {
219
+ it('update with mixed excluded/non-excluded paths emits only non-excluded', async () => {
215
220
  const db = new BodDB({
216
221
  path: ':memory:',
217
222
  sweepInterval: 0,
@@ -221,6 +226,7 @@ describe('ReplicationEngine', () => {
221
226
  db.replication!.start();
222
227
 
223
228
  db.update({ '_internal/x': 1, 'data/y': 2 });
229
+ await tick();
224
230
 
225
231
  const replData = db.get('_repl') as Record<string, any>;
226
232
  const entries = Object.values(replData);
@@ -229,7 +235,7 @@ describe('ReplicationEngine', () => {
229
235
  });
230
236
 
231
237
  // 11. Sweep fires delete events for expired paths
232
- it('sweep expired paths are replicated as deletes', () => {
238
+ it('sweep expired paths are replicated as deletes', async () => {
233
239
  const db = new BodDB({
234
240
  path: ':memory:',
235
241
  sweepInterval: 0,
@@ -242,6 +248,7 @@ describe('ReplicationEngine', () => {
242
248
  // Force expiry by manipulating the DB directly
243
249
  db.storage.db.prepare('UPDATE nodes SET expires_at = 1 WHERE path LIKE ?').run('sessions/s1%');
244
250
  db.sweep();
251
+ await tick();
245
252
 
246
253
  const replData = db.get('_repl') as Record<string, any>;
247
254
  const entries = Object.values(replData);
@@ -251,7 +258,7 @@ describe('ReplicationEngine', () => {
251
258
  });
252
259
 
253
260
  // 12. Recursion guard — emit doesn't infinite loop
254
- it('emit does not cause infinite recursion', () => {
261
+ it('emit does not cause infinite recursion', async () => {
255
262
  const db = new BodDB({
256
263
  path: ':memory:',
257
264
  sweepInterval: 0,
@@ -262,6 +269,7 @@ describe('ReplicationEngine', () => {
262
269
 
263
270
  db.set('users/u1', 'test');
264
271
  expect(db.get('users/u1')).toBe('test');
272
+ await tick();
265
273
  const replData = db.get('_repl') as Record<string, any>;
266
274
  expect(Object.values(replData).length).toBe(1);
267
275
  });
@@ -333,6 +341,7 @@ describe('ReplicationEngine', () => {
333
341
  expect(local.get('ext/users/u1')).toEqual({ name: 'Eli' });
334
342
 
335
343
  local.set('mydata/x', 'hello');
344
+ await tick();
336
345
  const replData = local.get('_repl') as Record<string, any>;
337
346
  const entries = Object.values(replData);
338
347
  expect(entries.some((e: any) => e.path === 'mydata/x')).toBe(true);