@bod.ee/db 0.7.0 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,7 +17,13 @@
17
17
  "Bash(cd:*)",
18
18
  "Bash(bod-db:*)",
19
19
  "Bash(ls:*)",
20
- "Bash(ssh:*)"
20
+ "Bash(ssh:*)",
21
+ "Bash(true:*)",
22
+ "Bash(tail:*)",
23
+ "Bash(lsof -ti:4400,4401 2>/dev/null | xargs kill -9 2>/dev/null)",
24
+ "WebFetch(domain:151.145.81.254)",
25
+ "WebFetch(domain:db-main.bod.ee)",
26
+ "Bash(md5:*)"
21
27
  ]
22
28
  }
23
29
  }
@@ -41,6 +41,13 @@ export default {
41
41
  vectors: { dimensions: 384 },
42
42
  mq: { visibilityTimeout: 30, maxDeliveries: 5 },
43
43
  compact: { 'events/logs': { maxAge: 86400 } },
44
+ vfs: { storageRoot: './files' },
45
+ replication: {
46
+ role: 'primary',
47
+ sources: [
48
+ { url: 'ws://other-db:4400', paths: ['catalog'], localPrefix: 'ext', id: 'my-source' },
49
+ ],
50
+ },
44
51
  } satisfies Partial<BodDBOptions>;
45
52
  ```
46
53
 
@@ -27,3 +27,37 @@ bun run deploy/deploy.ts <name> deploy
27
27
  1. Create `deploy/<name>.yaml` with app.name, app.dir, runtime.port, service.name, https.domain
28
28
  2. Create `deploy/prod-<name>.config.ts` with BodDB config
29
29
  3. Run `bun run deploy/deploy.ts <name> bootstrap`
30
+
31
+ ## Deploying Replicas
32
+ Add `replication` to the replica's config:
33
+ ```typescript
34
+ // deploy/prod-eu-replica.config.ts
35
+ export default {
36
+ path: './data-replica.db',
37
+ port: 4401,
38
+ replication: {
39
+ role: 'replica',
40
+ primaryUrl: 'ws://primary-host:4400',
41
+ replicaId: 'eu-replica-1',
42
+ },
43
+ };
44
+ ```
45
+ Primary config just needs `replication: { role: 'primary' }`. Replicas auto-bootstrap on startup.
46
+
47
+ ## Multi-Source Feed Subscriptions
48
+ Pull specific paths from multiple remote DBs:
49
+ ```typescript
50
+ // deploy/prod-aggregator.config.ts
51
+ export default {
52
+ path: './data-aggregator.db',
53
+ port: 4402,
54
+ replication: {
55
+ role: 'primary',
56
+ sources: [
57
+ { url: 'ws://db-a:4400', paths: ['catalog'], localPrefix: 'a', id: 'agg-catalog' },
58
+ { url: 'ws://db-b:4400', paths: ['alerts'], localPrefix: 'b', id: 'agg-alerts' },
59
+ ],
60
+ },
61
+ };
62
+ ```
63
+ Set `id` for persistent consumer group offsets across restarts. Sources work with any role.
@@ -30,7 +30,10 @@ src/server/VectorEngine.ts — vector similarity search (brute-force cosine
30
30
  src/server/StreamEngine.ts — Kafka-like event streaming: consumer groups, offsets, replay, durable subs
31
31
  src/server/MQEngine.ts — SQS-style message queue: push/fetch/ack/nack, visibility timeout, DLQ
32
32
  src/server/FileAdapter.ts — file system sync, watch, metadata, content read/write-through
33
- src/client/BodClient.ts WS client: CRUD, batch, push, subs, streams, auto-reconnect
33
+ src/server/ReplicationEngine.ts primary/replica replication: write hooks, _repl stream, bootstrap, proxy
34
+ src/server/VFSEngine.ts — virtual file system: backend abstraction, LocalBackend, metadata in DB
35
+ src/client/BodClient.ts — WS client: CRUD, batch, push, subs, streams, VFS, auto-reconnect
36
+ src/client/CachedClient.ts — two-tier cache (memory + IndexedDB): stale-while-revalidate, sub-aware
34
37
  src/react/hooks.ts — useValue, useChildren, useQuery, useMutation
35
38
  client.ts — client entry point
36
39
  react.ts — React hooks entry point
@@ -80,6 +83,17 @@ Push paths are append-only logs. `StreamEngine` adds consumer group offsets (`_s
80
83
  ### MQ (Message Queue)
81
84
  `MQEngine` owns all MQ SQL via `storage.db.prepare()` — same pattern as StreamEngine. Columns: `mq_status` (pending/inflight), `mq_inflight_until` (Unix ms), `mq_delivery_count`. `fetch()` uses SQLite transaction with TOCTOU guard (`changes > 0`). Ack = DELETE. Sweep reclaims expired inflight; exhausted messages move to DLQ at `<queue>/_dlq/<key>`. Per-queue options via longest prefix match on `queues` config.
82
85
 
86
+ ### Replication
87
+ `ReplicationEngine` — primary/replica + multi-source feed subscriptions via `_repl` stream. Primary: `onWrite` hooks emit events to `_repl` stream (updates flattened to per-path sets). Replica: bootstraps via `streamMaterialize`, subscribes for ongoing events, proxies writes to primary. Guards: `_replaying` prevents re-emission, `_emitting` prevents recursion from `db.push('_repl')`. Sweep deletes are replicated. Transport checks `isReplica` and forwards write ops.
88
+
89
+ **Sources** (`ReplicationSource[]`): independent of role. Each source creates a `BodClient`, bootstraps filtered `_repl` snapshot, subscribes for ongoing events. `matchesSourcePaths()` filters by path prefix. `remapPath()` prepends `localPrefix`. Events applied with `_replaying=true`. Sources connect via `Promise.allSettled` — individual failures logged, others continue. Deterministic `groupId` default: `source_${url}_${paths.join('+')}`.
90
+
91
+ ### VFS (Virtual File System)
92
+ `VFSEngine` — pluggable `VFSBackend` interface (`read/write/delete/exists`). `LocalBackend` stores files at `<storageRoot>/<fileId>` via `Bun.file`/`Bun.write`. fileId = pushId (move/rename = metadata-only). Metadata stored at `_vfs/<path>/__meta` (uses `__meta` key to avoid collision with children in leaf-flattened storage). Gets subscriptions, rules, replication for free. REST transport at `/files/<path>`, WS chunked fallback (base64, 48KB chunks). Client: `VFSClient` via `client.vfs()`.
93
+
94
+ ### CachedClient
95
+ `CachedClient` wraps `BodClient` with two-tier cache: in-memory `Map` (LRU, insertion-ordered eviction) + IndexedDB (`entries` object store, keyPath `path`). `get()` uses stale-while-revalidate: subscribed paths return immediately (sub keeps cache fresh), unsubscribed return stale + background `getSnapshot()`. Writes (`set/update/delete`) invalidate path + all ancestors via `pathUtils.ancestors()`. `init()` opens IDB + sweeps expired. `warmup()` bulk-loads synchronously in single IDB transaction. `close()` cleans up. Protocol: `StorageEngine.getWithMeta()` returns `{ data, updatedAt }`, `ValueSnapshot.updatedAt` carries it to client.
96
+
83
97
  ### FileAdapter
84
98
  Scans directory recursively on `start()`. Optional `fs.watch` for live sync. Stores metadata (size, mtime, mime) at `basePath/<relPath>`. Content read/write-through methods.
85
99
 
@@ -108,10 +122,14 @@ Path patterns with `$wildcard` capture. Most specific match wins. Supports boole
108
122
  - **Phase 5** (DONE): Rules config files, transforms/sentinels, refs, transactions, batch, push, TTL, FileAdapter, FTS5, VectorEngine
109
123
  - **Phase 6** (DONE): Event streaming — consumer groups, offset tracking, replay, idempotent push, StreamEngine
110
124
  - **Phase 7** (DONE): Message queue — MQEngine, push/fetch/ack/nack, visibility timeout, DLQ, exactly-once claim
125
+ - **Phase 8** (DONE): MCP server — MCPAdapter, stdio + HTTP, BodClient-based, 21 tools
126
+ - **Phase 9** (DONE): Replication — ReplicationEngine, primary + read replicas, write proxy, bootstrap, _repl stream
127
+ - **Phase 10** (DONE): VFS — VFSEngine, LocalBackend, REST + WS transport, VFSClient SDK
128
+ - **Phase 11** (DONE): CachedClient — two-tier cache (memory + IndexedDB), stale-while-revalidate, updatedAt protocol
111
129
 
112
130
  ## Testing
113
131
 
114
- - `bun test` — 187 tests across 19 files
132
+ - `bun test` — 228 tests across 22 files
115
133
  - Each engine/feature gets its own test file in `tests/`
116
134
  - Test happy path, edge cases, error cases
117
135
  - Use `{ sweepInterval: 0 }` in tests to disable background sweep
@@ -205,6 +205,55 @@ await client.batch([
205
205
  const key = await client.push('logs', { msg: 'hello' });
206
206
 
207
207
  // Query, subscribe, disconnect — same as before
208
+
209
+ // getSnapshot — includes updatedAt metadata
210
+ const snap = await client.getSnapshot('users/u1');
211
+ snap.val(); // { name: 'Alice' }
212
+ snap.updatedAt; // 1708900000000 (ms timestamp from server)
213
+ ```
214
+
215
+ ## CachedClient (Browser Cache)
216
+
217
+ Two-tier cache wrapper around BodClient: in-memory Map (LRU) + IndexedDB persistence.
218
+ Stale-while-revalidate: subscribed paths always fresh, unsubscribed return stale + background refetch.
219
+
220
+ ```typescript
221
+ import { BodClient, CachedClient } from 'bod-db/client';
222
+
223
+ const client = new BodClient({ url: 'ws://localhost:4400' });
224
+ await client.connect();
225
+
226
+ const cached = new CachedClient(client, {
227
+ maxAge: 7 * 24 * 60 * 60 * 1000, // IDB entry TTL (7 days)
228
+ maxMemoryEntries: 500, // LRU eviction cap
229
+ dbName: 'boddb-cache', // IndexedDB database name
230
+ enabled: true, // false = pure passthrough
231
+ });
232
+ await cached.init(); // opens IDB, sweeps expired entries
233
+
234
+ // Stale-while-revalidate get
235
+ const val = await cached.get('users/u1'); // network on first call, cache thereafter
236
+
237
+ // Subscriptions keep cache fresh
238
+ const off = cached.on('users/u1', (snap) => {
239
+ // snap.val() is always fresh, cache updated automatically
240
+ });
241
+ off();
242
+
243
+ // Writes invalidate cache (path + ancestors)
244
+ await cached.set('users/u1/name', 'Bob');
245
+ await cached.update({ 'users/u1/role': 'admin' });
246
+ await cached.delete('users/u1');
247
+
248
+ // Warmup — bulk-load from IDB into memory on page load
249
+ await cached.warmup(['users/u1', 'users/u2', 'config/app']);
250
+
251
+ // Passthrough for non-cached ops
252
+ cached.client.push('logs', { msg: 'hello' });
253
+ cached.client.batch([...]);
254
+ cached.client.mq('queue').push({ ... });
255
+
256
+ cached.close(); // cleanup IDB + memory
208
257
  ```
209
258
 
210
259
  ## Wire Protocol
@@ -388,6 +437,121 @@ await client.streamCompact('events/orders', { maxAge: 86400 });
388
437
  await client.streamReset('events/orders');
389
438
  ```
390
439
 
440
+ ## Replication (Primary + Read Replicas)
441
+
442
+ ```typescript
443
+ // Primary — emits write events to _repl stream
444
+ const primary = new BodDB({
445
+ path: './data.db',
446
+ replication: { role: 'primary' },
447
+ });
448
+ primary.serve({ port: 4400 });
449
+ await primary.replication!.start();
450
+
451
+ // Replica — connects to primary, bootstraps, proxies writes
452
+ const replica = new BodDB({
453
+ path: ':memory:',
454
+ replication: {
455
+ role: 'replica',
456
+ primaryUrl: 'ws://primary-host:4400',
457
+ replicaId: 'eu-replica-1',
458
+ },
459
+ });
460
+ replica.serve({ port: 4401 });
461
+ await replica.replication!.start();
462
+
463
+ // Clients connect to nearest replica for reads (<5ms)
464
+ // Writes are proxied to primary automatically
465
+ ```
466
+
467
+ - Star topology: single primary, N replicas
468
+ - Reads served locally on replica, writes forwarded to primary
469
+ - Replicas bootstrap full state on first connect, then consume live events
470
+ - Push keys preserved across replicas (deterministic)
471
+ - Auto-compaction on `_repl` stream (keepKey: 'path', maxCount: 10000)
472
+ - Excluded prefixes: `_repl`, `_streams`, `_mq` (internal data not replicated)
473
+
474
+ ### Multi-Source Feed Subscriptions
475
+
476
+ Subscribe to specific paths from multiple remote BodDB instances — like RSS feeds for database paths.
477
+
478
+ ```typescript
479
+ // Local DB pulls catalog from DB-A, alerts from DB-B
480
+ const db = new BodDB({
481
+ path: './local.db',
482
+ replication: {
483
+ role: 'primary', // can also be own primary AND consume sources
484
+ sources: [
485
+ { url: 'ws://db-a:4400', paths: ['catalog', 'config'], localPrefix: 'db-a' },
486
+ { url: 'ws://db-b:4400', paths: ['alerts'], localPrefix: 'db-b' },
487
+ ],
488
+ },
489
+ });
490
+ await db.replication!.start();
491
+ // Remote catalog/widgets → local db-a/catalog/widgets
492
+ // Remote alerts/sys-1 → local db-b/alerts/sys-1
493
+ ```
494
+
495
+ - Sources are independent of role — a DB can be primary + consume sources
496
+ - Each source is a one-way pull (read-only subscription)
497
+ - Path filtering at receive time: only matching paths are applied locally
498
+ - `localPrefix` remaps paths to avoid collisions between sources
499
+ - `excludePrefixes` per source for fine-grained filtering
500
+ - `id` sets a deterministic consumer group ID (persistent offset across restarts)
501
+ - Sources connect in parallel; individual failures don't block others
502
+
503
+ ## Virtual File System (VFS)
504
+
505
+ Files stored on disk, metadata in BodDB (gets subscriptions, rules, replication for free).
506
+
507
+ ```typescript
508
+ // Server — enable VFS
509
+ const db = new BodDB({ vfs: { storageRoot: './files' } });
510
+ db.serve();
511
+
512
+ // Server-side direct access
513
+ db.vfs!.write('docs/readme.txt', new TextEncoder().encode('Hello'));
514
+ const data = db.vfs!.read('docs/readme.txt'); // Uint8Array
515
+ const stat = db.vfs!.stat('docs/readme.txt'); // FileStat
516
+ const list = db.vfs!.list('docs'); // FileStat[]
517
+ db.vfs!.mkdir('docs/drafts');
518
+ db.vfs!.move('docs/readme.txt', 'archive/readme.txt');
519
+ db.vfs!.remove('archive/readme.txt');
520
+ ```
521
+
522
+ ### Client SDK
523
+
524
+ ```typescript
525
+ const vfs = client.vfs();
526
+
527
+ // REST (preferred for binary data)
528
+ await vfs.upload('docs/report.pdf', pdfBytes);
529
+ const bytes = await vfs.download('docs/report.pdf'); // Uint8Array
530
+
531
+ // WS chunked fallback (base64, for environments without HTTP)
532
+ await vfs.uploadWS('docs/report.pdf', pdfBytes);
533
+ const bytes2 = await vfs.downloadWS('docs/report.pdf');
534
+
535
+ // Metadata ops
536
+ const stat = await vfs.stat('docs/report.pdf'); // FileStat
537
+ const list = await vfs.list('docs'); // FileStat[]
538
+ await vfs.mkdir('docs/drafts');
539
+ await vfs.move('docs/report.pdf', 'archive/report.pdf');
540
+ await vfs.delete('archive/report.pdf');
541
+ ```
542
+
543
+ ### REST API
544
+
545
+ ```
546
+ POST /files/<path> — upload (raw binary body)
547
+ GET /files/<path> — download
548
+ GET /files/<path>?stat=1 — metadata JSON
549
+ GET /files/<path>?list=1 — list directory JSON
550
+ POST /files/<path>?mkdir=1 — create directory
551
+ PUT /files/<path>?move=dst — move/rename
552
+ DELETE /files/<path> — delete
553
+ ```
554
+
391
555
  ## Best Practices
392
556
 
393
557
  1. **Paths are your schema** — design upfront (`users/$uid/settings/theme`)
@@ -401,3 +565,4 @@ await client.streamReset('events/orders');
401
565
  9. **`port: 0` in tests** — random available port
402
566
  10. **Streams for event processing** — consumer groups with offset tracking, replay on reconnect
403
567
  11. **Idempotent push** — dedup with `idempotencyKey` to prevent duplicate events
568
+ 12. **CachedClient for browsers** — wrap BodClient for instant reads + cross-reload persistence via IndexedDB
@@ -0,0 +1,111 @@
1
+ name: Test and Publish
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ - master
8
+ pull_request:
9
+ branches:
10
+ - main
11
+ - master
12
+ workflow_dispatch: # Allow manual trigger
13
+
14
+ jobs:
15
+ test:
16
+ runs-on: ubuntu-latest
17
+
18
+ steps:
19
+ - name: Checkout repository
20
+ uses: actions/checkout@v4
21
+
22
+ - name: Set up Node.js
23
+ uses: actions/setup-node@v4
24
+ with:
25
+ node-version: '20'
26
+
27
+ - name: Install Bun
28
+ uses: oven-sh/setup-bun@v1
29
+
30
+ - name: Install dependencies
31
+ run: bun install
32
+
33
+ - name: Build
34
+ run: bun run build
35
+
36
+ - name: Run tests
37
+ env:
38
+ # Placeholder env var only — replace with real secrets, e.g. API_KEY: ${{ secrets.API_KEY }}
39
+ a: "1"
40
+ run: bun test
41
+
42
+ - name: Upload test results
43
+ if: always()
44
+ uses: actions/upload-artifact@v4
45
+ with:
46
+ name: test-results
47
+ path: build/test_reports/
48
+
49
+ publish-npm:
50
+ needs: test # Ensures tests must pass before publishing
51
+ # Only run on push to main (not on PRs)
52
+ if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' && !contains(github.event.head_commit.message, '[skip ci]') }}
53
+ runs-on: ubuntu-latest
54
+ permissions:
55
+ contents: write # Needed to checkout the repository AND push the version commit/tag
56
+ # id-token: write # Uncomment if using OIDC for provenance
57
+
58
+ steps:
59
+ - name: Checkout repository
60
+ uses: actions/checkout@v4
61
+ with:
62
+ # Fetch all history and tags for version bumping
63
+ fetch-depth: 0
64
+
65
+ - name: Set up Node.js
66
+ uses: actions/setup-node@v4
67
+ with:
68
+ node-version: '20'
69
+ registry-url: 'https://registry.npmjs.org/'
70
+
71
+ - name: Install Bun
72
+ uses: oven-sh/setup-bun@v1
73
+
74
+ - name: Install dependencies
75
+ run: bun install --frozen-lockfile
76
+
77
+ - name: Configure Git
78
+ run: |
79
+ git config user.name "GitHub Actions Bot"
80
+ git config user.email "actions@github.com"
81
+
82
+ - name: Build
83
+ run: bun run build
84
+
85
+ - name: Generate models docs
86
+ run: bun run gen:models
87
+
88
+ - name: Bump version and commit changes
89
+ run: |
90
+ # Stage all changes including build artifacts
91
+ git add .
92
+ # Create version bump commit with all changes
93
+ npm version patch --no-git-tag-version
94
+ git add package.json bun.lock
95
+ git commit -m "chore(release): v$(node -p "require('./package.json').version") [skip ci]"
96
+
97
+ - name: Publish package to npm
98
+ run: npm publish --access public
99
+ env:
100
+ NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
101
+ # Uncomment for provenance:
102
+ # NPM_CONFIG_PROVENANCE: "true"
103
+
104
+ - name: Create and push tag
105
+ run: |
106
+ git tag "v$(node -p "require('./package.json').version")"
107
+ git push --follow-tags
108
+
109
+ # NOTE(review): duplicate 'permissions' key — already declared at the top of this job (GitHub Actions rejects duplicate mapping keys)
110
+ # contents: write
111
+
package/CLAUDE.md CHANGED
@@ -22,7 +22,10 @@ src/server/StreamEngine.ts — Kafka-like event streaming: consumer groups, offs
22
22
  src/server/MQEngine.ts — SQS-style message queue: push/fetch/ack/nack, visibility timeout, DLQ, exactly-once claim
23
23
  src/server/FileAdapter.ts — file system sync: scan, watch, metadata, content read/write-through
24
24
  src/server/MCPAdapter.ts — MCP (Model Context Protocol) server: JSON-RPC dispatch, tool registry, stdio + HTTP transports
25
+ src/server/VFSEngine.ts — virtual file system: VFSBackend interface, LocalBackend (disk), metadata in DB
26
+ src/server/ReplicationEngine.ts — replication: primary/replica + multi-source feed subscriptions, write proxy
25
27
  src/client/BodClient.ts — WS client: connect, auth, CRUD, batch, push, subscriptions, auto-reconnect
28
+ src/client/CachedClient.ts — two-tier cache (memory + IndexedDB): stale-while-revalidate, sub-aware, write invalidation
26
29
  src/react/hooks.ts — useValue, useChildren, useQuery, useMutation
27
30
  client.ts — client entry point (import from 'bod-db/client')
28
31
  react.ts — React hooks entry point (import from 'bod-db/react')
@@ -54,8 +57,11 @@ config.ts — demo instance config (open rules, indexes, fts, v
54
57
  - **SSE fallback**: `GET /sse/<path>?event=value|child` returns `text/event-stream`. Initial `: ok` comment flushes the stream connection.
55
58
  - **Perf**: `snapshotExisting` and `notify` are skipped when no subscriptions are active. `exists()` uses `LIMIT 1`.
56
59
  - **Transport**: WS messages follow `protocol.ts` types. REST at `/db/<path>`. Auth via `op:'auth'` message. Subs cleaned up on disconnect.
57
- - **BodClient**: id-correlated request/response over WS. Auto-reconnect with exponential backoff. Re-subscribes all active subs on reconnect. `ValueSnapshot` with `.val()`, `.key`, `.path`, `.exists()`.
60
+ - **BodClient**: id-correlated request/response over WS. Auto-reconnect with exponential backoff. Re-subscribes all active subs on reconnect. `ValueSnapshot` with `.val()`, `.key`, `.path`, `.exists()`, `.updatedAt`. `getSnapshot(path)` returns ValueSnapshot with `updatedAt` from server.
61
+ - **CachedClient**: two-tier cache wrapper around BodClient. Memory (Map, LRU eviction) + IndexedDB persistence. Stale-while-revalidate: subscribed paths always fresh, unsubscribed return stale + background refetch. Writes (`set/update/delete`) invalidate path + ancestors. `init()` opens IDB + sweeps expired. `warmup(paths[])` bulk-loads from IDB. Passthrough for `push/batch/query/search/mq/stream/vfs` via `cachedClient.client`.
58
62
  - **MCP**: `MCPAdapter` wraps a `BodClient` as a JSON-RPC MCP server (stdio + HTTP). Connects to a running BodDB instance over WebSocket — no embedded DB. Entry point: `mcp.ts`. Tools: CRUD (6), FTS (2), vectors (2), streams (4), MQ (7) = 21 tools. Use `--stdio` for Claude Code/Desktop, `--http` for remote agents.
63
+ - **VFS (Virtual File System)**: `VFSEngine` — files stored outside SQLite via pluggable `VFSBackend` interface. `LocalBackend` stores at `<storageRoot>/<fileId>` using `Bun.file`/`Bun.write`. Metadata at `_vfs/<virtualPath>/` (size, mime, mtime, fileId, isDir) — gets subs/rules/replication for free. `fileId = pushId` so move/rename is metadata-only. REST: `POST/GET/DELETE /files/<path>`, `?stat=1`, `?list=1`, `?mkdir=1`, `PUT ?move=<dst>`. WS chunked fallback: base64-encoded `vfs-upload-init/chunk/done`, `vfs-download-init` → `vfs-download-chunk` push messages. Client: `VFSClient` via `client.vfs()` — `upload/download` (REST) + `uploadWS/downloadWS` (WS) + `stat/list/mkdir/delete/move`.
64
+ - **Replication**: `ReplicationEngine` — single primary + N read replicas + multi-source feed subscriptions. Star topology. Primary emits write events to `_repl` stream via `onWrite` hooks. Replicas bootstrap via `streamMaterialize('_repl', { keepKey: 'path' })`, then subscribe for ongoing events. Write proxy: replica forwards writes to primary via BodClient, primary applies + emits, replica consumes. `_replaying` flag prevents re-emission loops. `_emitting` guard prevents recursion from `db.push('_repl')`. Updates flattened to per-path set events for correct compaction keying. Sweep delete events replicated. Excluded prefixes: `_repl`, `_streams`, `_mq`. **Sources**: `ReplicationSource[]` — subscribe to specific paths from multiple remote DBs. Each source is an independent BodClient that filters `_repl` events by path prefix, with optional `localPrefix` remapping (e.g. remote `users/u1` → local `db-a/users/u1`). Sources connect in parallel; individual failures don't block others. Sources are independent of role — a DB can be primary AND consume sources.
59
65
 
60
66
  ## MCP Server
61
67
 
@@ -108,3 +114,6 @@ bun test tests/storage.test.ts # single file
108
114
  - [x] Phase 6: Event streaming (consumer groups, offset tracking, replay, idempotent push, StreamEngine)
109
115
  - [x] Phase 7: Message queue (MQEngine — push/fetch/ack/nack, visibility timeout, DLQ, exactly-once claim)
110
116
  - [x] Phase 8: MCP server (MCPAdapter — stdio + HTTP, BodClient-based, 21 tools)
117
+ - [x] Phase 9: Replication (ReplicationEngine — primary + read replicas, write proxy, bootstrap, _repl stream, multi-source feed subscriptions)
118
+ - [x] Phase 10: VFS — Virtual File System (VFSEngine, LocalBackend, REST + WS transport, VFSClient)
119
+ - [x] Phase 11: CachedClient — two-tier cache (memory + IndexedDB), stale-while-revalidate, updatedAt protocol
package/README.md CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
 
4
4
  ----
5
- 187 tests passing · ~200k ops/sec · Zero dependencies · Full docs + examples
5
+ 228 tests passing · ~200k ops/sec · Zero dependencies · Full docs + examples
6
6
  ----
7
7
 
8
8
 
@@ -120,6 +120,33 @@ client.onChild('users', (e) => console.log(e.type, e.key));
120
120
  client.disconnect();
121
121
  ```
122
122
 
123
+ ## CachedClient (Browser Cache)
124
+
125
+ ```typescript
126
+ import { BodClient, CachedClient } from 'bod-db/client';
127
+
128
+ const client = new BodClient({ url: 'ws://localhost:4400' });
129
+ await client.connect();
130
+
131
+ const cached = new CachedClient(client, {
132
+ maxMemoryEntries: 500, // LRU eviction cap
133
+ maxAge: 7 * 24 * 3600000, // IDB TTL (7 days)
134
+ });
135
+ await cached.init();
136
+
137
+ // Stale-while-revalidate: instant cache hit, background refetch
138
+ const val = await cached.get('users/u1');
139
+
140
+ // Subscriptions keep cache fresh automatically
141
+ const off = cached.on('users/u1', (snap) => console.log(snap.val()));
142
+
143
+ // Writes invalidate cache (path + ancestors)
144
+ await cached.set('users/u1/name', 'Bob');
145
+
146
+ // Warmup on page load
147
+ await cached.warmup(['users/u1', 'config/app']);
148
+ ```
149
+
123
150
  ## Streams (Kafka-like)
124
151
 
125
152
  ```typescript
@@ -161,6 +188,34 @@ const msgs = await q.fetch(5);
161
188
  await q.ack(msgs[0].key);
162
189
  ```
163
190
 
191
+ ## Replication
192
+
193
+ ```typescript
194
+ // Primary — emits writes to _repl stream
195
+ const primary = new BodDB({ replication: { role: 'primary' } });
196
+ primary.serve({ port: 4400 });
197
+ await primary.replication!.start();
198
+
199
+ // Replica — reads local, writes proxied to primary
200
+ const replica = new BodDB({
201
+ replication: { role: 'replica', primaryUrl: 'ws://primary:4400', replicaId: 'r1' },
202
+ });
203
+ await replica.replication!.start();
204
+
205
+ // Multi-source — pull specific paths from multiple remote DBs
206
+ const aggregator = new BodDB({
207
+ replication: {
208
+ role: 'primary',
209
+ sources: [
210
+ { url: 'ws://db-a:4400', paths: ['catalog'], localPrefix: 'a', id: 'src-a' },
211
+ { url: 'ws://db-b:4400', paths: ['alerts'], localPrefix: 'b', id: 'src-b' },
212
+ ],
213
+ },
214
+ });
215
+ await aggregator.replication!.start();
216
+ // Remote catalog/item → local a/catalog/item
217
+ ```
218
+
164
219
  ## Rules
165
220
 
166
221
  Function-based, expression strings, or JSON config files:
@@ -248,5 +303,5 @@ bun run tests/bench.ts
248
303
  ## Test
249
304
 
250
305
  ```bash
251
- bun test # 187 tests
306
+ bun test # 228 tests
252
307
  ```
package/admin/proxy.ts ADDED
@@ -0,0 +1,79 @@
1
+ // Admin UI proxy — serves ui.html locally, relays all WS traffic to a remote BodDB server.
2
+ // Usage: bun run admin/proxy.ts <remote-url> [--port <local-port>]
3
+ // Example: bun run admin/proxy.ts wss://db-main.bod.ee --port 4401
4
+
5
+ import { join } from 'path';
6
+ import type { ServerWebSocket } from 'bun';
7
+
8
+ const args = process.argv.slice(2);
9
+ const remoteUrl = args.find(a => !a.startsWith('--')); // NOTE(review): a flag's value (e.g. '4401' after '--port') is taken as the URL if the URL is omitted — verify args before the usage check
10
+ if (!remoteUrl) { console.error('Usage: bun run admin/proxy.ts <remote-ws-url> [--port <port>]'); process.exit(1); }
11
+
12
+ const portIdx = args.indexOf('--port');
13
+ const PORT = portIdx >= 0 ? Number(args[portIdx + 1]) : 4401;
14
+ const UI_PATH = join(import.meta.dir, 'ui.html');
15
+
16
+ interface ProxyWsData {
17
+ remote: WebSocket | null;
18
+ queue: string[]; // buffered messages while remote connects
19
+ }
20
+
21
+ const server = Bun.serve({
22
+ port: PORT,
23
+ fetch(req, server) {
24
+ const url = new URL(req.url);
25
+ if (req.headers.get('upgrade') === 'websocket') {
26
+ if (server.upgrade(req, { data: { remote: null, queue: [] } as ProxyWsData })) return;
27
+ return new Response('WS upgrade failed', { status: 400 });
28
+ }
29
+ if (url.pathname === '/' || url.pathname === '/ui.html') {
30
+ return new Response(Bun.file(UI_PATH));
31
+ }
32
+ // Proxy REST to remote via fetch
33
+ if (url.pathname.startsWith('/db/') || url.pathname === '/rules' ||
34
+ url.pathname === '/transform' || url.pathname === '/set-ttl' ||
35
+ url.pathname === '/sweep' || url.pathname.startsWith('/fts/') ||
36
+ url.pathname.startsWith('/vectors/')) {
37
+ const remoteHttp = remoteUrl.replace(/^ws/, 'http');
38
+ const target = new URL(url.pathname + url.search, remoteHttp);
39
+ return fetch(target.toString(), {
40
+ method: req.method,
41
+ headers: req.headers,
42
+ body: req.body,
43
+ });
44
+ }
45
+ return new Response('Not found', { status: 404 });
46
+ },
47
+ websocket: {
48
+ open(ws: ServerWebSocket<ProxyWsData>) {
49
+ const remote = new WebSocket(remoteUrl);
50
+ ws.data.remote = remote;
51
+
52
+ remote.onopen = () => {
53
+ // Flush queued messages
54
+ for (const msg of ws.data.queue) remote.send(msg);
55
+ ws.data.queue = [];
56
+ };
57
+ remote.onmessage = (e) => {
58
+ ws.send(typeof e.data === 'string' ? e.data : e.data.toString());
59
+ };
60
+ remote.onclose = () => ws.close();
61
+ remote.onerror = () => ws.close();
62
+ },
63
+ message(ws: ServerWebSocket<ProxyWsData>, raw: string | Buffer) {
64
+ const msg = typeof raw === 'string' ? raw : raw.toString();
65
+ const remote = ws.data.remote;
66
+ if (remote?.readyState === WebSocket.OPEN) {
67
+ remote.send(msg);
68
+ } else {
69
+ ws.data.queue.push(msg);
70
+ }
71
+ },
72
+ close(ws: ServerWebSocket<ProxyWsData>) {
73
+ ws.data.remote?.close();
74
+ },
75
+ },
76
+ });
77
+
78
+ console.log(`BodDB Admin Proxy → http://localhost:${server.port}`);
79
+ console.log(`Remote: ${remoteUrl}`);
package/admin/rules.ts CHANGED
@@ -6,7 +6,7 @@ import type { PathRule } from '../src/server/RulesEngine.ts';
6
6
  * Context: { auth, path, params, data, newData }
7
7
  */
8
8
  export const rules: Record<string, PathRule> = {
9
- '_admin/$any': { read: false, write: false },
9
+ '_admin/$any': { read: true, write: false }, // NOTE(review): this opens _admin reads to all unauthenticated clients — confirm intended (admin UI proxy use case?)
10
10
  'users/$uid': { write: ({ auth, params }) => auth?.role === 'admin' || ['alice', 'bob'].includes(params.uid) },
11
11
  'settings/$key': { write: ({ auth }) => !!auth },
12
12
  };