@bod.ee/db 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/.claude/settings.local.json +23 -0
  2. package/.claude/skills/config-file.md +54 -0
  3. package/.claude/skills/deploying-bod-db.md +29 -0
  4. package/.claude/skills/developing-bod-db.md +127 -0
  5. package/.claude/skills/using-bod-db.md +403 -0
  6. package/CLAUDE.md +110 -0
  7. package/README.md +252 -0
  8. package/admin/rules.ts +12 -0
  9. package/admin/server.ts +523 -0
  10. package/admin/ui.html +2281 -0
  11. package/cli.ts +177 -0
  12. package/client.ts +2 -0
  13. package/config.ts +20 -0
  14. package/deploy/.env.example +1 -0
  15. package/deploy/base.yaml +18 -0
  16. package/deploy/boddb-logs.yaml +10 -0
  17. package/deploy/boddb.yaml +10 -0
  18. package/deploy/demo.html +196 -0
  19. package/deploy/deploy.ts +32 -0
  20. package/deploy/prod-logs.config.ts +15 -0
  21. package/deploy/prod.config.ts +15 -0
  22. package/index.ts +20 -0
  23. package/mcp.ts +78 -0
  24. package/package.json +29 -0
  25. package/react.ts +1 -0
  26. package/src/client/BodClient.ts +515 -0
  27. package/src/react/hooks.ts +121 -0
  28. package/src/server/BodDB.ts +319 -0
  29. package/src/server/ExpressionRules.ts +250 -0
  30. package/src/server/FTSEngine.ts +76 -0
  31. package/src/server/FileAdapter.ts +116 -0
  32. package/src/server/MCPAdapter.ts +409 -0
  33. package/src/server/MQEngine.ts +286 -0
  34. package/src/server/QueryEngine.ts +45 -0
  35. package/src/server/RulesEngine.ts +108 -0
  36. package/src/server/StorageEngine.ts +464 -0
  37. package/src/server/StreamEngine.ts +320 -0
  38. package/src/server/SubscriptionEngine.ts +120 -0
  39. package/src/server/Transport.ts +479 -0
  40. package/src/server/VectorEngine.ts +115 -0
  41. package/src/shared/errors.ts +15 -0
  42. package/src/shared/pathUtils.ts +94 -0
  43. package/src/shared/protocol.ts +59 -0
  44. package/src/shared/transforms.ts +99 -0
  45. package/tests/batch.test.ts +60 -0
  46. package/tests/bench.ts +205 -0
  47. package/tests/e2e.test.ts +284 -0
  48. package/tests/expression-rules.test.ts +114 -0
  49. package/tests/file-adapter.test.ts +57 -0
  50. package/tests/fts.test.ts +58 -0
  51. package/tests/mq-flow.test.ts +204 -0
  52. package/tests/mq.test.ts +326 -0
  53. package/tests/push.test.ts +55 -0
  54. package/tests/query.test.ts +60 -0
  55. package/tests/rules.test.ts +78 -0
  56. package/tests/sse.test.ts +78 -0
  57. package/tests/storage.test.ts +199 -0
  58. package/tests/stream.test.ts +385 -0
  59. package/tests/stress.test.ts +202 -0
  60. package/tests/subscriptions.test.ts +86 -0
  61. package/tests/transforms.test.ts +92 -0
  62. package/tests/transport.test.ts +209 -0
  63. package/tests/ttl.test.ts +70 -0
  64. package/tests/vector.test.ts +69 -0
  65. package/tsconfig.json +27 -0
@@ -0,0 +1,385 @@
1
+ import { describe, test, expect, beforeEach, afterEach } from 'bun:test';
2
+ import { BodDB } from '../src/server/BodDB.ts';
3
+ import { BodClient } from '../src/client/BodClient.ts';
4
+
5
+ describe('StreamEngine', () => {
6
+ let db: BodDB;
7
+
8
+ beforeEach(() => {
9
+ db = new BodDB({ path: ':memory:', sweepInterval: 0 });
10
+ });
11
+
12
+ afterEach(() => {
13
+ db.close();
14
+ });
15
+
16
+ test('read returns events after offset', () => {
17
+ // Push 10 events
18
+ const keys: string[] = [];
19
+ for (let i = 0; i < 10; i++) {
20
+ keys.push(db.push('events/orders', { i }));
21
+ }
22
+
23
+ // Read all from group with no offset
24
+ const all = db.stream.read('events/orders', 'g1');
25
+ expect(all.length).toBe(10);
26
+ expect((all[0].data as { i: number }).i).toBe(0);
27
+
28
+ // Ack up to key 5
29
+ db.stream.ack('events/orders', 'g1', keys[4]);
30
+
31
+ // Read again — should get 5-9
32
+ const remaining = db.stream.read('events/orders', 'g1');
33
+ expect(remaining.length).toBe(5);
34
+ expect((remaining[0].data as { i: number }).i).toBe(5);
35
+ });
36
+
37
+ test('independent offsets per group', () => {
38
+ for (let i = 0; i < 5; i++) {
39
+ db.push('events/orders', { i });
40
+ }
41
+
42
+ const allG1 = db.stream.read('events/orders', 'g1');
43
+ const allG2 = db.stream.read('events/orders', 'g2');
44
+ expect(allG1.length).toBe(5);
45
+ expect(allG2.length).toBe(5);
46
+
47
+ // Ack all for g1
48
+ db.stream.ack('events/orders', 'g1', allG1[4].key);
49
+
50
+ // g1 should be empty, g2 still has all
51
+ expect(db.stream.read('events/orders', 'g1').length).toBe(0);
52
+ expect(db.stream.read('events/orders', 'g2').length).toBe(5);
53
+ });
54
+
55
+ test('subscribe replays and receives live events', () => {
56
+ // Push 3 events before subscribing
57
+ for (let i = 0; i < 3; i++) {
58
+ db.push('events/orders', { i });
59
+ }
60
+
61
+ const received: Array<{ key: string; data: unknown }> = [];
62
+ const unsub = db.stream.subscribe('events/orders', 'g1', (events) => {
63
+ received.push(...events);
64
+ });
65
+
66
+ // Should have received replay of 3
67
+ expect(received.length).toBe(3);
68
+
69
+ // Push live event
70
+ db.push('events/orders', { i: 3 });
71
+ expect(received.length).toBe(4);
72
+ expect((received[3].data as { i: number }).i).toBe(3);
73
+
74
+ unsub();
75
+
76
+ // After unsub, no more events
77
+ db.push('events/orders', { i: 4 });
78
+ expect(received.length).toBe(4);
79
+ });
80
+
81
+ test('subscribe resumes from last acked offset', () => {
82
+ const keys: string[] = [];
83
+ for (let i = 0; i < 5; i++) {
84
+ keys.push(db.push('events/orders', { i }));
85
+ }
86
+
87
+ // Ack up to 3rd event
88
+ db.stream.ack('events/orders', 'g1', keys[2]);
89
+
90
+ const received: Array<{ key: string; data: unknown }> = [];
91
+ const unsub = db.stream.subscribe('events/orders', 'g1', (events) => {
92
+ received.push(...events);
93
+ });
94
+
95
+ // Should replay only events 3 and 4
96
+ expect(received.length).toBe(2);
97
+ expect((received[0].data as { i: number }).i).toBe(3);
98
+
99
+ unsub();
100
+ });
101
+ });
102
+
103
+ describe('Stream compaction (snapshot model)', () => {
104
+ let db: BodDB;
105
+
106
+ beforeEach(() => {
107
+ db = new BodDB({ path: ':memory:', sweepInterval: 0 });
108
+ });
109
+
110
+ afterEach(() => {
111
+ db.close();
112
+ });
113
+
114
+ test('compact maxCount folds excess into snapshot', () => {
115
+ for (let i = 0; i < 20; i++) {
116
+ db.push('events/orders', { i });
117
+ }
118
+
119
+ const result = db.stream.compact('events/orders', { maxCount: 5 });
120
+ expect(result.deleted).toBe(15);
121
+ // snapshotSize = number of keys in merged state (all events have same `i` key, so 1)
122
+ expect(result.snapshotSize).toBe(1);
123
+
124
+ // Only 5 events remain as live rows
125
+ const remaining = db.stream.read('events/orders', 'check');
126
+ expect(remaining.length).toBe(5);
127
+ expect((remaining[0].data as { i: number }).i).toBe(15);
128
+
129
+ // Snapshot is merged final state of folded events
130
+ const snap = db.stream.snapshot('events/orders');
131
+ expect(snap).not.toBeNull();
132
+ // All 15 events had { i: N } — merged into single object, last write wins
133
+ expect(snap!.data.i).toBe(14);
134
+ });
135
+
136
+ test('compact maxAge folds old events into snapshot', () => {
137
+ for (let i = 0; i < 10; i++) {
138
+ db.push('events/orders', { i });
139
+ }
140
+
141
+ // Backdate first 6 events to 2 hours ago
142
+ const rows = db.storage.db
143
+ .prepare("SELECT path FROM nodes WHERE path LIKE 'events/orders/%' ORDER BY path ASC")
144
+ .all() as Array<{ path: string }>;
145
+ const twoHoursAgo = Date.now() - 7200 * 1000;
146
+ for (let i = 0; i < 6; i++) {
147
+ db.storage.db.run('UPDATE nodes SET updated_at = ? WHERE path = ?', [twoHoursAgo, rows[i].path]);
148
+ }
149
+
150
+ const result = db.stream.compact('events/orders', { maxAge: 3600 });
151
+ expect(result.deleted).toBe(6);
152
+ expect(db.stream.read('events/orders', 'check').length).toBe(4);
153
+
154
+ const snap = db.stream.snapshot('events/orders');
155
+ // 6 events with { i: 0..5 } merged — last write wins
156
+ expect(snap!.data.i).toBe(5);
157
+ });
158
+
159
+ test('compact keepKey folds duplicates into snapshot, keyed by field', () => {
160
+ db.push('events/orders', { orderId: 'o1', status: 'pending' });
161
+ db.push('events/orders', { orderId: 'o2', status: 'pending' });
162
+ db.push('events/orders', { orderId: 'o1', status: 'completed' });
163
+ db.push('events/orders', { orderId: 'o2', status: 'shipped' });
164
+ db.push('events/orders', { orderId: 'o3', status: 'new' });
165
+
166
+ const result = db.stream.compact('events/orders', { keepKey: 'orderId' });
167
+ expect(result.deleted).toBe(2); // old o1 + old o2
168
+
169
+ // Snapshot keyed by orderId
170
+ const snap = db.stream.snapshot('events/orders');
171
+ expect(snap).not.toBeNull();
172
+ expect((snap!.data['o1'] as any).status).toBe('pending'); // first o1 folded
173
+ expect((snap!.data['o2'] as any).status).toBe('pending'); // first o2 folded
174
+
175
+ // 3 events remain live
176
+ const remaining = db.stream.read('events/orders', 'check');
177
+ expect(remaining.length).toBe(3);
178
+ });
179
+
180
+ test('materialize merges snapshot + live events', () => {
181
+ db.push('events/orders', { orderId: 'o1', status: 'pending' });
182
+ db.push('events/orders', { orderId: 'o2', status: 'pending' });
183
+ db.push('events/orders', { orderId: 'o1', status: 'completed' });
184
+
185
+ db.stream.compact('events/orders', { keepKey: 'orderId' });
186
+
187
+ // Add more events on top
188
+ db.push('events/orders', { orderId: 'o2', status: 'shipped' });
189
+
190
+ const view = db.stream.materialize('events/orders', { keepKey: 'orderId' });
191
+ expect((view['o1'] as any).status).toBe('completed'); // from live event (survived compact)
192
+ expect((view['o2'] as any).status).toBe('shipped'); // from new event on top
193
+ });
194
+
195
+ test('compact respects consumer group offsets', () => {
196
+ const keys: string[] = [];
197
+ for (let i = 0; i < 10; i++) {
198
+ keys.push(db.push('events/orders', { i }));
199
+ }
200
+
201
+ db.stream.ack('events/orders', 'g1', keys[3]);
202
+
203
+ const result = db.stream.compact('events/orders', { maxCount: 2 });
204
+ // Only events 0-3 are acked, so only those can be folded
205
+ expect(result.deleted).toBe(4);
206
+ expect(db.stream.read('events/orders', 'g1').length).toBe(6);
207
+
208
+ // Snapshot is merged state of 4 folded events { i: 0..3 }
209
+ expect(db.stream.snapshot('events/orders')!.data.i).toBe(3);
210
+ });
211
+
212
+ test('auto-compact runs on sweep', () => {
213
+ const compactDb = new BodDB({
214
+ path: ':memory:',
215
+ sweepInterval: 0,
216
+ compact: { 'events/orders': { maxCount: 3 } },
217
+ });
218
+
219
+ for (let i = 0; i < 10; i++) {
220
+ compactDb.push('events/orders', { i });
221
+ }
222
+
223
+ compactDb.sweep();
224
+
225
+ const remaining = compactDb.stream.read('events/orders', 'check');
226
+ expect(remaining.length).toBe(3);
227
+ expect((remaining[0].data as { i: number }).i).toBe(7);
228
+
229
+ const snap = compactDb.stream.snapshot('events/orders');
230
+ // Events i=0..6 merged, last write wins → i=6
231
+ expect(snap!.data.i).toBe(6);
232
+
233
+ compactDb.close();
234
+ });
235
+
236
+ test('incremental compaction merges into existing snapshot', () => {
237
+ for (let i = 0; i < 10; i++) {
238
+ db.push('events/orders', { i });
239
+ }
240
+
241
+ // First compact: keep 5 → folds events i=0..4 into snapshot
242
+ db.stream.compact('events/orders', { maxCount: 5 });
243
+ expect(db.stream.snapshot('events/orders')!.data.i).toBe(4);
244
+
245
+ // Push 5 more
246
+ for (let i = 10; i < 15; i++) {
247
+ db.push('events/orders', { i });
248
+ }
249
+
250
+ // Second compact: keep 3 → folds more into snapshot
251
+ db.stream.compact('events/orders', { maxCount: 3 });
252
+ const snap = db.stream.snapshot('events/orders');
253
+ // All merged: last folded event wins → i=11 (events 0..11 folded, 12-14 remain)
254
+ expect(snap!.data.i).toBe(11);
255
+ expect(db.stream.read('events/orders', 'check').length).toBe(3);
256
+ });
257
+ });
258
+
259
+ describe('Idempotent push', () => {
260
+ let db: BodDB;
261
+
262
+ beforeEach(() => {
263
+ db = new BodDB({ path: ':memory:', sweepInterval: 0 });
264
+ });
265
+
266
+ afterEach(() => {
267
+ db.close();
268
+ });
269
+
270
+ test('same idempotencyKey returns same key, no duplicate', () => {
271
+ const key1 = db.push('events/orders', { order: 1 }, { idempotencyKey: 'idem-1' });
272
+ const key2 = db.push('events/orders', { order: 2 }, { idempotencyKey: 'idem-1' });
273
+
274
+ expect(key1).toBe(key2);
275
+
276
+ // Only one row
277
+ const all = db.stream.read('events/orders', 'check');
278
+ expect(all.length).toBe(1);
279
+ expect((all[0].data as { order: number }).order).toBe(1);
280
+ });
281
+
282
+ test('different idempotencyKeys create separate rows', () => {
283
+ db.push('events/orders', { a: 1 }, { idempotencyKey: 'k1' });
284
+ db.push('events/orders', { a: 2 }, { idempotencyKey: 'k2' });
285
+
286
+ const all = db.stream.read('events/orders', 'check');
287
+ expect(all.length).toBe(2);
288
+ });
289
+ });
290
+
291
+ describe('Stream over transport', () => {
292
+ let db: BodDB;
293
+ let client: BodClient;
294
+
295
+ beforeEach(async () => {
296
+ db = new BodDB({ path: ':memory:', sweepInterval: 0, port: 0 });
297
+ const server = db.serve();
298
+ client = new BodClient({ url: `ws://localhost:${server.port}` });
299
+ await client.connect();
300
+ });
301
+
302
+ afterEach(() => {
303
+ client.disconnect();
304
+ db.close();
305
+ });
306
+
307
+ test('push 100 events, stream-read gets all', async () => {
308
+ for (let i = 0; i < 100; i++) {
309
+ await client.push('events/orders', { i });
310
+ }
311
+
312
+ const reader = client.stream('events/orders', 'billing');
313
+ const events = await reader.read();
314
+ expect(events.length).toBe(100);
315
+ expect(events[0].val()).toEqual({ i: 0 });
316
+ expect(events[99].val()).toEqual({ i: 99 });
317
+ });
318
+
319
+ test('ack 50, read gets remaining 50', async () => {
320
+ const keys: string[] = [];
321
+ for (let i = 0; i < 100; i++) {
322
+ keys.push(await client.push('events/orders', { i }));
323
+ }
324
+
325
+ const reader = client.stream('events/orders', 'billing');
326
+ await reader.ack(keys[49]);
327
+
328
+ const events = await reader.read();
329
+ expect(events.length).toBe(50);
330
+ expect(events[0].val()).toEqual({ i: 50 });
331
+ });
332
+
333
+ test('stream-sub receives live events', async () => {
334
+ const reader = client.stream('events/orders', 'billing');
335
+ const received: Array<{ key: string; val: () => unknown }> = [];
336
+
337
+ const unsub = reader.on((events) => {
338
+ received.push(...events);
339
+ });
340
+
341
+ // Give time for sub to register
342
+ await new Promise(r => setTimeout(r, 50));
343
+
344
+ // Push events server-side
345
+ db.push('events/orders', { live: 1 });
346
+ db.push('events/orders', { live: 2 });
347
+
348
+ await new Promise(r => setTimeout(r, 50));
349
+
350
+ expect(received.length).toBe(2);
351
+ expect(received[0].val()).toEqual({ live: 1 });
352
+
353
+ unsub();
354
+ });
355
+
356
+ test('idempotent push over transport', async () => {
357
+ const key1 = await client.push('events/orders', { a: 1 }, { idempotencyKey: 'dedup-1' });
358
+ const key2 = await client.push('events/orders', { a: 2 }, { idempotencyKey: 'dedup-1' });
359
+
360
+ expect(key1).toBe(key2);
361
+
362
+ const events = await client.stream('events/orders', 'check').read();
363
+ expect(events.length).toBe(1);
364
+ });
365
+
366
+ test('two groups independent offsets over transport', async () => {
367
+ for (let i = 0; i < 10; i++) {
368
+ await client.push('events/orders', { i });
369
+ }
370
+
371
+ const r1 = client.stream('events/orders', 'g1');
372
+ const r2 = client.stream('events/orders', 'g2');
373
+
374
+ const e1 = await r1.read();
375
+ expect(e1.length).toBe(10);
376
+
377
+ // Ack all for g1
378
+ await r1.ack(e1[9].key);
379
+
380
+ const e1After = await r1.read();
381
+ const e2After = await r2.read();
382
+ expect(e1After.length).toBe(0);
383
+ expect(e2After.length).toBe(10);
384
+ });
385
+ });
@@ -0,0 +1,202 @@
1
+ import { describe, test, expect } from 'bun:test';
2
+ import { BodDB } from '../src/server/BodDB.ts';
3
+
4
// Builds a throwaway in-memory instance; each test is responsible for close().
function makeDB() {
  return new BodDB({ path: ':memory:' });
}
5
+
6
+ describe('Stress: Sequential Writes', () => {
7
+ test('1000 sequential set ops complete and are readable', () => {
8
+ const db = makeDB();
9
+ const N = 1000;
10
+ for (let i = 0; i < N; i++) db.set(`seq/item${i}`, { v: i, s: `str${i}` });
11
+ expect(db.get('seq/item0')).toEqual({ v: 0, s: 'str0' });
12
+ expect(db.get(`seq/item${N - 1}`)).toEqual({ v: N - 1, s: `str${N - 1}` });
13
+ db.close();
14
+ });
15
+
16
+ test('throughput: 5000 writes finish in < 5s', () => {
17
+ const db = makeDB();
18
+ const N = 5000;
19
+ const t0 = performance.now();
20
+ for (let i = 0; i < N; i++) db.set(`tput/k${i}`, i);
21
+ const elapsed = performance.now() - t0;
22
+ expect(elapsed).toBeLessThan(5000);
23
+ console.log(` 5000 writes: ${elapsed.toFixed(0)}ms (${Math.round(N / (elapsed / 1000)).toLocaleString()} ops/s)`);
24
+ db.close();
25
+ });
26
+ });
27
+
28
+ describe('Stress: Burst Reads', () => {
29
+ test('1000 reads of same path return consistent result', () => {
30
+ const db = makeDB();
31
+ db.set('read/target', { hello: 'world', n: 42 });
32
+ for (let i = 0; i < 1000; i++) {
33
+ expect(db.get('read/target')).toEqual({ hello: 'world', n: 42 });
34
+ }
35
+ db.close();
36
+ });
37
+
38
+ test('throughput: 10000 reads finish in < 2s', () => {
39
+ const db = makeDB();
40
+ db.set('read/x', { a: 1, b: 2, c: 3 });
41
+ const N = 10000;
42
+ const t0 = performance.now();
43
+ for (let i = 0; i < N; i++) db.get('read/x');
44
+ const elapsed = performance.now() - t0;
45
+ expect(elapsed).toBeLessThan(2000);
46
+ console.log(` 10000 reads: ${elapsed.toFixed(0)}ms (${Math.round(N / (elapsed / 1000)).toLocaleString()} ops/s)`);
47
+ db.close();
48
+ });
49
+ });
50
+
51
+ describe('Stress: Mixed Read/Write', () => {
52
+ test('interleaved reads and writes produce correct results', () => {
53
+ const db = makeDB();
54
+ const N = 500;
55
+ for (let i = 0; i < N; i++) {
56
+ db.set(`mixed/k${i % 50}`, { v: i });
57
+ expect((db.get(`mixed/k${i % 50}`) as any).v).toBe(i);
58
+ }
59
+ db.close();
60
+ });
61
+
62
+ test('throughput: 2000 mixed ops finish in < 3s', () => {
63
+ const db = makeDB();
64
+ db.set('mixed/seed', { x: 0 });
65
+ const N = 2000;
66
+ const t0 = performance.now();
67
+ for (let i = 0; i < N; i++) {
68
+ if (i % 3 === 0) db.set(`mixed/k${i % 100}`, i);
69
+ else db.get(`mixed/k${i % 100}`);
70
+ }
71
+ const elapsed = performance.now() - t0;
72
+ expect(elapsed).toBeLessThan(3000);
73
+ console.log(` 2000 mixed ops: ${elapsed.toFixed(0)}ms`);
74
+ db.close();
75
+ });
76
+ });
77
+
78
+ describe('Stress: Bulk Update', () => {
79
+ test('update with 200 keys is atomic and readable', () => {
80
+ const db = makeDB();
81
+ const updates: Record<string, unknown> = {};
82
+ for (let i = 0; i < 200; i++) updates[`bulk/k${i}`] = { v: i };
83
+ db.update(updates);
84
+ for (let i = 0; i < 200; i++) expect((db.get(`bulk/k${i}`) as any).v).toBe(i);
85
+ db.close();
86
+ });
87
+
88
+ test('20 batches × 100 keys each: all values survive', () => {
89
+ const db = makeDB();
90
+ for (let b = 0; b < 20; b++) {
91
+ const updates: Record<string, unknown> = {};
92
+ for (let k = 0; k < 100; k++) updates[`batch/k${k}`] = b * 100 + k;
93
+ db.update(updates);
94
+ }
95
+ // last batch wins
96
+ for (let k = 0; k < 100; k++) expect(db.get(`batch/k${k}`)).toBe(19 * 100 + k);
97
+ db.close();
98
+ });
99
+
100
+ test('throughput: update 500 keys in < 500ms', () => {
101
+ const db = makeDB();
102
+ const updates: Record<string, unknown> = {};
103
+ for (let i = 0; i < 500; i++) updates[`tput/k${i}`] = { i, ts: Date.now() };
104
+ const t0 = performance.now();
105
+ db.update(updates);
106
+ const elapsed = performance.now() - t0;
107
+ expect(elapsed).toBeLessThan(500);
108
+ console.log(` update(500 keys): ${elapsed.toFixed(1)}ms`);
109
+ db.close();
110
+ });
111
+ });
112
+
113
+ describe('Stress: Query Under Load', () => {
114
+ test('query filters 1000-record dataset correctly', () => {
115
+ const db = makeDB();
116
+ const N = 1000;
117
+ for (let i = 0; i < N; i++) db.set(`users/u${i}`, { score: i % 100, role: i % 4 === 0 ? 'admin' : 'user' });
118
+ const admins = db.query('users').where('role', '==', 'admin').get();
119
+ expect(admins.length).toBe(250);
120
+ const top = db.query('users').where('score', '>=', 95).order('score', 'desc').limit(10).get();
121
+ expect(top.length).toBeLessThanOrEqual(10);
122
+ top.forEach(r => expect(r.score).toBeGreaterThanOrEqual(95));
123
+ db.close();
124
+ });
125
+
126
+ test('throughput: 50 queries on 500 records in < 2s', () => {
127
+ const db = makeDB();
128
+ for (let i = 0; i < 500; i++) db.set(`q/u${i}`, { score: i % 100, role: i % 3 === 0 ? 'admin' : 'user' });
129
+ const t0 = performance.now();
130
+ for (let i = 0; i < 50; i++) db.query('q').where('role', '==', 'admin').order('score', 'desc').limit(10).get();
131
+ const elapsed = performance.now() - t0;
132
+ expect(elapsed).toBeLessThan(2000);
133
+ console.log(` 50 queries (500 records): ${elapsed.toFixed(0)}ms (${(elapsed / 50).toFixed(1)}ms/query)`);
134
+ db.close();
135
+ });
136
+ });
137
+
138
+ describe('Stress: Deep Path Writes', () => {
139
+ test('writes and reads at depth 8 are correct', () => {
140
+ const db = makeDB();
141
+ const path = 'a/b/c/d/e/f/g/h';
142
+ db.set(path, { deep: true, val: 42 });
143
+ expect(db.get(path)).toEqual({ deep: true, val: 42 });
144
+ expect(db.get('a/b/c/d/e/f/g')).toEqual({ h: { deep: true, val: 42 } });
145
+ db.close();
146
+ });
147
+
148
+ test('200 unique deep paths are all accessible', () => {
149
+ const db = makeDB();
150
+ const N = 200;
151
+ for (let i = 0; i < N; i++) {
152
+ const segs = Array.from({ length: 5 }, (_, d) => `l${d}_${i % (5 + d)}`);
153
+ db.set('deep/' + segs.join('/'), i);
154
+ }
155
+ // spot-check: path for i=0 is consistent
156
+ const segs0 = Array.from({ length: 5 }, (_, d) => `l${d}_${0 % (5 + d)}`);
157
+ expect(db.get('deep/' + segs0.join('/'))).toBe(0);
158
+ db.close();
159
+ });
160
+
161
+ test('throughput: 500 depth-6 writes in < 2s', () => {
162
+ const db = makeDB();
163
+ const N = 500;
164
+ const t0 = performance.now();
165
+ for (let i = 0; i < N; i++) {
166
+ const segs = Array.from({ length: 6 }, (_, d) => `n${d}_${i % (8 + d)}`);
167
+ db.set('dp/' + segs.join('/'), i);
168
+ }
169
+ const elapsed = performance.now() - t0;
170
+ expect(elapsed).toBeLessThan(2000);
171
+ console.log(` 500 depth-6 writes: ${elapsed.toFixed(0)}ms`);
172
+ db.close();
173
+ });
174
+ });
175
+
176
+ describe('Stress: Subscription Fan-out', () => {
177
+ test('10 listeners all receive events', async () => {
178
+ const db = makeDB();
179
+ const received: number[] = Array(10).fill(0);
180
+ const unsubs = Array.from({ length: 10 }, (_, i) =>
181
+ db.on('fan/target', () => { received[i]++; })
182
+ );
183
+ db.set('fan/target', { v: 1 });
184
+ db.set('fan/target', { v: 2 });
185
+ db.set('fan/target', { v: 3 });
186
+ await Bun.sleep(20);
187
+ unsubs.forEach(u => u());
188
+ received.forEach(c => expect(c).toBeGreaterThanOrEqual(1));
189
+ db.close();
190
+ });
191
+
192
+ test('100 rapid writes to subscribed path do not drop or error', async () => {
193
+ const db = makeDB();
194
+ let count = 0;
195
+ const unsub = db.on('rapid/path', () => { count++; });
196
+ for (let i = 0; i < 100; i++) db.set('rapid/path', i);
197
+ await Bun.sleep(50);
198
+ unsub();
199
+ expect(count).toBeGreaterThanOrEqual(1);
200
+ db.close();
201
+ });
202
+ });
@@ -0,0 +1,86 @@
1
+ import { describe, test, expect } from 'bun:test';
2
+ import { BodDB } from '../src/server/BodDB.ts';
3
+
4
+ describe('Subscriptions', () => {
5
+ test('fires on exact path write', () => {
6
+ const db = new BodDB();
7
+ const events: unknown[] = [];
8
+ db.on('users/u1', (snap) => events.push(snap.val()));
9
+ db.set('users/u1', { name: 'Alice' });
10
+ expect(events).toEqual([{ name: 'Alice' }]);
11
+ db.close();
12
+ });
13
+
14
+ test('fires on ancestor when child changes', () => {
15
+ const db = new BodDB();
16
+ const events: unknown[] = [];
17
+ db.on('users', (snap) => events.push(snap.val()));
18
+ db.set('users/u1', { name: 'Alice' });
19
+ expect(events.length).toBe(1);
20
+ expect(events[0]).toEqual({ u1: { name: 'Alice' } });
21
+ db.close();
22
+ });
23
+
24
+ test('unsubscribe stops notifications', () => {
25
+ const db = new BodDB();
26
+ const events: unknown[] = [];
27
+ const off = db.on('users/u1', (snap) => events.push(snap.val()));
28
+ db.set('users/u1', { name: 'Alice' });
29
+ off();
30
+ db.set('users/u1', { name: 'Bob' });
31
+ expect(events.length).toBe(1);
32
+ db.close();
33
+ });
34
+
35
+ test('child event: added for new children', () => {
36
+ const db = new BodDB();
37
+ const events: Array<{ type: string; key: string }> = [];
38
+ db.onChild('users', (e) => events.push({ type: e.type, key: e.key }));
39
+ db.set('users/u1', { name: 'Alice' });
40
+ db.set('users/u2', { name: 'Bob' });
41
+ expect(events).toEqual([
42
+ { type: 'added', key: 'u1' },
43
+ { type: 'added', key: 'u2' },
44
+ ]);
45
+ db.close();
46
+ });
47
+
48
+ test('child event: changed for existing children', () => {
49
+ const db = new BodDB();
50
+ db.set('users/u1', { name: 'Alice' });
51
+ const events: Array<{ type: string; key: string }> = [];
52
+ db.onChild('users', (e) => events.push({ type: e.type, key: e.key }));
53
+ db.set('users/u1', { name: 'Bob' });
54
+ expect(events).toEqual([{ type: 'changed', key: 'u1' }]);
55
+ db.close();
56
+ });
57
+
58
+ test('child event: removed on delete', () => {
59
+ const db = new BodDB();
60
+ db.set('users/u1', { name: 'Alice' });
61
+ const events: Array<{ type: string; key: string }> = [];
62
+ db.onChild('users', (e) => events.push({ type: e.type, key: e.key }));
63
+ db.delete('users/u1');
64
+ expect(events).toEqual([{ type: 'removed', key: 'u1' }]);
65
+ db.close();
66
+ });
67
+
68
+ test('delete fires subscription with null', () => {
69
+ const db = new BodDB();
70
+ db.set('users/u1', { name: 'Alice' });
71
+ const events: unknown[] = [];
72
+ db.on('users/u1', (snap) => events.push(snap.val()));
73
+ db.delete('users/u1');
74
+ expect(events[0]).toBeNull();
75
+ db.close();
76
+ });
77
+
78
+ test('multi-path update fires deduped notifications', () => {
79
+ const db = new BodDB();
80
+ const events: string[] = [];
81
+ db.on('users', () => events.push('users'));
82
+ db.update({ 'users/u1/name': 'Alice', 'users/u2/name': 'Bob' });
83
+ expect(events.length).toBe(1);
84
+ db.close();
85
+ });
86
+ });