@bod.ee/db 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +23 -0
- package/.claude/skills/config-file.md +54 -0
- package/.claude/skills/deploying-bod-db.md +29 -0
- package/.claude/skills/developing-bod-db.md +127 -0
- package/.claude/skills/using-bod-db.md +403 -0
- package/CLAUDE.md +110 -0
- package/README.md +252 -0
- package/admin/rules.ts +12 -0
- package/admin/server.ts +523 -0
- package/admin/ui.html +2281 -0
- package/cli.ts +177 -0
- package/client.ts +2 -0
- package/config.ts +20 -0
- package/deploy/.env.example +1 -0
- package/deploy/base.yaml +18 -0
- package/deploy/boddb-logs.yaml +10 -0
- package/deploy/boddb.yaml +10 -0
- package/deploy/demo.html +196 -0
- package/deploy/deploy.ts +32 -0
- package/deploy/prod-logs.config.ts +15 -0
- package/deploy/prod.config.ts +15 -0
- package/index.ts +20 -0
- package/mcp.ts +78 -0
- package/package.json +29 -0
- package/react.ts +1 -0
- package/src/client/BodClient.ts +515 -0
- package/src/react/hooks.ts +121 -0
- package/src/server/BodDB.ts +319 -0
- package/src/server/ExpressionRules.ts +250 -0
- package/src/server/FTSEngine.ts +76 -0
- package/src/server/FileAdapter.ts +116 -0
- package/src/server/MCPAdapter.ts +409 -0
- package/src/server/MQEngine.ts +286 -0
- package/src/server/QueryEngine.ts +45 -0
- package/src/server/RulesEngine.ts +108 -0
- package/src/server/StorageEngine.ts +464 -0
- package/src/server/StreamEngine.ts +320 -0
- package/src/server/SubscriptionEngine.ts +120 -0
- package/src/server/Transport.ts +479 -0
- package/src/server/VectorEngine.ts +115 -0
- package/src/shared/errors.ts +15 -0
- package/src/shared/pathUtils.ts +94 -0
- package/src/shared/protocol.ts +59 -0
- package/src/shared/transforms.ts +99 -0
- package/tests/batch.test.ts +60 -0
- package/tests/bench.ts +205 -0
- package/tests/e2e.test.ts +284 -0
- package/tests/expression-rules.test.ts +114 -0
- package/tests/file-adapter.test.ts +57 -0
- package/tests/fts.test.ts +58 -0
- package/tests/mq-flow.test.ts +204 -0
- package/tests/mq.test.ts +326 -0
- package/tests/push.test.ts +55 -0
- package/tests/query.test.ts +60 -0
- package/tests/rules.test.ts +78 -0
- package/tests/sse.test.ts +78 -0
- package/tests/storage.test.ts +199 -0
- package/tests/stream.test.ts +385 -0
- package/tests/stress.test.ts +202 -0
- package/tests/subscriptions.test.ts +86 -0
- package/tests/transforms.test.ts +92 -0
- package/tests/transport.test.ts +209 -0
- package/tests/ttl.test.ts +70 -0
- package/tests/vector.test.ts +69 -0
- package/tsconfig.json +27 -0
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
import { describe, test, expect } from 'bun:test';
|
|
2
|
+
import { BodDB } from '../src/server/BodDB.ts';
|
|
3
|
+
|
|
4
|
+
// End-to-end integration tests for BodDB's message-queue (MQ) subsystem.
// Every test builds its own isolated instance with path ':memory:' and
// sweepInterval: 0 — tests invoke db.mq.sweep() explicitly instead of relying
// on a periodic sweep, so timing is fully deterministic.
describe('MQ end-to-end flow', () => {
  test('full worker lifecycle: push → fetch → process → ack/nack → retry → DLQ', () => {
    // visibilityTimeout: 2, maxDeliveries: 3 — a message that is delivered 3
    // times without an ack is expected to land in the DLQ (asserted below).
    const db = new BodDB({
      path: ':memory:',
      sweepInterval: 0,
      mq: { visibilityTimeout: 2, maxDeliveries: 3 },
    });

    // ── Producer pushes 5 jobs ──
    const keys: string[] = [];
    for (let i = 1; i <= 5; i++) {
      keys.push(db.mq.push('queues/jobs', { task: `job-${i}`, attempt: 0 }));
    }
    console.log(`\n📥 Pushed 5 jobs: ${keys.join(', ')}`);

    // peek is non-destructive: all 5 remain 'pending'.
    let peek = db.mq.peek('queues/jobs', 10);
    console.log(`📋 Peek: ${peek.length} messages, all ${peek.map(m => m.status).join(', ')}`);
    expect(peek).toHaveLength(5);
    expect(peek.every(m => m.status === 'pending')).toBe(true);

    // ── Worker 1 fetches batch of 3 ──
    const batch1 = db.mq.fetch('queues/jobs', 3);
    console.log(`\n👷 Worker 1 claimed ${batch1.length} jobs: ${batch1.map(m => m.key).join(', ')}`);
    expect(batch1).toHaveLength(3);
    expect(batch1.every(m => m.status === 'inflight')).toBe(true);
    expect(batch1.every(m => m.deliveryCount === 1)).toBe(true);

    // ── Worker 2 fetches — gets remaining 2 ──
    const batch2 = db.mq.fetch('queues/jobs', 5);
    console.log(`👷 Worker 2 claimed ${batch2.length} jobs: ${batch2.map(m => m.key).join(', ')}`);
    expect(batch2).toHaveLength(2);

    // ── Worker 3 fetches — gets nothing (all inflight) ──
    const batch3 = db.mq.fetch('queues/jobs', 5);
    console.log(`👷 Worker 3 claimed ${batch3.length} jobs (none available)`);
    expect(batch3).toHaveLength(0);

    // ── Peek now shows all 5 as inflight ──
    peek = db.mq.peek('queues/jobs', 10);
    console.log(`📋 Peek: ${peek.map(m => `${m.key.slice(0,6)}=${m.status}`).join(', ')}`);
    expect(peek.every(m => m.status === 'inflight')).toBe(true);

    // ── Worker 1: ack job 1 (success), nack job 2 (transient error), ack job 3 ──
    db.mq.ack('queues/jobs', batch1[0].key);
    console.log(`\n✅ Worker 1 acked ${batch1[0].key.slice(0,8)} (job-1 done)`);

    db.mq.nack('queues/jobs', batch1[1].key);
    console.log(`🔄 Worker 1 nacked ${batch1[1].key.slice(0,8)} (job-2 retry)`);

    db.mq.ack('queues/jobs', batch1[2].key);
    console.log(`✅ Worker 1 acked ${batch1[2].key.slice(0,8)} (job-3 done)`);

    // ── Worker 2: ack both ──
    db.mq.ack('queues/jobs', batch2[0].key);
    db.mq.ack('queues/jobs', batch2[1].key);
    console.log(`✅ Worker 2 acked both (job-4, job-5 done)`);

    // ── Peek: only job-2 remains (pending after nack) ──
    // Note: nack returns the message to 'pending' but does NOT reset its
    // deliveryCount (still 1 from the first fetch).
    peek = db.mq.peek('queues/jobs', 10);
    console.log(`\n📋 Peek after processing: ${peek.length} remaining`);
    expect(peek).toHaveLength(1);
    expect(peek[0].key).toBe(batch1[1].key);
    expect(peek[0].status).toBe('pending');
    expect(peek[0].deliveryCount).toBe(1);

    // ── Retry: Worker fetches job-2 again, fails again ──
    const retry1 = db.mq.fetch('queues/jobs', 1);
    expect(retry1).toHaveLength(1);
    expect(retry1[0].deliveryCount).toBe(2);
    console.log(`\n🔄 Retry 1: job-2 deliveryCount=${retry1[0].deliveryCount}`);
    db.mq.nack('queues/jobs', retry1[0].key);

    const retry2 = db.mq.fetch('queues/jobs', 1);
    expect(retry2).toHaveLength(1);
    expect(retry2[0].deliveryCount).toBe(3);
    console.log(`🔄 Retry 2: job-2 deliveryCount=${retry2[0].deliveryCount}`);

    // Worker doesn't ack or nack — simulates crash. Message stays inflight.
    // Expire it manually (simulating visibility timeout)
    // NOTE: reaches into the underlying SQLite handle; couples the test to the
    // 'nodes' table schema (mq_status / mq_inflight_until columns).
    db.storage.db.run(`UPDATE nodes SET mq_inflight_until = 1 WHERE mq_status = 'inflight'`);

    // ── Sweep: deliveryCount=3 >= maxDeliveries=3 → DLQ ──
    const sweepResult = db.mq.sweep();
    console.log(`\n🧹 Sweep: reclaimed=${sweepResult.reclaimed}, dlq'd=${sweepResult.dlqd}`);
    expect(sweepResult.reclaimed).toBe(0);
    expect(sweepResult.dlqd).toBe(1);

    // ── Queue is now empty ──
    peek = db.mq.peek('queues/jobs', 10);
    console.log(`📋 Queue after sweep: ${peek.length} messages`);
    expect(peek).toHaveLength(0);

    // ── DLQ has the failed job ──
    const dlq = db.mq.dlq('queues/jobs');
    console.log(`💀 DLQ: ${dlq.length} dead letters`);
    expect(dlq).toHaveLength(1);
    expect((dlq[0].data as any).task).toBe('job-2');

    // ── Verify isolation: normal CRUD unaffected ──
    // MQ rows live under the same path namespace but are invisible to get().
    expect(db.get('queues/jobs')).toBeNull();
    db.set('queues/jobs/metadata', { created: Date.now() });
    expect(db.get('queues/jobs/metadata')).toBeTruthy();
    // DLQ still there
    expect(db.mq.dlq('queues/jobs')).toHaveLength(1);

    console.log(`\n🎉 Full flow complete — 5 pushed, 4 processed, 1 DLQ'd, CRUD isolated`);
    db.close();
  });

  test('competing workers with visibility timeout reclaim', () => {
    const db = new BodDB({
      path: ':memory:',
      sweepInterval: 0,
      mq: { visibilityTimeout: 1, maxDeliveries: 10 },
    });

    // Push 3 jobs
    db.mq.push('q/work', { id: 'a' });
    db.mq.push('q/work', { id: 'b' });
    db.mq.push('q/work', { id: 'c' });

    // Worker 1 fetches all 3
    const w1 = db.mq.fetch('q/work', 10);
    expect(w1).toHaveLength(3);
    console.log(`\n👷 W1 claimed 3`);

    // Worker 2 gets nothing
    expect(db.mq.fetch('q/work', 10)).toHaveLength(0);
    console.log(`👷 W2 gets nothing (all inflight)`);

    // Worker 1 crashes — expire all
    db.storage.db.run(`UPDATE nodes SET mq_inflight_until = 1 WHERE mq_status = 'inflight'`);

    // Sweep reclaims
    const result = db.mq.sweep();
    expect(result.reclaimed).toBe(3);
    console.log(`🧹 Sweep reclaimed ${result.reclaimed}`);

    // Worker 2 now gets all 3
    // deliveryCount carries across the reclaim: first fetch + refetch = 2.
    const w2 = db.mq.fetch('q/work', 10);
    expect(w2).toHaveLength(3);
    expect(w2.every(m => m.deliveryCount === 2)).toBe(true);
    console.log(`👷 W2 claimed ${w2.length}, all deliveryCount=2`);

    // Ack all
    for (const m of w2) db.mq.ack('q/work', m.key);
    expect(db.mq.peek('q/work')).toHaveLength(0);
    console.log(`✅ All acked, queue empty`);

    db.close();
  });

  test('idempotent push + purge flow', () => {
    const db = new BodDB({ path: ':memory:', sweepInterval: 0 });

    // Idempotent push: same key → same message
    // (the second payload is discarded; k2 is the existing message's key)
    const k1 = db.mq.push('q/jobs', { type: 'email' }, { idempotencyKey: 'email-123' });
    const k2 = db.mq.push('q/jobs', { type: 'email-v2' }, { idempotencyKey: 'email-123' });
    expect(k1).toBe(k2);
    expect(db.mq.peek('q/jobs')).toHaveLength(1);
    console.log(`\n🔑 Idempotent push: same key returned`);

    // Different key → new message
    const k3 = db.mq.push('q/jobs', { type: 'sms' }, { idempotencyKey: 'sms-456' });
    expect(k3).not.toBe(k1);
    expect(db.mq.peek('q/jobs')).toHaveLength(2);

    // Fetch one, purge rest
    const fetched = db.mq.fetch('q/jobs', 1);
    const purged = db.mq.purge('q/jobs');
    expect(purged).toBe(1); // only pending one purged
    console.log(`🗑️ Purged ${purged} pending, ${fetched.length} still inflight`);

    // Inflight one still visible in peek
    const remaining = db.mq.peek('q/jobs');
    expect(remaining).toHaveLength(1);
    expect(remaining[0].status).toBe('inflight');

    db.close();
  });

  test('subscription notifications during MQ operations', () => {
    // NOTE(review): this test only logs the received events — it asserts
    // nothing about them, so it passes as long as no callback throws.
    const db = new BodDB({ path: ':memory:', sweepInterval: 0 });
    const events: string[] = [];

    // Subscribe to queue path via child subscription
    const unsub = db.onChild('queues', (ev) => {
      events.push(`${ev.type}:${ev.key}`);
    });

    const key = db.mq.push('queues/jobs', { n: 1 });
    // Push should trigger child added on 'queues'
    console.log(`\n📡 Events after push: ${JSON.stringify(events)}`);

    db.mq.ack('queues/jobs', key);
    console.log(`📡 Events after ack: ${JSON.stringify(events)}`);

    unsub();
    db.close();
  });
});
|
package/tests/mq.test.ts
ADDED
|
@@ -0,0 +1,326 @@
|
|
|
1
|
+
import { describe, test, expect, beforeEach } from 'bun:test';
|
|
2
|
+
import { BodDB } from '../src/server/BodDB.ts';
|
|
3
|
+
|
|
4
|
+
let db: BodDB;
|
|
5
|
+
|
|
6
|
+
beforeEach(() => {
|
|
7
|
+
db = new BodDB({ path: ':memory:', sweepInterval: 0 });
|
|
8
|
+
});
|
|
9
|
+
|
|
10
|
+
// Unit tests for the MQEngine, driven through the public BodDB facade
// (db.mq.*). The shared `db` fixture — a fresh in-memory instance per test —
// is created in the file-level beforeEach above. Tests that need non-default
// MQ options build their own instance (`mqDb`) and close it explicitly.
describe('MQEngine', () => {
  test('push + fetch claims exactly one message', () => {
    db.mq.push('queues/jobs', { type: 'email' });
    db.mq.push('queues/jobs', { type: 'sms' });

    // The earliest-pushed message is the one claimed (FIFO delivery).
    const msgs = db.mq.fetch('queues/jobs', 1);
    expect(msgs).toHaveLength(1);
    expect(msgs[0].data).toEqual({ type: 'email' });
    expect(msgs[0].deliveryCount).toBe(1);
  });

  test('fetch returns multiple messages', () => {
    db.mq.push('queues/jobs', { n: 1 });
    db.mq.push('queues/jobs', { n: 2 });
    db.mq.push('queues/jobs', { n: 3 });

    // Asking for more than available returns only what exists.
    const msgs = db.mq.fetch('queues/jobs', 5);
    expect(msgs).toHaveLength(3);
  });

  test('fetched messages are not re-fetched (inflight)', () => {
    db.mq.push('queues/jobs', { n: 1 });
    const first = db.mq.fetch('queues/jobs', 1);
    expect(first).toHaveLength(1);

    const second = db.mq.fetch('queues/jobs', 1);
    expect(second).toHaveLength(0);
  });

  test('ack deletes the row', () => {
    db.mq.push('queues/jobs', { n: 1 });
    const msgs = db.mq.fetch('queues/jobs', 1);
    db.mq.ack('queues/jobs', msgs[0].key);

    // Should be gone — peek returns nothing
    const peeked = db.mq.peek('queues/jobs');
    expect(peeked).toHaveLength(0);
  });

  test('nack releases to pending (re-fetchable)', () => {
    db.mq.push('queues/jobs', { n: 1 });
    const msgs = db.mq.fetch('queues/jobs', 1);
    db.mq.nack('queues/jobs', msgs[0].key);

    // Same message comes back, with deliveryCount incremented by the refetch.
    const refetched = db.mq.fetch('queues/jobs', 1);
    expect(refetched).toHaveLength(1);
    expect(refetched[0].key).toBe(msgs[0].key);
    expect(refetched[0].deliveryCount).toBe(2);
  });

  test('peek does not claim messages', () => {
    db.mq.push('queues/jobs', { n: 1 });
    const peeked = db.mq.peek('queues/jobs', 10);
    expect(peeked).toHaveLength(1);

    // Should still be fetchable
    const fetched = db.mq.fetch('queues/jobs', 1);
    expect(fetched).toHaveLength(1);
    expect(fetched[0].key).toBe(peeked[0].key);
  });

  test('visibility timeout reclaim via sweep', () => {
    db.mq.push('queues/jobs', { n: 1 });
    db.mq.fetch('queues/jobs', 1);

    // Simulate expired inflight by directly updating
    // (couples the test to the 'nodes' table's mq_* columns)
    db.storage.db.run(`UPDATE nodes SET mq_inflight_until = 1 WHERE mq_status = 'inflight'`);

    const result = db.mq.sweep();
    expect(result.reclaimed).toBe(1);
    expect(result.dlqd).toBe(0);

    // Should be fetchable again
    const msgs = db.mq.fetch('queues/jobs', 1);
    expect(msgs).toHaveLength(1);
  });

  test('maxDeliveries exhausted → DLQ', () => {
    const mqDb = new BodDB({ path: ':memory:', sweepInterval: 0, mq: { maxDeliveries: 2, visibilityTimeout: 1 } });
    mqDb.mq.push('queues/jobs', { n: 1 });

    // Fetch twice (exhaust deliveries)
    for (let i = 0; i < 2; i++) {
      mqDb.mq.fetch('queues/jobs', 1);
      // Expire the inflight
      mqDb.storage.db.run(`UPDATE nodes SET mq_inflight_until = 1 WHERE mq_status = 'inflight'`);
    }

    // Sweep should move to DLQ
    const result = mqDb.mq.sweep();
    expect(result.dlqd).toBe(1);
    expect(result.reclaimed).toBe(0);

    // Check DLQ
    const dlqMsgs = mqDb.mq.dlq('queues/jobs');
    expect(dlqMsgs).toHaveLength(1);
    expect(dlqMsgs[0].data).toEqual({ n: 1 });

    // Original queue should be empty
    const remaining = mqDb.mq.peek('queues/jobs');
    expect(remaining).toHaveLength(0);

    mqDb.close();
  });

  test('purge removes all pending', () => {
    db.mq.push('queues/jobs', { n: 1 });
    db.mq.push('queues/jobs', { n: 2 });
    db.mq.push('queues/jobs', { n: 3 });

    const count = db.mq.purge('queues/jobs');
    expect(count).toBe(3);

    const remaining = db.mq.peek('queues/jobs');
    expect(remaining).toHaveLength(0);
  });

  test('purge does not remove inflight messages', () => {
    db.mq.push('queues/jobs', { n: 1 });
    db.mq.push('queues/jobs', { n: 2 });
    db.mq.fetch('queues/jobs', 1); // claim one

    const count = db.mq.purge('queues/jobs');
    expect(count).toBe(1); // only the pending one
  });

  test('idempotent push dedup', () => {
    const k1 = db.mq.push('queues/jobs', { n: 1 }, { idempotencyKey: 'job-1' });
    const k2 = db.mq.push('queues/jobs', { n: 2 }, { idempotencyKey: 'job-1' });
    expect(k1).toBe(k2);

    // First write wins: the duplicate's payload ({ n: 2 }) is discarded.
    const msgs = db.mq.peek('queues/jobs');
    expect(msgs).toHaveLength(1);
    expect(msgs[0].data).toEqual({ n: 1 });
  });

  test('push returns key', () => {
    const key = db.mq.push('queues/jobs', { n: 1 });
    expect(typeof key).toBe('string');
    expect(key.length).toBeGreaterThan(0);
  });

  test('separate queues are independent', () => {
    db.mq.push('queues/email', { to: 'alice' });
    db.mq.push('queues/sms', { to: 'bob' });

    const emails = db.mq.fetch('queues/email', 10);
    expect(emails).toHaveLength(1);
    expect(emails[0].data).toEqual({ to: 'alice' });

    const sms = db.mq.fetch('queues/sms', 10);
    expect(sms).toHaveLength(1);
    expect(sms[0].data).toEqual({ to: 'bob' });
  });

  test('MQ rows invisible to get/exists/delete but visible in getShallow', () => {
    db.mq.push('queues/jobs', { n: 1 });
    expect(db.get('queues/jobs')).toBeNull();
    expect(db.storage.exists('queues/jobs')).toBe(false);
    // getShallow is a browse operation — should see MQ rows
    const shallow = db.getShallow('queues');
    expect(shallow.length).toBeGreaterThan(0);
  });

  test('empty queue fetch returns empty array', () => {
    const msgs = db.mq.fetch('queues/empty', 5);
    expect(msgs).toHaveLength(0);
  });

  test('double ack is safe', () => {
    db.mq.push('queues/jobs', { n: 1 });
    const msgs = db.mq.fetch('queues/jobs', 1);
    db.mq.ack('queues/jobs', msgs[0].key);
    db.mq.ack('queues/jobs', msgs[0].key); // should not throw
  });

  test('nack on non-existent key is safe', () => {
    db.mq.nack('queues/jobs', 'nonexistent'); // should not throw
  });

  test('purge does not delete DLQ rows', () => {
    const mqDb = new BodDB({ path: ':memory:', sweepInterval: 0, mq: { maxDeliveries: 1, visibilityTimeout: 1 } });
    mqDb.mq.push('queues/jobs', { n: 1 });
    mqDb.mq.fetch('queues/jobs', 1);
    mqDb.storage.db.run(`UPDATE nodes SET mq_inflight_until = 1 WHERE mq_status = 'inflight'`);
    mqDb.mq.sweep(); // moves to DLQ

    const dlqBefore = mqDb.mq.dlq('queues/jobs');
    expect(dlqBefore).toHaveLength(1);

    mqDb.mq.purge('queues/jobs'); // should not touch DLQ
    const dlqAfter = mqDb.mq.dlq('queues/jobs');
    expect(dlqAfter).toHaveLength(1);
    mqDb.close();
  });

  test('peek shows status change after fetch', () => {
    db.mq.push('queues/jobs', { n: 1 });

    const before = db.mq.peek('queues/jobs');
    expect(before[0].status).toBe('pending');

    db.mq.fetch('queues/jobs', 1);

    const after = db.mq.peek('queues/jobs');
    expect(after[0].status).toBe('inflight');
  });

  test('fetch status is always inflight', () => {
    db.mq.push('queues/jobs', { n: 1 });
    const msgs = db.mq.fetch('queues/jobs', 1);
    expect(msgs[0].status).toBe('inflight');
  });

  test('idempotent push does NOT emit on duplicate', () => {
    let notifyCount = 0;
    const mqDb = new BodDB({ path: ':memory:', sweepInterval: 0, mq: {} });
    // White-box: replace the engine's private notify hook to count emissions.
    (mqDb.mq as any).options.notify = () => { notifyCount++; };

    mqDb.mq.push('queues/jobs', { n: 1 }, { idempotencyKey: 'job-1' });
    expect(notifyCount).toBe(1);

    mqDb.mq.push('queues/jobs', { n: 2 }, { idempotencyKey: 'job-1' });
    // Duplicate returns early before emit — correct behavior
    expect(notifyCount).toBe(1);
    mqDb.close();
  });

  test('normal db.delete does not delete MQ rows', () => {
    db.mq.push('queues/jobs', { n: 1 });
    db.delete('queues/jobs');

    // MQ row should survive
    const peeked = db.mq.peek('queues/jobs');
    expect(peeked).toHaveLength(1);
  });

  test('normal db.set at queue path does not clobber MQ rows', () => {
    db.mq.push('queues/jobs', { n: 1 });
    db.set('queues/jobs/config', 'test'); // regular data at same prefix

    // MQ row should survive
    const peeked = db.mq.peek('queues/jobs');
    expect(peeked).toHaveLength(1);
    // Regular data should also exist
    expect(db.get('queues/jobs/config')).toBe('test');
  });

  test('per-queue options override', () => {
    const mqDb = new BodDB({
      path: ':memory:', sweepInterval: 0,
      mq: { visibilityTimeout: 10, maxDeliveries: 5, queues: { 'queues/critical': { maxDeliveries: 1 } } },
    });

    mqDb.mq.push('queues/critical', { n: 1 });
    mqDb.mq.fetch('queues/critical', 1);
    mqDb.storage.db.run(`UPDATE nodes SET mq_inflight_until = 1 WHERE mq_status = 'inflight'`);

    const result = mqDb.mq.sweep();
    // maxDeliveries=1, deliveryCount=1 after fetch → should DLQ
    expect(result.dlqd).toBe(1);

    mqDb.close();
  });

  test('DLQ path construction with nested queue', () => {
    const mqDb = new BodDB({ path: ':memory:', sweepInterval: 0, mq: { maxDeliveries: 1 } });
    mqDb.mq.push('queues/email/high', { n: 1 });
    mqDb.mq.fetch('queues/email/high', 1);
    mqDb.storage.db.run(`UPDATE nodes SET mq_inflight_until = 1 WHERE mq_status = 'inflight'`);
    mqDb.mq.sweep();

    // DLQ should be at queues/email/high/_dlq/<key>
    const dlq = mqDb.mq.dlq('queues/email/high');
    expect(dlq).toHaveLength(1);
    expect(dlq[0].path).toContain('queues/email/high/_dlq/');

    mqDb.close();
  });

  test('notify fires on push, ack, nack, purge, sweep', () => {
    const events: string[][] = [];
    const mqDb = new BodDB({ path: ':memory:', sweepInterval: 0, mq: { visibilityTimeout: 1, maxDeliveries: 1 } });
    // White-box: capture the path lists passed to the private notify hook.
    (mqDb.mq as any).options.notify = (paths: string[]) => { events.push(paths); };

    mqDb.mq.push('q/a', { n: 1 });
    expect(events.length).toBe(1); // push

    mqDb.mq.push('q/a', { n: 2 });
    expect(events.length).toBe(2); // push

    const msgs = mqDb.mq.fetch('q/a', 2);
    // fetch does NOT notify (status change only, not visible to subs)

    mqDb.mq.nack('q/a', msgs[0].key);
    expect(events.length).toBe(3); // nack

    mqDb.mq.ack('q/a', msgs[1].key);
    expect(events.length).toBe(4); // ack

    mqDb.mq.purge('q/a');
    expect(events.length).toBe(5); // purge

    mqDb.close();
  });

  test('sweep integrated into BodDB.sweep()', () => {
    db.mq.push('queues/jobs', { n: 1 });
    db.mq.fetch('queues/jobs', 1);
    db.storage.db.run(`UPDATE nodes SET mq_inflight_until = 1 WHERE mq_status = 'inflight'`);

    db.sweep(); // should call mq.sweep() internally

    const msgs = db.mq.fetch('queues/jobs', 1);
    expect(msgs).toHaveLength(1);
  });
});
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
import { describe, test, expect } from 'bun:test';
|
|
2
|
+
import { BodDB } from '../src/server/BodDB.ts';
|
|
3
|
+
import { generatePushId } from '../src/server/StorageEngine.ts';
|
|
4
|
+
|
|
5
|
+
describe('Push & Append-Only', () => {
|
|
6
|
+
test('generatePushId produces time-sortable IDs', () => {
|
|
7
|
+
const ids: string[] = [];
|
|
8
|
+
for (let i = 0; i < 10; i++) {
|
|
9
|
+
ids.push(generatePushId());
|
|
10
|
+
}
|
|
11
|
+
// Should be in sorted order
|
|
12
|
+
const sorted = [...ids].sort();
|
|
13
|
+
expect(ids).toEqual(sorted);
|
|
14
|
+
});
|
|
15
|
+
|
|
16
|
+
test('generatePushId produces unique IDs', () => {
|
|
17
|
+
const ids = new Set<string>();
|
|
18
|
+
for (let i = 0; i < 100; i++) {
|
|
19
|
+
ids.add(generatePushId());
|
|
20
|
+
}
|
|
21
|
+
expect(ids.size).toBe(100);
|
|
22
|
+
});
|
|
23
|
+
|
|
24
|
+
test('db.push stores value as single JSON row', () => {
|
|
25
|
+
const db = new BodDB({ sweepInterval: 0 });
|
|
26
|
+
const key = db.push('messages', { text: 'hello', ts: 123 });
|
|
27
|
+
expect(typeof key).toBe('string');
|
|
28
|
+
expect(key.length).toBe(12); // 8 time + 4 random
|
|
29
|
+
|
|
30
|
+
const val = db.get(`messages/${key}`);
|
|
31
|
+
expect(val).toEqual({ text: 'hello', ts: 123 });
|
|
32
|
+
db.close();
|
|
33
|
+
});
|
|
34
|
+
|
|
35
|
+
test('push entries appear in queries', () => {
|
|
36
|
+
const db = new BodDB({ sweepInterval: 0 });
|
|
37
|
+
db.push('logs', { level: 'info', msg: 'start' });
|
|
38
|
+
db.push('logs', { level: 'error', msg: 'fail' });
|
|
39
|
+
db.push('logs', { level: 'info', msg: 'end' });
|
|
40
|
+
|
|
41
|
+
const all = db.query('logs').get();
|
|
42
|
+
expect(all.length).toBe(3);
|
|
43
|
+
db.close();
|
|
44
|
+
});
|
|
45
|
+
|
|
46
|
+
test('push triggers subscriptions', () => {
|
|
47
|
+
const db = new BodDB({ sweepInterval: 0 });
|
|
48
|
+
const events: string[] = [];
|
|
49
|
+
db.onChild('messages', (e) => events.push(e.type));
|
|
50
|
+
|
|
51
|
+
db.push('messages', { text: 'hello' });
|
|
52
|
+
expect(events).toEqual(['added']);
|
|
53
|
+
db.close();
|
|
54
|
+
});
|
|
55
|
+
});
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
import { describe, test, expect } from 'bun:test';
|
|
2
|
+
import { BodDB } from '../src/server/BodDB.ts';
|
|
3
|
+
|
|
4
|
+
describe('QueryEngine (via BodDB)', () => {
|
|
5
|
+
test('fluent query with where + order + limit', () => {
|
|
6
|
+
const db = new BodDB();
|
|
7
|
+
db.set('users/u1', { name: 'Charlie', role: 'admin' });
|
|
8
|
+
db.set('users/u2', { name: 'Alice', role: 'user' });
|
|
9
|
+
db.set('users/u3', { name: 'Bob', role: 'admin' });
|
|
10
|
+
|
|
11
|
+
const result = db.query('users')
|
|
12
|
+
.where('role', '==', 'admin')
|
|
13
|
+
.order('name')
|
|
14
|
+
.limit(1)
|
|
15
|
+
.get();
|
|
16
|
+
|
|
17
|
+
expect(result.length).toBe(1);
|
|
18
|
+
expect(result[0].name).toBe('Bob');
|
|
19
|
+
db.close();
|
|
20
|
+
});
|
|
21
|
+
|
|
22
|
+
test('query with desc ordering', () => {
|
|
23
|
+
const db = new BodDB();
|
|
24
|
+
db.set('posts/p1', { title: 'A', score: 10 });
|
|
25
|
+
db.set('posts/p2', { title: 'B', score: 50 });
|
|
26
|
+
db.set('posts/p3', { title: 'C', score: 30 });
|
|
27
|
+
|
|
28
|
+
const result = db.query('posts').order('score', 'desc').get();
|
|
29
|
+
expect(result.map(r => r.title)).toEqual(['B', 'C', 'A']);
|
|
30
|
+
db.close();
|
|
31
|
+
});
|
|
32
|
+
|
|
33
|
+
test('query with offset', () => {
|
|
34
|
+
const db = new BodDB();
|
|
35
|
+
db.set('items/a', { v: 1 });
|
|
36
|
+
db.set('items/b', { v: 2 });
|
|
37
|
+
db.set('items/c', { v: 3 });
|
|
38
|
+
|
|
39
|
+
const result = db.query('items').order('v').offset(1).limit(1).get();
|
|
40
|
+
expect(result.length).toBe(1);
|
|
41
|
+
expect(result[0].v).toBe(2);
|
|
42
|
+
db.close();
|
|
43
|
+
});
|
|
44
|
+
|
|
45
|
+
test('multiple filters', () => {
|
|
46
|
+
const db = new BodDB();
|
|
47
|
+
db.set('users/u1', { age: 25, role: 'admin' });
|
|
48
|
+
db.set('users/u2', { age: 30, role: 'admin' });
|
|
49
|
+
db.set('users/u3', { age: 35, role: 'user' });
|
|
50
|
+
|
|
51
|
+
const result = db.query('users')
|
|
52
|
+
.where('role', '==', 'admin')
|
|
53
|
+
.where('age', '>=', 30)
|
|
54
|
+
.get();
|
|
55
|
+
|
|
56
|
+
expect(result.length).toBe(1);
|
|
57
|
+
expect(result[0]._key).toBe('u2');
|
|
58
|
+
db.close();
|
|
59
|
+
});
|
|
60
|
+
});
|