@bod.ee/db 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +23 -0
- package/.claude/skills/config-file.md +54 -0
- package/.claude/skills/deploying-bod-db.md +29 -0
- package/.claude/skills/developing-bod-db.md +127 -0
- package/.claude/skills/using-bod-db.md +403 -0
- package/CLAUDE.md +110 -0
- package/README.md +252 -0
- package/admin/rules.ts +12 -0
- package/admin/server.ts +523 -0
- package/admin/ui.html +2281 -0
- package/cli.ts +177 -0
- package/client.ts +2 -0
- package/config.ts +20 -0
- package/deploy/.env.example +1 -0
- package/deploy/base.yaml +18 -0
- package/deploy/boddb-logs.yaml +10 -0
- package/deploy/boddb.yaml +10 -0
- package/deploy/demo.html +196 -0
- package/deploy/deploy.ts +32 -0
- package/deploy/prod-logs.config.ts +15 -0
- package/deploy/prod.config.ts +15 -0
- package/index.ts +20 -0
- package/mcp.ts +78 -0
- package/package.json +29 -0
- package/react.ts +1 -0
- package/src/client/BodClient.ts +515 -0
- package/src/react/hooks.ts +121 -0
- package/src/server/BodDB.ts +319 -0
- package/src/server/ExpressionRules.ts +250 -0
- package/src/server/FTSEngine.ts +76 -0
- package/src/server/FileAdapter.ts +116 -0
- package/src/server/MCPAdapter.ts +409 -0
- package/src/server/MQEngine.ts +286 -0
- package/src/server/QueryEngine.ts +45 -0
- package/src/server/RulesEngine.ts +108 -0
- package/src/server/StorageEngine.ts +464 -0
- package/src/server/StreamEngine.ts +320 -0
- package/src/server/SubscriptionEngine.ts +120 -0
- package/src/server/Transport.ts +479 -0
- package/src/server/VectorEngine.ts +115 -0
- package/src/shared/errors.ts +15 -0
- package/src/shared/pathUtils.ts +94 -0
- package/src/shared/protocol.ts +59 -0
- package/src/shared/transforms.ts +99 -0
- package/tests/batch.test.ts +60 -0
- package/tests/bench.ts +205 -0
- package/tests/e2e.test.ts +284 -0
- package/tests/expression-rules.test.ts +114 -0
- package/tests/file-adapter.test.ts +57 -0
- package/tests/fts.test.ts +58 -0
- package/tests/mq-flow.test.ts +204 -0
- package/tests/mq.test.ts +326 -0
- package/tests/push.test.ts +55 -0
- package/tests/query.test.ts +60 -0
- package/tests/rules.test.ts +78 -0
- package/tests/sse.test.ts +78 -0
- package/tests/storage.test.ts +199 -0
- package/tests/stream.test.ts +385 -0
- package/tests/stress.test.ts +202 -0
- package/tests/subscriptions.test.ts +86 -0
- package/tests/transforms.test.ts +92 -0
- package/tests/transport.test.ts +209 -0
- package/tests/ttl.test.ts +70 -0
- package/tests/vector.test.ts +69 -0
- package/tsconfig.json +27 -0
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
{
|
|
2
|
+
"permissions": {
|
|
3
|
+
"allow": [
|
|
4
|
+
"Bash(bun test:*)",
|
|
5
|
+
"Bash(grep:*)",
|
|
6
|
+
"Bash(grep error::*)",
|
|
7
|
+
"Skill(git-commit)",
|
|
8
|
+
"Bash(bun run:*)",
|
|
9
|
+
"Bash(cat:*)",
|
|
10
|
+
"Bash(bun:*)",
|
|
11
|
+
"Bash(timeout 3 PORT=4401 bun run:*)",
|
|
12
|
+
"Bash(timeout 3 env PORT=4401:*)",
|
|
13
|
+
"Bash(timeout 3 env PORT=4402:*)",
|
|
14
|
+
"Bash(timeout 3 env PORT=4405:*)",
|
|
15
|
+
"Bash(git status:*)",
|
|
16
|
+
"Bash(git add:*)",
|
|
17
|
+
"Bash(cd:*)",
|
|
18
|
+
"Bash(bod-db:*)",
|
|
19
|
+
"Bash(ls:*)",
|
|
20
|
+
"Bash(ssh:*)"
|
|
21
|
+
]
|
|
22
|
+
}
|
|
23
|
+
}
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: config-file
|
|
3
|
+
description: BodDB CLI and config files. Use when starting a server, creating configs, or customizing the CLI.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# CLI & Config File
|
|
7
|
+
|
|
8
|
+
## CLI Usage
|
|
9
|
+
|
|
10
|
+
```bash
|
|
11
|
+
bod-db # defaults (memory, port 4400)
|
|
12
|
+
bod-db config.ts # start with config file
|
|
13
|
+
bod-db --port 3000 # override port
|
|
14
|
+
bod-db config.ts --port 5000 # config + override
|
|
15
|
+
bod-db --memory # force in-memory
|
|
16
|
+
bod-db --path ./my.db # override db path
|
|
17
|
+
bod-db --init # generate bod.config.ts
|
|
18
|
+
bod-db -h # help
|
|
19
|
+
bod-db -v # version
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
## npm scripts
|
|
23
|
+
|
|
24
|
+
```bash
|
|
25
|
+
bun run serve # start with defaults
|
|
26
|
+
bun run start # start with config.ts
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
## Config File Format
|
|
30
|
+
|
|
31
|
+
```typescript
|
|
32
|
+
import type { BodDBOptions } from 'bod-db';
|
|
33
|
+
|
|
34
|
+
export default {
|
|
35
|
+
path: './.tmp/bod.db',
|
|
36
|
+
port: 4400,
|
|
37
|
+
sweepInterval: 60000,
|
|
38
|
+
rules: { '': { read: true, write: true } },
|
|
39
|
+
indexes: { 'users': ['role', 'createdAt'] },
|
|
40
|
+
fts: {},
|
|
41
|
+
vectors: { dimensions: 384 },
|
|
42
|
+
mq: { visibilityTimeout: 30, maxDeliveries: 5 },
|
|
43
|
+
compact: { 'events/logs': { maxAge: 86400 } },
|
|
44
|
+
} satisfies Partial<BodDBOptions>;
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
Supports `.ts`, `.js`, `.json`. CLI flags override config values.
|
|
48
|
+
|
|
49
|
+
## Programmatic
|
|
50
|
+
|
|
51
|
+
```typescript
|
|
52
|
+
const db = await BodDB.create('./config.ts');
|
|
53
|
+
const db2 = await BodDB.create({ path: ':memory:' });
|
|
54
|
+
```
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
@skill deploying-bod-db
|
|
2
|
+
@description Deploy BodDB instances to a production VM via vmdrop, using a base + override YAML config pattern
|
|
3
|
+
@returns Running BodDB service on remote VM with HTTPS
|
|
4
|
+
|
|
5
|
+
## Usage
|
|
6
|
+
```bash
|
|
7
|
+
bun run deploy # deploy boddb instance
|
|
8
|
+
bun run deploy:bootstrap # first-time setup (provision + deploy)
|
|
9
|
+
bun run deploy:logs # tail service logs
|
|
10
|
+
bun run deploy:ssh # SSH into VM
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Multi-Instance
|
|
14
|
+
Create `deploy/<name>.yaml` with overrides, then:
|
|
15
|
+
```bash
|
|
16
|
+
bun run deploy/deploy.ts <name> bootstrap
|
|
17
|
+
bun run deploy/deploy.ts <name> deploy
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
## Config Structure
|
|
21
|
+
- `deploy/base.yaml` — shared defaults (host, user, HTTPS, excludes)
|
|
22
|
+
- `deploy/<instance>.yaml` — instance-specific (name, port, domain, execStart)
|
|
23
|
+
- `deploy/prod.config.ts` — BodDB runtime config (rules, indexes, sweep)
|
|
24
|
+
- `deploy/.env.example` — secret references (SSH key path)
|
|
25
|
+
|
|
26
|
+
## Adding a New Instance
|
|
27
|
+
1. Create `deploy/<name>.yaml` with app.name, app.dir, runtime.port, service.name, https.domain
|
|
28
|
+
2. Create `deploy/prod-<name>.config.ts` with BodDB config
|
|
29
|
+
3. Run `bun run deploy/deploy.ts <name> bootstrap`
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: developing-bod-db
|
|
3
|
+
description: Guide for implementing BodDB features — storage, subscriptions, queries, transport, rules, transforms, FTS, vectors. Use when adding new capabilities, fixing bugs, or extending internals.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Developing BodDB
|
|
7
|
+
|
|
8
|
+
## When to Use
|
|
9
|
+
- Implementing new features or engines
|
|
10
|
+
- Adding features to existing engines
|
|
11
|
+
- Fixing bugs or extending the test suite
|
|
12
|
+
- Refactoring internals
|
|
13
|
+
|
|
14
|
+
## Architecture
|
|
15
|
+
|
|
16
|
+
```
|
|
17
|
+
src/shared/pathUtils.ts — path validation, flatten/reconstruct, ancestors, prefixEnd
|
|
18
|
+
src/shared/protocol.ts — wire message types (ClientMessage, ServerMessage, BatchOp)
|
|
19
|
+
src/shared/errors.ts — BodError class, error codes
|
|
20
|
+
src/shared/transforms.ts — sentinel classes, factory fns, resolveTransforms
|
|
21
|
+
src/server/StorageEngine.ts — SQLite CRUD, leaf flattening, prefix queries, transforms, TTL, push
|
|
22
|
+
src/server/SubscriptionEngine.ts — value + child subs, ancestor walk, added/changed/removed
|
|
23
|
+
src/server/QueryEngine.ts — fluent builder delegating to StorageEngine.query
|
|
24
|
+
src/server/BodDB.ts — main facade: storage + subs + rules + transport + FTS + vectors + TTL
|
|
25
|
+
src/server/RulesEngine.ts — path-based permissions with $wildcard capture
|
|
26
|
+
src/server/ExpressionRules.ts — safe AST-based expression parser
|
|
27
|
+
src/server/Transport.ts — Bun.serve WS + REST + SSE, batch/push ops
|
|
28
|
+
src/server/FTSEngine.ts — SQLite FTS5 full-text search
|
|
29
|
+
src/server/VectorEngine.ts — vector similarity search (brute-force cosine/euclidean)
|
|
30
|
+
src/server/StreamEngine.ts — Kafka-like event streaming: consumer groups, offsets, replay, durable subs
|
|
31
|
+
src/server/MQEngine.ts — SQS-style message queue: push/fetch/ack/nack, visibility timeout, DLQ
|
|
32
|
+
src/server/FileAdapter.ts — file system sync, watch, metadata, content read/write-through
|
|
33
|
+
src/client/BodClient.ts — WS client: CRUD, batch, push, subs, streams, auto-reconnect
|
|
34
|
+
src/react/hooks.ts — useValue, useChildren, useQuery, useMutation
|
|
35
|
+
client.ts — client entry point
|
|
36
|
+
react.ts — React hooks entry point
|
|
37
|
+
index.ts — server entry point (all exports)
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
## Key Patterns
|
|
41
|
+
|
|
42
|
+
### Leaf flattening
|
|
43
|
+
Nested objects → flat rows keyed by full path. `set('users/u1', {name:'Alice'})` → row at `users/u1/name`. Push rows are stored as single JSON (not flattened).
|
|
44
|
+
|
|
45
|
+
### Prefix queries
|
|
46
|
+
`WHERE path >= 'prefix/' AND path < 'prefix/\uffff'` — leverages SQLite B-tree on PK.
|
|
47
|
+
|
|
48
|
+
### Transforms
|
|
49
|
+
Sentinel values (`increment`, `serverTimestamp`, `arrayUnion`, `arrayRemove`, `ref`) detected via Symbol marker. Resolved against current data in `set()` and `merge()` before flattening.
|
|
50
|
+
|
|
51
|
+
### Refs
|
|
52
|
+
`ref(path)` stores `{ _ref: path }`. Resolved at read time via `storage.get(path, { resolve: true })`. Walks value tree, batch-fetches referenced paths.
|
|
53
|
+
|
|
54
|
+
### Push IDs
|
|
55
|
+
Time-sortable: 8-char base62 timestamp + 4-char random. Guaranteed unique even within same millisecond (incrementing random suffix). Stored as single JSON row.
|
|
56
|
+
|
|
57
|
+
### TTL
|
|
58
|
+
`expires_at INTEGER` column on nodes table. `setExpiry()` sets on all leaf rows. `sweep()` deletes expired + returns paths for subscription notifications.
|
|
59
|
+
|
|
60
|
+
### Subscription notify flow
|
|
61
|
+
1. Snapshot existing paths before write (`snapshotExisting`)
|
|
62
|
+
2. Perform write → get `changedPaths` (leaf paths written)
|
|
63
|
+
3. `notify(changedPaths, getFn, existedBefore)` → value subs on exact + ancestors, child subs with added/changed/removed
|
|
64
|
+
|
|
65
|
+
### Transactions
|
|
66
|
+
`db.transaction(fn)` wraps fn in `storage.db.transaction()`. Collects all changed paths + existedBefore, fires notifications after commit. Rollback on throw.
|
|
67
|
+
|
|
68
|
+
### Batch protocol
|
|
69
|
+
`{ op: 'batch', operations: [...] }` in Transport. Wraps all ops in `db.transaction()`. Returns array of results for push ops.
|
|
70
|
+
|
|
71
|
+
### FTS5
|
|
72
|
+
Separate virtual table `fts(path, content)`. Manual indexing via `db.index()`. Search via `fts MATCH` with optional path prefix filter.
|
|
73
|
+
|
|
74
|
+
### Vectors
|
|
75
|
+
Separate `_vectors(path, embedding)` table. Float32Array serialized as BLOB. Brute-force cosine similarity search. In-memory cache for hot vectors.
|
|
76
|
+
|
|
77
|
+
### Streams (Kafka-like)
|
|
78
|
+
Push paths are append-only logs. `StreamEngine` adds consumer group offsets (`_streams/<topic>/groups/<groupId>/offset`), stored via direct `storage.set/get` (bypasses rules). `subscribe()` uses subscribe-then-backfill: attach child sub first, replay from offset, drain buffer with dedup, then live. `queryAfterKey()` on StorageEngine does SQL-level `WHERE path > ? LIMIT ?` for efficient replay. Idempotent push via `idempotency_key` column + unique index — check-first approach.
|
|
79
|
+
|
|
80
|
+
### MQ (Message Queue)
|
|
81
|
+
`MQEngine` owns all MQ SQL via `storage.db.prepare()` — same pattern as StreamEngine. Columns: `mq_status` (pending/inflight), `mq_inflight_until` (Unix ms), `mq_delivery_count`. `fetch()` uses SQLite transaction with TOCTOU guard (`changes > 0`). Ack = DELETE. Sweep reclaims expired inflight; exhausted messages move to DLQ at `<queue>/_dlq/<key>`. Per-queue options via longest prefix match on `queues` config.
|
|
82
|
+
|
|
83
|
+
### FileAdapter
|
|
84
|
+
Scans directory recursively on `start()`. Optional `fs.watch` for live sync. Stores metadata (size, mtime, mime) at `basePath/<relPath>`. Content read/write-through methods.
|
|
85
|
+
|
|
86
|
+
### Transport security
|
|
87
|
+
All WS ops enforce rules checks (read/write) before executing. REST/SSE extract auth from `Authorization: Bearer <token>` header. Stream ops: `stream-read`/`stream-sub` = read, `stream-ack` = write, `stream-unsub` = skip (cleanup). Client `set()` supports `{ ttl }`. Protocol covers FTS (`fts-search`, `fts-index`), vectors (`vector-search`, `vector-store`), and stream management (`stream-snapshot`, `stream-materialize`, `stream-compact`, `stream-reset`).
|
|
88
|
+
|
|
89
|
+
### Options pattern
|
|
90
|
+
```typescript
|
|
91
|
+
export class XOptions { field: type = default; }
|
|
92
|
+
export class X {
|
|
93
|
+
constructor(options?: Partial<XOptions>) {
|
|
94
|
+
this.options = { ...new XOptions(), ...options };
|
|
95
|
+
}
|
|
96
|
+
}
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
### Rules
|
|
100
|
+
Path patterns with `$wildcard` capture. Most specific match wins. Supports booleans, functions, expression strings, JSON/TS config files. `BodDB.create()` for async .ts loading.
|
|
101
|
+
|
|
102
|
+
## Implementation Phases
|
|
103
|
+
|
|
104
|
+
- **Phase 1** (DONE): Core — StorageEngine, SubscriptionEngine, QueryEngine, BodDB facade
|
|
105
|
+
- **Phase 2** (DONE): Transport (WS + REST) + RulesEngine (function-based V1)
|
|
106
|
+
- **Phase 3** (DONE): Client SDK (BodClient — CRUD, queries, subscriptions, auto-reconnect)
|
|
107
|
+
- **Phase 4** (DONE): Expression rules V2, SSE fallback, React hooks, benchmarks, examples
|
|
108
|
+
- **Phase 5** (DONE): Rules config files, transforms/sentinels, refs, transactions, batch, push, TTL, FileAdapter, FTS5, VectorEngine
|
|
109
|
+
- **Phase 6** (DONE): Event streaming — consumer groups, offset tracking, replay, idempotent push, StreamEngine
|
|
110
|
+
- **Phase 7** (DONE): Message queue — MQEngine, push/fetch/ack/nack, visibility timeout, DLQ, exactly-once claim
|
|
111
|
+
|
|
112
|
+
## Testing
|
|
113
|
+
|
|
114
|
+
- `bun test` — 187 tests across 19 files
|
|
115
|
+
- Each engine/feature gets its own test file in `tests/`
|
|
116
|
+
- Test happy path, edge cases, error cases
|
|
117
|
+
- Use `{ sweepInterval: 0 }` in tests to disable background sweep
|
|
118
|
+
- Use `forceExpire()` helper for TTL tests (avoids real-time waits)
|
|
119
|
+
|
|
120
|
+
## Rules for Development
|
|
121
|
+
|
|
122
|
+
- Zero external dependencies — only `bun:sqlite`
|
|
123
|
+
- `readonly` on public engine fields
|
|
124
|
+
- Sanitize SQL inputs — use `validatePath()` + regex for field names
|
|
125
|
+
- Update CLAUDE.md, README.md, skills when completing features
|
|
126
|
+
- Update `index.ts` exports for new modules
|
|
127
|
+
- Provide workspace examples in `.tmp/workspace/`
|
|
@@ -0,0 +1,403 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: using-bod-db
|
|
3
|
+
description: Guide for integrating BodDB into applications — embedded reactive SQLite database. Use when setting up a BodDB instance, performing CRUD, transforms, transactions, push, TTL, search, or connecting via WebSocket/REST.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Using BodDB
|
|
7
|
+
|
|
8
|
+
## When to Use
|
|
9
|
+
- Adding BodDB to a project as an embedded database
|
|
10
|
+
- Performing CRUD operations on path-based data
|
|
11
|
+
- Using transforms (increment, serverTimestamp, arrayUnion, arrayRemove, ref)
|
|
12
|
+
- Running transactions or batch operations
|
|
13
|
+
- Push/append-only collections with time-sortable keys
|
|
14
|
+
- Setting TTL / auto-expiry on data
|
|
15
|
+
- Full-text search (FTS5) or vector similarity search
|
|
16
|
+
- Syncing files to the database (FileAdapter)
|
|
17
|
+
- Setting up real-time subscriptions (value or child events)
|
|
18
|
+
- Querying with filters, ordering, and pagination
|
|
19
|
+
- Configuring permission rules and authentication
|
|
20
|
+
- Connecting remote clients via WebSocket or REST
|
|
21
|
+
|
|
22
|
+
## Server Setup
|
|
23
|
+
|
|
24
|
+
```typescript
|
|
25
|
+
import { BodDB, increment, serverTimestamp, arrayUnion, arrayRemove, ref } from 'bod-db';
|
|
26
|
+
|
|
27
|
+
const db = new BodDB({
|
|
28
|
+
path: './data.db', // SQLite file (default: ':memory:')
|
|
29
|
+
port: 4400, // optional — only needed if calling db.serve()
|
|
30
|
+
rules: { // inline rules, or path to .json/.ts file
|
|
31
|
+
'users/$uid': {
|
|
32
|
+
read: true,
|
|
33
|
+
write: "auth.uid === $uid",
|
|
34
|
+
},
|
|
35
|
+
},
|
|
36
|
+
indexes: { 'users': ['role', 'createdAt'] }, // auto-create SQLite indexes
|
|
37
|
+
sweepInterval: 60000, // TTL sweep interval in ms (0 = disabled)
|
|
38
|
+
fts: {}, // enable FTS5 full-text search
|
|
39
|
+
vectors: { dimensions: 384 }, // enable vector search
|
|
40
|
+
auth: (token) => verifyToken(token),
|
|
41
|
+
});
|
|
42
|
+
|
|
43
|
+
// From config file (covers all options)
|
|
44
|
+
const db2 = await BodDB.create('./config.ts');
|
|
45
|
+
|
|
46
|
+
// Or inline with .ts rules file
|
|
47
|
+
const db3 = await BodDB.create({ rules: './rules.ts' });
|
|
48
|
+
|
|
49
|
+
// Start network server (optional — BodDB works fully in-process)
|
|
50
|
+
db.serve();
|
|
51
|
+
|
|
52
|
+
// CLI: bod-db config.ts --port 5000 (see config-file skill)
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
### CLI Quick Start
|
|
56
|
+
|
|
57
|
+
```bash
|
|
58
|
+
bod-db --init # generate bod.config.ts
|
|
59
|
+
bod-db bod.config.ts # start server
|
|
60
|
+
bun run serve # same, via npm script
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
## CRUD
|
|
64
|
+
|
|
65
|
+
```typescript
|
|
66
|
+
db.set('users/u1', { name: 'Alice', role: 'admin' });
|
|
67
|
+
db.get('users/u1'); // { name: 'Alice', role: 'admin' }
|
|
68
|
+
db.get('users/u1/name'); // 'Alice'
|
|
69
|
+
db.update({ 'users/u1/name': 'Bob', 'counters/visits': 42 });
|
|
70
|
+
db.delete('users/u1');
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## Transforms
|
|
74
|
+
|
|
75
|
+
```typescript
|
|
76
|
+
db.set('counters/likes', increment(5)); // atomic increment
|
|
77
|
+
db.set('posts/p1/updatedAt', serverTimestamp()); // current timestamp
|
|
78
|
+
db.set('posts/p1/tags', arrayUnion('new', 'hot')); // add unique items
|
|
79
|
+
db.set('posts/p1/tags', arrayRemove('old')); // remove items
|
|
80
|
+
db.set('posts/p1/author', ref('users/u1')); // store reference
|
|
81
|
+
|
|
82
|
+
// Resolve refs at read time
|
|
83
|
+
const resolved = db.storage.get('posts/p1', { resolve: true });
|
|
84
|
+
// Or resolve specific fields only
|
|
85
|
+
const partial = db.storage.get('posts/p1', { resolve: ['author'] });
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
## Push (Append-Only)
|
|
89
|
+
|
|
90
|
+
```typescript
|
|
91
|
+
const key = db.push('logs', { level: 'info', msg: 'started', ts: Date.now() });
|
|
92
|
+
// key is time-sortable (8-char timestamp + 4-char random)
|
|
93
|
+
// Stored as single JSON row (NOT flattened) for performance
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
## TTL / Auto-Expiry
|
|
97
|
+
|
|
98
|
+
```typescript
|
|
99
|
+
db.set('sessions/temp', { token: 'abc' }, { ttl: 3600 }); // expires in 1h
|
|
100
|
+
db.sweep(); // manual sweep (also runs on interval if sweepInterval > 0)
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
## Transactions
|
|
104
|
+
|
|
105
|
+
```typescript
|
|
106
|
+
db.transaction((tx) => {
|
|
107
|
+
const user = tx.get('users/u1');
|
|
108
|
+
tx.set('users/u1/lastLogin', Date.now());
|
|
109
|
+
tx.update({ 'stats/logins': { total: 1 } });
|
|
110
|
+
tx.delete('temp/data');
|
|
111
|
+
});
|
|
112
|
+
// All ops in single SQLite transaction, notifications fire after commit
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
## Full-Text Search (FTS5)
|
|
116
|
+
|
|
117
|
+
```typescript
|
|
118
|
+
// Requires: fts: {} in options
|
|
119
|
+
db.index('posts/p1', 'Hello world tutorial');
|
|
120
|
+
db.index('posts/p1', ['title', 'body']); // index specific fields
|
|
121
|
+
|
|
122
|
+
db.search({ text: 'hello', path: 'posts', limit: 10 });
|
|
123
|
+
// Returns: [{ path, data, rank }]
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
## Vector Search
|
|
127
|
+
|
|
128
|
+
```typescript
|
|
129
|
+
// Requires: vectors: { dimensions: N } in options
|
|
130
|
+
db.vectors!.store('docs/d1', [0.1, 0.2, ...]);
|
|
131
|
+
db.vectorSearch({ query: [0.1, 0.2, ...], path: 'docs', limit: 5, threshold: 0.7 });
|
|
132
|
+
// Returns: [{ path, data, score }]
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
## File Adapter
|
|
136
|
+
|
|
137
|
+
```typescript
|
|
138
|
+
import { FileAdapter } from 'bod-db';
|
|
139
|
+
|
|
140
|
+
const adapter = new FileAdapter(db, {
|
|
141
|
+
root: './uploads',
|
|
142
|
+
basePath: 'files',
|
|
143
|
+
watch: true, // fs.watch for changes
|
|
144
|
+
metadata: true, // store size, mtime, mime
|
|
145
|
+
indexContent: false,
|
|
146
|
+
});
|
|
147
|
+
await adapter.start();
|
|
148
|
+
// Read/write through:
|
|
149
|
+
await adapter.readContent('doc.txt');
|
|
150
|
+
await adapter.writeContent('doc.txt', 'new content');
|
|
151
|
+
```
|
|
152
|
+
|
|
153
|
+
## Subscriptions
|
|
154
|
+
|
|
155
|
+
```typescript
|
|
156
|
+
const off = db.on('users/u1', (snap) => {
|
|
157
|
+
snap.val(); snap.path;
|
|
158
|
+
});
|
|
159
|
+
db.onChild('users', (event) => {
|
|
160
|
+
event.type; event.key; event.path; event.val();
|
|
161
|
+
});
|
|
162
|
+
off();
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
## Queries
|
|
166
|
+
|
|
167
|
+
```typescript
|
|
168
|
+
db.query('users')
|
|
169
|
+
.where('role', '==', 'admin')
|
|
170
|
+
.where('age', '>=', 18)
|
|
171
|
+
.order('name', 'asc')
|
|
172
|
+
.limit(10)
|
|
173
|
+
.offset(0)
|
|
174
|
+
.get();
|
|
175
|
+
```
|
|
176
|
+
|
|
177
|
+
## Client SDK (BodClient)
|
|
178
|
+
|
|
179
|
+
```typescript
|
|
180
|
+
import { BodClient } from 'bod-db/client';
|
|
181
|
+
|
|
182
|
+
const client = new BodClient({
|
|
183
|
+
url: 'ws://localhost:4400',
|
|
184
|
+
auth: () => 'my-token',
|
|
185
|
+
reconnect: true,
|
|
186
|
+
});
|
|
187
|
+
|
|
188
|
+
await client.connect();
|
|
189
|
+
|
|
190
|
+
// CRUD (all async)
|
|
191
|
+
await client.set('users/u1', { name: 'Alice' });
|
|
192
|
+
await client.set('sessions/s1', { token: 'x' }, { ttl: 3600 }); // TTL support
|
|
193
|
+
await client.get('users/u1');
|
|
194
|
+
await client.update({ 'users/u1/name': 'Bob' });
|
|
195
|
+
await client.delete('users/u1');
|
|
196
|
+
|
|
197
|
+
// Batch (atomic multi-op)
|
|
198
|
+
await client.batch([
|
|
199
|
+
{ op: 'set', path: 'a', value: 1 },
|
|
200
|
+
{ op: 'delete', path: 'b' },
|
|
201
|
+
{ op: 'push', path: 'logs', value: { msg: 'hi' } },
|
|
202
|
+
]);
|
|
203
|
+
|
|
204
|
+
// Push
|
|
205
|
+
const key = await client.push('logs', { msg: 'hello' });
|
|
206
|
+
|
|
207
|
+
// Query, subscribe, disconnect — same as before
|
|
208
|
+
```
|
|
209
|
+
|
|
210
|
+
## Wire Protocol
|
|
211
|
+
|
|
212
|
+
```typescript
|
|
213
|
+
// Batch
|
|
214
|
+
ws.send(JSON.stringify({ id: '1', op: 'batch', operations: [
|
|
215
|
+
{ op: 'set', path: 'a', value: 1 },
|
|
216
|
+
{ op: 'delete', path: 'b' },
|
|
217
|
+
]}));
|
|
218
|
+
// Push (with optional idempotencyKey)
|
|
219
|
+
ws.send(JSON.stringify({ id: '2', op: 'push', path: 'logs', value: { msg: 'hi' }, idempotencyKey: 'k1' }));
|
|
220
|
+
// Stream ops
|
|
221
|
+
ws.send(JSON.stringify({ id: '3', op: 'stream-read', path: 'events/orders', groupId: 'billing', limit: 100 }));
|
|
222
|
+
ws.send(JSON.stringify({ id: '4', op: 'stream-ack', path: 'events/orders', groupId: 'billing', key: 'abc123' }));
|
|
223
|
+
ws.send(JSON.stringify({ id: '5', op: 'stream-sub', path: 'events/orders', groupId: 'billing' }));
|
|
224
|
+
// Server pushes: { type: 'stream', path, groupId, events: [{key, data}] }
|
|
225
|
+
ws.send(JSON.stringify({ id: '6', op: 'stream-unsub', path: 'events/orders', groupId: 'billing' }));
|
|
226
|
+
|
|
227
|
+
// MQ ops (SQS-style work queue)
|
|
228
|
+
ws.send(JSON.stringify({ id: '7', op: 'mq-push', path: 'queues/jobs', value: { type: 'email' } }));
|
|
229
|
+
ws.send(JSON.stringify({ id: '8', op: 'mq-fetch', path: 'queues/jobs', count: 5 }));
|
|
230
|
+
ws.send(JSON.stringify({ id: '9', op: 'mq-ack', path: 'queues/jobs', key: 'abc123' }));
|
|
231
|
+
ws.send(JSON.stringify({ id: '10', op: 'mq-nack', path: 'queues/jobs', key: 'abc123' }));
|
|
232
|
+
ws.send(JSON.stringify({ id: '11', op: 'mq-peek', path: 'queues/jobs', count: 10 }));
|
|
233
|
+
ws.send(JSON.stringify({ id: '12', op: 'mq-dlq', path: 'queues/jobs' }));
|
|
234
|
+
ws.send(JSON.stringify({ id: '13', op: 'mq-purge', path: 'queues/jobs' })); // pending only
|
|
235
|
+
ws.send(JSON.stringify({ id: '14', op: 'mq-purge', path: 'queues/jobs', all: true })); // all statuses
|
|
236
|
+
|
|
237
|
+
// FTS + Vector ops
|
|
238
|
+
ws.send(JSON.stringify({ id: '15', op: 'fts-search', text: 'hello', path: 'posts', limit: 10 }));
|
|
239
|
+
ws.send(JSON.stringify({ id: '16', op: 'fts-index', path: 'posts/p1', content: 'Hello world' }));
|
|
240
|
+
ws.send(JSON.stringify({ id: '17', op: 'fts-index', path: 'posts/p1', fields: ['title', 'body'] }));
|
|
241
|
+
ws.send(JSON.stringify({ id: '18', op: 'vector-search', query: [0.1, 0.2], path: 'docs', limit: 5 }));
|
|
242
|
+
ws.send(JSON.stringify({ id: '19', op: 'vector-store', path: 'docs/d1', embedding: [0.1, 0.2] }));
|
|
243
|
+
|
|
244
|
+
// Stream extended ops
|
|
245
|
+
ws.send(JSON.stringify({ id: '20', op: 'stream-snapshot', path: 'events/orders' }));
|
|
246
|
+
ws.send(JSON.stringify({ id: '21', op: 'stream-materialize', path: 'events/orders', keepKey: 'orderId' }));
|
|
247
|
+
ws.send(JSON.stringify({ id: '22', op: 'stream-compact', path: 'events/orders', maxAge: 86400 }));
|
|
248
|
+
ws.send(JSON.stringify({ id: '23', op: 'stream-reset', path: 'events/orders' }));
|
|
249
|
+
```
|
|
250
|
+
|
|
251
|
+
## Message Queue
|
|
252
|
+
|
|
253
|
+
SQS-style work queue — each message is claimed by exactly one worker, with a visibility timeout and a dead-letter queue.
|
|
254
|
+
|
|
255
|
+
```typescript
|
|
256
|
+
// Server-side
|
|
257
|
+
const db = new BodDB({ mq: { visibilityTimeout: 30, maxDeliveries: 5 } });
|
|
258
|
+
db.mq.push('queues/jobs', { type: 'email', to: 'alice@example.com' });
|
|
259
|
+
const jobs = db.mq.fetch('queues/jobs', 5); // claim up to 5
|
|
260
|
+
db.mq.ack('queues/jobs', jobs[0].key); // delete on success
|
|
261
|
+
db.mq.nack('queues/jobs', jobs[0].key); // release back to pending
|
|
262
|
+
db.mq.peek('queues/jobs'); // view without claiming
|
|
263
|
+
db.mq.dlq('queues/jobs'); // view dead letters
|
|
264
|
+
db.mq.purge('queues/jobs'); // delete all pending
|
|
265
|
+
db.mq.purge('queues/jobs', { all: true }); // delete all (pending + inflight + DLQ)
|
|
266
|
+
db.mq.sweep(); // reclaim expired, move to DLQ
|
|
267
|
+
|
|
268
|
+
// Per-queue options
|
|
269
|
+
const db2 = new BodDB({ mq: { queues: { 'queues/critical': { visibilityTimeout: 60, maxDeliveries: 10 } } } });
|
|
270
|
+
|
|
271
|
+
// Client-side
|
|
272
|
+
const q = client.mq('queues/jobs');
|
|
273
|
+
await q.push({ type: 'email' });
|
|
274
|
+
const msgs = await q.fetch(5);
|
|
275
|
+
await q.ack(msgs[0].key);
|
|
276
|
+
await q.nack(msgs[0].key);
|
|
277
|
+
const peeked = await q.peek(10);
|
|
278
|
+
const dead = await q.dlq();
|
|
279
|
+
await q.purge(); // delete pending
|
|
280
|
+
await q.purge({ all: true }); // delete all (pending + inflight + DLQ)
|
|
281
|
+
```
|
|
282
|
+
|
|
283
|
+
## Rules
|
|
284
|
+
|
|
285
|
+
Path patterns with `$wildcard` capture; the most-specific match wins. Supports booleans, functions, expression strings, JSON config files, and TS config files.
|
|
286
|
+
|
|
287
|
+
```typescript
|
|
288
|
+
// Expression syntax: auth.*, $wildcard, data/newData, comparison operators (=== as in the examples below; also ==, !=, >=, <), &&, ||, !, parens, null
|
|
289
|
+
rules: {
|
|
290
|
+
'users/$uid': { read: true, write: "auth.uid === $uid" },
|
|
291
|
+
'admin': { write: (ctx) => ctx.auth?.role === 'admin' },
|
|
292
|
+
}
|
|
293
|
+
```
|
|
294
|
+
|
|
295
|
+
## Event Streaming (Kafka-like)
|
|
296
|
+
|
|
297
|
+
```typescript
|
|
298
|
+
// Server-side: push events to a topic
|
|
299
|
+
const key = db.push('events/orders', { orderId: 'o1', amount: 42 });
|
|
300
|
+
|
|
301
|
+
// Idempotent push (dedup by key)
|
|
302
|
+
db.push('events/orders', { orderId: 'o1' }, { idempotencyKey: 'order-o1' });
|
|
303
|
+
|
|
304
|
+
// Read unprocessed events for a consumer group
|
|
305
|
+
const events = db.stream.read('events/orders', 'billing', 100);
|
|
306
|
+
// Acknowledge processing
|
|
307
|
+
db.stream.ack('events/orders', 'billing', events[events.length - 1].key);
|
|
308
|
+
|
|
309
|
+
// Subscribe with replay (no event loss on reconnect)
|
|
310
|
+
const unsub = db.stream.subscribe('events/orders', 'billing', (events) => {
|
|
311
|
+
for (const e of events) {
|
|
312
|
+
processOrder(e.data);
|
|
313
|
+
}
|
|
314
|
+
});
|
|
315
|
+
unsub();
|
|
316
|
+
```
|
|
317
|
+
|
|
318
|
+
### Stream Compaction (Snapshot Model)
|
|
319
|
+
|
|
320
|
+
Compaction folds events into a **snapshot** (base state), then deletes them.
|
|
321
|
+
Live events always layer on top of the snapshot. New consumers bootstrap from snapshot + events.
|
|
322
|
+
|
|
323
|
+
```typescript
|
|
324
|
+
// Manual compaction — folds events into snapshot
|
|
325
|
+
db.stream.compact('events/orders', { maxAge: 86400 }); // fold events older than 24h
|
|
326
|
+
db.stream.compact('events/orders', { maxCount: 1000 }); // keep last 1000 live, fold rest
|
|
327
|
+
db.stream.compact('events/orders', { keepKey: 'orderId' }); // kafka-style: snapshot keyed by orderId
|
|
328
|
+
db.stream.compact('events/orders', { maxAge: 3600, maxCount: 500 }); // combine
|
|
329
|
+
|
|
330
|
+
// Auto-compact on sweep (runs on sweepInterval timer)
|
|
331
|
+
const db = new BodDB({
|
|
332
|
+
compact: {
|
|
333
|
+
'events/orders': { maxCount: 10000 },
|
|
334
|
+
'events/logs': { maxAge: 86400 },
|
|
335
|
+
},
|
|
336
|
+
sweepInterval: 60000,
|
|
337
|
+
});
|
|
338
|
+
|
|
339
|
+
// Read snapshot (base state)
|
|
340
|
+
const snap = db.stream.snapshot('events/orders');
|
|
341
|
+
// snap.key = last folded event key, snap.data = merged state
|
|
342
|
+
|
|
343
|
+
// Materialize: snapshot + live events merged into one view
|
|
344
|
+
const view = db.stream.materialize('events/orders', { keepKey: 'orderId' });
|
|
345
|
+
// view = { o1: {orderId:'o1', status:'completed'}, o2: {...}, ... }
|
|
346
|
+
|
|
347
|
+
// Safety: compaction never folds events beyond the minimum consumer group offset
|
|
348
|
+
```
|
|
349
|
+
|
|
350
|
+
### Client-side Streaming
|
|
351
|
+
|
|
352
|
+
```typescript
|
|
353
|
+
import { BodClient } from 'bod-db/client';
|
|
354
|
+
|
|
355
|
+
const client = new BodClient({ url: 'ws://localhost:4400' });
|
|
356
|
+
await client.connect();
|
|
357
|
+
|
|
358
|
+
const reader = client.stream('events/orders', 'billing');
|
|
359
|
+
|
|
360
|
+
// Manual read + ack
|
|
361
|
+
const events = await reader.read(50);
|
|
362
|
+
await reader.ack(events[events.length - 1].key);
|
|
363
|
+
|
|
364
|
+
// Live subscription (replays from last offset, then live)
|
|
365
|
+
const unsub = reader.on((events) => {
|
|
366
|
+
for (const e of events) {
|
|
367
|
+
console.log(e.key, e.val());
|
|
368
|
+
}
|
|
369
|
+
});
|
|
370
|
+
unsub();
|
|
371
|
+
|
|
372
|
+
// Idempotent push
|
|
373
|
+
await client.push('events/orders', { orderId: 'o1' }, { idempotencyKey: 'order-o1' });
|
|
374
|
+
|
|
375
|
+
// FTS search + index
|
|
376
|
+
await client.index('posts/p1', 'Hello world tutorial');
|
|
377
|
+
await client.index('posts/p1', ['title', 'body']);
|
|
378
|
+
const results = await client.search({ text: 'hello', path: 'posts', limit: 10 });
|
|
379
|
+
|
|
380
|
+
// Vector search + store
|
|
381
|
+
await client.vectorStore('docs/d1', [0.1, 0.2, 0.3]);
|
|
382
|
+
const similar = await client.vectorSearch({ query: [0.1, 0.2, 0.3], path: 'docs', limit: 5 });
|
|
383
|
+
|
|
384
|
+
// Stream snapshot, materialize, compact, reset
|
|
385
|
+
const snap = await client.streamSnapshot('events/orders');
|
|
386
|
+
const view = await client.streamMaterialize('events/orders', { keepKey: 'orderId' });
|
|
387
|
+
await client.streamCompact('events/orders', { maxAge: 86400 });
|
|
388
|
+
await client.streamReset('events/orders');
|
|
389
|
+
```
|
|
390
|
+
|
|
391
|
+
## Best Practices
|
|
392
|
+
|
|
393
|
+
1. **Paths are your schema** — design upfront (`users/$uid/settings/theme`)
|
|
394
|
+
2. **Flat is fast** — deeply nested objects create many rows
|
|
395
|
+
3. **Use `push()` for append-only** — logs, events, messages
|
|
396
|
+
4. **Use transforms** — `increment()` is atomic, no read-modify-write
|
|
397
|
+
5. **TTL for sessions** — auto-cleanup with sweep
|
|
398
|
+
6. **Transactions for consistency** — multiple ops, single commit
|
|
399
|
+
7. **`batch()` over network** — single round-trip for multiple ops
|
|
400
|
+
8. **`:memory:` for tests** — fast, no cleanup
|
|
401
|
+
9. **`port: 0` in tests** — random available port
|
|
402
|
+
10. **Streams for event processing** — consumer groups with offset tracking, replay on reconnect
|
|
403
|
+
11. **Idempotent push** — dedup with `idempotencyKey` to prevent duplicate events
|