yutia.db 1.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +159 -0
- package/dist/core/BlockCodec.d.ts +46 -0
- package/dist/core/BlockCodec.js +84 -0
- package/dist/core/Compactor.d.ts +12 -0
- package/dist/core/Compactor.js +68 -0
- package/dist/core/Compression.d.ts +9 -0
- package/dist/core/Compression.js +38 -0
- package/dist/core/Datastore.d.ts +37 -0
- package/dist/core/Datastore.js +184 -0
- package/dist/core/IndexSnapshot.d.ts +6 -0
- package/dist/core/IndexSnapshot.js +33 -0
- package/dist/core/LRUCache.d.ts +10 -0
- package/dist/core/LRUCache.js +36 -0
- package/dist/core/Reader.d.ts +11 -0
- package/dist/core/Reader.js +66 -0
- package/dist/core/Recovery.d.ts +6 -0
- package/dist/core/Recovery.js +66 -0
- package/dist/core/SecondaryIndex.d.ts +10 -0
- package/dist/core/SecondaryIndex.js +45 -0
- package/dist/core/Writer.d.ts +45 -0
- package/dist/core/Writer.js +151 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +3 -0
- package/dist/types/index.d.ts +39 -0
- package/dist/types/index.js +1 -0
- package/dist/utils/Checksum.d.ts +4 -0
- package/dist/utils/Checksum.js +10 -0
- package/dist/utils/Crypto.d.ts +6 -0
- package/dist/utils/Crypto.js +26 -0
- package/dist/utils/QueryMatcher.d.ts +5 -0
- package/dist/utils/QueryMatcher.js +55 -0
- package/package.json +63 -0
package/README.md
ADDED
@@ -0,0 +1,159 @@
# YutiaDB (YDB3)

Ultra-lightweight embedded database optimized for **realtime logs**, **event streams**, and **AI datasets** — with crash-safe append-only storage and fast by-id reads.

> Storage format: **YDB3** = binary framing + JSON payload
> (`"YDB3" + version + [u32 len][json bytes]...`)

## Why YutiaDB?

YutiaDB is designed for workloads where **writes are massive & continuous**, and you need:

- **Very fast ingestion** (append-only)
- **Crash-safety** (power loss safe; last partial record is ignored on recovery)
- **Small memory overhead** (streaming reads, small cache, optional pointer index)
- **Easy export to JSON/JSONL** for analytics / AI training

This is **not** a replacement for full-featured query databases (MongoDB/Postgres).
It’s a purpose-built embedded DB for **append-heavy** use cases.

---

## Features

- ✅ Append-only realtime ingestion (framed JSON)
- ✅ Crash-safe recovery (truncated tail-safe)
- ✅ Fast `findOne({_id})` via pointer lookup (O(1) per read)
- ✅ Streaming scan for analytics (no full file load)
- ✅ Tombstone deletes (`_deleted: true`)
- ✅ Lightweight LRU-ish cache for hot records
- ✅ Works great for: logs, telemetry, dataset storage, user DB (by-id)

---

## Install (npm)

```bash
npm i yutiadb
# or
bun add yutiadb
# or
pnpm add yutiadb
```

---

## Quick Start

```ts
import { Datastore } from "yutiadb";

const db = new Datastore({
  filename: "./data/app.ydb",
  autoload: true,

  // performance/durability tuning
  durability: "batched", // "none" | "batched" | "immediate"
  batchBytes: 4 * 1024 * 1024,
  autoFlushMs: 50,
  fsyncEveryFlush: 1,

  maxCacheEntries: 1000,
  maxPendingDocs: 500_000,
} as any);

// write (realtime)
await db.insert({ type: "req", path: "/api/v1/items", ts: Date.now() });

// read by _id (fast)
const one = await db.findOne({ _id: "..." });

// scan query (stream scan)
const items = await db.find({ type: "req" });

// ensure durability (optional)
await db.flush(); // flush buffered writes
await db.drain(); // wait until all pending buffered writes are written

await db.close();
```

---

## Durability Modes

* `durability: "none"`
  Fastest. Data may be in OS buffers; power loss might lose last writes.

* `durability: "batched"` (recommended)
  Writes are buffered and fsync happens periodically (`fsyncEveryFlush`).

* `durability: "immediate"`
  Safest (fsync every flush). Slowest.

**Tip for logs:** use `"batched"` and compact/export offline if needed.

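To make the trade-off concrete, here is a small sketch of a logs-oriented setup, assuming the same option names shown in the Quick Start (the values are illustrative, not recommended defaults):

```ts
import { Datastore } from "yutiadb";

// Logs-oriented tuning (illustrative values). Option names follow the Quick Start;
// the `as any` cast mirrors the Quick Start example above.
const logDb = new Datastore({
  filename: "./data/requests.ydb",
  autoload: true,
  durability: "batched",      // buffered writes, periodic fsync
  batchBytes: 8 * 1024 * 1024,
  autoFlushMs: 100,
  fsyncEveryFlush: 1,
} as any);

// Hot path: insert() does not wait for disk in "batched" mode.
await logDb.insert({ type: "req", path: "/health", ts: Date.now() });

// After a critical write, or before shutdown, force durability explicitly.
await logDb.flush();
await logDb.close();
```
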
---

## Storage Format (YDB3)

File layout:

* Header:

  * `YDB3` (4 bytes)
  * version (1 byte)

* Records:

  * `len` (UInt32LE, 4 bytes)
  * `payload` (UTF-8 JSON bytes)

Recovery behavior:

* If a crash truncates the last record, the reader stops safely at the last valid record.

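The framing is simple enough to read without the library. The following is a hypothetical standalone sketch of a reader for the layout documented above; it is not part of the package, and the shipped `Writer`/`BlockCodec` may wrap records in additional block headers, so treat it only as an illustration of the framing and of the truncated-tail rule:

```ts
import { readFileSync } from "fs";

// Hypothetical reader for the documented layout:
// "YDB3" magic (4 bytes) + version (1 byte), then repeated [u32 len][JSON bytes].
function* readYdb3(path: string): Generator<unknown> {
  const buf = readFileSync(path);
  if (buf.length < 5 || buf.toString("utf8", 0, 4) !== "YDB3") {
    throw new Error("not a YDB3 file");
  }
  let off = 5; // skip magic + version byte
  while (off + 4 <= buf.length) {
    const len = buf.readUInt32LE(off);
    const start = off + 4;
    const end = start + len;
    if (end > buf.length) break; // truncated tail: stop at the last valid record
    yield JSON.parse(buf.toString("utf8", start, end));
    off = end;
  }
}

for (const doc of readYdb3("./data/app.ydb")) {
  console.log(doc);
}
```
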
---

## Use Cases

### Realtime Logs (Web/API)

* request logs
* audit logs
* webhook logs
* notification logs

### Dataset for AI

* store training samples (JSON)
* export to JSONL later
* sequential scan for training

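For the "export to JSONL later" point above, a minimal sketch using only the public `find` API from the Quick Start (the helper name is ours; for very large datasets you would stream records instead of materializing them all at once):

```ts
import { createWriteStream } from "fs";
import { Datastore } from "yutiadb";

// Sketch: dump matching documents to a JSONL file for training pipelines.
// exportJsonl is our own helper name, not a package API.
async function exportJsonl(db: Datastore, query: any, outPath: string): Promise<void> {
  const out = createWriteStream(outPath);
  for (const doc of await db.find(query)) {
    out.write(JSON.stringify(doc) + "\n");
  }
  await new Promise<void>((resolve) => out.end(() => resolve()));
}

const db = new Datastore({ filename: "./data/train.ydb", autoload: true } as any);
await exportJsonl(db, { type: "sample" }, "./export/train.jsonl");
await db.close();
```
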
### User DB (by-id)

* fast `findOne({_id})`
* tombstone deletes
* optional compaction

---

## Limitations

* Query engine is intentionally minimal (stream scan + by-id pointer)
* Single-process embedded DB (not a network DB)
* Multi-field secondary indexing is not included (yet)

---

## Roadmap (optional)

* [ ] Time-window compaction for logs (keep last N days)
* [ ] Index snapshot `.idx` for faster startup
* [ ] Tail / live stream tool (`tail -f` for YDB)
* [ ] Export JSONL tool

---

## License

MIT

package/dist/core/BlockCodec.d.ts
ADDED
@@ -0,0 +1,46 @@
export type RecordPointer = {
    blockOffset: number;
    blockLength: number;
    recOffset: number;
    recLength: number;
    deleted?: boolean;
    updatedAt?: number;
    createdAt?: number;
};
export declare class BlockCodec {
    static HEADER_BYTES: number;
    static encodeBlock(params: {
        raw: Buffer;
        algo: number;
        flags: number;
        payload: Buffer;
    }): Buffer;
    static tryDecodeHeader(buf: Buffer, offset: number): {
        ok: false;
        reason: string;
        nextOffset: number;
        header?: undefined;
        payloadStart?: undefined;
        payloadEnd?: undefined;
    } | {
        ok: true;
        header: {
            version: number;
            flags: number;
            algo: number;
            blockLen: number;
            rawLen: number;
            crc: number;
        };
        payloadStart: number;
        payloadEnd: number;
        nextOffset: number;
        reason?: undefined;
    } | null;
    static verifyRaw(raw: Buffer, crc: number): boolean;
    static iterateRecords(raw: Buffer): Generator<{
        recOffset: number;
        recLength: number;
    }, void, unknown>;
    static buildRaw(recordsJson: string[]): Buffer<ArrayBuffer>;
}

package/dist/core/BlockCodec.js
ADDED
@@ -0,0 +1,84 @@
// src/core/BlockCodec.ts
import { createHash } from "crypto";
const MAGIC = Buffer.from("YDB2");
// magic(4) ver(1) flags(1) algo(1) reserved(1) blockLen(u32) rawLen(u32) crc(u32)
const HEADER_BYTES = 4 + 1 + 1 + 1 + 1 + 4 + 4 + 4;
function crc32ish(buf) {
    const h = createHash("sha256").update(buf).digest();
    return h.readUInt32LE(0);
}
export class BlockCodec {
    static HEADER_BYTES = HEADER_BYTES;
    static encodeBlock(params) {
        const { raw, algo, flags, payload } = params;
        const header = Buffer.alloc(HEADER_BYTES);
        MAGIC.copy(header, 0);
        header.writeUInt8(2, 4);
        header.writeUInt8(flags, 5);
        header.writeUInt8(algo, 6);
        header.writeUInt8(0, 7);
        header.writeUInt32LE(payload.length, 8);
        header.writeUInt32LE(raw.length, 12);
        header.writeUInt32LE(crc32ish(raw), 16);
        return Buffer.concat([header, payload]);
    }
    static tryDecodeHeader(buf, offset) {
        if (buf.length - offset < HEADER_BYTES)
            return null;
        if (buf[offset] !== MAGIC[0] ||
            buf[offset + 1] !== MAGIC[1] ||
            buf[offset + 2] !== MAGIC[2] ||
            buf[offset + 3] !== MAGIC[3]) {
            return {
                ok: false,
                reason: "bad_magic",
                nextOffset: offset + 1,
            };
        }
        const version = buf.readUInt8(offset + 4);
        const flags = buf.readUInt8(offset + 5);
        const algo = buf.readUInt8(offset + 6);
        const blockLen = buf.readUInt32LE(offset + 8);
        const rawLen = buf.readUInt32LE(offset + 12);
        const crc = buf.readUInt32LE(offset + 16);
        const payloadStart = offset + HEADER_BYTES;
        const payloadEnd = payloadStart + blockLen;
        if (payloadEnd > buf.length)
            return null;
        return {
            ok: true,
            header: { version, flags, algo, blockLen, rawLen, crc },
            payloadStart,
            payloadEnd,
            nextOffset: payloadEnd,
        };
    }
    static verifyRaw(raw, crc) {
        return crc32ish(raw) === crc;
    }
    // records inside raw: len(u32) + json bytes
    static *iterateRecords(raw) {
        let off = 0;
        while (off + 4 <= raw.length) {
            const len = raw.readUInt32LE(off);
            const start = off + 4;
            const end = start + len;
            if (end > raw.length)
                break;
            yield { recOffset: start, recLength: len };
            off = end;
        }
    }
    static buildRaw(recordsJson) {
        const parts = [];
        let total = 0;
        for (const j of recordsJson) {
            const payload = Buffer.from(j, "utf8");
            const header = Buffer.alloc(4);
            header.writeUInt32LE(payload.length, 0);
            parts.push(header, payload);
            total += 4 + payload.length;
        }
        return Buffer.concat(parts, total);
    }
}
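A quick roundtrip through the codec, shown as a hedged sketch based only on the methods above; the deep `dist` import path is an assumption, since the package root may only re-export `Datastore`:

```ts
// Deep dist import is an assumption; adjust to however your build resolves the package.
import { BlockCodec } from "yutiadb/dist/core/BlockCodec.js";

// Frame two JSON records, wrap them in a single uncompressed block (algo 0), then decode it back.
const raw = BlockCodec.buildRaw([
  JSON.stringify({ _id: "a", n: 1 }),
  JSON.stringify({ _id: "b", n: 2 }),
]);
const block = BlockCodec.encodeBlock({ raw, algo: 0, flags: 0, payload: raw });

const decoded = BlockCodec.tryDecodeHeader(block, 0);
if (decoded && decoded.ok) {
  const payload = block.subarray(decoded.payloadStart, decoded.payloadEnd);
  console.log(BlockCodec.verifyRaw(payload, decoded.header.crc)); // true: algo 0 keeps payload === raw
  for (const { recOffset, recLength } of BlockCodec.iterateRecords(payload)) {
    console.log(JSON.parse(payload.toString("utf8", recOffset, recOffset + recLength)));
  }
}
```
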

package/dist/core/Compactor.d.ts
ADDED
@@ -0,0 +1,12 @@
export type CompactOptions = {
    keepBackup?: boolean;
    batchBytes?: number;
    autoFlushMs?: number;
    durability?: "none" | "batched" | "immediate";
    fsyncEveryFlush?: number;
};
export declare class Compactor {
    private filename;
    constructor(filename: string);
    compact(opt?: CompactOptions): Promise<void>;
}

package/dist/core/Compactor.js
ADDED
@@ -0,0 +1,68 @@
// src/core/Compactor.ts
import { promises as fs } from "fs";
import { join, dirname } from "path";
import { Reader } from "./Reader.js";
import { Writer } from "./Writer.js";
export class Compactor {
    filename;
    constructor(filename) {
        this.filename = filename;
    }
    async compact(opt = {}) {
        const keepBackup = opt.keepBackup ?? false;
        const dir = dirname(this.filename);
        const base = this.filename.split(/[\\/]/).pop() || "db.ydb";
        const tmp = join(dir, base + ".tmp");
        const bak = join(dir, base + ".bak");
        await fs.mkdir(dir, { recursive: true }).catch(() => { });
        await fs.rm(tmp, { force: true }).catch(() => { });
        const reader = new Reader(this.filename);
        // Dedupe: keep only latest version per _id
        // NOTE: For massive DB, this Map will eat RAM.
        // For logs use-case, you can disable dedupe (append-only) or compact by time window.
        const latest = new Map();
        for await (const { doc } of reader.scan({})) {
            if (!doc || !doc._id)
                continue;
            if (doc._deleted) {
                latest.delete(doc._id);
                continue;
            }
            const id = String(doc._id);
            const prev = latest.get(id);
            if (!prev) {
                latest.set(id, doc);
                continue;
            }
            const prevTs = Number(prev.updatedAt ?? prev.createdAt ?? 0);
            const curTs = Number(doc.updatedAt ?? doc.createdAt ?? 0);
            if (curTs >= prevTs)
                latest.set(id, doc);
        }
        // Write compacted file using NEW Writer (YDB3)
        const writer = new Writer(tmp, {
            batchBytes: opt.batchBytes ?? 4 * 1024 * 1024,
            autoFlushMs: opt.autoFlushMs ?? 50,
            durability: opt.durability ?? "batched",
            fsyncEveryFlush: opt.fsyncEveryFlush ?? 1,
        });
        // Write all docs
        for (const doc of latest.values()) {
            writer.enqueue(JSON.stringify(doc));
        }
        await writer.drain();
        await writer.close();
        // Atomic replace
        // Optionally keep backup
        if (keepBackup) {
            await fs.rm(bak, { force: true }).catch(() => { });
            await fs.rename(this.filename, bak).catch(() => { });
        }
        else {
            await fs.rm(this.filename, { force: true }).catch(() => { });
        }
        await fs.rename(tmp, this.filename);
        // cleanup
        await fs.rm(tmp, { force: true }).catch(() => { });
    }
}
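Usage sketch for the compactor; the deep `dist` import path is an assumption, and since `compact()` rewrites the data file in place, it should run while no writer has the file open:

```ts
// Deep dist import is an assumption; adjust to however your build resolves the package.
import { Compactor } from "yutiadb/dist/core/Compactor.js";

// Rewrites ./data/app.ydb keeping only the latest version of each _id;
// keepBackup leaves the previous file behind as app.ydb.bak.
const compactor = new Compactor("./data/app.ydb");
await compactor.compact({ keepBackup: true, durability: "batched" });
```
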

package/dist/core/Compression.d.ts
ADDED
@@ -0,0 +1,9 @@
import { CompressionLevel } from "../types/index.js";
export type Algo = 0 | 1 | 2;
export declare function pickAlgo(level: CompressionLevel): Algo;
export declare function compress(level: CompressionLevel, raw: Buffer): {
    algo: Algo;
    payload: Buffer;
    flags: number;
};
export declare function decompress(algo: Algo, rawLen: number, payload: Buffer): Buffer;

package/dist/core/Compression.js
ADDED
@@ -0,0 +1,38 @@
// src/core/Compression.ts
import zlib from "zlib";
export function pickAlgo(level) {
    if (level === "off")
        return 0;
    return 2; // brotli default (small file)
}
export function compress(level, raw) {
    const algo = pickAlgo(level);
    if (algo === 0)
        return { algo, payload: raw, flags: 0 };
    if (algo === 1) {
        const gzLevel = level === "fast" ? 1 : level === "balanced" ? 6 : 9;
        return { algo, payload: zlib.gzipSync(raw, { level: gzLevel }), flags: 1 };
    }
    const q = level === "fast" ? 3 : level === "balanced" ? 5 : 9;
    return {
        algo,
        payload: zlib.brotliCompressSync(raw, {
            params: {
                [zlib.constants.BROTLI_PARAM_QUALITY]: q,
                [zlib.constants.BROTLI_PARAM_SIZE_HINT]: raw.length,
            },
        }),
        flags: 1,
    };
}
export function decompress(algo, rawLen, payload) {
    if (algo === 0)
        return payload;
    if (algo === 1)
        return zlib.gunzipSync(payload);
    const out = zlib.brotliDecompressSync(payload);
    // rawLen mismatch not fatal; CRC will decide
    if (rawLen > 0 && out.length !== rawLen) {
    }
    return out;
}
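A roundtrip sketch for the helpers above; it assumes `"balanced"` is a valid `CompressionLevel` literal (the code branches on `"off"`, `"fast"`, and `"balanced"`) and that the deep `dist` import path resolves:

```ts
// Deep dist import and the "balanced" literal are assumptions based on the code above.
import { compress, decompress } from "yutiadb/dist/core/Compression.js";

const raw = Buffer.from(JSON.stringify({ msg: "hello ".repeat(200) }), "utf8");

const stored = compress("balanced", raw);                 // algo 2 => brotli
const restored = decompress(stored.algo, raw.length, stored.payload);

console.log(stored.algo, stored.payload.length < raw.length, restored.equals(raw)); // 2 true true
```
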

package/dist/core/Datastore.d.ts
ADDED
@@ -0,0 +1,37 @@
import EventEmitter from "eventemitter3";
import { Doc, Query, UpdateQuery, YutiaOptions } from "../types/index.js";
import { Durability } from "./Writer.js";
export declare class Datastore extends EventEmitter {
    private filename;
    private writer;
    private reader;
    private q;
    private byId;
    private cache;
    private cacheLimit;
    private maxPendingDocs;
    private closed;
    constructor(opt: YutiaOptions & {
        durability?: Durability;
        batchBytes?: number;
        autoFlushMs?: number;
        fsyncEveryFlush?: number;
        maxCacheEntries?: number;
        maxPendingDocs?: number;
        autoload?: boolean;
    });
    private touchCache;
    load(): Promise<void>;
    /**
     * Realtime insert: never blocks on disk.
     * If you need guaranteed durability NOW, call await db.flush() or set durability=immediate.
     */
    insert(doc: Doc): Promise<Doc>;
    flush(): Promise<void>;
    drain(): Promise<void>;
    findOne(query: Query<Doc>): Promise<Doc | null>;
    find(query: Query<Doc>): Promise<Doc[]>;
    update(query: Query<Doc>, update: UpdateQuery): Promise<number>;
    remove(query: Query<Doc>): Promise<number>;
    close(): Promise<void>;
}

package/dist/core/Datastore.js
ADDED
@@ -0,0 +1,184 @@
// src/core/Datastore.ts
import EventEmitter from "eventemitter3";
import { randomUUID } from "crypto";
import PQueue from "p-queue";
import { Writer } from "./Writer.js";
import { Reader } from "./Reader.js";
export class Datastore extends EventEmitter {
    filename;
    writer;
    reader;
    // serialize only heavy ops
    q = new PQueue({ concurrency: 1 });
    // pointer index
    byId = new Map();
    // lightweight cache
    cache = new Map();
    cacheLimit;
    // backpressure
    maxPendingDocs;
    closed = false;
    constructor(opt) {
        super();
        this.filename = opt.filename;
        this.cacheLimit = Math.max(100, Number(opt.maxCacheEntries ?? 5_000));
        this.maxPendingDocs = Math.max(10_000, Number(opt.maxPendingDocs ?? 500_000));
        this.writer = new Writer(this.filename, {
            batchBytes: opt.batchBytes ?? 4 * 1024 * 1024,
            autoFlushMs: opt.autoFlushMs ?? 50,
            durability: opt.durability ?? "batched",
            fsyncEveryFlush: opt.fsyncEveryFlush ?? 1,
        });
        this.reader = new Reader(this.filename);
        const autoload = opt.autoload ?? true;
        if (autoload)
            void this.load();
    }
    touchCache(id, doc) {
        if (this.cache.has(id))
            this.cache.delete(id);
        this.cache.set(id, doc);
        if (this.cache.size > this.cacheLimit) {
            const oldest = this.cache.keys().next().value;
            if (oldest)
                this.cache.delete(oldest);
        }
    }
    async load() {
        await this.q.add(async () => {
            for await (const { doc, ptr } of this.reader.scan({})) {
                if (!doc?._id)
                    continue;
                if (doc._deleted) {
                    this.byId.delete(doc._id);
                    this.cache.delete(doc._id);
                    continue;
                }
                this.byId.set(doc._id, ptr);
            }
        });
    }
    /**
     * Realtime insert: never blocks on disk.
     * If you need guaranteed durability NOW, call await db.flush() or set durability=immediate.
     */
    async insert(doc) {
        if (this.closed)
            throw new Error("closed");
        if (!doc._id)
            doc._id = randomUUID();
        if (!doc.createdAt)
            doc.createdAt = Date.now();
        doc.updatedAt = doc.updatedAt ?? doc.createdAt;
        const json = JSON.stringify(doc);
        const { ptr } = this.writer.enqueue(json);
        // update pointer index immediately (valid after flush, but pointer is correct)
        this.byId.set(doc._id, ptr);
        this.touchCache(doc._id, doc);
        this.emit("insert", doc);
        // backpressure: if pending too high, force flush (but not deadlock)
        if (this.writer.getPendingCount() > this.maxPendingDocs) {
            void this.flush().catch(() => { });
        }
        return doc;
    }
    async flush() {
        await this.q.add(async () => {
            await this.writer.flush();
        });
    }
    async drain() {
        await this.q.add(async () => {
            await this.writer.drain();
        });
    }
    async findOne(query) {
        if (this.closed)
            throw new Error("closed");
        if (query._id) {
            const id = String(query._id);
            const cached = this.cache.get(id);
            if (cached && !cached._deleted)
                return cached;
            const ptr = this.byId.get(id);
            if (!ptr)
                return null;
            const doc = await this.reader.readByPointer(ptr);
            if (!doc || doc._deleted)
                return null;
            this.touchCache(id, doc);
            return doc;
        }
        // fallback scan
        let last = null;
        for await (const { doc } of this.reader.scan(query)) {
            if (doc._deleted)
                continue;
            last = doc;
        }
        if (last?._id)
            this.touchCache(last._id, last);
        return last;
    }
    async find(query) {
        if (this.closed)
            throw new Error("closed");
        if (query._id) {
            const one = await this.findOne(query);
            return one ? [one] : [];
        }
        const out = [];
        for await (const { doc } of this.reader.scan(query)) {
            if (doc._deleted)
                continue;
            out.push(doc);
        }
        return out;
    }
    async update(query, update) {
        const docs = await this.find(query);
        let c = 0;
        for (const d of docs) {
            const n = { ...d };
            if (update.$set)
                Object.assign(n, update.$set);
            if (update.$inc) {
                for (const k of Object.keys(update.$inc))
                    n[k] = (n[k] ?? 0) + update.$inc[k];
            }
            if (update.$unset) {
                for (const k of Object.keys(update.$unset))
                    delete n[k];
            }
            n.updatedAt = Date.now();
            await this.insert(n);
            c++;
        }
        return c;
    }
    async remove(query) {
        const docs = await this.find(query);
        let c = 0;
        for (const d of docs) {
            if (!d._id)
                continue;
            await this.insert({
                _id: d._id,
                _deleted: true,
                updatedAt: Date.now(),
            });
            this.byId.delete(d._id);
            this.cache.delete(d._id);
            c++;
        }
        return c;
    }
    async close() {
        if (this.closed)
            return;
        this.closed = true;
        await this.drain();
        await this.writer.close();
        this.q.clear();
    }
}
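The README does not show `update`/`remove`; the sketch below follows the declaration and implementation above. Both are append-only: `update` writes a new version of each matched document and `remove` writes a `_deleted` tombstone that compaction later reclaims:

```ts
import { Datastore } from "yutiadb";

const db = new Datastore({ filename: "./data/users.ydb", autoload: true } as any);

const user = await db.insert({ name: "Ada", logins: 0 });
const id = String(user._id);

// update() appends a new version of each matched document (no in-place rewrite).
await db.update({ _id: id }, { $set: { plan: "pro" }, $inc: { logins: 1 } });

// remove() appends a { _deleted: true } tombstone; compaction reclaims the space later.
await db.remove({ _id: id });

await db.close();
```
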

package/dist/core/IndexSnapshot.d.ts
ADDED
@@ -0,0 +1,6 @@
import { RecordPointer } from "./BlockCodec.js";
export declare class IndexSnapshot {
    static idxName(dataFilename: string): string;
    static load(dataFilename: string): Promise<Map<string, RecordPointer> | null>;
    static saveAtomic(dataFilename: string, map: Map<string, RecordPointer>): Promise<void>;
}

package/dist/core/IndexSnapshot.js
ADDED
@@ -0,0 +1,33 @@
// src/core/IndexSnapshot.ts
import { readFile, writeFile, rename } from "fs/promises";
export class IndexSnapshot {
    static idxName(dataFilename) {
        return dataFilename + ".idx";
    }
    static async load(dataFilename) {
        const idx = this.idxName(dataFilename);
        try {
            const txt = await readFile(idx, "utf8");
            const snap = JSON.parse(txt);
            if (!snap || snap.version !== 1 || !snap.byId)
                return null;
            const m = new Map();
            for (const [id, ptr] of Object.entries(snap.byId))
                m.set(id, ptr);
            return m;
        }
        catch {
            return null;
        }
    }
    static async saveAtomic(dataFilename, map) {
        const idx = this.idxName(dataFilename);
        const tmp = idx + ".tmp";
        const byId = {};
        for (const [id, ptr] of map.entries())
            byId[id] = ptr;
        const snap = { version: 1, createdAt: Date.now(), byId };
        await writeFile(tmp, JSON.stringify(snap));
        await rename(tmp, idx);
    }
}
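A sketch of saving and reloading a pointer-map snapshot with this class; the deep `dist` import paths and the sample pointer values are assumptions, and the `Datastore` shown above does not load `.idx` snapshots yet (the README roadmap lists them as pending):

```ts
// Deep dist imports and the sample pointer values are assumptions for illustration.
import { IndexSnapshot } from "yutiadb/dist/core/IndexSnapshot.js";
import type { RecordPointer } from "yutiadb/dist/core/BlockCodec.js";

// Persist a pointer map next to the data file as app.ydb.idx (written to .tmp, then renamed).
const byId = new Map<string, RecordPointer>();
byId.set("user-1", { blockOffset: 0, blockLength: 256, recOffset: 4, recLength: 64 });
await IndexSnapshot.saveAtomic("./data/app.ydb", byId);

// On startup, load() returns the map, or null if the snapshot is missing or unreadable.
const restored = await IndexSnapshot.load("./data/app.ydb");
console.log(restored?.get("user-1"));
```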