yutia.db 1.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,36 @@
+ // src/core/LRUCache.ts
+ export class LRUCache {
+     maxEntries;
+     map = new Map();
+     constructor(maxEntries) {
+         this.maxEntries = maxEntries;
+     }
+     get(key) {
+         const v = this.map.get(key);
+         if (v === undefined)
+             return undefined;
+         this.map.delete(key);
+         this.map.set(key, v);
+         return v;
+     }
+     set(key, value) {
+         if (this.map.has(key))
+             this.map.delete(key);
+         this.map.set(key, value);
+         while (this.map.size > this.maxEntries) {
+             const oldest = this.map.keys().next().value;
+             if (oldest === undefined)
+                 break;
+             this.map.delete(oldest);
+         }
+     }
+     delete(key) {
+         this.map.delete(key);
+     }
+     has(key) {
+         return this.map.has(key);
+     }
+     clear() {
+         this.map.clear();
+     }
+ }
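For reference, a minimal usage sketch of the LRU eviction behavior above (the instance and its capacity are illustrative, not taken from the package):

    const cache = new LRUCache(2);      // hypothetical capacity for illustration
    cache.set("a", 1);
    cache.set("b", 2);
    cache.get("a");                     // touching "a" re-inserts it at the most-recent end
    cache.set("c", 3);                  // size exceeds maxEntries, so the oldest key ("b") is evicted
    cache.has("b");                     // false
    cache.has("a") && cache.has("c");   // true
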
@@ -0,0 +1,11 @@
+ import { Query } from "../types/index.js";
+ import type { RecordPtr } from "./Writer.js";
+ export declare class Reader {
+     private filename;
+     constructor(filename: string);
+     readByPointer<T>(ptr: RecordPtr): Promise<T | null>;
+     scan<T>(query: Query<T>): AsyncIterable<{
+         doc: T;
+         ptr: RecordPtr;
+     }>;
+ }
@@ -0,0 +1,66 @@
+ // src/core/Reader.ts
+ import fs from "fs";
+ import { QueryMatcher } from "../utils/QueryMatcher.js";
+ const MAGIC = Buffer.from("YDB3");
+ export class Reader {
+     filename;
+     constructor(filename) {
+         this.filename = filename;
+     }
+     async readByPointer(ptr) {
+         const fd = fs.openSync(this.filename, "r");
+         try {
+             const buf = Buffer.allocUnsafe(ptr.length);
+             const got = fs.readSync(fd, buf, 0, ptr.length, ptr.offset);
+             if (got !== ptr.length)
+                 return null;
+             return JSON.parse(buf.toString("utf8"));
+         }
+         finally {
+             fs.closeSync(fd);
+         }
+     }
+     async *scan(query) {
+         const fd = fs.openSync(this.filename, "r");
+         try {
+             const header = Buffer.allocUnsafe(5);
+             const hGot = fs.readSync(fd, header, 0, 5, 0);
+             if (hGot < 5)
+                 return;
+             if (!header.subarray(0, 4).equals(MAGIC))
+                 return;
+             let pos = 5;
+             const st = fs.fstatSync(fd);
+             const end = st.size;
+             const lenBuf = Buffer.allocUnsafe(4);
+             while (pos + 4 <= end) {
+                 const gotLen = fs.readSync(fd, lenBuf, 0, 4, pos);
+                 if (gotLen !== 4)
+                     break;
+                 const len = lenBuf.readUInt32LE(0);
+                 const jsonOffset = pos + 4;
+                 const next = jsonOffset + len;
+                 // recovery-safe: if record cut off, stop scanning
+                 if (next > end)
+                     break;
+                 const payload = Buffer.allocUnsafe(len);
+                 const got = fs.readSync(fd, payload, 0, len, jsonOffset);
+                 if (got !== len)
+                     break;
+                 try {
+                     const doc = JSON.parse(payload.toString("utf8"));
+                     if (QueryMatcher.match(doc, query)) {
+                         yield { doc, ptr: { offset: jsonOffset, length: len } };
+                     }
+                 }
+                 catch {
+                     // skip corrupt record but continue
+                 }
+                 pos = next;
+             }
+         }
+         finally {
+             fs.closeSync(fd);
+         }
+     }
+ }
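A sketch of how this scan loop might be driven (file path and query are invented for illustration; scan only yields records whose length prefix and JSON both parse cleanly, and stops at the first truncated frame):

    const reader = new Reader("./data/logs.ydb");   // hypothetical file path
    for await (const { doc, ptr } of reader.scan({ level: "error" })) {
        console.log(ptr.offset, ptr.length, doc);
        // a yielded pointer can later be used for a direct read without re-scanning
        const again = await reader.readByPointer(ptr);
    }
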
@@ -0,0 +1,6 @@
+ export declare class Recovery {
+     static recoverFile(filename: string): Promise<{
+         truncated: boolean;
+         validBytes: number;
+     }>;
+ }
@@ -0,0 +1,66 @@
+ // src/core/Recovery.ts
+ import { open } from "fs/promises";
+ import { BlockCodec } from "./BlockCodec.js";
+ import { decompress } from "./Compression.js";
+ export class Recovery {
+     static async recoverFile(filename) {
+         try {
+             const fd = await open(filename, "r+");
+             try {
+                 const stat = await fd.stat();
+                 const size = stat.size;
+                 if (size === 0)
+                     return { truncated: false, validBytes: 0 };
+                 const CHUNK = 4 * 1024 * 1024;
+                 let fileOffset = 0;
+                 let carry = Buffer.alloc(0);
+                 let lastGoodAbs = 0;
+                 while (fileOffset < size) {
+                     const toRead = Math.min(CHUNK, size - fileOffset);
+                     const buf = Buffer.alloc(toRead);
+                     const { bytesRead } = await fd.read(buf, 0, toRead, fileOffset);
+                     if (bytesRead <= 0)
+                         break;
+                     let buffer = Buffer.concat([carry, buf.subarray(0, bytesRead)]);
+                     let off = 0;
+                     while (true) {
+                         const d = BlockCodec.tryDecodeHeader(buffer, off);
+                         if (!d)
+                             break;
+                         if (!d.ok) {
+                             off = d.nextOffset;
+                             continue;
+                         }
+                         const payload = buffer.subarray(d.payloadStart, d.payloadEnd);
+                         let raw;
+                         try {
+                             raw = decompress(d.header.algo, d.header.rawLen, payload);
+                         }
+                         catch {
+                             break;
+                         }
+                         if (!BlockCodec.verifyRaw(raw, d.header.crc))
+                             break;
+                         lastGoodAbs = fileOffset - carry.length + d.payloadEnd;
+                         off = d.payloadEnd;
+                     }
+                     carry = buffer.subarray(off);
+                     fileOffset += bytesRead;
+                 }
+                 if (lastGoodAbs < size) {
+                     await fd.truncate(lastGoodAbs);
+                     return { truncated: true, validBytes: lastGoodAbs };
+                 }
+                 return { truncated: false, validBytes: size };
+             }
+             finally {
+                 await fd.close();
+             }
+         }
+         catch (err) {
+             if (err?.code === "ENOENT")
+                 return { truncated: false, validBytes: 0 };
+             throw err;
+         }
+     }
+     }
+ }
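Recovery walks the file block by block via BlockCodec and decompress (defined elsewhere in the package), remembers the end of the last block whose CRC verifies, and truncates anything after it. A hedged sketch of calling it on startup (the path is illustrative):

    // Assumed startup flow: repair the data file before opening readers/writers.
    const result = await Recovery.recoverFile("./data/main.ydb");  // hypothetical path
    if (result.truncated) {
        console.warn(`trailing garbage removed, ${result.validBytes} valid bytes kept`);
    }
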
@@ -0,0 +1,10 @@
+ export declare class SecondaryIndex {
+     private fields;
+     private idx;
+     ensure(field: string): void;
+     addDoc(doc: any): void;
+     removeIdEverywhere(id: string): void;
+     getIds(field: string, value: any): Set<string> | null;
+     listFields(): string[];
+     clear(): void;
+ }
@@ -0,0 +1,45 @@
+ // src/core/SecondaryIndex.ts
+ export class SecondaryIndex {
+     fields = new Set();
+     idx = new Map();
+     ensure(field) {
+         this.fields.add(field);
+         if (!this.idx.has(field))
+             this.idx.set(field, new Map());
+     }
+     addDoc(doc) {
+         const id = String(doc?._id ?? "");
+         if (!id)
+             return;
+         for (const f of this.fields) {
+             const v0 = doc[f];
+             if (v0 === undefined || v0 === null)
+                 continue;
+             const v = String(v0);
+             const m = this.idx.get(f);
+             if (!m.has(v))
+                 m.set(v, new Set());
+             m.get(v).add(id);
+         }
+     }
+     removeIdEverywhere(id) {
+         for (const [, m] of this.idx) {
+             for (const [, s] of m)
+                 s.delete(id);
+         }
+     }
+     getIds(field, value) {
+         const m = this.idx.get(field);
+         if (!m)
+             return null;
+         const v = value === undefined || value === null ? "" : String(value);
+         return m.get(v) ?? null;
+     }
+     listFields() {
+         return [...this.fields];
+     }
+     clear() {
+         this.idx.clear();
+         this.fields.clear();
+     }
+ }
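A small sketch of how this in-memory index behaves (documents and field names are invented; values are stringified before bucketing, so 42 and "42" land in the same bucket):

    const index = new SecondaryIndex();
    index.ensure("status");                          // start indexing the "status" field
    index.addDoc({ _id: "u1", status: "active" });
    index.addDoc({ _id: "u2", status: "active" });
    index.getIds("status", "active");                // Set { "u1", "u2" }
    index.removeIdEverywhere("u1");
    index.getIds("status", "active");                // Set { "u2" }
    index.getIds("name", "x");                       // null: field was never ensured
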
@@ -0,0 +1,45 @@
+ export type Durability = "none" | "batched" | "immediate";
+ export type RecordPtr = {
+     offset: number;
+     length: number;
+ };
+ type WriterOptions = {
+     batchBytes?: number;
+     autoFlushMs?: number;
+     durability?: Durability;
+     fsyncEveryFlush?: number;
+ };
+ export declare class Writer {
+     private filename;
+     private fd;
+     private offset;
+     private queue;
+     private queuedBytes;
+     private batchBytes;
+     private autoFlushMs;
+     private durability;
+     private fsyncEveryFlush;
+     private flushTimer;
+     private flushing;
+     private closed;
+     private flushCount;
+     totalEnqueued: number;
+     totalFlushedRecords: number;
+     constructor(filename: string, opt?: WriterOptions);
+     open(): Promise<void>;
+     /**
+      * Super-fast enqueue. DOES NOT BLOCK on disk.
+      * Returns pointer info that will be valid after flush.
+      * (If you need pointer immediately, call drain/flush)
+      */
+     enqueue(json: string): {
+         ptr: RecordPtr;
+         recordBytes: number;
+     };
+     getPendingCount(): number;
+     getPendingBytes(): number;
+     flush(): Promise<void>;
+     drain(): Promise<void>;
+     close(): Promise<void>;
+ }
+ export {};
@@ -0,0 +1,151 @@
+ // src/core/Writer.ts
+ import { promises as fsp } from "fs";
+ import fs from "fs";
+ const MAGIC = Buffer.from("YDB3"); // 4 bytes
+ const VERSION = 1;
+ function writeFsync(fd) {
+     return new Promise((resolve, reject) => {
+         fs.fsync(fd, (err) => (err ? reject(err) : resolve()));
+     });
+ }
+ export class Writer {
+     filename;
+     fd = null;
+     offset = 0;
+     queue = [];
+     queuedBytes = 0;
+     batchBytes;
+     autoFlushMs;
+     durability;
+     fsyncEveryFlush;
+     flushTimer = null;
+     flushing = false;
+     closed = false;
+     flushCount = 0;
+     // stats
+     totalEnqueued = 0;
+     totalFlushedRecords = 0;
+     constructor(filename, opt = {}) {
+         this.filename = filename;
+         this.batchBytes = opt.batchBytes ?? 4 * 1024 * 1024; // 4MB default
+         this.autoFlushMs = opt.autoFlushMs ?? 50;
+         this.durability = opt.durability ?? "batched";
+         this.fsyncEveryFlush = opt.fsyncEveryFlush ?? 1;
+         this.flushTimer = setInterval(() => {
+             void this.flush().catch(() => { });
+         }, this.autoFlushMs);
+     }
+     async open() {
+         if (this.fd !== null)
+             return;
+         await fsp
+             .mkdir(require("path").dirname(this.filename), { recursive: true })
+             .catch(() => { });
+         const exists = await fsp
+             .stat(this.filename)
+             .then(() => true)
+             .catch(() => false);
+         this.fd = fs.openSync(this.filename, "a+");
+         if (!exists) {
+             // write header
+             const header = Buffer.alloc(5);
+             MAGIC.copy(header, 0);
+             header.writeUInt8(VERSION, 4);
+             fs.writeSync(this.fd, header);
+             this.offset = header.length;
+         }
+         else {
+             const st = fs.fstatSync(this.fd);
+             this.offset = st.size;
+             // if file too small / invalid, you can handle here, but we keep it simple
+         }
+     }
+     /**
+      * Super-fast enqueue. DOES NOT BLOCK on disk.
+      * Returns pointer info that will be valid after flush.
+      * (If you need pointer immediately, call drain/flush)
+      */
+     enqueue(json) {
+         if (this.closed)
+             throw new Error("Writer closed");
+         const payload = Buffer.from(json, "utf8");
+         const len = payload.length;
+         const frame = Buffer.allocUnsafe(4 + len);
+         frame.writeUInt32LE(len, 0);
+         payload.copy(frame, 4);
+         // ptr offset points to JSON bytes (not including length prefix)
+         const ptr = {
+             offset: this.offset + this.queuedBytes + 4,
+             length: len,
+         };
+         this.queue.push(frame);
+         this.queuedBytes += frame.length;
+         this.totalEnqueued++;
+         // trigger flush without awaiting
+         if (this.queuedBytes >= this.batchBytes) {
+             void this.flush().catch(() => { });
+         }
+         return { ptr, recordBytes: frame.length };
+     }
+     getPendingCount() {
+         // pending records count
+         return this.queue.length;
+     }
+     getPendingBytes() {
+         return this.queuedBytes;
+     }
+     async flush() {
+         if (this.closed)
+             return;
+         if (this.flushing)
+             return;
+         if (this.queue.length === 0)
+             return;
+         this.flushing = true;
+         try {
+             await this.open();
+             const fd = this.fd;
+             const batch = this.queue;
+             this.queue = [];
+             const bytes = this.queuedBytes;
+             this.queuedBytes = 0;
+             // concat once per flush (big, but far less GC than per-doc concat)
+             const buf = Buffer.concat(batch, bytes);
+             // write at end (append)
+             fs.writeSync(fd, buf, 0, buf.length, this.offset);
+             this.offset += buf.length;
+             this.totalFlushedRecords += batch.length;
+             this.flushCount++;
+             if (this.durability === "immediate") {
+                 await writeFsync(fd);
+             }
+             else if (this.durability === "batched") {
+                 if (this.flushCount % this.fsyncEveryFlush === 0) {
+                     await writeFsync(fd);
+                 }
+             }
+         }
+         finally {
+             this.flushing = false;
+         }
+     }
+     async drain() {
+         // ensure all queued data is on disk
+         while (this.queue.length > 0 || this.flushing) {
+             await this.flush();
+             await new Promise((r) => setTimeout(r, 5));
+         }
+     }
+     async close() {
+         if (this.closed)
+             return;
+         this.closed = true;
+         if (this.flushTimer)
+             clearInterval(this.flushTimer);
+         await this.drain();
+         if (this.fd !== null) {
+             fs.closeSync(this.fd);
+             this.fd = null;
+         }
+     }
+ }
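A sketch of the enqueue/flush lifecycle (the filename and option values are illustrative; enqueue only buffers a length-prefixed frame in memory, and the returned pointer becomes valid once a flush has appended it behind the 5-byte YDB3 header):

    const writer = new Writer("./data/main.ydb", {   // hypothetical path and options
        durability: "batched",
        batchBytes: 1024 * 1024,
        autoFlushMs: 25,
    });
    await writer.open();
    const { ptr } = writer.enqueue(JSON.stringify({ _id: "a1", msg: "hello" }));
    await writer.drain();                            // wait until the queued frame is on disk
    // ptr.offset / ptr.length can now be handed to Reader.readByPointer
    await writer.close();
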
@@ -0,0 +1,3 @@
+ export { Datastore } from "./core/Datastore.js";
+ export { Compactor } from "./core/Compactor.js";
+ export * from "./types/index.js";
package/dist/index.js ADDED
@@ -0,0 +1,3 @@
+ export { Datastore } from "./core/Datastore.js";
+ export { Compactor } from "./core/Compactor.js";
+ export * from "./types/index.js";
@@ -0,0 +1,39 @@
+ export type CompressionLevel = "off" | "fast" | "balanced" | "max";
+ export interface YutiaOptions {
+     filename: string;
+     autoload?: boolean;
+     durability?: "immediate" | "batched";
+     compressionLevel?: CompressionLevel;
+     blockTargetBytes?: number;
+     maxCacheEntries?: number;
+     autoFlushMs?: number;
+     writeBatchSize?: number;
+     corruptAction?: "error" | "ignore" | "log";
+ }
+ export interface Doc {
+     _id?: string;
+     createdAt?: number;
+     updatedAt?: number;
+     _deleted?: boolean;
+     [key: string]: any;
+ }
+ export type Query<T> = Partial<T> & {
+     $gt?: any;
+     $lt?: any;
+     $gte?: any;
+     $lte?: any;
+     $ne?: any;
+     $in?: any[];
+     $nin?: any[];
+     $regex?: RegExp;
+     $exists?: boolean;
+ };
+ export interface UpdateQuery {
+     $set?: Partial<any>;
+     $inc?: {
+         [key: string]: number;
+     };
+     $unset?: {
+         [key: string]: 1;
+     };
+ }
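These are the public option, document, and query shapes. A sketch of typed values they admit (fields beyond the declared ones are examples, which the open index signature on Doc allows):

    const options: YutiaOptions = {
        filename: "./data/events.ydb",   // the only required field
        durability: "batched",
        compressionLevel: "fast",
        maxCacheEntries: 10_000,
    };
    const query: Query<Doc> = { level: "error", ts: { $gte: 1700000000000 } };
    const update: UpdateQuery = { $set: { level: "warn" }, $inc: { retries: 1 } };
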
@@ -0,0 +1 @@
+ export {};
@@ -0,0 +1,4 @@
+ export declare class Checksum {
+     static calculate(data: string): string;
+     static verify(data: string, hash: string): boolean;
+ }
@@ -0,0 +1,10 @@
+ // src/utils/Checksum.ts
+ import { createHash } from "crypto";
+ export class Checksum {
+     static calculate(data) {
+         return createHash("sha256").update(data).digest("hex");
+     }
+     static verify(data, hash) {
+         return this.calculate(data) === hash;
+     }
+ }
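A sketch of the checksum helper in use (the payload is illustrative; calculate returns a lowercase hex SHA-256 digest):

    const payload = JSON.stringify({ _id: "a1", msg: "hello" });
    const digest = Checksum.calculate(payload);   // 64-character hex string
    Checksum.verify(payload, digest);             // true
    Checksum.verify(payload + "!", digest);       // false: any byte change alters the digest
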
@@ -0,0 +1,6 @@
+ export declare class CryptoManager {
+     private key;
+     constructor(secret: string);
+     encrypt(text: string): string;
+     decrypt(encryptedText: string): string;
+ }
@@ -0,0 +1,26 @@
+ // src/utils/Crypto.ts
+ import { randomBytes, createCipheriv, createDecipheriv } from "crypto";
+ // Simplified placeholder for Encryption logic
+ export class CryptoManager {
+     key;
+     constructor(secret) {
+         // In production, derive key properly (e.g., PBKDF2)
+         this.key = Buffer.from(secret.padEnd(32, "0").slice(0, 32));
+     }
+     encrypt(text) {
+         const iv = randomBytes(16);
+         const cipher = createCipheriv("aes-256-cbc", this.key, iv);
+         let encrypted = cipher.update(text, "utf8", "hex");
+         encrypted += cipher.final("hex");
+         return `${iv.toString("hex")}:${encrypted}`;
+     }
+     decrypt(encryptedText) {
+         const parts = encryptedText.split(":");
+         const iv = Buffer.from(parts[0], "hex");
+         const encrypted = parts[1];
+         const decipher = createDecipheriv("aes-256-cbc", this.key, iv);
+         let decrypted = decipher.update(encrypted, "hex", "utf8");
+         decrypted += decipher.final("utf8");
+         return decrypted;
+     }
+ }
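The class itself flags the padded-secret key derivation as a placeholder. A sketch of the roundtrip it implements (the secret is illustrative; the output format is "<iv hex>:<ciphertext hex>" using AES-256-CBC):

    const crypto = new CryptoManager("my-secret");    // illustrative secret
    const token = crypto.encrypt('{"_id":"a1"}');     // "<32-hex-char iv>:<ciphertext hex>"
    const plain = crypto.decrypt(token);              // '{"_id":"a1"}'
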
@@ -0,0 +1,5 @@
+ import { Query } from "../types/index.js";
+ export declare class QueryMatcher {
+     static match<T extends any>(doc: T, query: Query<T>): boolean;
+     private static matchField;
+ }
@@ -0,0 +1,55 @@
+ const BAD_KEYS = new Set(["__proto__", "prototype", "constructor"]);
+ const isBadKey = (k) => BAD_KEYS.has(k);
+ export class QueryMatcher {
+     static match(doc, query) {
+         for (const key in query) {
+             if (isBadKey(key))
+                 return false;
+             const qv = query[key];
+             const dv = doc?.[key];
+             if (!this.matchField(dv, qv))
+                 return false;
+         }
+         return true;
+     }
+     static matchField(docVal, queryVal) {
+         if (queryVal instanceof RegExp) {
+             return typeof docVal === "string" && queryVal.test(docVal);
+         }
+         if (typeof queryVal === "object" &&
+             queryVal !== null &&
+             !Array.isArray(queryVal)) {
+             if ("$ne" in queryVal && docVal === queryVal.$ne)
+                 return false;
+             if ("$gt" in queryVal && !(docVal > queryVal.$gt))
+                 return false;
+             if ("$lt" in queryVal && !(docVal < queryVal.$lt))
+                 return false;
+             if ("$gte" in queryVal && !(docVal >= queryVal.$gte))
+                 return false;
+             if ("$lte" in queryVal && !(docVal <= queryVal.$lte))
+                 return false;
+             if ("$in" in queryVal &&
+                 Array.isArray(queryVal.$in) &&
+                 !queryVal.$in.includes(docVal))
+                 return false;
+             if ("$nin" in queryVal &&
+                 Array.isArray(queryVal.$nin) &&
+                 queryVal.$nin.includes(docVal))
+                 return false;
+             if ("$exists" in queryVal) {
+                 const exists = docVal !== undefined && docVal !== null;
+                 if (exists !== Boolean(queryVal.$exists))
+                     return false;
+             }
+             if ("$regex" in queryVal) {
+                 const rx = queryVal.$regex instanceof RegExp ? queryVal.$regex : null;
+                 if (!rx)
+                     return false;
+                 return typeof docVal === "string" && rx.test(docVal);
+             }
+             return true;
+         }
+         return docVal === queryVal;
+     }
+ }
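A sketch of the operator semantics above (the document and queries are invented; __proto__/prototype/constructor keys are rejected outright, and a plain value compares with strict equality):

    const doc: Record<string, any> = { _id: "a1", level: "error", retries: 3, tag: "db-core" };
    QueryMatcher.match(doc, { level: "error" });                      // true (strict equality)
    QueryMatcher.match(doc, { retries: { $gte: 2, $lt: 10 } });       // true
    QueryMatcher.match(doc, { level: { $in: ["warn", "info"] } });    // false
    QueryMatcher.match(doc, { tag: /core$/ });                        // true (bare RegExp per field)
    QueryMatcher.match(doc, { missing: { $exists: false } });         // true
    QueryMatcher.match(doc, { ["__proto__"]: {} });                   // false: key is rejected
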
package/package.json ADDED
@@ -0,0 +1,63 @@
+ {
+   "name": "yutia.db",
+   "version": "1.3.2",
+   "description": "Ultra-lightweight embedded database for Realtime Logs and AI Datasets.",
+   "author": "Clayza Aubert <https://clayza.is-a.dev> (https://github.com/clayzaaubert)",
+   "license": "MIT",
+   "type": "module",
+   "main": "./dist/index.js",
+   "types": "./dist/index.d.ts",
+   "exports": {
+     ".": {
+       "types": "./dist/index.d.ts",
+       "import": "./dist/index.js"
+     }
+   },
+   "files": [
+     "dist",
+     "README.md",
+     "LICENSE",
+     "package.json"
+   ],
+   "scripts": {
+     "build": "tsc",
+     "lint": "eslint src",
+     "prepublishOnly": "npm run build",
+     "stress": "node --enable-source-maps test/stress/stress.ts",
+     "stress-bun": "bun test/stress/stress.ts"
+   },
+   "engines": {
+     "node": ">=20.0.0"
+   },
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/clayzaaubert/yutia-db"
+   },
+   "homepage": "https://github.com/clayzaaubert/yutia-db#readme",
+   "bugs": {
+     "url": "https://github.com/clayzaaubert/yutia-db/issues"
+   },
+   "keywords": [
+     "embedded-database",
+     "lightweight-db",
+     "realtime-logs",
+     "ai-datasets",
+     "nedb-alternative",
+     "json-database",
+     "local-database",
+     "typescript",
+     "yutia-db",
+     "nodejs"
+   ],
+   "dependencies": {
+     "eventemitter3": "^5.0.1",
+     "p-queue": "^8.0.0"
+   },
+   "devDependencies": {
+     "@types/node": "^25.0.9",
+     "@types/p-queue": "^3.2.1",
+     "jest": "^29.7.0",
+     "ts-jest": "^29.1.1",
+     "typescript": "^5.3.0"
+   }
+ }
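Given "type": "module" and the single "." entry in "exports" above, consumers load the package as ESM; a minimal import sketch using only the names re-exported from dist/index.js:

    // Resolves via the "exports" map: types from ./dist/index.d.ts, code from ./dist/index.js.
    import { Datastore, Compactor } from "yutia.db";
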