lba 3.0.0 → 3.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,40 @@
1
+ name: Publish to NPM
2
+
3
+ on:
4
+ push:
5
+ tags:
6
+ - "v*"
7
+
8
+ jobs:
9
+ publish:
10
+ runs-on: ubuntu-latest
11
+ permissions:
12
+ id-token: write
13
+ contents: read
14
+
15
+ steps:
16
+ - name: Checkout repository
17
+ uses: actions/checkout@v4
18
+
19
+ - name: Install pnpm
20
+ uses: pnpm/action-setup@v3
21
+ with:
22
+ version: 8
23
+
24
+ - name: Setup Node.js
25
+ uses: actions/setup-node@v4
26
+ with:
27
+ node-version: "20"
28
+ registry-url: "https://registry.npmjs.org"
29
+ # cache: "pnpm"
30
+
31
+ - name: Install dependencies
32
+ run: pnpm install
33
+
34
+ - name: Run Tests
35
+ run: node test/test.js
36
+
37
+ - name: Publish to NPM
38
+ run: npm publish --provenance --access public
39
+ env:
40
+ NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
package/README.md CHANGED
@@ -1,89 +1,89 @@
1
- # 🚀 LBA (Lightweight Binary Archive)
2
-
3
- LBA is an ultra-lightweight, high-performance, file-based key-value store for Node.js. It bridges the gap between the blazing speed of **Redis** and the querying flexibility of **MongoDB**, optimized specifically for modern multi-core environments.
4
-
5
- [![npm version](https://img.shields.io/npm/v/lba.svg)](https://www.npmjs.com/package/lba)
6
- [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
7
-
8
- ## ✨ Key Features
9
-
10
- - **⚡ Hybrid Architecture**: Combines simple Key-Value storage with powerful MongoDB-style NoSQL queries (`$gt`, `$in`, `$exists`, etc.).
11
- - **🧩 Smart Sharding**: Automatically partitions data into multiple shards to eliminate I/O bottlenecks and improve concurrency.
12
- - **⚙️ Auto-Vacuuming**: Background maintenance that automatically defragments storage files after deletions or updates.
13
- - **🚀 Multi-Core Optimized**: Automatically detects your CPU core count to scale worker thread pools for maximum throughput.
14
- - **📦 Built-in Compression**: Transparent `zlib` compression to save disk space without sacrificing usability.
15
- - **🛡️ Atomic Integrity**: Uses CRC32 checksums and atomic write mechanisms to ensure data remains uncorrupted.
16
-
17
- ## 📦 Installation
18
-
19
- ```bash
20
- pnpm add lba
21
- # or
22
- npm install lba
23
- ```
24
-
25
- ## 🚀 Quick Start
26
-
27
- **Basic Usage (Redis Style)**
28
-
29
- ```js
30
- const { LBA } = require("lba");
31
-
32
- // Initialize with auto-worker scaling
33
- const db = new LBA("./storage", {
34
- workerCount: "auto", // Automatically scales to your CPU (e.g., 15 workers for 20 cores)
35
- shardCount: 32,
36
- });
37
-
38
- async function main() {
39
- // Set data
40
- await db.set("user:123", {
41
- name: "Gemini",
42
- level: 99,
43
- tags: ["ai", "developer"],
44
- });
45
-
46
- // Get data
47
- const user = await db.get("user:123");
48
- console.log(user);
49
- }
50
- main();
51
- ```
52
-
53
- **NoSQL Querying (MongoDB Style)**
54
-
55
- ```js
56
- // Search with operators ($gte, $in, $eq, etc.)
57
- const proUsers = await db.find({
58
- level: { $gte: 50 },
59
- tags: { $in: ["ai"] },
60
- });
61
-
62
- // Bulk updates based on criteria
63
- await db.updateMany({ level: { $lt: 10 } }, { status: "newbie" });
64
- ```
65
-
66
- ## ⚙️ Configuration Options
67
-
68
- | Option | Type | Default | Description |
69
- | ---------------- | ------- | --------------------------------------------------------------- | ----------------------------------------------------------- |
70
- | shardCount | number | 32 | Number of shards to partition the data. |
71
- | workerCount | number | 'auto','auto',Number of worker threads for parallel processing. |
72
- | autoVacuum | boolean | true | Enables background storage optimization. |
73
- | vacuumThreshold | number | 500 | Number of writes/deletes before triggering a vacuum. |
74
- | syncOnWrite | boolean | true | Forces physical disk sync on every write (Safety vs Speed). |
75
- | compressionLevel | number | 6 | zlib compression level (0-9). |
76
-
77
- ## 📊 Performance Benchmark
78
-
79
- Tested on a **20-Core / 15-Worker** environment:
80
-
81
- **Read Latency:** ~0.002ms (via Indexing & LRU Caching)
82
-
83
- **Write Throughput:** ~330+ ops/s (Sync Mode)
84
-
85
- > Tip: Set syncOnWrite: false to achieve significantly higher write speeds using OS-level buffering.
86
-
87
- ## 📄 License
88
-
89
- MIT License.
1
+ # 🚀 LBA (Lightweight Binary Archive)
2
+
3
+ LBA is an ultra-lightweight, high-performance, file-based key-value store for Node.js. It bridges the gap between the blazing speed of **Redis** and the querying flexibility of **MongoDB**, optimized specifically for modern multi-core environments.
4
+
5
+ [![npm version](https://img.shields.io/npm/v/lba.svg)](https://www.npmjs.com/package/lba)
6
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
7
+
8
+ ## ✨ Key Features
9
+
10
+ - **⚡ Hybrid Architecture**: Combines simple Key-Value storage with powerful MongoDB-style NoSQL queries (`$gt`, `$in`, `$exists`, etc.).
11
+ - **🧩 Smart Sharding**: Automatically partitions data into multiple shards to eliminate I/O bottlenecks and improve concurrency.
12
+ - **⚙️ Auto-Vacuuming**: Background maintenance that automatically defragments storage files after deletions or updates.
13
+ - **🚀 Multi-Core Optimized**: Automatically detects your CPU core count to scale worker thread pools for maximum throughput.
14
+ - **📦 Built-in Compression**: Transparent `zlib` compression to save disk space without sacrificing usability.
15
+ - **🛡️ Atomic Integrity**: Uses CRC32 checksums and atomic write mechanisms to ensure data remains uncorrupted.
16
+
17
+ ## 📦 Installation
18
+
19
+ ```bash
20
+ pnpm add lba
21
+ # or
22
+ npm install lba
23
+ ```
24
+
25
+ ## 🚀 Quick Start
26
+
27
+ **Basic Usage (Redis Style)**
28
+
29
+ ```js
30
+ const { LBA } = require("lba");
31
+
32
+ // Initialize with auto-worker scaling
33
+ const db = new LBA("./storage", {
34
+ workerCount: "auto", // Automatically scales to your CPU (e.g., 15 workers for 20 cores)
35
+ shardCount: 32,
36
+ });
37
+
38
+ async function main() {
39
+ // Set data
40
+ await db.set("user:123", {
41
+ name: "Gemini",
42
+ level: 99,
43
+ tags: ["ai", "developer"],
44
+ });
45
+
46
+ // Get data
47
+ const user = await db.get("user:123");
48
+ console.log(user);
49
+ }
50
+ main();
51
+ ```
52
+
53
+ **NoSQL Querying (MongoDB Style)**
54
+
55
+ ```js
56
+ // Search with operators ($gte, $in, $eq, etc.)
57
+ const proUsers = await db.find({
58
+ level: { $gte: 50 },
59
+ tags: { $in: ["ai"] },
60
+ });
61
+
62
+ // Bulk updates based on criteria
63
+ await db.updateMany({ level: { $lt: 10 } }, { status: "newbie" });
64
+ ```
65
+
66
+ ## ⚙️ Configuration Options
67
+
68
+ | Option | Type | Default | Description |
69
+ | ---------------- | ------- | --------------------------------------------------------------- | ----------------------------------------------------------- |
70
+ | shardCount | number | 32 | Number of shards to partition the data. |
71
+ | workerCount | number | 'auto','auto',Number of worker threads for parallel processing. |
72
+ | autoVacuum | boolean | true | Enables background storage optimization. |
73
+ | vacuumThreshold | number | 500 | Number of writes/deletes before triggering a vacuum. |
74
+ | syncOnWrite | boolean | true | Forces physical disk sync on every write (Safety vs Speed). |
75
+ | compressionLevel | number | 6 | zlib compression level (0-9). |
76
+
77
+ ## 📊 Performance Benchmark
78
+
79
+ Tested on a **20-Core / 15-Worker** environment:
80
+
81
+ **Read Latency:** ~0.002ms (via Indexing & LRU Caching)
82
+
83
+ **Write Throughput:** ~330+ ops/s (Sync Mode)
84
+
85
+ > Tip: Set syncOnWrite: false to achieve significantly higher write speeds using OS-level buffering.
86
+
87
+ ## 📄 License
88
+
89
+ MIT License.
package/package.json CHANGED
@@ -1,9 +1,14 @@
1
1
  {
2
2
  "name": "lba",
3
- "version": "3.0.0",
3
+ "version": "3.2.2",
4
4
  "description": "Lightweight, high-performance, file-based key-value store with NoSQL query support.",
5
5
  "main": "src/index.js",
6
6
  "types": "src/types",
7
+ "repository": {
8
+ "type": "git",
9
+ "url": "git+https://github.com/yeyok/lba"
10
+ },
11
+ "homepage": "https://github.com/yeyok/lba#readme",
7
12
  "scripts": {
8
13
  "test": "node test/test.js",
9
14
  "bench": "node bench/bench.js"
package/src/index.js CHANGED
@@ -1,257 +1,257 @@
1
- const fs = require("fs");
2
- const path = require("path");
3
- const os = require("os");
4
- const {
5
- deflateRawAsync,
6
- calculateCRC32,
7
- getShard,
8
- safeInflate,
9
- } = require("./utils");
10
- const { matches } = require("./query-engine");
11
-
12
- class LBA {
13
- constructor(dbDir = "lba_storage", options = {}) {
14
- this.dbDir = path.resolve(dbDir);
15
- this.shardCount = options.shardCount || 32;
16
- this.cacheLimit = options.cacheLimit || 10000;
17
- this.syncOnWrite = options.syncOnWrite !== false;
18
- this.compressionLevel = options.compressionLevel || 6;
19
- this.maxDecompressedSize = options.maxDecompressedSize || 100 * 1024 * 1024;
20
-
21
- this.autoVacuum = options.autoVacuum !== false;
22
- this.vacuumThreshold = options.vacuumThreshold || 500;
23
-
24
- const cpuCores = os.cpus().length;
25
- if (options.workerCount === "auto" || !options.workerCount) {
26
- this.workerLimit = Math.max(1, Math.floor(cpuCores * 0.75));
27
- } else {
28
- this.workerLimit = options.workerCount;
29
- }
30
-
31
- this.dirtyCounts = new Array(this.shardCount).fill(0);
32
- this.isVacuuming = new Array(this.shardCount).fill(false);
33
- this.indices = Array.from({ length: this.shardCount }, () => new Map());
34
- this.cache = new Map();
35
- this.queues = Array.from({ length: this.shardCount }, () =>
36
- Promise.resolve(),
37
- );
38
- this.fileHandles = new Array(this.shardCount).fill(null);
39
- this.isLoaded = new Array(this.shardCount).fill(false);
40
-
41
- console.log(
42
- `[LBA] 가동 시작 (CPU 코어: ${cpuCores}, 사용 워커: ${this.workerLimit})`,
43
- );
44
- this._ensureDbDir();
45
- }
46
-
47
- _ensureDbDir() {
48
- if (!fs.existsSync(this.dbDir))
49
- fs.mkdirSync(this.dbDir, { recursive: true });
50
- }
51
-
52
- async _ensureShardLoaded(sIdx) {
53
- if (this.isLoaded[sIdx]) return;
54
- const fPath = path.join(this.dbDir, `shard_${sIdx}.lba`);
55
- const handle = await fs.promises.open(fPath, "a+");
56
- this.fileHandles[sIdx] = handle;
57
-
58
- const { size } = await handle.stat();
59
- let offset = 0;
60
- const head = Buffer.allocUnsafe(11);
61
-
62
- while (offset + 11 <= size) {
63
- await handle.read(head, 0, 11, offset);
64
- if (head[0] !== 0x4c || head[1] !== 0x42) {
65
- offset++;
66
- continue;
67
- }
68
- const vLen = head.readUInt32BE(6);
69
- const kLen = head[10];
70
- const recordSize = 11 + kLen + vLen;
71
- if (offset + recordSize > size) break;
72
-
73
- const kBuf = Buffer.allocUnsafe(kLen);
74
- await handle.read(kBuf, 0, kLen, offset + 11);
75
- const key = kBuf.toString();
76
-
77
- if (vLen > 0) {
78
- this.indices[sIdx].set(key, {
79
- offset: offset + 11 + kLen,
80
- length: vLen,
81
- crc: head.readUInt32BE(2),
82
- kLen,
83
- });
84
- } else {
85
- this.indices[sIdx].delete(key);
86
- }
87
- offset += recordSize;
88
- }
89
- this.isLoaded[sIdx] = true;
90
- }
91
-
92
- async get(key) {
93
- const sIdx = getShard(key, this.shardCount);
94
- return this._enqueue(sIdx, async () => {
95
- const kStr = String(key);
96
- if (this.cache.has(kStr)) return structuredClone(this.cache.get(kStr));
97
- const meta = this.indices[sIdx].get(kStr);
98
- if (!meta) return null;
99
-
100
- const vBuf = Buffer.allocUnsafe(meta.length);
101
- await this.fileHandles[sIdx].read(vBuf, 0, meta.length, meta.offset);
102
-
103
- const decompressed = await safeInflate(vBuf);
104
- const data = JSON.parse(decompressed.toString());
105
- this._addToCache(kStr, data);
106
- return data;
107
- });
108
- }
109
-
110
- async set(key, value) {
111
- const sIdx = getShard(key, this.shardCount);
112
- return this._enqueue(sIdx, async () => {
113
- const kStr = String(key);
114
- const kBuf = Buffer.from(kStr);
115
- let vBuf = null,
116
- vLen = 0;
117
-
118
- if (value !== null && value !== undefined) {
119
- vBuf = await deflateRawAsync(JSON.stringify(value), {
120
- level: this.compressionLevel,
121
- });
122
- vLen = vBuf.length;
123
- }
124
-
125
- const metaBuf = Buffer.allocUnsafe(5);
126
- metaBuf.writeUInt32BE(vLen, 0);
127
- metaBuf[4] = kBuf.length;
128
- const checksum = calculateCRC32([metaBuf, kBuf, vBuf]);
129
-
130
- const head = Buffer.allocUnsafe(11);
131
- head[0] = 0x4c;
132
- head[1] = 0x42;
133
- head.writeUInt32BE(checksum, 2);
134
- head.writeUInt32BE(vLen, 6);
135
- head[10] = kBuf.length;
136
-
137
- const { size: pos } = await this.fileHandles[sIdx].stat();
138
- await this.fileHandles[sIdx].write(
139
- vBuf ? Buffer.concat([head, kBuf, vBuf]) : Buffer.concat([head, kBuf]),
140
- 0,
141
- 11 + kBuf.length + vLen,
142
- null,
143
- );
144
- if (this.syncOnWrite) await this.fileHandles[sIdx].datasync();
145
-
146
- if (vLen > 0) {
147
- this.indices[sIdx].set(kStr, {
148
- offset: pos + 11 + kBuf.length,
149
- length: vLen,
150
- crc: checksum,
151
- kLen: kBuf.length,
152
- });
153
- this._addToCache(kStr, value);
154
- } else {
155
- this.indices[sIdx].delete(kStr);
156
- this.cache.delete(kStr);
157
- }
158
-
159
- this.dirtyCounts[sIdx]++;
160
- if (this.autoVacuum && this.dirtyCounts[sIdx] >= this.vacuumThreshold) {
161
- this.vacuum(sIdx).catch(() => {});
162
- }
163
- });
164
- }
165
-
166
- async delete(key) {
167
- return this.set(key, null);
168
- }
169
-
170
- async vacuum(sIdx) {
171
- if (this.isVacuuming[sIdx]) return;
172
- this.isVacuuming[sIdx] = true;
173
- try {
174
- const fPath = path.join(this.dbDir, `shard_${sIdx}.lba`);
175
- const tempPath = fPath + ".tmp";
176
- const tempHandle = await fs.promises.open(tempPath, "w");
177
- const newIndices = new Map();
178
- let currentPos = 0;
179
-
180
- for (const [key, meta] of this.indices[sIdx].entries()) {
181
- const vBuf = Buffer.allocUnsafe(meta.length);
182
- await this.fileHandles[sIdx].read(vBuf, 0, meta.length, meta.offset);
183
- const kBuf = Buffer.from(key);
184
- const metaBuf = Buffer.allocUnsafe(5);
185
- metaBuf.writeUInt32BE(meta.length, 0);
186
- metaBuf[4] = kBuf.length;
187
- const checksum = calculateCRC32([metaBuf, kBuf, vBuf]);
188
- const head = Buffer.allocUnsafe(11);
189
- head[0] = 0x4c;
190
- head[1] = 0x42;
191
- head.writeUInt32BE(checksum, 2);
192
- head.writeUInt32BE(meta.length, 6);
193
- head[10] = kBuf.length;
194
-
195
- const block = Buffer.concat([head, kBuf, vBuf]);
196
- await tempHandle.write(block, 0, block.length, null);
197
- newIndices.set(key, {
198
- offset: currentPos + 11 + kBuf.length,
199
- length: meta.length,
200
- crc: checksum,
201
- kLen: kBuf.length,
202
- });
203
- currentPos += block.length;
204
- }
205
- await tempHandle.close();
206
- await this.fileHandles[sIdx].close();
207
- await fs.promises.rename(tempPath, fPath);
208
- this.fileHandles[sIdx] = await fs.promises.open(fPath, "a+");
209
- this.indices[sIdx] = newIndices;
210
- this.dirtyCounts[sIdx] = 0;
211
- } finally {
212
- this.isVacuuming[sIdx] = false;
213
- }
214
- }
215
-
216
- async find(query = {}) {
217
- const res = [];
218
- for (let i = 0; i < this.shardCount; i++) {
219
- await this._enqueue(i, async () => {
220
- for (const key of this.indices[i].keys()) {
221
- const val = await this.get(key);
222
- if (matches(val, query)) res.push({ _key: key, ...val });
223
- }
224
- });
225
- }
226
- return res;
227
- }
228
-
229
- async updateMany(query, updateData) {
230
- const targets = await this.find(query);
231
- for (const item of targets) {
232
- const { _key, ...oldVal } = item;
233
- await this.set(_key, { ...oldVal, ...updateData });
234
- }
235
- return targets.length;
236
- }
237
-
238
- _addToCache(k, v) {
239
- if (this.cache.has(k)) this.cache.delete(k);
240
- else if (this.cache.size >= this.cacheLimit)
241
- this.cache.delete(this.cache.keys().next().value);
242
- this.cache.set(k, v);
243
- }
244
-
245
- _enqueue(sIdx, task) {
246
- return (this.queues[sIdx] = this.queues[sIdx]
247
- .then(() => this._ensureShardLoaded(sIdx))
248
- .then(task));
249
- }
250
-
251
- async close() {
252
- await Promise.all(this.queues);
253
- for (const h of this.fileHandles) if (h) await h.close();
254
- }
255
- }
256
-
257
- module.exports = LBA;
1
+ const fs = require("fs");
2
+ const path = require("path");
3
+ const os = require("os");
4
+ const {
5
+ deflateRawAsync,
6
+ calculateCRC32,
7
+ getShard,
8
+ safeInflate,
9
+ } = require("./utils");
10
+ const { matches } = require("./query-engine");
11
+
12
+ class LBA {
13
+ constructor(dbDir = "lba_storage", options = {}) {
14
+ this.dbDir = path.resolve(dbDir);
15
+ this.shardCount = options.shardCount || 32;
16
+ this.cacheLimit = options.cacheLimit || 10000;
17
+ this.syncOnWrite = options.syncOnWrite !== false;
18
+ this.compressionLevel = options.compressionLevel || 6;
19
+ this.maxDecompressedSize = options.maxDecompressedSize || 100 * 1024 * 1024;
20
+
21
+ this.autoVacuum = options.autoVacuum !== false;
22
+ this.vacuumThreshold = options.vacuumThreshold || 500;
23
+
24
+ const cpuCores = os.cpus().length;
25
+ if (options.workerCount === "auto" || !options.workerCount) {
26
+ this.workerLimit = Math.max(1, Math.floor(cpuCores * 0.75));
27
+ } else {
28
+ this.workerLimit = options.workerCount;
29
+ }
30
+
31
+ this.dirtyCounts = new Array(this.shardCount).fill(0);
32
+ this.isVacuuming = new Array(this.shardCount).fill(false);
33
+ this.indices = Array.from({ length: this.shardCount }, () => new Map());
34
+ this.cache = new Map();
35
+ this.queues = Array.from({ length: this.shardCount }, () =>
36
+ Promise.resolve(),
37
+ );
38
+ this.fileHandles = new Array(this.shardCount).fill(null);
39
+ this.isLoaded = new Array(this.shardCount).fill(false);
40
+
41
+ console.log(
42
+ `[LBA] 가동 시작 (CPU 코어: ${cpuCores}, 사용 워커: ${this.workerLimit})`,
43
+ );
44
+ this._ensureDbDir();
45
+ }
46
+
47
+ _ensureDbDir() {
48
+ if (!fs.existsSync(this.dbDir))
49
+ fs.mkdirSync(this.dbDir, { recursive: true });
50
+ }
51
+
52
+ async _ensureShardLoaded(sIdx) {
53
+ if (this.isLoaded[sIdx]) return;
54
+ const fPath = path.join(this.dbDir, `shard_${sIdx}.lba`);
55
+ const handle = await fs.promises.open(fPath, "a+");
56
+ this.fileHandles[sIdx] = handle;
57
+
58
+ const { size } = await handle.stat();
59
+ let offset = 0;
60
+ const head = Buffer.allocUnsafe(11);
61
+
62
+ while (offset + 11 <= size) {
63
+ await handle.read(head, 0, 11, offset);
64
+ if (head[0] !== 0x4c || head[1] !== 0x42) {
65
+ offset++;
66
+ continue;
67
+ }
68
+ const vLen = head.readUInt32BE(6);
69
+ const kLen = head[10];
70
+ const recordSize = 11 + kLen + vLen;
71
+ if (offset + recordSize > size) break;
72
+
73
+ const kBuf = Buffer.allocUnsafe(kLen);
74
+ await handle.read(kBuf, 0, kLen, offset + 11);
75
+ const key = kBuf.toString();
76
+
77
+ if (vLen > 0) {
78
+ this.indices[sIdx].set(key, {
79
+ offset: offset + 11 + kLen,
80
+ length: vLen,
81
+ crc: head.readUInt32BE(2),
82
+ kLen,
83
+ });
84
+ } else {
85
+ this.indices[sIdx].delete(key);
86
+ }
87
+ offset += recordSize;
88
+ }
89
+ this.isLoaded[sIdx] = true;
90
+ }
91
+
92
+ async get(key) {
93
+ const sIdx = getShard(key, this.shardCount);
94
+ return this._enqueue(sIdx, async () => {
95
+ const kStr = String(key);
96
+ if (this.cache.has(kStr)) return structuredClone(this.cache.get(kStr));
97
+ const meta = this.indices[sIdx].get(kStr);
98
+ if (!meta) return null;
99
+
100
+ const vBuf = Buffer.allocUnsafe(meta.length);
101
+ await this.fileHandles[sIdx].read(vBuf, 0, meta.length, meta.offset);
102
+
103
+ const decompressed = await safeInflate(vBuf);
104
+ const data = JSON.parse(decompressed.toString());
105
+ this._addToCache(kStr, data);
106
+ return data;
107
+ });
108
+ }
109
+
110
+ async set(key, value) {
111
+ const sIdx = getShard(key, this.shardCount);
112
+ return this._enqueue(sIdx, async () => {
113
+ const kStr = String(key);
114
+ const kBuf = Buffer.from(kStr);
115
+ let vBuf = null,
116
+ vLen = 0;
117
+
118
+ if (value !== null && value !== undefined) {
119
+ vBuf = await deflateRawAsync(JSON.stringify(value), {
120
+ level: this.compressionLevel,
121
+ });
122
+ vLen = vBuf.length;
123
+ }
124
+
125
+ const metaBuf = Buffer.allocUnsafe(5);
126
+ metaBuf.writeUInt32BE(vLen, 0);
127
+ metaBuf[4] = kBuf.length;
128
+ const checksum = calculateCRC32([metaBuf, kBuf, vBuf]);
129
+
130
+ const head = Buffer.allocUnsafe(11);
131
+ head[0] = 0x4c;
132
+ head[1] = 0x42;
133
+ head.writeUInt32BE(checksum, 2);
134
+ head.writeUInt32BE(vLen, 6);
135
+ head[10] = kBuf.length;
136
+
137
+ const { size: pos } = await this.fileHandles[sIdx].stat();
138
+ await this.fileHandles[sIdx].write(
139
+ vBuf ? Buffer.concat([head, kBuf, vBuf]) : Buffer.concat([head, kBuf]),
140
+ 0,
141
+ 11 + kBuf.length + vLen,
142
+ null,
143
+ );
144
+ if (this.syncOnWrite) await this.fileHandles[sIdx].datasync();
145
+
146
+ if (vLen > 0) {
147
+ this.indices[sIdx].set(kStr, {
148
+ offset: pos + 11 + kBuf.length,
149
+ length: vLen,
150
+ crc: checksum,
151
+ kLen: kBuf.length,
152
+ });
153
+ this._addToCache(kStr, value);
154
+ } else {
155
+ this.indices[sIdx].delete(kStr);
156
+ this.cache.delete(kStr);
157
+ }
158
+
159
+ this.dirtyCounts[sIdx]++;
160
+ if (this.autoVacuum && this.dirtyCounts[sIdx] >= this.vacuumThreshold) {
161
+ this.vacuum(sIdx).catch(() => {});
162
+ }
163
+ });
164
+ }
165
+
166
+ async delete(key) {
167
+ return this.set(key, null);
168
+ }
169
+
170
+ async vacuum(sIdx) {
171
+ if (this.isVacuuming[sIdx]) return;
172
+ this.isVacuuming[sIdx] = true;
173
+ try {
174
+ const fPath = path.join(this.dbDir, `shard_${sIdx}.lba`);
175
+ const tempPath = fPath + ".tmp";
176
+ const tempHandle = await fs.promises.open(tempPath, "w");
177
+ const newIndices = new Map();
178
+ let currentPos = 0;
179
+
180
+ for (const [key, meta] of this.indices[sIdx].entries()) {
181
+ const vBuf = Buffer.allocUnsafe(meta.length);
182
+ await this.fileHandles[sIdx].read(vBuf, 0, meta.length, meta.offset);
183
+ const kBuf = Buffer.from(key);
184
+ const metaBuf = Buffer.allocUnsafe(5);
185
+ metaBuf.writeUInt32BE(meta.length, 0);
186
+ metaBuf[4] = kBuf.length;
187
+ const checksum = calculateCRC32([metaBuf, kBuf, vBuf]);
188
+ const head = Buffer.allocUnsafe(11);
189
+ head[0] = 0x4c;
190
+ head[1] = 0x42;
191
+ head.writeUInt32BE(checksum, 2);
192
+ head.writeUInt32BE(meta.length, 6);
193
+ head[10] = kBuf.length;
194
+
195
+ const block = Buffer.concat([head, kBuf, vBuf]);
196
+ await tempHandle.write(block, 0, block.length, null);
197
+ newIndices.set(key, {
198
+ offset: currentPos + 11 + kBuf.length,
199
+ length: meta.length,
200
+ crc: checksum,
201
+ kLen: kBuf.length,
202
+ });
203
+ currentPos += block.length;
204
+ }
205
+ await tempHandle.close();
206
+ await this.fileHandles[sIdx].close();
207
+ await fs.promises.rename(tempPath, fPath);
208
+ this.fileHandles[sIdx] = await fs.promises.open(fPath, "a+");
209
+ this.indices[sIdx] = newIndices;
210
+ this.dirtyCounts[sIdx] = 0;
211
+ } finally {
212
+ this.isVacuuming[sIdx] = false;
213
+ }
214
+ }
215
+
216
+ async find(query = {}) {
217
+ const res = [];
218
+ for (let i = 0; i < this.shardCount; i++) {
219
+ await this._enqueue(i, async () => {
220
+ for (const key of this.indices[i].keys()) {
221
+ const val = await this.get(key);
222
+ if (matches(val, query)) res.push({ _key: key, ...val });
223
+ }
224
+ });
225
+ }
226
+ return res;
227
+ }
228
+
229
+ async updateMany(query, updateData) {
230
+ const targets = await this.find(query);
231
+ for (const item of targets) {
232
+ const { _key, ...oldVal } = item;
233
+ await this.set(_key, { ...oldVal, ...updateData });
234
+ }
235
+ return targets.length;
236
+ }
237
+
238
+ _addToCache(k, v) {
239
+ if (this.cache.has(k)) this.cache.delete(k);
240
+ else if (this.cache.size >= this.cacheLimit)
241
+ this.cache.delete(this.cache.keys().next().value);
242
+ this.cache.set(k, v);
243
+ }
244
+
245
+ _enqueue(sIdx, task) {
246
+ return (this.queues[sIdx] = this.queues[sIdx]
247
+ .then(() => this._ensureShardLoaded(sIdx))
248
+ .then(task));
249
+ }
250
+
251
+ async close() {
252
+ await Promise.all(this.queues);
253
+ for (const h of this.fileHandles) if (h) await h.close();
254
+ }
255
+ }
256
+
257
+ module.exports = LBA;
@@ -1,43 +1,43 @@
1
- function matches(data, query) {
2
- if (!query || typeof query !== "object" || Object.keys(query).length === 0)
3
- return true;
4
- if (!data || typeof data !== "object") return false;
5
-
6
- return Object.entries(query).every(([field, condition]) => {
7
- const val = data[field];
8
-
9
- if (
10
- condition !== null &&
11
- typeof condition === "object" &&
12
- !Array.isArray(condition)
13
- ) {
14
- return Object.entries(condition).every(([op, target]) => {
15
- switch (op) {
16
- case "$eq":
17
- return val === target;
18
- case "$ne":
19
- return val !== target;
20
- case "$gt":
21
- return val > target;
22
- case "$gte":
23
- return val >= target;
24
- case "$lt":
25
- return val < target;
26
- case "$lte":
27
- return val <= target;
28
- case "$in":
29
- return Array.isArray(target) && target.includes(val);
30
- case "$nin":
31
- return Array.isArray(target) && !target.includes(val);
32
- case "$exists":
33
- return (val !== undefined) === target;
34
- default:
35
- return false;
36
- }
37
- });
38
- }
39
- return val === condition;
40
- });
41
- }
42
-
43
- module.exports = { matches };
1
+ function matches(data, query) {
2
+ if (!query || typeof query !== "object" || Object.keys(query).length === 0)
3
+ return true;
4
+ if (!data || typeof data !== "object") return false;
5
+
6
+ return Object.entries(query).every(([field, condition]) => {
7
+ const val = data[field];
8
+
9
+ if (
10
+ condition !== null &&
11
+ typeof condition === "object" &&
12
+ !Array.isArray(condition)
13
+ ) {
14
+ return Object.entries(condition).every(([op, target]) => {
15
+ switch (op) {
16
+ case "$eq":
17
+ return val === target;
18
+ case "$ne":
19
+ return val !== target;
20
+ case "$gt":
21
+ return val > target;
22
+ case "$gte":
23
+ return val >= target;
24
+ case "$lt":
25
+ return val < target;
26
+ case "$lte":
27
+ return val <= target;
28
+ case "$in":
29
+ return Array.isArray(target) && target.includes(val);
30
+ case "$nin":
31
+ return Array.isArray(target) && !target.includes(val);
32
+ case "$exists":
33
+ return (val !== undefined) === target;
34
+ default:
35
+ return false;
36
+ }
37
+ });
38
+ }
39
+ return val === condition;
40
+ });
41
+ }
42
+
43
+ module.exports = { matches };
package/src/types.ts CHANGED
@@ -1,26 +1,26 @@
1
- export type QueryOperator<T = any> = {
2
- $eq?: T; $ne?: T; $gt?: T; $gte?: T; $lt?: T; $lte?: T; $in?: T[]; $nin?: T[]; $exists?: boolean;
3
- };
4
-
5
- export type Query<T = any> = { [K in keyof T]?: T[K] | QueryOperator<T[K]>; } & { [key: string]: any };
6
-
7
- export interface LBAOptions {
8
- shardCount?: number;
9
- cacheLimit?: number;
10
- syncOnWrite?: boolean;
11
- compressionLevel?: number;
12
- autoVacuum?: boolean;
13
- vacuumThreshold?: number;
14
- // 워커 스레드 설정: 'auto'면 CPU 코어의 75% 사용
15
- workerCount?: number | 'auto';
16
- }
17
-
18
- export declare class LBA<T = any> {
19
- constructor(dbDir?: string, options?: LBAOptions);
20
- get(key: string | number): Promise<T | null>;
21
- set(key: string | number, value: T | null): Promise<void>;
22
- delete(key: string | number): Promise<void>;
23
- find(query?: Query<T>): Promise<(T & { _key: string })[]>;
24
- updateMany(query: Query<T>, updateData: Partial<T>): Promise<number>;
25
- close(): Promise<void>;
1
+ export type QueryOperator<T = any> = {
2
+ $eq?: T; $ne?: T; $gt?: T; $gte?: T; $lt?: T; $lte?: T; $in?: T[]; $nin?: T[]; $exists?: boolean;
3
+ };
4
+
5
+ export type Query<T = any> = { [K in keyof T]?: T[K] | QueryOperator<T[K]>; } & { [key: string]: any };
6
+
7
+ export interface LBAOptions {
8
+ shardCount?: number;
9
+ cacheLimit?: number;
10
+ syncOnWrite?: boolean;
11
+ compressionLevel?: number;
12
+ autoVacuum?: boolean;
13
+ vacuumThreshold?: number;
14
+ // 워커 스레드 설정: 'auto'면 CPU 코어의 75% 사용
15
+ workerCount?: number | 'auto';
16
+ }
17
+
18
+ export declare class LBA<T = any> {
19
+ constructor(dbDir?: string, options?: LBAOptions);
20
+ get(key: string | number): Promise<T | null>;
21
+ set(key: string | number, value: T | null): Promise<void>;
22
+ delete(key: string | number): Promise<void>;
23
+ find(query?: Query<T>): Promise<(T & { _key: string })[]>;
24
+ updateMany(query: Query<T>, updateData: Partial<T>): Promise<number>;
25
+ close(): Promise<void>;
26
26
  }
package/src/utils.js CHANGED
@@ -1,38 +1,38 @@
1
- const zlib = require("zlib");
2
- const util = require("util");
3
- const deflateRawAsync = util.promisify(zlib.deflateRaw);
4
-
5
- const CRC32_TABLE = new Int32Array(256);
6
- for (let i = 0; i < 256; i++) {
7
- let c = i;
8
- for (let j = 0; j < 8; j++) c = c & 1 ? 0xedb88320 ^ (c >>> 1) : c >>> 1;
9
- CRC32_TABLE[i] = c;
10
- }
11
-
12
- function calculateCRC32(buffers) {
13
- let crc = -1;
14
- for (const buf of buffers) {
15
- if (!buf) continue;
16
- for (let i = 0; i < buf.length; i++) {
17
- crc = (crc >>> 8) ^ CRC32_TABLE[(crc ^ buf[i]) & 0xff];
18
- }
19
- }
20
- return (crc ^ -1) >>> 0;
21
- }
22
-
23
- function getShard(key, shardCount) {
24
- let hash = 2166136261;
25
- const str = String(key);
26
- for (let i = 0; i < str.length; i++) {
27
- hash ^= str.charCodeAt(i);
28
- hash = Math.imul(hash, 16777619);
29
- }
30
- return (hash >>> 0) % shardCount;
31
- }
32
-
33
- module.exports = {
34
- deflateRawAsync,
35
- calculateCRC32,
36
- getShard,
37
- safeInflate: require("util").promisify(zlib.inflateRaw),
38
- };
1
+ const zlib = require("zlib");
2
+ const util = require("util");
3
+ const deflateRawAsync = util.promisify(zlib.deflateRaw);
4
+
5
+ const CRC32_TABLE = new Int32Array(256);
6
+ for (let i = 0; i < 256; i++) {
7
+ let c = i;
8
+ for (let j = 0; j < 8; j++) c = c & 1 ? 0xedb88320 ^ (c >>> 1) : c >>> 1;
9
+ CRC32_TABLE[i] = c;
10
+ }
11
+
12
+ function calculateCRC32(buffers) {
13
+ let crc = -1;
14
+ for (const buf of buffers) {
15
+ if (!buf) continue;
16
+ for (let i = 0; i < buf.length; i++) {
17
+ crc = (crc >>> 8) ^ CRC32_TABLE[(crc ^ buf[i]) & 0xff];
18
+ }
19
+ }
20
+ return (crc ^ -1) >>> 0;
21
+ }
22
+
23
+ function getShard(key, shardCount) {
24
+ let hash = 2166136261;
25
+ const str = String(key);
26
+ for (let i = 0; i < str.length; i++) {
27
+ hash ^= str.charCodeAt(i);
28
+ hash = Math.imul(hash, 16777619);
29
+ }
30
+ return (hash >>> 0) % shardCount;
31
+ }
32
+
33
+ module.exports = {
34
+ deflateRawAsync,
35
+ calculateCRC32,
36
+ getShard,
37
+ safeInflate: require("util").promisify(zlib.inflateRaw),
38
+ };