lba 3.2.2 → 3.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/package.json +1 -1
  2. package/src/index.js +208 -96
  3. package/src/types.ts +58 -2
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "lba",
-  "version": "3.2.2",
+  "version": "3.5.1",
   "description": "Lightweight, high-performance, file-based key-value store with NoSQL query support.",
   "main": "src/index.js",
   "types": "src/types",
package/src/index.js CHANGED
@@ -13,7 +13,10 @@ class LBA {
   constructor(dbDir = "lba_storage", options = {}) {
     this.dbDir = path.resolve(dbDir);
     this.shardCount = options.shardCount || 32;
+    this.loadingPromises = new Array(this.shardCount).fill(null);
+
     this.cacheLimit = options.cacheLimit || 10000;
+    this.fastMode = options.fastMode === true;
     this.syncOnWrite = options.syncOnWrite !== false;
     this.compressionLevel = options.compressionLevel || 6;
     this.maxDecompressedSize = options.maxDecompressedSize || 100 * 1024 * 1024;
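The constructor hunk above introduces the two new instance fields behind most of this release: loadingPromises, which backs the concurrent shard-load deduplication shown later in this diff, and fastMode, which skips the per-write datasync() call. A minimal usage sketch based only on the option names and fallbacks visible in this diff; it assumes the package's default export is the LBA class, which the diff does not show:

const LBA = require("lba");

// Defaults mirror the fallbacks on the right-hand side of the hunk.
const db = new LBA("lba_storage", {
  shardCount: 32,       // number of shard files (one serial queue each)
  cacheLimit: 10000,    // max entries held in the in-memory cache
  compressionLevel: 6,  // zlib deflateRaw level for stored values
  syncOnWrite: true,    // datasync() after each write...
  fastMode: false,      // ...unless fastMode is true (new in 3.5.1)
  workerCount: "auto",  // "auto" = max(1, floor(0.75 * CPU cores))
});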
@@ -22,11 +25,10 @@ class LBA {
     this.vacuumThreshold = options.vacuumThreshold || 500;

     const cpuCores = os.cpus().length;
-    if (options.workerCount === "auto" || !options.workerCount) {
-      this.workerLimit = Math.max(1, Math.floor(cpuCores * 0.75));
-    } else {
-      this.workerLimit = options.workerCount;
-    }
+    this.workerLimit =
+      options.workerCount === "auto" || !options.workerCount
+        ? Math.max(1, Math.floor(cpuCores * 0.75))
+        : options.workerCount;

     this.dirtyCounts = new Array(this.shardCount).fill(0);
     this.isVacuuming = new Array(this.shardCount).fill(false);
@@ -50,43 +52,56 @@ class LBA {
   }

   async _ensureShardLoaded(sIdx) {
-    if (this.isLoaded[sIdx]) return;
-    const fPath = path.join(this.dbDir, `shard_${sIdx}.lba`);
-    const handle = await fs.promises.open(fPath, "a+");
-    this.fileHandles[sIdx] = handle;
+    if (this.isLoaded[sIdx] && this.fileHandles[sIdx]) return;
+    if (this.loadingPromises[sIdx]) return this.loadingPromises[sIdx];

-    const { size } = await handle.stat();
-    let offset = 0;
-    const head = Buffer.allocUnsafe(11);
+    this.loadingPromises[sIdx] = (async () => {
+      try {
+        const fPath = path.join(this.dbDir, `shard_${sIdx}.lba`);
+        if (!this.fileHandles[sIdx]) {
+          this.fileHandles[sIdx] = await fs.promises.open(fPath, "a+");
+        }

-    while (offset + 11 <= size) {
-      await handle.read(head, 0, 11, offset);
-      if (head[0] !== 0x4c || head[1] !== 0x42) {
-        offset++;
-        continue;
-      }
-      const vLen = head.readUInt32BE(6);
-      const kLen = head[10];
-      const recordSize = 11 + kLen + vLen;
-      if (offset + recordSize > size) break;
-
-      const kBuf = Buffer.allocUnsafe(kLen);
-      await handle.read(kBuf, 0, kLen, offset + 11);
-      const key = kBuf.toString();
-
-      if (vLen > 0) {
-        this.indices[sIdx].set(key, {
-          offset: offset + 11 + kLen,
-          length: vLen,
-          crc: head.readUInt32BE(2),
-          kLen,
-        });
-      } else {
-        this.indices[sIdx].delete(key);
+        const handle = this.fileHandles[sIdx];
+        const { size } = await handle.stat();
+        let offset = 0;
+        const head = Buffer.allocUnsafe(11);
+
+        while (offset + 11 <= size) {
+          await handle.read(head, 0, 11, offset);
+          if (head[0] !== 0x4c || head[1] !== 0x42) {
+            offset++;
+            continue;
+          }
+          const vLen = head.readUInt32BE(6);
+          const kLen = head[10];
+          const recordSize = 11 + kLen + vLen;
+
+          if (offset + recordSize > size) break;
+
+          const kBuf = Buffer.allocUnsafe(kLen);
+          await handle.read(kBuf, 0, kLen, offset + 11);
+          const key = kBuf.toString();
+
+          if (vLen > 0) {
+            this.indices[sIdx].set(key, {
+              offset: offset + 11 + kLen,
+              length: vLen,
+              crc: head.readUInt32BE(2),
+              kLen,
+            });
+          } else {
+            this.indices[sIdx].delete(key);
+          }
+          offset += recordSize;
+        }
+        this.isLoaded[sIdx] = true;
+      } finally {
+        this.loadingPromises[sIdx] = null;
       }
-      offset += recordSize;
-    }
-    this.isLoaded[sIdx] = true;
+    })();
+
+    return this.loadingPromises[sIdx];
   }

   async get(key) {
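The rewritten _ensureShardLoaded closes a race present in 3.2.2, where two operations hitting an unloaded shard could both open the file and scan it twice. Now the first caller parks its in-flight promise in loadingPromises[sIdx] and every concurrent caller awaits that same promise; the finally block clears the slot so a failed load can be retried, and the isLoaded && fileHandles check covers handles that close() has since released. A self-contained sketch of the same memoized-initialization pattern (class and method names here are hypothetical, not from the package):

// Deduplicate concurrent async initialization per key: the first caller
// does the work, later callers await the same in-flight promise.
class OncePerKey {
  constructor() {
    this.done = new Set();     // keys that finished initializing
    this.inflight = new Map(); // key -> pending init promise
  }

  ensure(key, initFn) {
    if (this.done.has(key)) return Promise.resolve();
    if (this.inflight.has(key)) return this.inflight.get(key);

    const p = Promise.resolve()
      .then(() => initFn(key))
      .then(() => { this.done.add(key); })
      .finally(() => { this.inflight.delete(key); }); // permits retry on failure

    this.inflight.set(key, p);
    return p;
  }
}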
@@ -94,6 +109,7 @@ class LBA {
     return this._enqueue(sIdx, async () => {
       const kStr = String(key);
       if (this.cache.has(kStr)) return structuredClone(this.cache.get(kStr));
+
       const meta = this.indices[sIdx].get(kStr);
       if (!meta) return null;

@@ -110,64 +126,69 @@
   async set(key, value) {
     const sIdx = getShard(key, this.shardCount);
     return this._enqueue(sIdx, async () => {
-      const kStr = String(key);
-      const kBuf = Buffer.from(kStr);
-      let vBuf = null,
-        vLen = 0;
+      await this._internalSet(key, value, sIdx);
+    });
+  }

-      if (value !== null && value !== undefined) {
-        vBuf = await deflateRawAsync(JSON.stringify(value), {
-          level: this.compressionLevel,
-        });
-        vLen = vBuf.length;
-      }
+  async _internalSet(key, value, sIdx) {
+    const kStr = String(key);
+    const kBuf = Buffer.from(kStr);
+    let vBuf = null,
+      vLen = 0;

-      const metaBuf = Buffer.allocUnsafe(5);
-      metaBuf.writeUInt32BE(vLen, 0);
-      metaBuf[4] = kBuf.length;
-      const checksum = calculateCRC32([metaBuf, kBuf, vBuf]);
-
-      const head = Buffer.allocUnsafe(11);
-      head[0] = 0x4c;
-      head[1] = 0x42;
-      head.writeUInt32BE(checksum, 2);
-      head.writeUInt32BE(vLen, 6);
-      head[10] = kBuf.length;
-
-      const { size: pos } = await this.fileHandles[sIdx].stat();
-      await this.fileHandles[sIdx].write(
-        vBuf ? Buffer.concat([head, kBuf, vBuf]) : Buffer.concat([head, kBuf]),
-        0,
-        11 + kBuf.length + vLen,
-        null,
-      );
-      if (this.syncOnWrite) await this.fileHandles[sIdx].datasync();
+    if (value !== null && value !== undefined) {
+      vBuf = await deflateRawAsync(JSON.stringify(value), {
+        level: this.compressionLevel,
+      });
+      vLen = vBuf.length;
+    }

-      if (vLen > 0) {
-        this.indices[sIdx].set(kStr, {
-          offset: pos + 11 + kBuf.length,
-          length: vLen,
-          crc: checksum,
-          kLen: kBuf.length,
-        });
-        this._addToCache(kStr, value);
-      } else {
-        this.indices[sIdx].delete(kStr);
-        this.cache.delete(kStr);
-      }
+    const metaBuf = Buffer.allocUnsafe(5);
+    metaBuf.writeUInt32BE(vLen, 0);
+    metaBuf[4] = kBuf.length;
+    const checksum = calculateCRC32([metaBuf, kBuf, vBuf]);

-      this.dirtyCounts[sIdx]++;
-      if (this.autoVacuum && this.dirtyCounts[sIdx] >= this.vacuumThreshold) {
-        this.vacuum(sIdx).catch(() => {});
-      }
-    });
+    const head = Buffer.allocUnsafe(11);
+    head[0] = 0x4c;
+    head[1] = 0x42;
+    head.writeUInt32BE(checksum, 2);
+    head.writeUInt32BE(vLen, 6);
+    head[10] = kBuf.length;
+
+    const { size: pos } = await this.fileHandles[sIdx].stat();
+    const writeBuf = vBuf
+      ? Buffer.concat([head, kBuf, vBuf])
+      : Buffer.concat([head, kBuf]);
+
+    await this.fileHandles[sIdx].write(writeBuf, 0, writeBuf.length, null);
+
+    if (!this.fastMode && this.syncOnWrite)
+      await this.fileHandles[sIdx].datasync();
+
+    if (vLen > 0) {
+      this.indices[sIdx].set(kStr, {
+        offset: pos + 11 + kBuf.length,
+        length: vLen,
+        crc: checksum,
+        kLen: kBuf.length,
+      });
+      this._addToCache(kStr, value);
+    } else {
+      this.indices[sIdx].delete(kStr);
+      this.cache.delete(kStr);
+    }
+
+    this.dirtyCounts[sIdx]++;
+    if (this.autoVacuum && this.dirtyCounts[sIdx] >= this.vacuumThreshold) {
+      this._enqueue(sIdx, () => this._internalVacuum(sIdx)).catch(() => {});
+    }
   }

   async delete(key) {
     return this.set(key, null);
   }

-  async vacuum(sIdx) {
+  async _internalVacuum(sIdx) {
     if (this.isVacuuming[sIdx]) return;
     this.isVacuuming[sIdx] = true;
     try {
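As _internalSet shows, every record on disk is an 11-byte header, the raw key bytes, and the deflateRaw-compressed JSON value; the checksum covers a 5-byte meta block plus key plus value, while the header stores magic, checksum, value length, and key length. A hedged sketch of that layout (buildRecord and crc32Of are hypothetical helpers standing in for the library's internal calculateCRC32; zlib.crc32 requires Node v20.15+ or v22.2+):

const zlib = require("zlib");

// Stand-in for the library's internal calculateCRC32([metaBuf, kBuf, vBuf]).
function crc32Of(buffers) {
  let crc = 0;
  for (const b of buffers) if (b) crc = zlib.crc32(b, crc);
  return crc >>> 0;
}

// Record layout: [0x4c 0x42][crc u32 BE][vLen u32 BE][kLen u8][key][value]
function buildRecord(key, deflatedValue) {
  const kBuf = Buffer.from(String(key));
  const vLen = deflatedValue ? deflatedValue.length : 0; // 0 marks a delete

  const metaBuf = Buffer.alloc(5); // checksum input: [vLen u32][kLen u8]
  metaBuf.writeUInt32BE(vLen, 0);
  metaBuf[4] = kBuf.length;
  const checksum = crc32Of([metaBuf, kBuf, deflatedValue]);

  const head = Buffer.alloc(11);
  head[0] = 0x4c; // 'L'
  head[1] = 0x42; // 'B'
  head.writeUInt32BE(checksum, 2);
  head.writeUInt32BE(vLen, 6);
  head[10] = kBuf.length;

  return deflatedValue
    ? Buffer.concat([head, kBuf, deflatedValue])
    : Buffer.concat([head, kBuf]); // deletes append a key-only tombstone
}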
@@ -181,14 +202,11 @@
         const vBuf = Buffer.allocUnsafe(meta.length);
         await this.fileHandles[sIdx].read(vBuf, 0, meta.length, meta.offset);
         const kBuf = Buffer.from(key);
-        const metaBuf = Buffer.allocUnsafe(5);
-        metaBuf.writeUInt32BE(meta.length, 0);
-        metaBuf[4] = kBuf.length;
-        const checksum = calculateCRC32([metaBuf, kBuf, vBuf]);
+
         const head = Buffer.allocUnsafe(11);
         head[0] = 0x4c;
         head[1] = 0x42;
-        head.writeUInt32BE(checksum, 2);
+        head.writeUInt32BE(meta.crc, 2);
         head.writeUInt32BE(meta.length, 6);
         head[10] = kBuf.length;

@@ -197,7 +215,7 @@
         newIndices.set(key, {
           offset: currentPos + 11 + kBuf.length,
           length: meta.length,
-          crc: checksum,
+          crc: meta.crc,
           kLen: kBuf.length,
         });
         currentPos += block.length;
@@ -213,6 +231,10 @@
     }
   }

+  async vacuum(sIdx) {
+    return this._enqueue(sIdx, () => this._internalVacuum(sIdx));
+  }
+
   async find(query = {}) {
     const res = [];
     for (let i = 0; i < this.shardCount; i++) {
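Two behavioral fixes land around vacuum in this release: _internalVacuum now reuses the checksum already stored in the index (meta.crc) instead of recomputing it while rewriting live records, and the public vacuum() above is a thin wrapper that routes compaction through the per-shard queue, so it can no longer interleave with a set() on the same shard the way 3.2.2's direct this.vacuum(sIdx) call could. An illustrative call, assuming the API shown in this diff and the db instance from the earlier constructor sketch, inside an async function:

// Compact every shard explicitly; each call is serialized behind any
// reads and writes already queued on the same shard.
for (let s = 0; s < 32; s++) {
  await db.vacuum(s);
}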
@@ -235,6 +257,81 @@
     return targets.length;
   }

+  async batchSet(dataObj) {
+    const shardTasks = {};
+    for (const [key, value] of Object.entries(dataObj)) {
+      const sIdx = getShard(key, this.shardCount);
+      if (!shardTasks[sIdx]) shardTasks[sIdx] = [];
+      shardTasks[sIdx].push({ key, value });
+    }
+
+    const tasks = Object.entries(shardTasks).map(([sIdx, items]) => {
+      return this._enqueue(parseInt(sIdx), async () => {
+        for (const { key, value } of items) {
+          await this._internalSet(key, value, parseInt(sIdx));
+        }
+      });
+    });
+    await Promise.all(tasks);
+  }
+
+  async *entries() {
+    for (let i = 0; i < this.shardCount; i++) {
+      await this._enqueue(i, async () => {});
+      for (const key of this.indices[i].keys()) {
+        yield [key, await this.get(key)];
+      }
+    }
+  }
+
+  async clear() {
+    const clearTasks = [];
+    for (let i = 0; i < this.shardCount; i++) {
+      clearTasks.push(
+        this._enqueue(i, async () => {
+          const fPath = path.join(this.dbDir, `shard_${i}.lba`);
+          if (this.fileHandles[i]) await this.fileHandles[i].close();
+          await fs.promises.writeFile(fPath, "");
+          this.fileHandles[i] = await fs.promises.open(fPath, "a+");
+          this.indices[i].clear();
+          this.dirtyCounts[i] = 0;
+        }),
+      );
+    }
+    await Promise.all(clearTasks);
+    this.cache.clear();
+  }
+
+  size() {
+    return this.indices.reduce((acc, idxMap) => acc + idxMap.size, 0);
+  }
+
+  async backup(backupPath) {
+    const targetDir = path.resolve(backupPath);
+    if (!fs.existsSync(targetDir)) fs.mkdirSync(targetDir, { recursive: true });
+
+    const backupTasks = this.indices.map((_, sIdx) => {
+      return this._enqueue(sIdx, async () => {
+        const bPath = path.join(targetDir, `shard_${sIdx}.lba.bak`);
+        const bHandle = await fs.promises.open(bPath, "w");
+        for (const [key, meta] of this.indices[sIdx].entries()) {
+          const vBuf = Buffer.allocUnsafe(meta.length);
+          await this.fileHandles[sIdx].read(vBuf, 0, meta.length, meta.offset);
+          const kBuf = Buffer.from(key);
+          const head = Buffer.allocUnsafe(11);
+          head[0] = 0x4c;
+          head[1] = 0x42;
+          head.writeUInt32BE(meta.crc, 2);
+          head.writeUInt32BE(meta.length, 6);
+          head[10] = kBuf.length;
+          await bHandle.write(Buffer.concat([head, kBuf, vBuf]));
+        }
+        await bHandle.close();
+      });
+    });
+    await Promise.all(backupTasks);
+  }
+
   _addToCache(k, v) {
     if (this.cache.has(k)) this.cache.delete(k);
     else if (this.cache.size >= this.cacheLimit)
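The hunk above is the bulk of the 3.5.1 surface growth: batchSet groups keys by shard and writes each group in a single queued task, entries drains each shard queue before walking its index, clear truncates and reopens every shard file, size sums the in-memory indices, and backup rewrites only live (indexed) records to shard_<n>.lba.bak files. A usage sketch assuming the API shown in this diff (keys and paths are illustrative), again with the db instance from earlier, inside an async function:

await db.batchSet({
  "user:1": { name: "Ada", age: 36 },
  "user:2": { name: "Lin", age: 29 },
}); // one queued task per shard instead of one per key

console.log(db.size()); // synchronous: sums the loaded shard indices

for await (const [key, value] of db.entries()) {
  console.log(key, value);
}

await db.backup("./lba_backup"); // copies live records only, skipping stale data

await db.clear(); // wipes every shard file, index, and the shared cache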
@@ -244,13 +341,28 @@

   _enqueue(sIdx, task) {
     return (this.queues[sIdx] = this.queues[sIdx]
-      .then(() => this._ensureShardLoaded(sIdx))
-      .then(task));
+      .catch(() => {})
+      .then(async () => {
+        await this._ensureShardLoaded(sIdx);
+
+        if (!this.fileHandles[sIdx]) {
+          const fPath = path.join(this.dbDir, `shard_${sIdx}.lba`);
+          this.fileHandles[sIdx] = await fs.promises.open(fPath, "a+");
+        }
+
+        return task();
+      }));
   }

   async close() {
     await Promise.all(this.queues);
-    for (const h of this.fileHandles) if (h) await h.close();
+    await Promise.all(this.loadingPromises.filter((p) => p !== null));
+    for (let i = 0; i < this.shardCount; i++) {
+      if (this.fileHandles[i]) {
+        await this.fileHandles[i].close();
+        this.fileHandles[i] = null;
+      }
+    }
   }
 }
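_enqueue is the heart of the concurrency model: one promise chain per shard, so operations on the same shard run strictly in order. The new .catch(() => {}) at the head of the chain fixes a real 3.2.2 failure mode in which one rejected task poisoned this.queues[sIdx] and every later call on that shard rejected too; the defensive reopen of fileHandles[sIdx] covers handles released by clear() or close(). A self-contained sketch of the self-healing serial queue pattern (hypothetical names, not from the package):

// Per-slot serial queue: tasks run in FIFO order, and a failed task
// rejects its own caller without blocking tasks queued after it.
class SerialQueues {
  constructor(n) {
    this.queues = new Array(n).fill(Promise.resolve());
  }

  enqueue(i, task) {
    // Swallow the previous task's rejection so the chain keeps flowing;
    // that task's caller already received the rejection on its own promise.
    return (this.queues[i] = this.queues[i].catch(() => {}).then(task));
  }
}

const q = new SerialQueues(4);
q.enqueue(0, async () => { throw new Error("boom"); }).catch(() => {});
q.enqueue(0, async () => "still runs").then(console.log); // chain not poisoned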
package/src/types.ts CHANGED
@@ -1,26 +1,82 @@
 export type QueryOperator<T = any> = {
-  $eq?: T; $ne?: T; $gt?: T; $gte?: T; $lt?: T; $lte?: T; $in?: T[]; $nin?: T[]; $exists?: boolean;
+  /** Equal to */
+  $eq?: T;
+  /** Not equal to */
+  $ne?: T;
+  /** Greater than */
+  $gt?: T;
+  /** Greater than or equal to */
+  $gte?: T;
+  /** Less than */
+  $lt?: T;
+  /** Less than or equal to */
+  $lte?: T;
+  /** Included in an array */
+  $in?: T[];
+  /** Not included in an array */
+  $nin?: T[];
+  /** Property exists or not */
+  $exists?: boolean;
 };

 export type Query<T = any> = { [K in keyof T]?: T[K] | QueryOperator<T[K]>; } & { [key: string]: any };

 export interface LBAOptions {
+  /** Number of shards for data distribution. Default: 32 */
   shardCount?: number;
+  /** Maximum number of items to keep in the in-memory cache. Default: 10000 */
   cacheLimit?: number;
+  /** Whether to wait for a hardware disk sync on every write. Default: true */
   syncOnWrite?: boolean;
+  /** Zlib compression level (0-9). Default: 6 */
   compressionLevel?: number;
+  /** Automatically run vacuum to clean up stale data. Default: true */
   autoVacuum?: boolean;
+  /** Number of dirty writes before triggering auto-vacuum. Default: 500 */
   vacuumThreshold?: number;
-  // Worker thread setting: 'auto' uses 75% of CPU cores
+  /** Number of concurrent workers. 'auto' uses 75% of CPU cores. */
   workerCount?: number | 'auto';
+  /** Enable OS-level write buffering for extreme performance. Default: false */
+  fastMode?: boolean;
 }

+/**
+ * LBA: A lightweight, high-performance, file-based key-value store for Node.js.
+ * Supports sharding, atomic updates, and zlib compression.
+ */
 export declare class LBA<T = any> {
   constructor(dbDir?: string, options?: LBAOptions);
+
+  /** Retrieves the value associated with the key. Returns null if not found. */
   get(key: string | number): Promise<T | null>;
+
+  /** Stores a value for the given key. Set value to null to delete. */
   set(key: string | number, value: T | null): Promise<void>;
+
+  /** Deletes the entry for the given key. */
   delete(key: string | number): Promise<void>;
+
+  /** Finds records matching the given query. Supports MongoDB-like operators. */
   find(query?: Query<T>): Promise<(T & { _key: string })[]>;
+
+  /** Updates multiple records matching the query with new data. */
   updateMany(query: Query<T>, updateData: Partial<T>): Promise<number>;
+
+  /** Gracefully closes the database and ensures all writes are finished. */
   close(): Promise<void>;
+
+  /** Saves multiple key-value pairs at once for optimized I/O. */
+  batchSet(data: Record<string | number, T>): Promise<void>;
+
+  /** Async iterator that yields all key-value pairs in the database. */
+  entries(): AsyncIterableIterator<[string, T]>;
+
+  /** Wipes all data from the database and resets shards. Use with caution. */
+  clear(): Promise<void>;
+
+  /** Returns the total number of records currently stored in the database. */
+  size(): number;
+
+  /** Creates an optimized incremental backup at the specified path. */
+  backup(backupPath: string): Promise<void>;
 }
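The operator set documented above is what find() and updateMany() accept; per the Query type, a plain value is allowed alongside an operator object, and matching it by equality (like $eq) is the natural reading, though this diff does not show find()'s implementation. A hedged usage sketch (record shape and field names invented for illustration), inside an async function:

// Records whose age is >= 18 and whose role is one of the listed values;
// each result carries its key as _key, per the find() signature above.
const editors = await db.find({
  age: { $gte: 18 },
  role: { $in: ["admin", "editor"] },
});

// A bare value presumably matches like { role: { $eq: "admin" } }.
const admins = await db.find({ role: "admin" });

// Merge { active: false } into every record lacking a plan field;
// resolves to the number of records updated.
const n = await db.updateMany({ plan: { $exists: false } }, { active: false });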