sliftutils 0.50.0 → 0.51.0

@@ -252,7 +252,7 @@ export function bundleRequire(config: BundleRequireConfig) {
     if (property === "default") return newModule.exports;
 
     console.warn(
-        `Module ${newModule.filename} is not available (are you missing "module.allowclient = true"?). Tried to access ${String(property)}.`
+        `Module ${newModule.filename} is not available. It might have not been imported in Node.js due to a flag which is checking the browser or an environment variable. It might also be that the entry point is weirdly configured and could not be detected.`
     );
     return undefined;
 },
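For context, this warning appears to come from a Proxy-style guard around a module that was not loaded in the current environment; only the message text changes in this release. A hypothetical TypeScript sketch of that guard pattern (the surrounding bundleRequire internals are not visible in this diff, so the newModule shape and the loaded flag are assumptions):

    // Hypothetical sketch, not sliftutils' actual implementation: property access on a
    // module that was never loaded warns and yields undefined instead of throwing.
    function guardModule(newModule: { filename: string; loaded: boolean; exports: any }) {
        return new Proxy({} as Record<string | symbol, unknown>, {
            get(_target, property) {
                if (newModule.loaded) {
                    if (property === "default") return newModule.exports;
                    return newModule.exports?.[property];
                }
                console.warn(
                    `Module ${newModule.filename} is not available. Tried to access ${String(property)}.`
                );
                return undefined;
            },
        });
    }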
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "sliftutils",
-  "version": "0.50.0",
+  "version": "0.51.0",
   "main": "index.js",
   "license": "MIT",
   "files": [
@@ -101,9 +101,11 @@ export class TransactionStorage implements IStorage<Buffer> {
     }
     // Helps get rid of parse errors which constantly log. Also, uses less space
     public static async compressAll() {
-        for (let storage of TransactionStorage.allStorage) {
-            await storage.compressTransactionLog(true);
-        }
+        await fileLockSection(async () => {
+            for (let storage of TransactionStorage.allStorage) {
+                await storage.compressTransactionLog(true);
+            }
+        });
     }
 
     private init: Promise<void> | undefined = this.loadAllTransactions();
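The compressAll change wraps the per-storage compression loop in fileLockSection, presumably so concurrent callers cannot compress the same chunk files at once. A minimal sketch of that shape, assuming fileLockSection simply serializes an async callback (the real helper's locking mechanism is not shown in this diff):

    // Sketch only: an in-process stand-in for a fileLockSection-style helper that
    // serializes async critical sections. The real helper presumably also locks across
    // processes (e.g. via a lock file); that detail is assumed, not shown in the diff.
    let lockQueue: Promise<void> = Promise.resolve();

    function fileLockSectionSketch<T>(fnc: () => Promise<T>): Promise<T> {
        const result = lockQueue.then(fnc);
        // Keep the chain alive even if fnc rejects, so later sections still run.
        lockQueue = result.then(() => undefined, () => undefined);
        return result;
    }

    // Usage mirroring the new compressAll body:
    async function compressAllSketch(allStorage: { compressTransactionLog(force?: boolean): Promise<void> }[]) {
        await fileLockSectionSketch(async () => {
            for (const storage of allStorage) {
                await storage.compressTransactionLog(true);
            }
        });
    }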
@@ -287,6 +289,15 @@ export class TransactionStorage implements IStorage<Buffer> {
             content = await Zip.gunzip(content);
         }
 
+        let pendingWriteTimes = new Map<string, number>();
+        for (const entry of this.pendingAppends) {
+            let prevTime = pendingWriteTimes.get(entry.key);
+            if (prevTime && prevTime > entry.time) {
+                continue;
+            }
+            pendingWriteTimes.set(entry.key, entry.time);
+        }
+
         let offset = 0;
         let entries: TransactionEntry[] = [];
         while (offset < content.length) {
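The new pendingWriteTimes block builds a key → newest-timestamp index over this.pendingAppends before the on-disk log is replayed, so pending in-memory writes can win over older persisted entries. A self-contained sketch of that last-write-wins index, with the entry shape assumed from the two fields this hunk reads (key and time):

    // Sketch: build a "newest pending write per key" index. The real pendingAppends
    // entries may carry more fields; only key and time are used here.
    interface PendingAppendSketch {
        key: string;
        time: number; // e.g. a Date.now() timestamp
    }

    function buildPendingWriteTimes(pendingAppends: Iterable<PendingAppendSketch>): Map<string, number> {
        const pendingWriteTimes = new Map<string, number>();
        for (const entry of pendingAppends) {
            const prevTime = pendingWriteTimes.get(entry.key);
            // Keep only the newest timestamp seen for each key.
            if (prevTime !== undefined && prevTime > entry.time) continue;
            pendingWriteTimes.set(entry.key, entry.time);
        }
        return pendingWriteTimes;
    }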
@@ -314,11 +325,18 @@ export class TransactionStorage implements IStorage<Buffer> {
                 offset++;
                 continue;
             }
+
             this.entryCount++;
             let { entry } = entryObj;
             offset = entryObj.offset;
             entries.push(entry);
 
+            let time = entry.time;
+            let prevTime = pendingWriteTimes.get(entry.key);
+            if (prevTime && prevTime > time) {
+                continue;
+            }
+
             if (entry.value === undefined) {
                 this.cache.delete(entry.key);
             } else {
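The matching change in the replay loop consults that index: an entry read back from disk is still counted and pushed to entries, but it is not applied to the cache if a pending write for the same key is newer. A hedged sketch of that apply step (the entry and cache shapes are assumed for illustration):

    // Sketch: apply a replayed log entry to an in-memory cache unless a pending
    // write for the same key is newer (last write wins).
    interface LogEntrySketch {
        key: string;
        time: number;
        value?: Buffer; // undefined means "this key was deleted"
    }

    function applyEntry(
        cache: Map<string, LogEntrySketch>,
        pendingWriteTimes: Map<string, number>,
        entry: LogEntrySketch,
    ): void {
        const prevTime = pendingWriteTimes.get(entry.key);
        if (prevTime !== undefined && prevTime > entry.time) {
            // A newer pending write exists; don't let the older on-disk entry clobber it.
            return;
        }
        if (entry.value === undefined) {
            cache.delete(entry.key);
        } else {
            cache.set(entry.key, entry);
        }
    }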
@@ -432,79 +450,73 @@ export class TransactionStorage implements IStorage<Buffer> {
     private compressing = false;
     private async compressTransactionLog(force?: boolean): Promise<void> {
         if (this.compressing) return;
-        this.compressing = true;
-
-        let existingDiskEntries = await this.rawStorage.getKeys();
-        existingDiskEntries = existingDiskEntries.filter(x => x.endsWith(CHUNK_EXT));
-        let compressNow = force || (
-            this.entryCount > 100 && this.entryCount > this.cache.size * 3
-            // NOTE: This compress check breaks down if we only have very large values, but... those
-            // don't work ANYWAYS (it is better to use one file per value instead).
-            // - Maybe we should throw, or at least warn, on sets of value > 1MB,
-            //   at which point they should just use a file per value
-            || existingDiskEntries.length > Math.max(10, Math.ceil(this.entryCount / 1000))
-            || existingDiskEntries.length > 1000 * 10
-        );
-        if (!compressNow) return;
-        console.log(`Compressing ${this.debugName} transaction log, ${this.entryCount} entries, ${this.cache.size} keys`);
-
-        // Load off disk, in case there are other writes. We still race with them, but at least
-        // this reduces the race condition considerably
+        try {
+            this.compressing = true;
 
-        sortChunks(existingDiskEntries);
-        for (let entry of existingDiskEntries) {
-            await this.loadTransactionFile(entry);
-        }
+            let existingDiskEntries = await this.rawStorage.getKeys();
+            existingDiskEntries = existingDiskEntries.filter(x => x.endsWith(CHUNK_EXT));
+            let compressNow = force || (
+                this.entryCount > 100 && this.entryCount > this.cache.size * 3
+                // NOTE: This compress check breaks down if we only have very large values, but... those
+                // don't work ANYWAYS (it is better to use one file per value instead).
+                // - Maybe we should throw, or at least warn, on sets of value > 1MB,
+                //   at which point they should just use a file per value
+                || existingDiskEntries.length > Math.max(10, Math.ceil(this.entryCount / 1000))
+                || existingDiskEntries.length > 1000 * 10
+            );
+            if (!compressNow) return;
+            console.log(`Compressing ${this.debugName} transaction log, ${this.entryCount} entries, ${this.cache.size} keys`);
 
-        this.entryCount = this.cache.size;
+            // Load off disk, in case there are other writes. We still race with them, but at least
+            // this reduces the race condition considerably
 
+            sortChunks(existingDiskEntries);
+            for (let entry of existingDiskEntries) {
+                await this.loadTransactionFile(entry);
+            }
 
-        let buffers: Buffer[] = [];
-        for (const entry of this.cache.values()) {
-            let buffer = this.serializeTransactionEntry(entry);
-            buffers.push(buffer);
-        }
+            this.entryCount = this.cache.size;
 
-        let newChunks = this.chunkBuffers(buffers);
 
-        let newFiles: string[] = [];
-        for (let chunk of newChunks) {
-            let file = this.getCurrentChunk();
-            newFiles.push(file);
-            this.currentChunk = undefined;
-            let content = chunk.buffer;
-            let { header, headerBuffer } = this.getHeader(
-                // AND, never compress the last one, otherwise we can't append to it!
-                content.length >= FILE_ZIP_THRESHOLD && chunk !== newChunks[newChunks.length - 1]
-            );
-            if (header.zipped) {
-                content = await Zip.gzip(content);
+            let buffers: Buffer[] = [];
+            for (const entry of this.cache.values()) {
+                let buffer = this.serializeTransactionEntry(entry);
+                buffers.push(buffer);
             }
-            let buffer = Buffer.concat([headerBuffer, content]);
-            await this.rawStorage.set(file, buffer);
-            let verified = await this.rawStorage.get(file);
-            if (!verified?.equals(buffer)) {
-                console.error(`Failed to verify transaction file ${file} in ${this.debugName}`);
-                throw new Error(`Failed to verify transaction file ${file} in ${this.debugName}`);
+
+            let newChunks = this.chunkBuffers(buffers);
+
+            let newFiles: string[] = [];
+            for (let chunk of newChunks) {
+                // Don't use the previous current chunk for the new file. Also clear it afterwards so it's not reused for any future rights.
+                this.currentChunk = undefined;
+                let file = this.getCurrentChunk();
+                this.currentChunk = undefined;
+                newFiles.push(file);
+                let content = chunk.buffer;
+                let { header, headerBuffer } = this.getHeader(
+                    content.length >= FILE_ZIP_THRESHOLD
+                );
+                if (header.zipped) {
+                    content = await Zip.gzip(content);
+                }
+                let buffer = Buffer.concat([headerBuffer, content]);
+                await this.rawStorage.set(file, buffer);
+                let verified = await this.rawStorage.get(file);
+                if (!verified?.equals(buffer)) {
+                    console.error(`Failed to verify transaction file ${file} in ${this.debugName}`);
+                    throw new Error(`Failed to verify transaction file ${file} in ${this.debugName}`);
+                }
             }
-        }
-        // Delay, because we want to be absolutely certain that all the writes have finished before deleting the old files.
-        await delay(5000);
-        if (!isNode()) {
-            localStorage.setItem(`${this.debugName}-last-compress`, JSON.stringify({
-                time: Date.now(),
-                entryCount: this.entryCount,
-                cacheSize: this.cache.size,
-                newFiles,
-                existingDiskEntries,
-            }));
-        }
 
-        // This is the ONLY time we can delete old files, as we know for sure the new file has all of our data.
-        // Any future readers won't know this, unless they write it themselves (or unless they audit it against
-        // the other generations, which is annoying).
-        for (const file of existingDiskEntries) {
-            await this.rawStorage.remove(file);
+            // This is the ONLY time we can delete old files, as we know for sure the new file has all of our data.
+            // Any future readers won't know this, unless they write it themselves (or unless they audit it against
+            // the other generations, which is annoying).
+            for (const file of existingDiskEntries) {
+                await this.rawStorage.remove(file);
+            }
+        } finally {
+            this.compressing = false;
        }
     }
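The final hunk is mostly re-indentation: the body of compressTransactionLog now runs inside try { ... } finally { this.compressing = false; }, so the re-entrancy flag is cleared even on early returns and thrown errors. The other visible changes are that each new chunk gets a fresh file (this.currentChunk is cleared before and after getCurrentChunk()), the "never compress the last chunk" exception is dropped from the zip condition, and the delay(5000)/localStorage bookkeeping before deleting old files is removed. A minimal sketch of the guard-plus-verify-then-delete flow, with the storage interface assumed from the rawStorage calls visible in this diff:

    // Sketch of the control flow after this change: a re-entrancy flag held in
    // try/finally, new chunks written and read back for verification, and the old
    // chunk files removed only after every new chunk verified.
    interface RawStorageSketch {
        set(key: string, value: Buffer): Promise<void>;
        get(key: string): Promise<Buffer | undefined>;
        remove(key: string): Promise<void>;
    }

    class CompactorSketch {
        private compressing = false;
        constructor(private rawStorage: RawStorageSketch) {}

        async compact(oldFiles: string[], newChunks: { file: string; buffer: Buffer }[]): Promise<void> {
            if (this.compressing) return;
            try {
                this.compressing = true;
                for (const chunk of newChunks) {
                    await this.rawStorage.set(chunk.file, chunk.buffer);
                    const verified = await this.rawStorage.get(chunk.file);
                    if (!verified?.equals(chunk.buffer)) {
                        // Abort before deleting anything; the old files still hold the data.
                        throw new Error(`Failed to verify transaction file ${chunk.file}`);
                    }
                }
                // Only now is it safe to drop the old generation of files.
                for (const file of oldFiles) {
                    await this.rawStorage.remove(file);
                }
            } finally {
                // Released even on failure, so a later call can retry compression.
                this.compressing = false;
            }
        }
    }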