sliftutils 0.50.0 → 0.52.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.cursorrules
CHANGED
@@ -84,6 +84,14 @@ Coding Styles
 Use ref={elem => } callbacks. NEVER use .createRef.

 NEVER render images with a fixed width+height. This will cause them to be stretched or cut off. This is terrible. Only set the width or height.
+
+Callback hell should be avoided by using async and `import { PromiseObj } from "socket-function/src/misc";`. Most of the time, if there's an event callback, you should wrap it with a promise obj so that you can wait for it asynchronously.
+
+`import { keyBy, keyByArray } from "socket-function/src/misc";` should be used when you need to create lookups from lists and you know what key you want. keyBy can clobber values; keyByArray gathers them in an array.
+let example: Map<number, { x: number }> = keyBy([{ x: 5 }, { x: 6 }]);
+let example: Map<number, { x: number }[]> = keyByArray([{ x: 5 }, { x: 5, version: 2 }, { x: 6 }]);
+
+Never use alert. If there's an error, you should throw it.

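The two helper rules added above are easier to apply with a concrete sketch. The TypeScript below is a minimal illustration of the patterns they describe; promiseObj, waitForImageLoad, and the keyBy/keyByArray bodies (including the key-selector argument) are local stand-ins with assumed signatures, not the actual exports of socket-function/src/misc.

// Minimal sketch (assumed signatures, not the real socket-function exports).
function promiseObj<T>() {
    let resolve!: (value: T) => void;
    let reject!: (error: unknown) => void;
    const promise = new Promise<T>((res, rej) => { resolve = res; reject = rej; });
    return { promise, resolve, reject };
}

// Wrap an event callback in a promise so callers can simply await it.
async function waitForImageLoad(img: HTMLImageElement): Promise<void> {
    const loaded = promiseObj<void>();
    img.onload = () => loaded.resolve();
    img.onerror = () => loaded.reject(new Error("Image failed to load"));
    await loaded.promise;
}

// keyBy keeps one value per key (later entries clobber earlier ones);
// keyByArray gathers every value for a key into an array.
function keyBy<T, K>(list: T[], getKey: (value: T) => K): Map<K, T> {
    const map = new Map<K, T>();
    for (const value of list) map.set(getKey(value), value);
    return map;
}
function keyByArray<T, K>(list: T[], getKey: (value: T) => K): Map<K, T[]> {
    const map = new Map<K, T[]>();
    for (const value of list) {
        const key = getKey(value);
        const group = map.get(key);
        if (group) group.push(value); else map.set(key, [value]);
    }
    return map;
}

let byX: Map<number, { x: number }> = keyBy([{ x: 5 }, { x: 6 }], v => v.x);
let allByX: Map<number, { x: number }[]> = keyByArray([{ x: 5 }, { x: 5 }, { x: 6 }], v => v.x);
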
package/bundler/bundleRequire.ts
CHANGED
@@ -252,7 +252,7 @@ export function bundleRequire(config: BundleRequireConfig) {
         if (property === "default") return newModule.exports;

         console.warn(
-            `Module ${newModule.filename} is not available`
+            `Module ${newModule.filename} is not available. It might have not been imported in Node.js due to a flag which is checking the browser or an environment variable. It might also be that the entry point is weirdly configured and could not be detected.`
         );
         return undefined;
     },

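For context on the expanded warning: it refers to modules whose import is gated behind a runtime check, so a plain Node.js run never registers them with the bundler and look-ups on them come back undefined. The snippet below is a hedged illustration of such a gate, not code from this package; the module path and environment variable are hypothetical.

// Hypothetical gated import: under Node.js the condition is false, the module is
// never required, and later look-ups trigger the warning above.
const inBrowser = typeof window !== "undefined";
if (inBrowser || process.env.FORCE_CLIENT_MODULES) {
    require("./clientOnlyModule");
}
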
package/package.json
CHANGED
@@ -101,9 +101,11 @@ export class TransactionStorage implements IStorage<Buffer> {
     }
     // Helps get rid of parse errors which constantly log. Also, uses less space
     public static async compressAll() {
-
-
-
+        await fileLockSection(async () => {
+            for (let storage of TransactionStorage.allStorage) {
+                await storage.compressTransactionLog(true);
+            }
+        });
     }

     private init: Promise<void> | undefined = this.loadAllTransactions();

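compressAll now runs the per-storage compression inside fileLockSection, so overlapping calls cannot interleave their file writes. fileLockSection's implementation is not shown in this diff; the sketch below only illustrates the serialization contract the new code relies on, using an in-process queue, whereas the real helper presumably also locks across processes.

// Sketch of a fileLockSection-style wrapper: run critical sections one at a time.
// This is an assumption about its behaviour, not the package's implementation.
let lockQueue: Promise<void> = Promise.resolve();
function fileLockSectionSketch<T>(section: () => Promise<T>): Promise<T> {
    const result = lockQueue.then(section);
    // Keep the chain alive even if a section rejects, so later sections still run.
    lockQueue = result.then(() => undefined, () => undefined);
    return result;
}
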
@@ -287,6 +289,15 @@
             content = await Zip.gunzip(content);
         }

+        let pendingWriteTimes = new Map<string, number>();
+        for (const entry of this.pendingAppends) {
+            let prevTime = pendingWriteTimes.get(entry.key);
+            if (prevTime && prevTime > entry.time) {
+                continue;
+            }
+            pendingWriteTimes.set(entry.key, entry.time);
+        }
+
         let offset = 0;
         let entries: TransactionEntry[] = [];
         while (offset < content.length) {

@@ -314,11 +325,18 @@
             offset++;
             continue;
         }
+
         this.entryCount++;
         let { entry } = entryObj;
         offset = entryObj.offset;
         entries.push(entry);

+        let time = entry.time;
+        let prevTime = pendingWriteTimes.get(entry.key);
+        if (prevTime && prevTime > time) {
+            continue;
+        }
+
         if (entry.value === undefined) {
             this.cache.delete(entry.key);
         } else {

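The two hunks above add a last-write-wins guard: a map of the newest pending write time per key is built first, and any entry replayed from disk that is older than a pending write is skipped, so reloading a transaction file cannot roll back a value that is about to be flushed. A standalone TypeScript sketch of that guard follows; the entry shape is taken from the diff, the function names are illustrative.

type PendingEntry = { key: string; time: number };

// Newest pending write time per key; later times replace earlier ones.
function newestPendingTimes(pendingAppends: PendingEntry[]): Map<string, number> {
    const pendingWriteTimes = new Map<string, number>();
    for (const entry of pendingAppends) {
        const prevTime = pendingWriteTimes.get(entry.key);
        if (prevTime && prevTime > entry.time) continue;
        pendingWriteTimes.set(entry.key, entry.time);
    }
    return pendingWriteTimes;
}

// An entry loaded from disk only applies if no newer pending write exists for its key.
function shouldApplyDiskEntry(entry: PendingEntry, pendingWriteTimes: Map<string, number>): boolean {
    const prevTime = pendingWriteTimes.get(entry.key);
    return !(prevTime && prevTime > entry.time);
}
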
@@ -432,79 +450,73 @@
     private compressing = false;
     private async compressTransactionLog(force?: boolean): Promise<void> {
         if (this.compressing) return;
-
-
-        let existingDiskEntries = await this.rawStorage.getKeys();
-        existingDiskEntries = existingDiskEntries.filter(x => x.endsWith(CHUNK_EXT));
-        let compressNow = force || (
-            this.entryCount > 100 && this.entryCount > this.cache.size * 3
-            // NOTE: This compress check breaks down if we only have very large values, but... those
-            // don't work ANYWAYS (it is better to use one file per value instead).
-            // - Maybe we should throw, or at least warn, on sets of value > 1MB,
-            //   at which point they should just use a file per value
-            || existingDiskEntries.length > Math.max(10, Math.ceil(this.entryCount / 1000))
-            || existingDiskEntries.length > 1000 * 10
-        );
-        if (!compressNow) return;
-        console.log(`Compressing ${this.debugName} transaction log, ${this.entryCount} entries, ${this.cache.size} keys`);
-
-        // Load off disk, in case there are other writes. We still race with them, but at least
-        // this reduces the race condition considerably
+        try {
+            this.compressing = true;

-
-
-
-
+            let existingDiskEntries = await this.rawStorage.getKeys();
+            existingDiskEntries = existingDiskEntries.filter(x => x.endsWith(CHUNK_EXT));
+            let compressNow = force || (
+                this.entryCount > 100 && this.entryCount > this.cache.size * 3
+                // NOTE: This compress check breaks down if we only have very large values, but... those
+                // don't work ANYWAYS (it is better to use one file per value instead).
+                // - Maybe we should throw, or at least warn, on sets of value > 1MB,
+                //   at which point they should just use a file per value
+                || existingDiskEntries.length > Math.max(10, Math.ceil(this.entryCount / 1000))
+                || existingDiskEntries.length > 1000 * 10
+            );
+            if (!compressNow) return;
+            console.log(`Compressing ${this.debugName} transaction log, ${this.entryCount} entries, ${this.cache.size} keys`);

-
+            // Load off disk, in case there are other writes. We still race with them, but at least
+            // this reduces the race condition considerably

+            sortChunks(existingDiskEntries);
+            for (let entry of existingDiskEntries) {
+                await this.loadTransactionFile(entry);
+            }

-
-        for (const entry of this.cache.values()) {
-            let buffer = this.serializeTransactionEntry(entry);
-            buffers.push(buffer);
-        }
+            this.entryCount = this.cache.size;

-        let newChunks = this.chunkBuffers(buffers);

-
-
-
-
-            this.currentChunk = undefined;
-            let content = chunk.buffer;
-            let { header, headerBuffer } = this.getHeader(
-                // AND, never compress the last one, otherwise we can't append to it!
-                content.length >= FILE_ZIP_THRESHOLD && chunk !== newChunks[newChunks.length - 1]
-            );
-            if (header.zipped) {
-                content = await Zip.gzip(content);
+            let buffers: Buffer[] = [];
+            for (const entry of this.cache.values()) {
+                let buffer = this.serializeTransactionEntry(entry);
+                buffers.push(buffer);
             }
-
-
-
-
-
-
+
+            let newChunks = this.chunkBuffers(buffers);
+
+            let newFiles: string[] = [];
+            for (let chunk of newChunks) {
+                // Don't use the previous current chunk for the new file. Also clear it afterwards so it's not reused for any future writes.
+                this.currentChunk = undefined;
+                let file = this.getCurrentChunk();
+                this.currentChunk = undefined;
+                newFiles.push(file);
+                let content = chunk.buffer;
+                let { header, headerBuffer } = this.getHeader(
+                    content.length >= FILE_ZIP_THRESHOLD
+                );
+                if (header.zipped) {
+                    content = await Zip.gzip(content);
+                }
+                let buffer = Buffer.concat([headerBuffer, content]);
+                await this.rawStorage.set(file, buffer);
+                let verified = await this.rawStorage.get(file);
+                if (!verified?.equals(buffer)) {
+                    console.error(`Failed to verify transaction file ${file} in ${this.debugName}`);
+                    throw new Error(`Failed to verify transaction file ${file} in ${this.debugName}`);
+                }
             }
-        }
-        // Delay, because we want to be absolutely certain that all the writes have finished before deleting the old files.
-        await delay(5000);
-        if (!isNode()) {
-            localStorage.setItem(`${this.debugName}-last-compress`, JSON.stringify({
-                time: Date.now(),
-                entryCount: this.entryCount,
-                cacheSize: this.cache.size,
-                newFiles,
-                existingDiskEntries,
-            }));
-        }

-
-
-
-
-
+            // This is the ONLY time we can delete old files, as we know for sure the new file has all of our data.
+            // Any future readers won't know this, unless they write it themselves (or unless they audit it against
+            // the other generations, which is annoying).
+            for (const file of existingDiskEntries) {
+                await this.rawStorage.remove(file);
+            }
+        } finally {
+            this.compressing = false;
         }
     }

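The rewritten compressTransactionLog wraps the whole pass in try/finally so the compressing flag is always cleared, rebuilds chunks from the in-memory cache, writes each new chunk file, reads it back to verify the bytes, and only then deletes the old chunk files; the old code instead waited 5 seconds (await delay(5000)) before the deletion step. The sketch below isolates that write-verify-delete ordering; the storage interface is a simplified assumption, not the package's IStorage.

interface RawStorageLike {
    set(key: string, value: Buffer): Promise<void>;
    get(key: string): Promise<Buffer | undefined>;
    remove(key: string): Promise<void>;
}

// Write the new generation, verify every file by reading it back, and delete the old
// generation only after everything verified, so a failed write never loses data.
async function compactSketch(
    storage: RawStorageLike,
    oldFiles: string[],
    newChunks: { file: string; buffer: Buffer }[],
): Promise<void> {
    for (const chunk of newChunks) {
        await storage.set(chunk.file, chunk.buffer);
        const verified = await storage.get(chunk.file);
        if (!verified?.equals(chunk.buffer)) {
            // Abort before any old file is removed; the previous generation stays intact.
            throw new Error(`Failed to verify ${chunk.file}`);
        }
    }
    for (const file of oldFiles) {
        await storage.remove(file);
    }
}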