querysub 0.327.0 → 0.328.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +3 -4
- package/src/-a-archives/archivesBackBlaze.ts +20 -0
- package/src/-a-archives/archivesDisk.ts +5 -5
- package/src/-a-archives/archivesLimitedCache.ts +118 -7
- package/src/-a-archives/archivesPrivateFileSystem.ts +3 -0
- package/src/-g-core-values/NodeCapabilities.ts +26 -11
- package/src/0-path-value-core/auditLogs.ts +4 -2
- package/src/2-proxy/PathValueProxyWatcher.ts +3 -0
- package/src/3-path-functions/PathFunctionRunner.ts +2 -2
- package/src/4-querysub/Querysub.ts +1 -1
- package/src/5-diagnostics/GenericFormat.tsx +2 -2
- package/src/deployManager/machineApplyMainCode.ts +10 -8
- package/src/deployManager/machineSchema.ts +4 -3
- package/src/deployManager/setupMachineMain.ts +3 -2
- package/src/diagnostics/logs/FastArchiveAppendable.ts +75 -51
- package/src/diagnostics/logs/FastArchiveController.ts +5 -2
- package/src/diagnostics/logs/FastArchiveViewer.tsx +205 -48
- package/src/diagnostics/logs/LogViewer2.tsx +78 -34
- package/src/diagnostics/logs/TimeRangeSelector.tsx +8 -0
- package/src/diagnostics/logs/diskLogGlobalContext.ts +3 -3
- package/src/diagnostics/logs/diskLogger.ts +70 -23
- package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts +82 -63
- package/src/diagnostics/logs/errorNotifications/ErrorSuppressionUI.tsx +37 -3
- package/src/diagnostics/logs/errorNotifications/ErrorWarning.tsx +45 -16
- package/src/diagnostics/logs/errorNotifications/errorDigests.tsx +8 -0
- package/src/diagnostics/logs/errorNotifications/errorWatchEntry.tsx +198 -56
- package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +3 -2
- package/src/diagnostics/managementPages.tsx +5 -0
- package/src/email_ims_notifications/discord.tsx +203 -0
- package/src/fs.ts +9 -0
- package/src/functional/SocketChannel.ts +9 -0
- package/src/functional/throttleRender.ts +134 -0
- package/src/library-components/ATag.tsx +2 -2
- package/src/misc.ts +13 -0
- package/src/misc2.ts +54 -0
- package/src/user-implementation/SecurityPage.tsx +11 -5
- package/src/user-implementation/userData.ts +31 -16
- package/testEntry2.ts +14 -5
- package/src/user-implementation/setEmailKey.ts +0 -25
- package/src/{email → email_ims_notifications}/postmark.tsx +0 -0
- package/src/{email → email_ims_notifications}/sendgrid.tsx +0 -0
package/src/diagnostics/logs/FastArchiveAppendable.ts

@@ -21,7 +21,7 @@ import { errorToUndefined, ignoreErrors, timeoutToUndefinedSilent } from "../../
 import { getCallObj } from "socket-function/src/nodeProxy";
 import { getSyncedController } from "../../library-components/SyncedController";
 import { getBrowserUrlNode, getOwnNodeId } from "../../-f-node-discovery/NodeDiscovery";
-import { secureRandom } from "../../misc/random";
+import { secureRandom, shuffle } from "../../misc/random";
 import { getPathIndex, getPathStr2 } from "../../path";
 import { onNextPaint } from "../../functional/onNextPaint";
 import { getArchivesBackblazePrivateImmutable, getArchivesBackblazePublicImmutable } from "../../-a-archives/archivesBackBlaze";
@@ -34,6 +34,7 @@ import { sha256 } from "js-sha256";
 import { assertIsNetworkTrusted } from "../../-d-trust/NetworkTrust2";
 import { blue, magenta } from "socket-function/src/formatting/logColors";
 import { FileMetadata, FastArchiveAppendableControllerBase, FastArchiveAppendableController, getFileMetadataHash } from "./FastArchiveController";
+import { fsExistsAsync } from "../../fs";
 
 // NOTE: In a single command line micro-test it looks like we can write about 40K writes of 500 per once, when using 10X parallel, on a fairly potato server. We should probably batch though, and only do 1X parallel.
 /*
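Several hunks below guard disk access with the new `fsExistsAsync` helper imported here (the file list above shows `package/src/fs.ts` gaining 9 lines this release). Its body is not part of this section; a minimal sketch, assuming it is a non-throwing promise wrapper over `fs.promises.access`:

```ts
// Hypothetical sketch of fsExistsAsync; the real body lives in the new
// package/src/fs.ts and is not shown in this diff section.
import * as fs from "fs";

export async function fsExistsAsync(path: string): Promise<boolean> {
    try {
        // access() rejects if the path does not exist (or is inaccessible).
        await fs.promises.access(path);
        return true;
    } catch {
        return false;
    }
}
```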
@@ -140,7 +141,7 @@ export class FastArchiveAppendable<Datum> {
             await this.flushNow();
         });
         // Random, to try to prevent the dead file detection code from getting in sync. It's fine if it gets in sync, it's just inefficient as we'll have multiple services uploading the same file to the same location at the same time.
-        void
+        void runInfinitePoll(timeInMinute * 20 + Math.random() * timeInMinute * 5, async () => {
             await this.moveLogsToBackblaze();
         });
     }
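The comment above explains the randomized interval: each process polls somewhere between every 20 and 25 minutes, so upload passes started by different services drift out of phase instead of staying synchronized. `runInfinitePoll` itself is not shown in this diff; a minimal sketch of the pattern, assuming an `(intervalMs, fnc)` signature that swallows per-iteration errors:

```ts
// Hypothetical sketch of the jittered infinite-poll pattern used above.
const timeInMinute = 60 * 1000;

function runInfinitePoll(intervalMs: number, fnc: () => Promise<void>): void {
    void (async () => {
        while (true) {
            try {
                await fnc();
            } catch (e) {
                console.warn("poll iteration failed", e);
            }
            await new Promise(resolve => setTimeout(resolve, intervalMs));
        }
    })();
}

// Each process picks its own interval in [20min, 25min), so uploaders
// started at the same moment gradually spread apart.
runInfinitePoll(timeInMinute * 20 + Math.random() * timeInMinute * 5, async () => {
    // ... e.g. moveLogsToBackblaze()
});
```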
@@ -151,13 +152,16 @@ export class FastArchiveAppendable<Datum> {
         return nestArchives("fast-logs/" + this.rootPath, archives);
     });
 
-    public
-
+    public baseGetLocalPathRoot = () => {
+        return (
             os.homedir()
             + "/fast-log-cache/"
             + getDomain() + "/"
             + this.rootPath
         );
+    };
+    public getLocalPathRoot = lazy(() => {
+        let path = this.baseGetLocalPathRoot();
         if (!fs.existsSync(path)) {
             fs.mkdirSync(path, { recursive: true });
         }
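This hunk splits the pure path computation (`baseGetLocalPathRoot`) from the side-effecting `getLocalPathRoot`, which now creates the directory once via `lazy`. The `lazy` helper is not shown in this diff; a plausible sketch is a memoize-once wrapper:

```ts
// Hypothetical sketch of the lazy() memoization assumed by getLocalPathRoot:
// run the factory on the first call, then return the cached result forever.
function lazy<T>(factory: () => T): () => T {
    let cached: { value: T } | undefined;
    return () => {
        if (cached === undefined) {
            cached = { value: factory() };
        }
        return cached.value;
    };
}
```

Splitting the two lets read-only callers such as `moveLogsToBackblaze` and `getPendingFiles` (below) inspect the cache path without creating directories as a side effect.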
@@ -199,7 +203,10 @@
     // NOTE: This is disk writing, which should be fast, but if it's slow we might be able to remove the measureWrap (as technically spending 50% of our time writing to the disk is fine, and won't lag anything).
     @measureFnc
     public async flushNow(now = Date.now()) {
+
         await appendableSerialLock(async () => {
+            if (this.pendingWriteQueue.length === 0) return;
+
             // 2025-09-06T07
             let hourFile = new Date(now).toISOString().slice(0, 13) + ".log";
             let localCacheFolder = this.getLocalPathRoot() + getOwnThreadId() + "/";
@@ -208,8 +215,6 @@
             // Always heartbeat
             await fs.promises.writeFile(localCacheFolder + "heartbeat", Buffer.from(now + ""));
 
-            if (this.pendingWriteQueue.length === 0) return;
-
             try {
                 let beforeSize = await fs.promises.stat(localCachePath);
                 if (beforeSize.size > UNCOMPRESSED_LOG_FILE_STOP_THRESHOLD) {
@@ -277,18 +282,22 @@
 
     public async moveLogsToBackblaze() {
         await appendableSerialLock(async () => {
-            let rootCacheFolder = this.
+            let rootCacheFolder = this.baseGetLocalPathRoot();
+            if (!await fsExistsAsync(rootCacheFolder)) return;
             console.log(magenta(`Moving old logs to Backblaze from ${rootCacheFolder}`));
 
             let archives = this.getArchives();
             async function moveLogsForFolder(threadId: string) {
                 let threadDir = rootCacheFolder + threadId + "/";
-                if (!
+                if (!await fsExistsAsync(threadDir)) return;
                 let files = await fs.promises.readdir(threadDir);
+                // Shuffle, so if we do run multiple at the same time, we are less likely to use the same files at the same time.
+                files = shuffle(files, Math.random());
                 for (let file of files) {
                     if (file === "heartbeat") continue;
                     let fullPath = threadDir + file;
                     try {
+                        if (!await fsExistsAsync(fullPath)) continue;
                         // We could use modified time here? Although, this is nice if we move files around, and then manually have them moved, although even then... this could cause problem be tripping while we are copying the file, so... maybe this is just wrong?
                         let timeStamp = getFileTimeStamp(fullPath);
                         if (timeStamp.endTime > Date.now() - UPLOAD_THRESHOLD) continue;
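The new `shuffle(files, Math.random())` call decorrelates concurrent uploaders: two services walking the same folder start from different files, complementing the jittered poll interval above. The `shuffle` in `../../misc/random` is not shown here; a sketch assuming a seeded Fisher-Yates shuffle, with mulberry32 as one common tiny PRNG for the seed:

```ts
// Hypothetical sketch of a (values, seed) seeded Fisher-Yates shuffle.
function shuffle<T>(values: T[], seed: number): T[] {
    // mulberry32: tiny deterministic PRNG seeded from `seed` in [0, 1).
    let state = Math.floor(seed * 0xffffffff) >>> 0;
    const next = () => {
        state = (state + 0x6d2b79f5) >>> 0;
        let t = state;
        t = Math.imul(t ^ (t >>> 15), t | 1);
        t ^= t + Math.imul(t ^ (t >>> 7), t | 61);
        return ((t ^ (t >>> 14)) >>> 0) / 4294967296;
    };
    // Fisher-Yates: swap each position with a random earlier-or-equal index.
    let result = values.slice();
    for (let i = result.length - 1; i > 0; i--) {
        const j = Math.floor(next() * (i + 1));
        [result[i], result[j]] = [result[j], result[i]];
    }
    return result;
}
```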
@@ -419,6 +428,7 @@
         });
 
         let createProgress = async (section: string, max: number) => {
+            section = `${this.rootPath}|${section}`;
             let cancelled: Error | undefined;
             let lastValue = 0;
             let baseBatch = batchFunction({ delay: 150, },
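Prefixing the section name with `this.rootPath` namespaces progress entries per archive, so two `FastArchiveAppendable` instances syncing at once report distinct sections. A hypothetical illustration (the rootPath value is invented):

```ts
// With the prefix applied, section names become unique per archive:
const rootPath = "error-logs"; // hypothetical example value
const section = `${rootPath}|Downloading (bytes)`;
console.log(section); // "error-logs|Downloading (bytes)"
```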
@@ -499,7 +509,7 @@
         synchronizeObj.parametersResult.resolve(syncResult);
 
 
-        let downloadProgress = await createProgress(
+        let downloadProgress = await createProgress(`Downloading (bytes)`, 0);
         let decompressProgress = await createProgress("Decompressing (bytes)", 0);
         let scanProgress = await createProgress("Scanning (datums)", 0);
         let decodeProgress = await createProgress("Decoding (datums)", 0);
@@ -509,7 +519,7 @@
         const self = this;
 
 
-        async function downloadAndParseFile(file: FileMetadata) {
+        async function downloadAndParseFile(file: FileMetadata, runInner: (code: () => Promise<void>) => Promise<void>) {
             const onFetchedData = runInSerial(async (data: Buffer) => {
                 await downloadProgress(data.length, data.length, true);
                 await decompressWriter.write(data);
@@ -611,65 +621,79 @@
 
             // TODO: Stream from the local cache instead? It should be possible, we can get the total size, and read chunks.
             let hash = getFileMetadataHash(file) + ".file";
-            let
-            if (
-
-
-
-
-
+            let contents = await localCache.get(hash);
+            if (stoppedPromise.resolveCalled) return;
+            await runInner(async () => {
+                if (contents?.length) {
+                    const CHUNK_SIZE = 1000 * 1000 * 10;
+                    for (let i = 0; i < contents.length; i += CHUNK_SIZE) {
+                        let data = contents.slice(i, i + CHUNK_SIZE);
+                        await onFetchedData(data);
+                    }
+                } else {
+                    const response = await fetch(url);
+                    if (!response.ok) {
+                        throw new Error(`Failed to fetch ${url}: ${response.status} ${response.statusText}`);
                     }
-                await onFetchedData(data);
-            }
-            } else {
-                const response = await fetch(url);
-                if (!response.ok) {
-                    throw new Error(`Failed to fetch ${url}: ${response.status} ${response.statusText}`);
-                }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    if (!response.body) {
+                        throw new Error(`Response body is undefined for ${url}`);
+                    }
+                    let values: Buffer[] = [];
+                    const reader = response.body.getReader();
+                    void stoppedPromise.promise.finally(() => {
+                        void response.body?.cancel();
+                    });
+                    try {
+                        for await (let value of streamToIteratable(reader)) {
+                            // Cancel entirely
+                            if (stoppedPromise.resolveCalled) return;
+                            if (!value) continue;
+                            let buffer = Buffer.from(value);
+                            values.push(buffer);
+                            await onFetchedData(buffer);
+                        }
+                    } finally {
+                        reader.releaseLock();
                     }
-            } finally {
-                reader.releaseLock();
-            }
 
-
-
-
+                    await localCache.set(hash, Buffer.concat(values));
+                }
+                await decompressWriter.close();
+                await decompressPromise;
+            });
         }
 
         // Fork off the processing
         void (async () => {
             try {
                 // Iterate over all files and process them
-                let fileProgress = await createProgress("Files",
+                let fileProgress = await createProgress("Files", syncResult.files.length);
+                let fileInnerProgress = await createProgress("Files Inner", syncResult.files.length);
                 let failedFiles = await createProgress("Failed Files", 0);
-
+                let runSerial = runInSerial(async (fnc: () => Promise<void>) => {
+                    try {
+                        await fnc();
+                    } finally {
+                        await fileInnerProgress(1, 0, true);
+                    }
+                });
+                async function downloadFileWrapper(file: FileMetadata) {
                     if (stoppedPromise.resolveCalled) return;
                     try {
-                        await downloadAndParseFile(file);
+                        await downloadAndParseFile(file, runSerial);
                     } catch (e: any) {
                         console.warn(`Failed to download and parse file ${file.path}:\n${e.stack}`);
                         await failedFiles(1, 1, true);
                     }
-
+                    if (stoppedPromise.resolveCalled) return;
+                    await fileProgress(1, 0, true);
                 }
+                for (let file of syncResult.files) {
+                    await downloadFileWrapper(file);
+                }
+                await runInSerial(async () => {
+                });
 
                 await (await createProgress("Done", 0))(1, 1, true);
             } catch (e: any) {
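Two helpers carry this hunk. `streamToIteratable` (imported elsewhere in the file, not shown in this section) adapts a `ReadableStream` reader into an async iterable; a minimal sketch of that shape, assuming it simply drains the reader:

```ts
// Hypothetical sketch of streamToIteratable: yield chunks from a
// ReadableStream reader until the stream ends. The diff's consumer also
// guards against empty chunks with `if (!value) continue;`.
async function* streamToIteratable(
    reader: ReadableStreamDefaultReader<Uint8Array>
): AsyncGenerator<Uint8Array> {
    while (true) {
        const { done, value } = await reader.read();
        if (done) return;
        if (value) yield value;
    }
}
```

The second is the `runInner`/`runSerial` pair: each file's replay and decompression appears to be funneled through one `runInSerial` queue, and the `finally` in `runSerial` ticks the new "Files Inner" progress even when a file fails, so the "Files" and "Files Inner" bars can advance independently.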
package/src/diagnostics/logs/FastArchiveController.ts

@@ -35,6 +35,7 @@ import { assertIsNetworkTrusted } from "../../-d-trust/NetworkTrust2";
 import { blue, magenta } from "socket-function/src/formatting/logColors";
 import { FastArchiveAppendable, getFileTimeStamp } from "./FastArchiveAppendable";
 import { IdentityController_getMachineId, IdentityController_getReconnectNodeId } from "../../-c-identity/IdentityController";
+import { fsExistsAsync } from "../../fs";
 
 export type FileMetadata = {
     nodeId?: string;
@@ -84,7 +85,8 @@ export class FastArchiveAppendableControllerBase {
     /** Get all pending local files for this rootPath that haven't been uploaded to Backblaze yet */
     public async getPendingFiles(rootPath: string, timeRange: { startTime: number; endTime: number; }): Promise<InternalFileMetadata[]> {
 
-        let rootCacheFolder = new FastArchiveAppendable(rootPath).
+        let rootCacheFolder = new FastArchiveAppendable(rootPath).baseGetLocalPathRoot();
+        if (!await fsExistsAsync(rootCacheFolder)) return [];
 
         //console.log(`Searching for pending files in ${rootCacheFolder} for time range ${new Date(timeRange.startTime).toISOString()} to ${new Date(timeRange.endTime).toISOString()}`);
 
@@ -377,6 +379,7 @@
         let remoteProgress = this.updateProgress(syncId, "Discovering remote files", byMachineId.size);
         let remoteValue = 0;
 
+
         await Promise.all(Array.from(byMachineId).map(async ([machineId, nodeObjs]) => {
             let firstAliveNode = new PromiseObj<string>();
             let allFinished = Promise.all(nodeObjs.map(async ({ nodeId, entryPoint }) => {
@@ -396,7 +399,7 @@
 
             let pendingFiles = await errorToUndefined(
                 controller.getPendingFiles(config.rootPath, config.range));
-            console.log(blue(`Found ${pendingFiles?.length} pending files on node ${aliveNodeId}`)
+            console.log(blue(`Found ${pendingFiles?.length} pending files on node ${aliveNodeId}`));
 
             remoteValue++;
             remoteProgress(remoteValue);