querysub 0.327.0 → 0.329.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/error-email.js +8 -0
- package/bin/error-im.js +8 -0
- package/package.json +4 -3
- package/src/-a-archives/archivesBackBlaze.ts +20 -0
- package/src/-a-archives/archivesCborT.ts +52 -0
- package/src/-a-archives/archivesDisk.ts +5 -5
- package/src/-a-archives/archivesJSONT.ts +19 -5
- package/src/-a-archives/archivesLimitedCache.ts +118 -7
- package/src/-a-archives/archivesPrivateFileSystem.ts +3 -0
- package/src/-g-core-values/NodeCapabilities.ts +26 -11
- package/src/0-path-value-core/auditLogs.ts +4 -2
- package/src/2-proxy/PathValueProxyWatcher.ts +7 -0
- package/src/3-path-functions/PathFunctionRunner.ts +2 -2
- package/src/4-querysub/Querysub.ts +1 -1
- package/src/5-diagnostics/GenericFormat.tsx +2 -2
- package/src/config.ts +15 -3
- package/src/deployManager/machineApplyMainCode.ts +10 -8
- package/src/deployManager/machineSchema.ts +4 -3
- package/src/deployManager/setupMachineMain.ts +3 -2
- package/src/diagnostics/logs/FastArchiveAppendable.ts +86 -53
- package/src/diagnostics/logs/FastArchiveController.ts +11 -2
- package/src/diagnostics/logs/FastArchiveViewer.tsx +205 -48
- package/src/diagnostics/logs/LogViewer2.tsx +78 -34
- package/src/diagnostics/logs/TimeRangeSelector.tsx +8 -0
- package/src/diagnostics/logs/diskLogGlobalContext.ts +5 -4
- package/src/diagnostics/logs/diskLogger.ts +70 -23
- package/src/diagnostics/logs/errorNotifications/ErrorDigestPage.tsx +409 -0
- package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts +94 -67
- package/src/diagnostics/logs/errorNotifications/ErrorSuppressionUI.tsx +37 -3
- package/src/diagnostics/logs/errorNotifications/ErrorWarning.tsx +50 -16
- package/src/diagnostics/logs/errorNotifications/errorDigestEmail.tsx +174 -0
- package/src/diagnostics/logs/errorNotifications/errorDigests.tsx +291 -0
- package/src/diagnostics/logs/errorNotifications/errorLoopEntry.tsx +7 -0
- package/src/diagnostics/logs/errorNotifications/errorWatchEntry.tsx +185 -68
- package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +10 -19
- package/src/diagnostics/managementPages.tsx +33 -15
- package/src/email_ims_notifications/discord.tsx +203 -0
- package/src/{email → email_ims_notifications}/postmark.tsx +3 -3
- package/src/fs.ts +9 -0
- package/src/functional/SocketChannel.ts +9 -0
- package/src/functional/throttleRender.ts +134 -0
- package/src/library-components/ATag.tsx +2 -2
- package/src/library-components/SyncedController.ts +3 -3
- package/src/misc.ts +18 -0
- package/src/misc2.ts +106 -0
- package/src/user-implementation/SecurityPage.tsx +11 -5
- package/src/user-implementation/userData.ts +57 -23
- package/testEntry2.ts +14 -5
- package/src/user-implementation/setEmailKey.ts +0 -25
- package/src/{email → email_ims_notifications}/sendgrid.tsx +0 -0
@@ -24,9 +24,11 @@ import { shutdown } from "../diagnostics/periodic";
 import { onServiceConfigChange, triggerRollingUpdate } from "./machineController";
 import { PromiseObj } from "../promise";
 import path from "path";
+import { fsExistsAsync } from "../fs";
 
 const PIPE_FILE_LINE_LIMIT = 10_000;
 
+
 const getMemoryInfo = measureWrap(async function getMemoryInfo(): Promise<{ value: number; max: number } | undefined> {
     if (os.platform() === "win32") {
         throw new Error("Windows is not supported for machine resource monitoring");
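
The recurring change in this release swaps truncated synchronous existence checks for an awaited fsExistsAsync helper imported from package/src/fs.ts (+9 lines in the file list above). The diff does not include that file's body, so the following is only a plausible sketch of such a helper, built on fs.promises.access:

import fs from "fs";

// Assumed implementation of fsExistsAsync (the real body in package/src/fs.ts
// is not shown in this diff): resolves true if the path is accessible.
export async function fsExistsAsync(path: string): Promise<boolean> {
    try {
        await fs.promises.access(path);
        return true;
    } catch {
        return false;
    }
}
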
@@ -379,7 +381,7 @@ const getScreenState = measureWrap(async function getScreenState(populateIsProce
 });
 async function removeOldNodeId(screenName: string) {
     let nodeIdFile = os.homedir() + "/" + SERVICE_FOLDER + screenName + "/" + SERVICE_NODE_FILE_NAME;
-    if (
+    if (await fsExistsAsync(nodeIdFile)) {
         let nodeId = await fs.promises.readFile(nodeIdFile, "utf8");
         console.log(green(`Removing node if for dead service on ${nodeIdFile}, node id ${nodeId}`));
         await fs.promises.unlink(nodeIdFile);
@@ -416,7 +418,7 @@ const runScreenCommand = measureWrap(async function runScreenCommand(config: {
     if (existingScreen && !rollingObj && await isScreenRunningProcess(existingScreen.pid)) {
         let nodeIdPath = os.homedir() + "/" + SERVICE_FOLDER + screenName + "/" + SERVICE_NODE_FILE_NAME;
         let rollingFinalTime = Date.now() + config.rollingWindow;
-        if (
+        if (await fsExistsAsync(nodeIdPath)) {
            let nodeId = await fs.promises.readFile(nodeIdPath, "utf8");
            // REMOVE the nodeId file, so we the node isn't terminated!
            await fs.promises.unlink(nodeIdPath);
@@ -551,7 +553,7 @@ const ensureGitSynced = measureWrap(async function ensureGitSynced(config: {
     repoUrl: string;
     gitRef: string;
 }) {
-    if (!await
+    if (!await fsExistsAsync(config.gitFolder + ".git")) {
         await runPromise(`git clone ${config.repoUrl} ${config.gitFolder}`);
     }
     try {
@@ -595,7 +597,7 @@ async function quickIsOutdated() {
         let folder = root + screenName + "/";
         await fs.promises.mkdir(folder, { recursive: true });
         let parameterPath = folder + "/parameters.json";
-        if (!
+        if (!await fsExistsAsync(parameterPath)) return true;
         let prevParameters = await fs.promises.readFile(parameterPath, "utf8");
         if (prevParameters !== newParametersString) return true;
     }
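
These call sites still follow a check-then-read pattern, which leaves a small window where the file can vanish between fsExistsAsync and readFile. For contrast (this is not what the package does), a sketch of the race-free alternative that treats ENOENT as absence:

import fs from "fs";

// Hypothetical helper, not part of querysub: read a file and return undefined
// when it does not exist, avoiding a separate existence check.
async function readFileIfExists(path: string): Promise<string | undefined> {
    try {
        return await fs.promises.readFile(path, "utf8");
    } catch (e: any) {
        if (e.code === "ENOENT") return undefined;
        throw e;
    }
}
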
@@ -654,7 +656,7 @@ const resyncServicesBase = runInSerial(measureWrap(async function resyncServices
     }
     let parameterPath = folder + "/parameters.json";
     let prevParameters = "";
-    if (
+    if (await fsExistsAsync(parameterPath)) {
         prevParameters = await fs.promises.readFile(parameterPath, "utf8");
     }
     let newParametersString = JSON.stringify(config.parameters);
@@ -673,7 +675,7 @@ const resyncServicesBase = runInSerial(measureWrap(async function resyncServices
     await fs.promises.writeFile(parameterPath, newParametersString);
 
     let nodePathId = folder + SERVICE_NODE_FILE_NAME;
-    if (
+    if (await fsExistsAsync(nodePathId)) {
         let nodeId = await fs.promises.readFile(nodePathId, "utf8");
         machineInfo.services[config.serviceId].nodeId = nodeId;
     }
@@ -788,7 +790,7 @@ export async function machineApplyMain() {
 
     // NOTE: Error's don't get logged unless we host, so... we can't do this earlier than this...
     let lastErrorPath = os.homedir() + "/lastAlwaysUpError.txt";
-    if (
+    if (await fsExistsAsync(lastErrorPath)) {
         let lastError = await fs.promises.readFile(lastErrorPath, "utf8");
         await fs.promises.unlink(lastErrorPath);
         console.error(`Always up error: ${lastError}`);
@@ -796,7 +798,7 @@ export async function machineApplyMain() {
 
     // Kill the last running one
     let isRunningPath = os.homedir() + "/machineApplyPID.txt";
-    if (
+    if (await fsExistsAsync(isRunningPath)) {
         let pid = await fs.promises.readFile(isRunningPath, "utf8");
         try {
             process.kill(parseInt(pid), "SIGKILL");
@@ -23,6 +23,7 @@ import { DeployProgress, deployFunctions, deployGetFunctions } from "../4-deploy
 import { FunctionSpec, functionSchema } from "../3-path-functions/PathFunctionRunner";
 import { Querysub } from "../4-querysub/QuerysubController";
 import { green, red } from "socket-function/src/formatting/logColors";
+import { fsExistsAsync } from "../fs";
 
 const SERVICE_FOLDER_NAME = "machine-services";
 export const SERVICE_FOLDER = `${SERVICE_FOLDER_NAME}/`;
@@ -240,7 +241,7 @@ export class MachineServiceControllerBase {
         let querysubFolder = path.resolve("../querysub");
         let querysubRef = "";
         let querysubUncommitted: string[] = [];
-        if (
+        if (await fsExistsAsync(querysubFolder)) {
             querysubRef = await getGitRefLive(querysubFolder);
             querysubUncommitted = await getGitUncommitted(querysubFolder);
         }
@@ -276,7 +277,7 @@ export class MachineServiceControllerBase {
     public async commitPushService(commitMessage: string) {
         if (commitMessage.toLowerCase().includes("querysub")) {
             let querysubFolder = path.resolve("../querysub");
-            if (
+            if (await fsExistsAsync(querysubFolder)) {
                 let querysubLastCommit = await getGitRefInfo({
                     gitDir: querysubFolder,
                     ref: "origin/main",
@@ -293,7 +294,7 @@ export class MachineServiceControllerBase {
     }
     public async commitPushAndPublishQuerysub(commitMessage: string) {
         let querysubFolder = path.resolve("../querysub");
-        if (!
+        if (!await fsExistsAsync(querysubFolder)) {
             throw new Error(`Querysub folder does not exist at ${querysubFolder}`);
         }
 
@@ -6,6 +6,7 @@ import fs from "fs";
 import os from "os";
 import readline from "readline";
 import open from "open";
+import { fsExistsAsync } from "../fs";
 // Import querysub, to fix missing dependencies
 Querysub;
 
@@ -32,7 +33,7 @@ async function getGitHubApiKey(repoUrl: string, sshRemote: string): Promise<stri
     const cacheFile = os.homedir() + `/githubkey_${repoOwner}_${repoName}.json`;
 
     // Check if we have a cached key
-    if (
+    if (await fsExistsAsync(cacheFile)) {
         try {
             const cached = JSON.parse(fs.readFileSync(cacheFile, "utf8"));
             if (cached.apiKey) {
@@ -222,7 +223,7 @@ async function main() {
 
     // 1. Copy backblaze file to remote server (~/backblaze.json)
     console.log("Copying backblaze credentials...");
-    if (
+    if (await fsExistsAsync(backblazePath)) {
         await runPromise(`scp "${backblazePath}" ${sshRemote}:~/backblaze.json`);
         console.log("✅ Backblaze credentials copied");
     } else {
@@ -5,7 +5,7 @@ import { getMachineId, getOwnMachineId } from "../../-a-auth/certs";
 import { isDefined, parseFileNameKVP, parsePath, partialCopyObject, streamToIteratable, sum, toFileNameKVP } from "../../misc";
 import { registerShutdownHandler } from "../periodic";
 import { batchFunction, delay, runInSerial, runInfinitePoll, runInfinitePollCallAtStart } from "socket-function/src/batching";
-import { PromiseObj, isNode, keyByArray, nextId, sort, timeInDay, timeInHour, timeInMinute } from "socket-function/src/misc";
+import { PromiseObj, isNode, keyByArray, list, nextId, sort, timeInDay, timeInHour, timeInMinute } from "socket-function/src/misc";
 import os from "os";
 import { getOwnThreadId } from "../../-f-node-discovery/NodeDiscovery";
 import fs from "fs";
@@ -21,7 +21,7 @@ import { errorToUndefined, ignoreErrors, timeoutToUndefinedSilent } from "../../
 import { getCallObj } from "socket-function/src/nodeProxy";
 import { getSyncedController } from "../../library-components/SyncedController";
 import { getBrowserUrlNode, getOwnNodeId } from "../../-f-node-discovery/NodeDiscovery";
-import { secureRandom } from "../../misc/random";
+import { secureRandom, shuffle } from "../../misc/random";
 import { getPathIndex, getPathStr2 } from "../../path";
 import { onNextPaint } from "../../functional/onNextPaint";
 import { getArchivesBackblazePrivateImmutable, getArchivesBackblazePublicImmutable } from "../../-a-archives/archivesBackBlaze";
@@ -34,6 +34,7 @@ import { sha256 } from "js-sha256";
 import { assertIsNetworkTrusted } from "../../-d-trust/NetworkTrust2";
 import { blue, magenta } from "socket-function/src/formatting/logColors";
 import { FileMetadata, FastArchiveAppendableControllerBase, FastArchiveAppendableController, getFileMetadataHash } from "./FastArchiveController";
+import { fsExistsAsync } from "../../fs";
 
 // NOTE: In a single command line micro-test it looks like we can write about 40K writes of 500 per once, when using 10X parallel, on a fairly potato server. We should probably batch though, and only do 1X parallel.
 /*
@@ -140,7 +141,7 @@ export class FastArchiveAppendable<Datum> {
             await this.flushNow();
         });
         // Random, to try to prevent the dead file detection code from getting in sync. It's fine if it gets in sync, it's just inefficient as we'll have multiple services uploading the same file to the same location at the same time.
-        void
+        void runInfinitePoll(timeInMinute * 20 + Math.random() * timeInMinute * 5, async () => {
            await this.moveLogsToBackblaze();
        });
    }
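
The poll interval above is deliberately jittered: a 20-minute base plus up to 5 minutes of random offset, so multiple services drift out of phase instead of uploading the same files at the same moment. A minimal sketch of the same idea, assuming runInfinitePoll simply re-runs its callback after the given delay (its real implementation in socket-function/src/batching is not shown in this diff):

const timeInMinute = 60 * 1000;

// Sketch: each process computes its own jittered interval once at startup,
// so a fleet of services naturally desynchronizes.
function startJitteredPoll(fnc: () => Promise<void>): void {
    const intervalMs = timeInMinute * 20 + Math.random() * timeInMinute * 5;
    const loop = async () => {
        try {
            await fnc();
        } catch (e) {
            console.error(e);
        }
        setTimeout(loop, intervalMs);
    };
    setTimeout(loop, intervalMs);
}
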
@@ -151,13 +152,16 @@ export class FastArchiveAppendable<Datum> {
         return nestArchives("fast-logs/" + this.rootPath, archives);
     });
 
-    public
-
+    public baseGetLocalPathRoot = () => {
+        return (
             os.homedir()
             + "/fast-log-cache/"
             + getDomain() + "/"
             + this.rootPath
         );
+    };
+    public getLocalPathRoot = lazy(() => {
+        let path = this.baseGetLocalPathRoot();
         if (!fs.existsSync(path)) {
             fs.mkdirSync(path, { recursive: true });
         }
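
This refactor splits the pure path computation (baseGetLocalPathRoot) from the side-effecting, memoized variant (getLocalPathRoot), so callers like moveLogsToBackblaze can ask where the cache would live without creating the directory. A sketch of a lazy() memoizer consistent with this usage (the helper the package actually imports is not shown in the diff):

// Sketch: run the factory once on first call, cache the result thereafter.
function lazy<T>(factory: () => T): () => T {
    let computed = false;
    let value: T;
    return () => {
        if (!computed) {
            value = factory();
            computed = true;
        }
        return value;
    };
}
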
@@ -199,7 +203,10 @@ export class FastArchiveAppendable<Datum> {
     // NOTE: This is disk writing, which should be fast, but if it's slow we might be able to remove the measureWrap (as technically spending 50% of our time writing to the disk is fine, and won't lag anything).
     @measureFnc
     public async flushNow(now = Date.now()) {
+
         await appendableSerialLock(async () => {
+            if (this.pendingWriteQueue.length === 0) return;
+
             // 2025-09-06T07
             let hourFile = new Date(now).toISOString().slice(0, 13) + ".log";
             let localCacheFolder = this.getLocalPathRoot() + getOwnThreadId() + "/";
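
flushNow and moveLogsToBackblaze both run their bodies under appendableSerialLock, so flushes and uploads never interleave; with the new early return above, an empty queue now also skips the heartbeat write, which previously happened before the check (removed in the next hunk). A sketch of a promise-chaining serial lock with this calling convention (a hypothetical stand-in; the package builds its own, which is not shown here):

// Sketch: queue async blocks so they run strictly one at a time.
function makeSerialLock(): <T>(fnc: () => Promise<T>) => Promise<T> {
    let tail: Promise<unknown> = Promise.resolve();
    return (fnc) => {
        const result = tail.then(() => fnc());
        tail = result.catch(() => undefined); // keep the chain alive on errors
        return result;
    };
}

const appendableSerialLock = makeSerialLock();
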
@@ -208,8 +215,6 @@ export class FastArchiveAppendable<Datum> {
             // Always heartbeat
             await fs.promises.writeFile(localCacheFolder + "heartbeat", Buffer.from(now + ""));
 
-            if (this.pendingWriteQueue.length === 0) return;
-
             try {
                 let beforeSize = await fs.promises.stat(localCachePath);
                 if (beforeSize.size > UNCOMPRESSED_LOG_FILE_STOP_THRESHOLD) {
@@ -277,18 +282,22 @@ export class FastArchiveAppendable<Datum> {
 
     public async moveLogsToBackblaze() {
         await appendableSerialLock(async () => {
-            let rootCacheFolder = this.
+            let rootCacheFolder = this.baseGetLocalPathRoot();
+            if (!await fsExistsAsync(rootCacheFolder)) return;
             console.log(magenta(`Moving old logs to Backblaze from ${rootCacheFolder}`));
 
             let archives = this.getArchives();
             async function moveLogsForFolder(threadId: string) {
                 let threadDir = rootCacheFolder + threadId + "/";
-                if (!
+                if (!await fsExistsAsync(threadDir)) return;
                 let files = await fs.promises.readdir(threadDir);
+                // Shuffle, so if we do run multiple at the same time, we are less likely to use the same files at the same time.
+                files = shuffle(files, Math.random());
                 for (let file of files) {
                     if (file === "heartbeat") continue;
                     let fullPath = threadDir + file;
                     try {
+                        if (!await fsExistsAsync(fullPath)) continue;
                         // We could use modified time here? Although, this is nice if we move files around, and then manually have them moved, although even then... this could cause problem be tripping while we are copying the file, so... maybe this is just wrong?
                         let timeStamp = getFileTimeStamp(fullPath);
                         if (timeStamp.endTime > Date.now() - UPLOAD_THRESHOLD) continue;
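
shuffle(files, Math.random()) above comes from package/src/misc/random, whose body is not in this diff; the seed argument suggests a deterministic, PRNG-driven Fisher-Yates shuffle. A sketch under that assumption, using mulberry32 as a stand-in generator:

// Assumed shape of shuffle(values, seed); the package's real seeding scheme
// is not shown in the diff.
function shuffle<T>(values: T[], seed: number): T[] {
    let state = Math.floor(seed * 2 ** 32) >>> 0;
    const next = () => {
        // mulberry32 PRNG step, returning a float in [0, 1)
        state = (state + 0x6D2B79F5) >>> 0;
        let t = Math.imul(state ^ (state >>> 15), 1 | state);
        t = (t + Math.imul(t ^ (t >>> 7), 61 | t)) ^ t;
        return ((t ^ (t >>> 14)) >>> 0) / 4294967296;
    };
    const result = values.slice();
    for (let i = result.length - 1; i > 0; i--) {
        const j = Math.floor(next() * (i + 1));
        [result[i], result[j]] = [result[j], result[i]];
    }
    return result;
}
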
@@ -301,7 +310,10 @@ export class FastArchiveAppendable<Datum> {
                         let compressed = await measureBlock(async () => Zip.gzip(data), "FastArchiveAppendable|compress");
                         console.log(`Uploading ${formatNumber(data.length)}B (compressed to ${formatNumber(compressed.length)}B) logs to ${backblazePath} from ${fullPath}`);
                         await archives.set(backblazePath, compressed);
-
+                        // Ignore unlink errors to reduce excess logging. This races on startup, so it is likely we'll hit this a fair amount (especially because archives.set is so slow)
+                        try {
+                            await fs.promises.unlink(fullPath);
+                        } catch { }
                     } catch (e: any) {
                         // Just skip it, if the first file in the directory is broken we don't want to never move any files
                         console.error(`Error moving log file ${fullPath}: ${e.stack}`);
@@ -419,6 +431,7 @@ export class FastArchiveAppendable<Datum> {
         });
 
         let createProgress = async (section: string, max: number) => {
+            section = `${this.rootPath}|${section}`;
             let cancelled: Error | undefined;
             let lastValue = 0;
             let baseBatch = batchFunction({ delay: 150, },
@@ -499,7 +512,7 @@ export class FastArchiveAppendable<Datum> {
         synchronizeObj.parametersResult.resolve(syncResult);
 
 
-        let downloadProgress = await createProgress(
+        let downloadProgress = await createProgress(`Downloading (bytes)`, 0);
         let decompressProgress = await createProgress("Decompressing (bytes)", 0);
         let scanProgress = await createProgress("Scanning (datums)", 0);
         let decodeProgress = await createProgress("Decoding (datums)", 0);
@@ -509,7 +522,7 @@ export class FastArchiveAppendable<Datum> {
         const self = this;
 
 
-        async function downloadAndParseFile(file: FileMetadata) {
+        async function downloadAndParseFile(file: FileMetadata, runInner: (code: () => Promise<void>) => Promise<void>) {
             const onFetchedData = runInSerial(async (data: Buffer) => {
                 await downloadProgress(data.length, data.length, true);
                 await decompressWriter.write(data);
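
The next hunk consumes the response body through streamToIteratable(reader), imported from package/src/misc. Its implementation is not part of this diff; inferred from the call site, it wraps a ReadableStream reader in an async iterable, roughly:

// Sketch inferred from usage; the real helper in package/src/misc may differ.
async function* streamToIteratable(
    reader: ReadableStreamDefaultReader<Uint8Array>
): AsyncGenerator<Uint8Array> {
    while (true) {
        const { done, value } = await reader.read();
        if (done) return;
        yield value;
    }
}
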
@@ -611,65 +624,85 @@ export class FastArchiveAppendable<Datum> {
 
             // TODO: Stream from the local cache instead? It should be possible, we can get the total size, and read chunks.
             let hash = getFileMetadataHash(file) + ".file";
-            let
-            if (
-
-
-
-
-
+            let contents = await localCache.get(hash);
+            if (stoppedPromise.resolveCalled) return;
+            await runInner(async () => {
+                if (contents?.length) {
+                    const CHUNK_SIZE = 1000 * 1000 * 10;
+                    for (let i = 0; i < contents.length; i += CHUNK_SIZE) {
+                        let data = contents.slice(i, i + CHUNK_SIZE);
+                        await onFetchedData(data);
+                    }
+                } else {
+                    const response = await fetch(url);
+                    if (!response.ok) {
+                        throw new Error(`Failed to fetch ${url}: ${response.status} ${response.statusText}`);
                     }
-                    await onFetchedData(data);
-                }
-            } else {
-                const response = await fetch(url);
-                if (!response.ok) {
-                    throw new Error(`Failed to fetch ${url}: ${response.status} ${response.statusText}`);
-                }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    if (!response.body) {
+                        throw new Error(`Response body is undefined for ${url}`);
+                    }
+                    let values: Buffer[] = [];
+                    const reader = response.body.getReader();
+                    void stoppedPromise.promise.finally(() => {
+                        void response.body?.cancel();
+                    });
+                    try {
+                        for await (let value of streamToIteratable(reader)) {
+                            // Cancel entirely
+                            if (stoppedPromise.resolveCalled) return;
+                            if (!value) continue;
+                            let buffer = Buffer.from(value);
+                            values.push(buffer);
+                            await onFetchedData(buffer);
+                        }
+                    } finally {
+                        reader.releaseLock();
                     }
-            } finally {
-                reader.releaseLock();
-            }
 
-
-
-
+                    await localCache.set(hash, Buffer.concat(values));
+                }
+                await decompressWriter.close();
+                await decompressPromise;
+            });
         }
 
         // Fork off the processing
         void (async () => {
             try {
                 // Iterate over all files and process them
-                let fileProgress = await createProgress("Files",
+                let fileProgress = await createProgress("Files", syncResult.files.length);
+                let fileInnerProgress = await createProgress("Files Inner", syncResult.files.length);
                 let failedFiles = await createProgress("Failed Files", 0);
-
+                let runSerial = runInSerial(async (fnc: () => Promise<void>) => {
+                    try {
+                        await fnc();
+                    } finally {
+                        await fileInnerProgress(1, 0, true);
+                    }
+                });
+                async function downloadFileWrapper(file: FileMetadata) {
                     if (stoppedPromise.resolveCalled) return;
                     try {
-                        await downloadAndParseFile(file);
+                        await downloadAndParseFile(file, runSerial);
                     } catch (e: any) {
                         console.warn(`Failed to download and parse file ${file.path}:\n${e.stack}`);
                         await failedFiles(1, 1, true);
                     }
-
+                    if (stoppedPromise.resolveCalled) return;
+                    await fileProgress(1, 0, true);
+                }
+                let remaining = syncResult.files.slice();
+                async function runThread() {
+                    while (true) {
+                        let file = remaining.shift();
+                        if (!file) {
+                            return;
+                        }
+                        await downloadFileWrapper(file);
+                    }
                 }
+                await Promise.all(list(32).map(() => runThread()));
 
                 await (await createProgress("Done", 0))(1, 1, true);
             } catch (e: any) {
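
The download loop above fans out over 32 worker "threads" that pull from a shared remaining array until it drains, capping concurrent downloads without any external queue library (list(n) is assumed to produce an n-element array, matching the new list import earlier in this diff). The same pattern, generalized:

// Sketch of the bounded-concurrency pattern used above: N workers pull from
// a shared queue until it is empty. Hypothetical helper, not querysub code.
async function runWithConcurrency<T>(
    items: T[],
    limit: number,
    fnc: (item: T) => Promise<void>,
): Promise<void> {
    const remaining = items.slice();
    async function runThread() {
        while (true) {
            const item = remaining.shift();
            if (item === undefined) return;
            await fnc(item);
        }
    }
    await Promise.all(Array.from({ length: limit }, () => runThread()));
}
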
@@ -35,6 +35,8 @@ import { assertIsNetworkTrusted } from "../../-d-trust/NetworkTrust2";
 import { blue, magenta } from "socket-function/src/formatting/logColors";
 import { FastArchiveAppendable, getFileTimeStamp } from "./FastArchiveAppendable";
 import { IdentityController_getMachineId, IdentityController_getReconnectNodeId } from "../../-c-identity/IdentityController";
+import { fsExistsAsync } from "../../fs";
+import { Querysub } from "../../4-querysub/QuerysubController";
 
 export type FileMetadata = {
     nodeId?: string;
@@ -84,7 +86,8 @@ export class FastArchiveAppendableControllerBase {
     /** Get all pending local files for this rootPath that haven't been uploaded to Backblaze yet */
     public async getPendingFiles(rootPath: string, timeRange: { startTime: number; endTime: number; }): Promise<InternalFileMetadata[]> {
 
-        let rootCacheFolder = new FastArchiveAppendable(rootPath).
+        let rootCacheFolder = new FastArchiveAppendable(rootPath).baseGetLocalPathRoot();
+        if (!await fsExistsAsync(rootCacheFolder)) return [];
 
         //console.log(`Searching for pending files in ${rootCacheFolder} for time range ${new Date(timeRange.startTime).toISOString()} to ${new Date(timeRange.endTime).toISOString()}`);
 
@@ -273,6 +276,9 @@ export class FastArchiveAppendableControllerBase {
     }): Promise<{
         files: FileMetadata[];
     }> {
+        if (!SocketFunction.mountedNodeId) {
+            throw new Error(`Cannot use FastArchiveAppendableController before SocketFunction is mounted`);
+        }
         let syncId = config.syncId ?? "";
 
         // Define inline functions for parallel execution
@@ -377,6 +383,7 @@ export class FastArchiveAppendableControllerBase {
         let remoteProgress = this.updateProgress(syncId, "Discovering remote files", byMachineId.size);
         let remoteValue = 0;
 
+
         await Promise.all(Array.from(byMachineId).map(async ([machineId, nodeObjs]) => {
             let firstAliveNode = new PromiseObj<string>();
             let allFinished = Promise.all(nodeObjs.map(async ({ nodeId, entryPoint }) => {
@@ -396,7 +403,7 @@ export class FastArchiveAppendableControllerBase {
 
                 let pendingFiles = await errorToUndefined(
                     controller.getPendingFiles(config.rootPath, config.range));
-                console.log(blue(`Found ${pendingFiles?.length} pending files on node ${aliveNodeId}`)
+                console.log(blue(`Found ${pendingFiles?.length} pending files on node ${aliveNodeId}`));
 
                 remoteValue++;
                 remoteProgress(remoteValue);
@@ -418,6 +425,7 @@ export class FastArchiveAppendableControllerBase {
             let urlObj = new URL(url);
             urlObj.hostname = ipDomain;
             url = urlObj.toString();
+
             let timeStamp = getFileTimeStamp(file.path);
             let startTime = timeStamp.startTime;
             let endTime = timeStamp.endTime;
@@ -443,6 +451,7 @@ export class FastArchiveAppendableControllerBase {
 
         let allFilesList = await Promise.all(filePromises);
         let allFiles = allFilesList.flat();
+        // Newest first, so recent errors are found quickly
         sort(allFiles, x => -x.startTime);
 
         return {