querysub 0.312.0 → 0.313.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursorrules +1 -1
- package/costsBenefits.txt +4 -1
- package/package.json +3 -2
- package/spec.txt +23 -18
- package/src/-0-hooks/hooks.ts +1 -1
- package/src/-a-archives/archives.ts +16 -3
- package/src/-a-archives/archivesBackBlaze.ts +51 -3
- package/src/-a-archives/archivesLimitedCache.ts +175 -0
- package/src/-a-archives/archivesPrivateFileSystem.ts +299 -0
- package/src/-a-auth/certs.ts +58 -31
- package/src/-b-authorities/cdnAuthority.ts +2 -2
- package/src/-b-authorities/dnsAuthority.ts +3 -2
- package/src/-c-identity/IdentityController.ts +3 -2
- package/src/-d-trust/NetworkTrust2.ts +17 -19
- package/src/-e-certs/EdgeCertController.ts +3 -4
- package/src/-e-certs/certAuthority.ts +1 -2
- package/src/-f-node-discovery/NodeDiscovery.ts +9 -7
- package/src/-g-core-values/NodeCapabilities.ts +6 -1
- package/src/0-path-value-core/NodePathAuthorities.ts +1 -1
- package/src/0-path-value-core/PathValueCommitter.ts +3 -3
- package/src/0-path-value-core/PathValueController.ts +3 -3
- package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts +15 -37
- package/src/0-path-value-core/pathValueCore.ts +4 -3
- package/src/3-path-functions/PathFunctionRunner.ts +2 -2
- package/src/4-dom/qreact.tsx +4 -3
- package/src/4-querysub/Querysub.ts +2 -2
- package/src/4-querysub/QuerysubController.ts +2 -2
- package/src/5-diagnostics/GenericFormat.tsx +1 -0
- package/src/5-diagnostics/Table.tsx +3 -0
- package/src/5-diagnostics/diskValueAudit.ts +2 -1
- package/src/5-diagnostics/nodeMetadata.ts +0 -1
- package/src/deployManager/components/MachineDetailPage.tsx +9 -1
- package/src/deployManager/components/ServiceDetailPage.tsx +10 -1
- package/src/diagnostics/NodeViewer.tsx +3 -4
- package/src/diagnostics/logs/FastArchiveAppendable.ts +748 -0
- package/src/diagnostics/logs/FastArchiveController.ts +524 -0
- package/src/diagnostics/logs/FastArchiveViewer.tsx +863 -0
- package/src/diagnostics/logs/LogViewer2.tsx +349 -0
- package/src/diagnostics/logs/TimeRangeSelector.tsx +94 -0
- package/src/diagnostics/logs/diskLogger.ts +135 -305
- package/src/diagnostics/logs/diskShimConsoleLogs.ts +6 -29
- package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts +577 -0
- package/src/diagnostics/logs/errorNotifications/ErrorSuppressionUI.tsx +225 -0
- package/src/diagnostics/logs/errorNotifications/ErrorWarning.tsx +207 -0
- package/src/diagnostics/logs/importLogsEntry.ts +38 -0
- package/src/diagnostics/logs/injectFileLocationToConsole.ts +7 -17
- package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycles.tsx +0 -0
- package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +151 -0
- package/src/diagnostics/managementPages.tsx +7 -16
- package/src/diagnostics/misc-pages/ComponentSyncStats.tsx +0 -1
- package/src/diagnostics/periodic.ts +5 -0
- package/src/diagnostics/watchdog.ts +2 -2
- package/src/functional/SocketChannel.ts +67 -0
- package/src/library-components/Input.tsx +1 -1
- package/src/library-components/InputLabel.tsx +5 -2
- package/src/misc.ts +111 -0
- package/src/src.d.ts +34 -1
- package/src/user-implementation/userData.ts +4 -3
- package/test.ts +13 -0
- package/testEntry2.ts +29 -0
- package/src/diagnostics/errorLogs/ErrorLogController.ts +0 -535
- package/src/diagnostics/errorLogs/ErrorLogCore.ts +0 -274
- package/src/diagnostics/errorLogs/LogClassifiers.tsx +0 -308
- package/src/diagnostics/errorLogs/LogFilterUI.tsx +0 -84
- package/src/diagnostics/errorLogs/LogNotify.tsx +0 -101
- package/src/diagnostics/errorLogs/LogTimeSelector.tsx +0 -723
- package/src/diagnostics/errorLogs/LogViewer.tsx +0 -757
- package/src/diagnostics/errorLogs/logFiltering.tsx +0 -149
- package/src/diagnostics/logs/DiskLoggerPage.tsx +0 -613
|
@@ -0,0 +1,524 @@
|
|
|
1
|
+
// Hot-reload directives consumed by the module loader (`module` is the CommonJS
// module object, extended via src.d.ts).
// NOTE(review): presumably `hotreload` opts this module into hot reloading and
// `noserverhotreload = false` leaves server-side reloads enabled — confirm
// against the loader's documentation.
module.hotreload = true;
module.noserverhotreload = false;
|
|
3
|
+
import { measureBlock, measureFnc, measureWrap } from "socket-function/src/profiling/measure";
|
|
4
|
+
import { getMachineId, getOwnMachineId } from "../../-a-auth/certs";
|
|
5
|
+
import { isDefined, parseFileNameKVP, parsePath, partialCopyObject, streamToIteratable, sum, toFileNameKVP } from "../../misc";
|
|
6
|
+
import { registerShutdownHandler } from "../periodic";
|
|
7
|
+
import { batchFunction, delay, runInSerial, runInfinitePoll, runInfinitePollCallAtStart } from "socket-function/src/batching";
|
|
8
|
+
import { PromiseObj, isNode, keyByArray, nextId, sort, timeInDay, timeInHour, timeInMinute } from "socket-function/src/misc";
|
|
9
|
+
import os from "os";
|
|
10
|
+
import { getOwnThreadId } from "../../-f-node-discovery/NodeDiscovery";
|
|
11
|
+
import fs from "fs";
|
|
12
|
+
import { MaybePromise, canHaveChildren } from "socket-function/src/types";
|
|
13
|
+
import { formatNumber, formatTime } from "socket-function/src/formatting/format";
|
|
14
|
+
import { cache, lazy } from "socket-function/src/caching";
|
|
15
|
+
import { getArchives, nestArchives } from "../../-a-archives/archives";
|
|
16
|
+
import { Zip } from "../../zip";
|
|
17
|
+
import { SocketFunction } from "socket-function/SocketFunction";
|
|
18
|
+
import { assertIsManagementUser } from "../managementPages";
|
|
19
|
+
import { getControllerNodeIdList } from "../../-g-core-values/NodeCapabilities";
|
|
20
|
+
import { errorToUndefined, ignoreErrors, timeoutToUndefinedSilent } from "../../errors";
|
|
21
|
+
import { getCallObj } from "socket-function/src/nodeProxy";
|
|
22
|
+
import { getSyncedController } from "../../library-components/SyncedController";
|
|
23
|
+
import { getBrowserUrlNode, getOwnNodeId } from "../../-f-node-discovery/NodeDiscovery";
|
|
24
|
+
import { secureRandom } from "../../misc/random";
|
|
25
|
+
import { getPathIndex, getPathStr2 } from "../../path";
|
|
26
|
+
import { onNextPaint } from "../../functional/onNextPaint";
|
|
27
|
+
import { getArchivesBackblazePrivateImmutable, getArchivesBackblazePublicImmutable } from "../../-a-archives/archivesBackBlaze";
|
|
28
|
+
import { httpsRequest } from "socket-function/src/https";
|
|
29
|
+
import { getDomain } from "../../config";
|
|
30
|
+
import { getIPDomain } from "../../-e-certs/EdgeCertController";
|
|
31
|
+
import { getArchivesPrivateFileSystem } from "../../-a-archives/archivesPrivateFileSystem";
|
|
32
|
+
import { createArchivesLimitedCache } from "../../-a-archives/archivesLimitedCache";
|
|
33
|
+
import { sha256 } from "js-sha256";
|
|
34
|
+
import { assertIsNetworkTrusted } from "../../-d-trust/NetworkTrust2";
|
|
35
|
+
import { blue, magenta } from "socket-function/src/formatting/logColors";
|
|
36
|
+
import { FastArchiveAppendable, getFileTimeStamp } from "./FastArchiveAppendable";
|
|
37
|
+
import { IdentityController_getMachineId, IdentityController_getReconnectNodeId } from "../../-c-identity/IdentityController";
|
|
38
|
+
|
|
39
|
+
export type FileMetadata = {
|
|
40
|
+
nodeId?: string;
|
|
41
|
+
path: string;
|
|
42
|
+
url: string;
|
|
43
|
+
size: number;
|
|
44
|
+
startTime: number;
|
|
45
|
+
endTime: number;
|
|
46
|
+
};
|
|
47
|
+
|
|
48
|
+
type InternalFileMetadata = {
|
|
49
|
+
path: string;
|
|
50
|
+
size: number;
|
|
51
|
+
};
|
|
52
|
+
|
|
53
|
+
type SynchronizeInfo = {
|
|
54
|
+
id: string;
|
|
55
|
+
nodeId: string;
|
|
56
|
+
config: {
|
|
57
|
+
range: {
|
|
58
|
+
startTime: number;
|
|
59
|
+
endTime: number;
|
|
60
|
+
};
|
|
61
|
+
rootPath: string;
|
|
62
|
+
};
|
|
63
|
+
createTime: number;
|
|
64
|
+
};
|
|
65
|
+
|
|
66
|
+
// Excluding authorize tokens, then hashed
|
|
67
|
+
export function getFileMetadataHash(file: FileMetadata): string {
|
|
68
|
+
let urlObj = new URL(file.url);
|
|
69
|
+
if (!file.nodeId) {
|
|
70
|
+
urlObj.search = "";
|
|
71
|
+
} else {
|
|
72
|
+
let args = JSON.parse(urlObj.searchParams.get("args") || "");
|
|
73
|
+
// The syncId shouldn't be part of the hash though
|
|
74
|
+
args[0] = "";
|
|
75
|
+
urlObj.searchParams.set("args", JSON.stringify(args));
|
|
76
|
+
}
|
|
77
|
+
return sha256(urlObj.toString());
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
export class FastArchiveAppendableControllerBase {
|
|
81
|
+
private static activeSynchronizes = new Map<string, SynchronizeInfo>();
|
|
82
|
+
public static progressCallbacks = new Map<string, (progress: { section: string; value: number; max: number; }) => void>();
|
|
83
|
+
|
|
84
|
+
/** Get all pending local files for this rootPath that haven't been uploaded to Backblaze yet */
|
|
85
|
+
public async getPendingFiles(rootPath: string, timeRange: { startTime: number; endTime: number; }): Promise<InternalFileMetadata[]> {
|
|
86
|
+
|
|
87
|
+
let rootCacheFolder = new FastArchiveAppendable(rootPath).getLocalPathRoot();
|
|
88
|
+
|
|
89
|
+
//console.log(`Searching for pending files in ${rootCacheFolder} for time range ${new Date(timeRange.startTime).toISOString()} to ${new Date(timeRange.endTime).toISOString()}`);
|
|
90
|
+
|
|
91
|
+
let result: InternalFileMetadata[] = [];
|
|
92
|
+
|
|
93
|
+
try {
|
|
94
|
+
let allFolders = await fs.promises.readdir(rootCacheFolder);
|
|
95
|
+
for (let threadId of allFolders) {
|
|
96
|
+
let threadDir = rootCacheFolder + threadId + "/";
|
|
97
|
+
let files = await fs.promises.readdir(threadDir);
|
|
98
|
+
|
|
99
|
+
for (let file of files) {
|
|
100
|
+
if (file === "heartbeat") continue;
|
|
101
|
+
|
|
102
|
+
let fileTimestamp = getFileTimeStamp(file);
|
|
103
|
+
//console.log(`Found ${new Date(fileTimestamp).toISOString()} in ${threadDir + file}`);
|
|
104
|
+
if (fileTimestamp < timeRange.startTime || fileTimestamp > timeRange.endTime) {
|
|
105
|
+
continue;
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
let fullPath = threadDir + file;
|
|
109
|
+
let stat = await fs.promises.stat(fullPath);
|
|
110
|
+
|
|
111
|
+
result.push({
|
|
112
|
+
path: fullPath,
|
|
113
|
+
size: stat.size,
|
|
114
|
+
});
|
|
115
|
+
}
|
|
116
|
+
}
|
|
117
|
+
} catch (e: any) {
|
|
118
|
+
// If directory doesn't exist, return empty array
|
|
119
|
+
if (e.code === "ENOENT") {
|
|
120
|
+
return [];
|
|
121
|
+
}
|
|
122
|
+
throw e;
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
return result;
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
/** Download a local file from another server (coordinator forwards request).
|
|
129
|
+
* NOTE: This doesn't support streaming, which isn't great. However, eventually most logs will make their way to backblaze. So not supporting streaming here doesn't mean we don't support streaming in general. We also compress the file. So for the compressed file to be so big that you can't send it all at once would mean your uncompressed data would just be ridiculously large.
|
|
130
|
+
*/
|
|
131
|
+
public async downloadLocalFile(syncId: string, targetNodeId: string, path: string): Promise<Buffer> {
|
|
132
|
+
const caller = SocketFunction.getCaller();
|
|
133
|
+
let syncInfo = FastArchiveAppendableControllerBase.activeSynchronizes.get(syncId);
|
|
134
|
+
if (!syncInfo) {
|
|
135
|
+
throw new Error(`Invalid sync ID: ${syncId}`);
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
try {
|
|
139
|
+
// Forward the request to the actual server that has the file
|
|
140
|
+
let targetController = FastArchiveAppendableController.nodes[targetNodeId];
|
|
141
|
+
return await targetController.downloadLocalFileInternal(path);
|
|
142
|
+
} catch {
|
|
143
|
+
let machineId = getMachineId(targetNodeId);
|
|
144
|
+
let nodeIds = await getControllerNodeIdList(FastArchiveAppendableController);
|
|
145
|
+
let byMachineId = keyByArray(nodeIds, x => getMachineId(x.nodeId));
|
|
146
|
+
let nodeIdsOfMachine = byMachineId.get(machineId) || [];
|
|
147
|
+
|
|
148
|
+
let firstAliveNode = new PromiseObj<string>();
|
|
149
|
+
let allFinished = Promise.all(nodeIdsOfMachine.map(async ({ nodeId, entryPoint }) => {
|
|
150
|
+
if (await timeoutToUndefinedSilent(5000, FastArchiveAppendableController.nodes[nodeId].isNodeAlive(nodeId))) {
|
|
151
|
+
firstAliveNode.resolve(nodeId);
|
|
152
|
+
}
|
|
153
|
+
}));
|
|
154
|
+
let aliveNodeId = await Promise.race([firstAliveNode.promise, allFinished]);
|
|
155
|
+
if (Array.isArray(aliveNodeId)) {
|
|
156
|
+
throw new Error(`No alive node found for machine ${machineId}`);
|
|
157
|
+
}
|
|
158
|
+
console.log(magenta(`Remapped request for ${targetNodeId} to ${aliveNodeId} for machine ${machineId}`));
|
|
159
|
+
let targetController = FastArchiveAppendableController.nodes[aliveNodeId];
|
|
160
|
+
return await targetController.downloadLocalFileInternal(path);
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
/** Internal method to download local file (called by coordinator) */
|
|
165
|
+
public async downloadLocalFileInternal(path: string): Promise<Buffer> {
|
|
166
|
+
return await Zip.gzip(await fs.promises.readFile(path));
|
|
167
|
+
}
|
|
168
|
+
public async isNodeAlive(nodeId: string): Promise<boolean> {
|
|
169
|
+
return true;
|
|
170
|
+
}
|
|
171
|
+
|
|
172
|
+
/** Create a new sync session and return the sync ID */
|
|
173
|
+
public async createSyncSession(): Promise<string> {
|
|
174
|
+
let caller = SocketFunction.getCaller();
|
|
175
|
+
let syncId = nextId();
|
|
176
|
+
|
|
177
|
+
let syncInfo: SynchronizeInfo = {
|
|
178
|
+
id: syncId,
|
|
179
|
+
nodeId: caller.nodeId,
|
|
180
|
+
config: {
|
|
181
|
+
range: { startTime: 0, endTime: 0 }, // Will be set by startSynchronize
|
|
182
|
+
rootPath: "",
|
|
183
|
+
},
|
|
184
|
+
createTime: Date.now(),
|
|
185
|
+
};
|
|
186
|
+
|
|
187
|
+
FastArchiveAppendableControllerBase.activeSynchronizes.set(syncId, syncInfo);
|
|
188
|
+
|
|
189
|
+
// Clean up on disconnect
|
|
190
|
+
SocketFunction.onNextDisconnect(caller.nodeId, () => {
|
|
191
|
+
FastArchiveAppendableControllerBase.activeSynchronizes.delete(syncId);
|
|
192
|
+
});
|
|
193
|
+
|
|
194
|
+
return syncId;
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
public async startSynchronize(config: {
|
|
198
|
+
syncId: string;
|
|
199
|
+
range: {
|
|
200
|
+
startTime: number;
|
|
201
|
+
endTime: number;
|
|
202
|
+
};
|
|
203
|
+
rootPath: string;
|
|
204
|
+
noLocalFiles?: boolean;
|
|
205
|
+
}): Promise<{
|
|
206
|
+
files: FileMetadata[];
|
|
207
|
+
}> {
|
|
208
|
+
let caller = SocketFunction.getCaller();
|
|
209
|
+
// If the user uses a bad syncId and it gets leak, screw it.
|
|
210
|
+
let syncId = config.syncId;
|
|
211
|
+
|
|
212
|
+
let syncInfo: SynchronizeInfo = {
|
|
213
|
+
id: syncId,
|
|
214
|
+
nodeId: caller.nodeId,
|
|
215
|
+
config: {
|
|
216
|
+
range: config.range,
|
|
217
|
+
rootPath: config.rootPath,
|
|
218
|
+
},
|
|
219
|
+
createTime: Date.now(),
|
|
220
|
+
};
|
|
221
|
+
|
|
222
|
+
FastArchiveAppendableControllerBase.activeSynchronizes.set(syncId, syncInfo);
|
|
223
|
+
|
|
224
|
+
// Clean up on disconnect
|
|
225
|
+
SocketFunction.onNextDisconnect(caller.nodeId, () => {
|
|
226
|
+
FastArchiveAppendableControllerBase.activeSynchronizes.delete(syncId);
|
|
227
|
+
});
|
|
228
|
+
|
|
229
|
+
return await this.startSynchronizeBase({
|
|
230
|
+
syncId,
|
|
231
|
+
range: config.range,
|
|
232
|
+
rootPath: config.rootPath,
|
|
233
|
+
noLocalFiles: config.noLocalFiles,
|
|
234
|
+
});
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
public async startSynchronizeInternal(config: {
|
|
238
|
+
range: {
|
|
239
|
+
startTime: number;
|
|
240
|
+
endTime: number;
|
|
241
|
+
};
|
|
242
|
+
rootPath: string;
|
|
243
|
+
noLocalFiles?: boolean;
|
|
244
|
+
}): Promise<{
|
|
245
|
+
files: FileMetadata[];
|
|
246
|
+
}> {
|
|
247
|
+
let syncId = "no-progress" + nextId();
|
|
248
|
+
FastArchiveAppendableControllerBase.activeSynchronizes.set(syncId, {
|
|
249
|
+
id: syncId,
|
|
250
|
+
nodeId: getOwnNodeId(),
|
|
251
|
+
config: {
|
|
252
|
+
range: config.range,
|
|
253
|
+
rootPath: config.rootPath,
|
|
254
|
+
},
|
|
255
|
+
createTime: Date.now(),
|
|
256
|
+
});
|
|
257
|
+
return await this.startSynchronizeBase({
|
|
258
|
+
syncId,
|
|
259
|
+
range: config.range,
|
|
260
|
+
rootPath: config.rootPath,
|
|
261
|
+
noLocalFiles: config.noLocalFiles,
|
|
262
|
+
});
|
|
263
|
+
}
|
|
264
|
+
|
|
265
|
+
private async startSynchronizeBase(config: {
|
|
266
|
+
syncId?: string;
|
|
267
|
+
range: {
|
|
268
|
+
startTime: number;
|
|
269
|
+
endTime: number;
|
|
270
|
+
};
|
|
271
|
+
rootPath: string;
|
|
272
|
+
noLocalFiles?: boolean;
|
|
273
|
+
}): Promise<{
|
|
274
|
+
files: FileMetadata[];
|
|
275
|
+
}> {
|
|
276
|
+
let syncId = config.syncId ?? "";
|
|
277
|
+
|
|
278
|
+
// Define inline functions for parallel execution
|
|
279
|
+
const searchBackblazeFiles = async (): Promise<FileMetadata[]> => {
|
|
280
|
+
let archives = new FastArchiveAppendable(config.rootPath).getArchives();
|
|
281
|
+
let backblazeFiles: FileMetadata[] = [];
|
|
282
|
+
if (!archives.getDownloadAuthorization) throw new Error(`archives.getDownloadAuthorization is missing?`);
|
|
283
|
+
let authorization = await archives.getDownloadAuthorization({
|
|
284
|
+
validDurationInSeconds: timeInDay * 6 / 1000,
|
|
285
|
+
});
|
|
286
|
+
let authToken = authorization.authorizationToken;
|
|
287
|
+
|
|
288
|
+
const folderProgress = this.updateProgress(syncId, "Backblaze folder search", 0);
|
|
289
|
+
let folderMax = 0;
|
|
290
|
+
let folderValue = 0;
|
|
291
|
+
|
|
292
|
+
async function searchBackblazeFilesRecursive(prefix: string, level: "year" | "month" | "day" | "hour") {
|
|
293
|
+
folderMax++;
|
|
294
|
+
let folders = await archives.find(prefix, { shallow: true, type: "folders" });
|
|
295
|
+
folderValue++;
|
|
296
|
+
folderProgress(folderValue, folderMax);
|
|
297
|
+
// It's actually really annoying because the time ranges aren't the same amount of time. It's probably fine though. This code only needs to exist in one place.
|
|
298
|
+
|
|
299
|
+
await Promise.all(folders.map(async (folder) => {
|
|
300
|
+
let folderName = folder.split("/").pop()!;
|
|
301
|
+
let folderNum = parseInt(folderName, 10);
|
|
302
|
+
|
|
303
|
+
if (level === "year") {
|
|
304
|
+
let yearStart = Date.UTC(folderNum, 0, 1);
|
|
305
|
+
let yearEnd = Date.UTC(folderNum + 1, 0, 1);
|
|
306
|
+
|
|
307
|
+
if (yearEnd > config.range.startTime && yearStart <= config.range.endTime) {
|
|
308
|
+
await searchBackblazeFilesRecursive(folder + "/", "month");
|
|
309
|
+
}
|
|
310
|
+
} else if (level === "month") {
|
|
311
|
+
// folderName is 1-based month (01, 02, etc)
|
|
312
|
+
let year = parseInt(folder.split("/")[0], 10);
|
|
313
|
+
let monthStart = Date.UTC(year, folderNum - 1, 1);
|
|
314
|
+
let monthEnd = Date.UTC(year, folderNum, 1);
|
|
315
|
+
|
|
316
|
+
if (monthEnd > config.range.startTime && monthStart <= config.range.endTime) {
|
|
317
|
+
await searchBackblazeFilesRecursive(folder + "/", "day");
|
|
318
|
+
}
|
|
319
|
+
} else if (level === "day") {
|
|
320
|
+
let pathParts = folder.split("/");
|
|
321
|
+
let year = parseInt(pathParts[0], 10);
|
|
322
|
+
let month = parseInt(pathParts[1], 10) - 1; // Date constructor expects 0-based month
|
|
323
|
+
let dayStart = Date.UTC(year, month, folderNum);
|
|
324
|
+
let dayEnd = Date.UTC(year, month, folderNum + 1);
|
|
325
|
+
|
|
326
|
+
if (dayEnd > config.range.startTime && dayStart <= config.range.endTime) {
|
|
327
|
+
await searchBackblazeFilesRecursive(folder + "/", "hour");
|
|
328
|
+
}
|
|
329
|
+
} else if (level === "hour") {
|
|
330
|
+
let pathParts = folder.split("/");
|
|
331
|
+
let year = parseInt(pathParts[0], 10);
|
|
332
|
+
let month = parseInt(pathParts[1], 10) - 1;
|
|
333
|
+
let day = parseInt(pathParts[2], 10);
|
|
334
|
+
let hourStart = Date.UTC(year, month, day, folderNum);
|
|
335
|
+
let hourEnd = Date.UTC(year, month, day, folderNum + 1);
|
|
336
|
+
|
|
337
|
+
let inRange = hourEnd > config.range.startTime && hourStart <= config.range.endTime;
|
|
338
|
+
if (!inRange) return;
|
|
339
|
+
// This hour folder is in range, get all files from it
|
|
340
|
+
let filePaths = await archives.findInfo(folder + "/", { shallow: true, type: "files" });
|
|
341
|
+
for (let info of filePaths) {
|
|
342
|
+
if (!info.path.endsWith(".log")) continue;
|
|
343
|
+
|
|
344
|
+
if (!archives.getURL) throw new Error(`archives.getURL is missing?`);
|
|
345
|
+
let url = await archives.getURL(info.path);
|
|
346
|
+
let urlObj = new URL(url);
|
|
347
|
+
// IMPORTANT! This is CASE SENSITIVE! Ugh...
|
|
348
|
+
urlObj.searchParams.set("Authorization", authToken);
|
|
349
|
+
url = urlObj.toString();
|
|
350
|
+
|
|
351
|
+
backblazeFiles.push({
|
|
352
|
+
path: info.path,
|
|
353
|
+
url: url,
|
|
354
|
+
size: info.size,
|
|
355
|
+
startTime: hourStart,
|
|
356
|
+
endTime: hourEnd,
|
|
357
|
+
});
|
|
358
|
+
}
|
|
359
|
+
}
|
|
360
|
+
}));
|
|
361
|
+
}
|
|
362
|
+
|
|
363
|
+
await searchBackblazeFilesRecursive("", "year");
|
|
364
|
+
|
|
365
|
+
return backblazeFiles;
|
|
366
|
+
};
|
|
367
|
+
|
|
368
|
+
const getRemoteFiles = async (): Promise<FileMetadata[]> => {
|
|
369
|
+
const getControllerProgress = this.updateProgress(syncId, "Discovering remote machines", 0);
|
|
370
|
+
|
|
371
|
+
let localFiles: FileMetadata[] = [];
|
|
372
|
+
|
|
373
|
+
let nodeIds = await getControllerNodeIdList(FastArchiveAppendableController);
|
|
374
|
+
let byMachineId = keyByArray(nodeIds, x => getMachineId(x.nodeId));
|
|
375
|
+
getControllerProgress(byMachineId.size, byMachineId.size);
|
|
376
|
+
|
|
377
|
+
let remoteProgress = this.updateProgress(syncId, "Discovering remote files", byMachineId.size);
|
|
378
|
+
let remoteValue = 0;
|
|
379
|
+
|
|
380
|
+
await Promise.all(Array.from(byMachineId).map(async ([machineId, nodeObjs]) => {
|
|
381
|
+
let firstAliveNode = new PromiseObj<string>();
|
|
382
|
+
let allFinished = Promise.all(nodeObjs.map(async ({ nodeId, entryPoint }) => {
|
|
383
|
+
if (await timeoutToUndefinedSilent(5000, FastArchiveAppendableController.nodes[nodeId].isNodeAlive(nodeId))) {
|
|
384
|
+
firstAliveNode.resolve(nodeId);
|
|
385
|
+
}
|
|
386
|
+
}));
|
|
387
|
+
let aliveNodeId = await Promise.race([firstAliveNode.promise, allFinished]);
|
|
388
|
+
if (Array.isArray(aliveNodeId)) {
|
|
389
|
+
console.log(blue(`No alive nodes found for machine ${machineId}`), nodeObjs);
|
|
390
|
+
remoteValue++;
|
|
391
|
+
remoteProgress(remoteValue);
|
|
392
|
+
return;
|
|
393
|
+
}
|
|
394
|
+
|
|
395
|
+
let controller = FastArchiveAppendableController.nodes[aliveNodeId];
|
|
396
|
+
|
|
397
|
+
let pendingFiles = await errorToUndefined(
|
|
398
|
+
controller.getPendingFiles(config.rootPath, config.range));
|
|
399
|
+
console.log(blue(`Found ${pendingFiles?.length} pending files on node ${aliveNodeId}`), nodeObjs);
|
|
400
|
+
|
|
401
|
+
remoteValue++;
|
|
402
|
+
remoteProgress(remoteValue);
|
|
403
|
+
|
|
404
|
+
if (!pendingFiles) return;
|
|
405
|
+
for (let file of pendingFiles) {
|
|
406
|
+
// Create download URL that points to the coordinator (this server)
|
|
407
|
+
// The coordinator will forward the request to the actual file server
|
|
408
|
+
let coordinatorNodeId = getOwnNodeId();
|
|
409
|
+
let coordinatorController = FastArchiveAppendableController.nodes[coordinatorNodeId];
|
|
410
|
+
let downloadCall = coordinatorController.downloadLocalFile[getCallObj](
|
|
411
|
+
syncId,
|
|
412
|
+
aliveNodeId, // Target server that has the file
|
|
413
|
+
file.path
|
|
414
|
+
);
|
|
415
|
+
let url = SocketFunction.getHTTPCallLink(downloadCall);
|
|
416
|
+
// Have to use the IP domain, as it's externally available. That, plus the port, should uniquely identify us.
|
|
417
|
+
let ipDomain = await getIPDomain();
|
|
418
|
+
let urlObj = new URL(url);
|
|
419
|
+
urlObj.hostname = ipDomain;
|
|
420
|
+
url = urlObj.toString();
|
|
421
|
+
let startTime = getFileTimeStamp(file.path);
|
|
422
|
+
let endTime = startTime + timeInHour;
|
|
423
|
+
|
|
424
|
+
localFiles.push({
|
|
425
|
+
nodeId: aliveNodeId,
|
|
426
|
+
path: file.path,
|
|
427
|
+
url: url,
|
|
428
|
+
size: file.size,
|
|
429
|
+
startTime: startTime,
|
|
430
|
+
endTime: endTime,
|
|
431
|
+
});
|
|
432
|
+
}
|
|
433
|
+
}));
|
|
434
|
+
|
|
435
|
+
return localFiles;
|
|
436
|
+
};
|
|
437
|
+
|
|
438
|
+
// Execute both operations in parallel
|
|
439
|
+
let filePromises: Promise<FileMetadata[]>[] = [];
|
|
440
|
+
filePromises.push(searchBackblazeFiles());
|
|
441
|
+
if (!config.noLocalFiles) filePromises.push(getRemoteFiles());
|
|
442
|
+
|
|
443
|
+
let allFilesList = await Promise.all(filePromises);
|
|
444
|
+
let allFiles = allFilesList.flat();
|
|
445
|
+
sort(allFiles, x => -x.startTime);
|
|
446
|
+
|
|
447
|
+
return {
|
|
448
|
+
files: allFiles,
|
|
449
|
+
};
|
|
450
|
+
}
|
|
451
|
+
|
|
452
|
+
/** Update progress for a synchronization session - returns a function that takes only the progress value */
|
|
453
|
+
public updateProgress(syncId: string, section: string, max: number) {
|
|
454
|
+
let cancelled: Error | undefined;
|
|
455
|
+
let baseBatch = batchFunction({ delay: 150, },
|
|
456
|
+
async (config: { value: number, overrideMax?: number }[]) => {
|
|
457
|
+
if (cancelled) return;
|
|
458
|
+
let syncInfo = FastArchiveAppendableControllerBase.activeSynchronizes.get(syncId);
|
|
459
|
+
if (!syncInfo) {
|
|
460
|
+
cancelled = new Error(`Cancelled (sync for id ${syncId} is missing)`);
|
|
461
|
+
return;
|
|
462
|
+
}
|
|
463
|
+
|
|
464
|
+
let value = config.at(-1)!.value;
|
|
465
|
+
let usedMax = config.map(c => c.overrideMax).filter(isDefined).at(-1) ?? max;
|
|
466
|
+
value = Math.min(value, usedMax);
|
|
467
|
+
|
|
468
|
+
try {
|
|
469
|
+
let result = await FastArchiveAppendableController.nodes[syncInfo.nodeId]
|
|
470
|
+
.onSynchronizeProgress(syncId, { section, value, max: usedMax });
|
|
471
|
+
if (result === "stop") {
|
|
472
|
+
cancelled = new Error(`Sync for id ${syncId} was stopped by the client`);
|
|
473
|
+
return;
|
|
474
|
+
}
|
|
475
|
+
} catch (e: any) {
|
|
476
|
+
cancelled = e;
|
|
477
|
+
}
|
|
478
|
+
}
|
|
479
|
+
);
|
|
480
|
+
let onProgress = (value: number, overrideMax?: number) => {
|
|
481
|
+
if (!syncId) return;
|
|
482
|
+
if (syncId.startsWith("no-progress")) return;
|
|
483
|
+
if (cancelled) throw cancelled;
|
|
484
|
+
void baseBatch({ value, overrideMax });
|
|
485
|
+
};
|
|
486
|
+
onProgress(0);
|
|
487
|
+
return onProgress;
|
|
488
|
+
}
|
|
489
|
+
|
|
490
|
+
/** Progress callback - called by server on client */
|
|
491
|
+
public async onSynchronizeProgress(syncId: string, progress: {
|
|
492
|
+
section: string;
|
|
493
|
+
value: number;
|
|
494
|
+
max: number;
|
|
495
|
+
}): Promise<"stop" | undefined> {
|
|
496
|
+
const callback = FastArchiveAppendableControllerBase.progressCallbacks.get(syncId);
|
|
497
|
+
if (!callback) {
|
|
498
|
+
return "stop";
|
|
499
|
+
}
|
|
500
|
+
callback(progress);
|
|
501
|
+
}
|
|
502
|
+
}
|
|
503
|
+
|
|
504
|
+
/**
 * Network registration for FastArchiveAppendableControllerBase. Every exposed
 * call except downloadLocalFile requires a management user; downloadLocalFile
 * is an HTTP call gated by a valid syncId instead (see comment below), and
 * isNodeAlive only requires network trust so liveness probes work node-to-node.
 * NOTE(review): startSynchronizeInternal is deliberately absent from this map —
 * presumably that keeps it local-only; confirm against SocketFunction.register
 * semantics.
 */
export const FastArchiveAppendableController = SocketFunction.register(
    "FastArchiveAppendableController-b8c9e4d5-1f2a-4b6c-8d7e-9f0a1b2c3d4e",
    new FastArchiveAppendableControllerBase(),
    () => ({
        getPendingFiles: { hooks: [assertIsManagementUser] },
        // Secured via syncId (can't use assertIsManagementUser, because it is an HTTP call, so there is no negotiation step)
        downloadLocalFile: {},
        downloadLocalFileInternal: { hooks: [assertIsManagementUser] },
        startSynchronize: { hooks: [assertIsManagementUser] },
        updateProgress: { hooks: [assertIsManagementUser] },
        onSynchronizeProgress: { hooks: [assertIsManagementUser] },
        isNodeAlive: { hooks: [assertIsNetworkTrusted] },
        createSyncSession: { hooks: [assertIsManagementUser] },
    }),
    () => ({

    }),
);
|
|
522
|
+
|
|
523
|
+
// Synced wrapper around the registered controller (see
// library-components/SyncedController). NOTE(review): presumably this binds
// calls to an appropriate remote node automatically — confirm against
// getSyncedController's documentation.
export const fastArchiveAppendableController = getSyncedController(FastArchiveAppendableController);
|
|
524
|
+
|