querysub 0.312.0 → 0.313.0
This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/.cursorrules +1 -1
- package/costsBenefits.txt +4 -1
- package/package.json +3 -2
- package/spec.txt +23 -18
- package/src/-0-hooks/hooks.ts +1 -1
- package/src/-a-archives/archives.ts +16 -3
- package/src/-a-archives/archivesBackBlaze.ts +51 -3
- package/src/-a-archives/archivesLimitedCache.ts +175 -0
- package/src/-a-archives/archivesPrivateFileSystem.ts +299 -0
- package/src/-a-auth/certs.ts +58 -31
- package/src/-b-authorities/cdnAuthority.ts +2 -2
- package/src/-b-authorities/dnsAuthority.ts +3 -2
- package/src/-c-identity/IdentityController.ts +3 -2
- package/src/-d-trust/NetworkTrust2.ts +17 -19
- package/src/-e-certs/EdgeCertController.ts +3 -4
- package/src/-e-certs/certAuthority.ts +1 -2
- package/src/-f-node-discovery/NodeDiscovery.ts +9 -7
- package/src/-g-core-values/NodeCapabilities.ts +6 -1
- package/src/0-path-value-core/NodePathAuthorities.ts +1 -1
- package/src/0-path-value-core/PathValueCommitter.ts +3 -3
- package/src/0-path-value-core/PathValueController.ts +3 -3
- package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts +15 -37
- package/src/0-path-value-core/pathValueCore.ts +4 -3
- package/src/3-path-functions/PathFunctionRunner.ts +2 -2
- package/src/4-dom/qreact.tsx +4 -3
- package/src/4-querysub/Querysub.ts +2 -2
- package/src/4-querysub/QuerysubController.ts +2 -2
- package/src/5-diagnostics/GenericFormat.tsx +1 -0
- package/src/5-diagnostics/Table.tsx +3 -0
- package/src/5-diagnostics/diskValueAudit.ts +2 -1
- package/src/5-diagnostics/nodeMetadata.ts +0 -1
- package/src/deployManager/components/MachineDetailPage.tsx +9 -1
- package/src/deployManager/components/ServiceDetailPage.tsx +10 -1
- package/src/diagnostics/NodeViewer.tsx +3 -4
- package/src/diagnostics/logs/FastArchiveAppendable.ts +748 -0
- package/src/diagnostics/logs/FastArchiveController.ts +524 -0
- package/src/diagnostics/logs/FastArchiveViewer.tsx +863 -0
- package/src/diagnostics/logs/LogViewer2.tsx +349 -0
- package/src/diagnostics/logs/TimeRangeSelector.tsx +94 -0
- package/src/diagnostics/logs/diskLogger.ts +135 -305
- package/src/diagnostics/logs/diskShimConsoleLogs.ts +6 -29
- package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts +577 -0
- package/src/diagnostics/logs/errorNotifications/ErrorSuppressionUI.tsx +225 -0
- package/src/diagnostics/logs/errorNotifications/ErrorWarning.tsx +207 -0
- package/src/diagnostics/logs/importLogsEntry.ts +38 -0
- package/src/diagnostics/logs/injectFileLocationToConsole.ts +7 -17
- package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycles.tsx +0 -0
- package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +151 -0
- package/src/diagnostics/managementPages.tsx +7 -16
- package/src/diagnostics/misc-pages/ComponentSyncStats.tsx +0 -1
- package/src/diagnostics/periodic.ts +5 -0
- package/src/diagnostics/watchdog.ts +2 -2
- package/src/functional/SocketChannel.ts +67 -0
- package/src/library-components/Input.tsx +1 -1
- package/src/library-components/InputLabel.tsx +5 -2
- package/src/misc.ts +111 -0
- package/src/src.d.ts +34 -1
- package/src/user-implementation/userData.ts +4 -3
- package/test.ts +13 -0
- package/testEntry2.ts +29 -0
- package/src/diagnostics/errorLogs/ErrorLogController.ts +0 -535
- package/src/diagnostics/errorLogs/ErrorLogCore.ts +0 -274
- package/src/diagnostics/errorLogs/LogClassifiers.tsx +0 -308
- package/src/diagnostics/errorLogs/LogFilterUI.tsx +0 -84
- package/src/diagnostics/errorLogs/LogNotify.tsx +0 -101
- package/src/diagnostics/errorLogs/LogTimeSelector.tsx +0 -723
- package/src/diagnostics/errorLogs/LogViewer.tsx +0 -757
- package/src/diagnostics/errorLogs/logFiltering.tsx +0 -149
- package/src/diagnostics/logs/DiskLoggerPage.tsx +0 -613

package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts
@@ -0,0 +1,577 @@
+import { isNode } from "typesafecss";
+import { getArchives } from "../../../-a-archives/archives";
+import { SizeLimiter } from "../../SizeLimiter";
+import { FastArchiveAppendable, createLogScanner, objectDelimitterBuffer } from "../FastArchiveAppendable";
+import { LogDatum, getLoggers } from "../diskLogger";
+import os from "os";
+import { SocketFunction } from "socket-function/SocketFunction";
+import { cache, cacheLimited, lazy } from "socket-function/src/caching";
+import { getAllNodeIds } from "../../../-f-node-discovery/NodeDiscovery";
+import { archiveJSONT } from "../../../-a-archives/archivesJSONT";
+import { sort, throttleFunction, timeInDay, timeInHour, timeInMinute } from "socket-function/src/misc";
+import { formatNumber } from "socket-function/src/formatting/format";
+import { batchFunction, delay, runInSerial, runInfinitePoll, runInfinitePollCallAtStart } from "socket-function/src/batching";
+import { SocketChannel } from "../../../functional/SocketChannel";
+import { measureFnc, measureWrap } from "socket-function/src/profiling/measure";
+import { FastArchiveAppendableControllerBase, getFileMetadataHash } from "../FastArchiveController";
+import fs from "fs";
+import zlib from "zlib";
+import { getSyncedController } from "../../../library-components/SyncedController";
+import { qreact } from "../../../4-dom/qreact";
+import { requiresNetworkTrustHook } from "../../../-d-trust/NetworkTrust2";
+import { assertIsManagementUser } from "../../managementPages";
+import { streamToIteratable } from "../../../misc";
+
+export const MAX_RECENT_ERRORS = 20;
+const MAX_RECENT_ERRORS_PER_FILE = 3;
+
+const BACKBLAZE_POLL_INTERVAL = timeInMinute * 30;
+// The higher we turn this up the less bandwidth and processing time is spent on errors. But also, the longer delay between an error happening and getting an error notification
+const LOCAL_BROADCAST_BATCH = 5000;
+const NOTIFICATION_BROADCAST_BATCH = 5000;
+const SELF_THROTTLE_INTERVAL = 100;
+const SELF_THROTTLE_DELAY = 50;
+
+const VIEW_WINDOW = timeInDay * 7;
+const SUPPRESSION_POLL_INTERVAL = timeInMinute * 15;
+const READ_CHUNK_SIZE = 1024 * 1024 * 10;
+
+const LOCAL_CACHE_LIMIT_BATCH = timeInMinute * 30;
+
+export const NOT_AN_ERROR_EXPIRE_TIME = 2524608000000;
+
+export type SuppressionEntry = {
+    key: string;
+    // Includes, exact case
+    // - Supports "*" as a wildcard, which matches slower (converts it to a regex)
+    match: string;
+    comment: string;
+    lastUpdateTime: number;
+    expiresAt: number;
+};
+type SuppressionListBase = {
+    entries: {
+        [key: string]: SuppressionEntry;
+    };
+};
+
+type SuppressedChecker = {
+    entry: SuppressionEntry;
+    fnc: (buffer: Buffer, posStart: number, posEnd: number) => boolean;
+}
+
+function getAppendables() {
+    let loggers = getLoggers();
+    if (!loggers) throw new Error("Loggers not available?");
+    // error, warn
+    return [loggers.errorLogs, loggers.warnLogs];
+}
+
+// NOTE: This cache resets eventually, due to it being no longer used every time we update the suppression list. But that's probably fine...
+export const getSuppressEntryChecker = cacheLimited(
+    1000 * 10,
+    function getSuppressEntryChecker(entry: SuppressionEntry): SuppressedChecker {
+        if (entry.match.includes("*")) {
+            try {
+                let regex = new RegExp(entry.match.replaceAll("*", ".*"));
+                return {
+                    entry,
+                    fnc: (buffer: Buffer, posStart: number, posEnd: number) => {
+                        return regex.test(buffer.slice(posStart, posEnd).toString());
+                    }
+                };
+            } catch (error) {
+                console.error(`Failed to create regex for ${JSON.stringify(entry.match)}, ignoring wildcard in it`, { error });
+            }
+        }
+        let matchBuffer = Buffer.from(entry.match);
+        let char0 = matchBuffer[0];
+        return {
+            entry,
+            fnc: (buffer: Buffer, posStart: number, posEnd: number) => {
+                for (let i = posStart; i < posEnd; i++) {
+                    if (matchBuffer.length === 1) return true;
+                    if (buffer[i] === char0) {
+                        for (let j = 1; j < matchBuffer.length; j++) {
+                            let ch2 = buffer[i + j];
+                            if (ch2 !== matchBuffer[j]) {
+                                break;
+                            }
+                            if (j === matchBuffer.length - 1) {
+                                return true;
+                            }
+                        }
+                    }
+                }
+                return false;
+            },
+        };
+    }
+);
+
+
+const suppressionListKey = "suppression-list.json";
+const suppressionListArchive = archiveJSONT<SuppressionListBase>(() =>
+    getArchives("suppression-list"),
+);
+
+class SuppressionList {
+    private getEntries = lazy(async (): Promise<SuppressionListBase> => {
+        await runInfinitePollCallAtStart(SUPPRESSION_POLL_INTERVAL, async () => {
+            let entries = await suppressionListArchive.get(suppressionListKey);
+            if (!entries) {
+                entries = { entries: {} };
+            }
+            await suppressionListArchive.set(suppressionListKey, entries);
+            this.getEntries.set(Promise.resolve(entries));
+        });
+        // Infinite poll will have set this, so we don't infinitely loop
+        return await this.getEntries();
+    });
+
+    public async filterObjsToNonSuppressed(objs: LogDatum[]): Promise<LogDatum[]> {
+        // NOTE: Streamed data should be rare enough
+        let parts: Buffer[] = [];
+        for (let obj of objs) {
+            parts.push(Buffer.from(JSON.stringify(obj)));
+            parts.push(objectDelimitterBuffer);
+        }
+        let buffer = Buffer.concat(parts);
+        let scanner = await this.scanForRecentErrors();
+        await scanner.onData(buffer);
+        return await scanner.finish();
+    }
+    public async scanForRecentErrors(): Promise<{
+        onData: (data: Buffer) => void;
+        finish: () => Promise<LogDatum[]>;
+    }> {
+        let entries = await this.getEntries();
+        let now = Date.now();
+        let checkers = Object.values(entries.entries)
+            .map(entry => getSuppressEntryChecker(entry))
+        ;
+        let datums: LogDatum[] = [];
+        let callback = createLogScanner({
+            onParsedData: (posStart, posEnd, buffer) => {
+                if (buffer === "done") {
+                    return;
+                }
+                let outdatedSuppressionKey: string | undefined = undefined;
+                for (let checker of checkers) {
+                    if (checker.fnc(buffer, posStart, posEnd)) {
+                        if (checker.entry.expiresAt < now) {
+                            outdatedSuppressionKey = checker.entry.key;
+                            continue;
+                        }
+                        return;
+                    }
+                }
+                let obj: LogDatum;
+                try {
+                    obj = JSON.parse(buffer.slice(posStart, posEnd).toString()) as LogDatum;
+                } catch (e: any) {
+                    process.stderr.write(`Failed to parse log datum in around ${buffer.slice(posStart, posEnd).slice(0, 100).toString("hex")}, error is:\n${e.stack}`);
+                    return;
+                }
+                if (outdatedSuppressionKey) {
+                    obj.__matchedOutdatedSuppressionKey = outdatedSuppressionKey;
+                }
+                datums.push(obj);
+            },
+        });
+        let lastWaitTime = Date.now();
+        return {
+            onData: runInSerial(async (buffer) => {
+                // TODO: Maybe we should add this pattern to batching.ts? Basically, if we get called fast, we allow the calls through. BUT, if we called slowly OR we are doing a lot of processing (and so we are working for all of SELF_THROTTLE_INTERVAL), then we wait. This prevents this from taking over the machine. The back off is steep though, and if the machine is lagging we might reduce to a trickle, just getting 1 call in per SELF_THROTTLE_DELAY + synchronous lag from work in other parts of the program.
+                let now = Date.now();
+                if (now - lastWaitTime > SELF_THROTTLE_INTERVAL) {
+                    await delay(SELF_THROTTLE_DELAY);
+                    lastWaitTime = now;
+                }
+                await callback(buffer);
+            }),
+            finish: async () => {
+                await callback("done");
+                // NOTE: We COULD limit as we run, however... how many errors are we really going to encounter that AREN'T suppressed? Suppression is supposed to prevent overload anyways. I guess worst case scenario, yes, we could get overloaded, but... if we are logging more NEW errors than we can store in memory, we have bigger problems...
+                return limitRecentErrors(datums);
+            },
+        };
+    }
+    public async setSuppressionEntry(entry: SuppressionEntry) {
+        let entries = await this.getEntries();
+        entry.lastUpdateTime = Date.now();
+        entries.entries[entry.key] = entry;
+        await suppressionListArchive.set(suppressionListKey, entries);
+        await recentErrors.onSuppressionChanged();
+    }
+    public async removeSuppressionEntry(key: string) {
+        let entries = await this.getEntries();
+        delete entries.entries[key];
+        await suppressionListArchive.set(suppressionListKey, entries);
+        await recentErrors.onSuppressionChanged();
+    }
+
+    public async getSuppressionList(): Promise<SuppressionEntry[]> {
+        return Object.values((await this.getEntries()).entries);
+    }
+}
+const suppressionList = new SuppressionList();
+export const SuppressionListController = getSyncedController(SocketFunction.register(
+    "SuppressionListController-08f985d8-8d06-4041-ac4b-44566c54615d",
+    suppressionList,
+    () => ({
+        setSuppressionEntry: {},
+        removeSuppressionEntry: {},
+        getSuppressionList: {},
+    }),
+    () => ({
+        hooks: [assertIsManagementUser],
+    }),
+    {
+        noFunctionMeasure: true,
+    }
+), {
+    reads: {
+        getSuppressionList: ["suppression-list"],
+    },
+    writes: {
+        setSuppressionEntry: ["suppression-list", "recent-errors"],
+        removeSuppressionEntry: ["suppression-list", "recent-errors"],
+    },
+});
+
+class URLCache {
+    private root = isNode() && os.homedir() + "/backblaze-cache/" || "";
+    private sizeLimiter = new SizeLimiter({
+        diskRoot: this.root,
+        maxBytes: 1024 * 1024 * 1024 * 10,
+        // Basically... enough cache for a week of hourly data, for 1000 servers. At which point, this whole system will suck (too many servers to check), and we might have to change it
+        // - Ex, by only using backblaze, and aggregating logs there, to reduce file counts
+        // - Or... just by using elasticsearch...
+        // - Or... just have dedicated search servers, which store the data in memory. We will have few people querying, so having 100 servers all execute a 60s search query is fine.
+        maxFiles: 24 * 7 * 1000,
+        maxDiskFraction: 0.1,
+        maxTotalDiskFraction: 0.96,
+        minBytes: 0,
+    });
+    // Returns the PATH to it on disk, so you can then parse it via a stream, so you never have to store it all at once in memory.
+    public async getURLLocalPath(url: string, hash: string): Promise<string | undefined> {
+        if (!isNode()) return undefined;
+
+        // Create cache directory if it doesn't exist
+        if (!fs.existsSync(this.root)) {
+            await fs.promises.mkdir(this.root, { recursive: true });
+        }
+
+        const filePath = this.root + hash;
+
+        try {
+            // Check if file already exists
+            const stats = await fs.promises.stat(filePath);
+            if (stats.isFile()) {
+                void this.applyLimitting([]);
+                return filePath;
+            }
+        } catch (e: any) {
+            // File doesn't exist, need to download it
+            if (e.code !== "ENOENT") {
+                throw e;
+            }
+        }
+
+        try {
+            // Download and stream to disk
+            const response = await fetch(url);
+            if (!response.ok) {
+                throw new Error(`Failed to fetch ${url}: ${response.status} ${response.statusText}`);
+            }
+
+            if (!response.body) {
+                throw new Error(`Response body is undefined for ${url}`);
+            }
+
+            // Create write stream
+            let tempPath = filePath + ".temp";
+            const writeStream = fs.createWriteStream(tempPath);
+            const reader = response.body.getReader();
+
+            try {
+                for await (const chunk of streamToIteratable(reader)) {
+                    if (!chunk) continue;
+                    let result = writeStream.write(Buffer.from(chunk));
+                    if (!result) {
+                        await new Promise<void>(resolve => writeStream.once("drain", resolve));
+                    }
+                }
+
+                await new Promise<void>((resolve, reject) => {
+                    writeStream.end((err: any) => {
+                        if (err) reject(err);
+                        else resolve();
+                    });
+                });
+
+                // Trigger cache cleanup in the background
+                void this.applyLimitting([]);
+
+                await fs.promises.rename(tempPath, filePath);
+                return filePath;
+            } catch (error) {
+                // Clean up partial file on error
+                try {
+                    await fs.promises.unlink(tempPath);
+                } catch {
+                    // Ignore cleanup errors
+                }
+                throw error;
+            } finally {
+                reader.releaseLock();
+            }
+        } catch (error) {
+            console.error(`Failed to download and cache file from ${url}:`, error);
+            return undefined;
+        }
+    }
+    private applyLimitting = batchFunction({ delay: LOCAL_CACHE_LIMIT_BATCH }, async () => {
+        const files = await fs.promises.readdir(this.root);
+        const fileInfos: { time: number; bytes: number; path: string; }[] = [];
+
+        for (const file of files) {
+            const filePath = this.root + file;
+            const stats = await fs.promises.stat(filePath);
+            if (stats.isFile()) {
+                fileInfos.push({
+                    time: stats.atimeMs,
+                    bytes: stats.size,
+                    path: filePath,
+                });
+            }
+        }
+
+        const result = await this.sizeLimiter.limit(fileInfos);
+
+        for (const fileToRemove of result.remove) {
+            await fs.promises.unlink(fileToRemove.path);
+        }
+    });
+}
+const urlCache = new URLCache();
+
+const limitRecentErrors = measureWrap(function limitRecentErrors(objs: LogDatum[]) {
+    sort(objs, x => x.time);
+    let recent: LogDatum[] = [];
+    let countByFile = new Map<string, number>();
+    // NOTE: We iterate backwards, because... usually new logs come in at the end, and are pushed, so we want to sort by time (that way we often don't have to resort by much). And if we sort by time, the newest at at the end!
+    for (let i = objs.length - 1; i >= 0; i--) {
+        let obj = objs[i];
+        let file = String(obj.__FILE__) || "";
+        let count = countByFile.get(file) || 0;
+        count++;
+        if (count > MAX_RECENT_ERRORS_PER_FILE) continue;
+        countByFile.set(file, count);
+        recent.push(obj);
+        if (recent.length >= MAX_RECENT_ERRORS) break;
+    }
+    return recent;
+});
+
+class RecentErrors {
+
+    private initialize = lazy(async () => {
+        errorWatcherBase.watch(x => this.addErrors(x));
+        await this.scanNow({});
+        runInfinitePoll(BACKBLAZE_POLL_INTERVAL, async () => {
+            await this.scanNow({ noLocalFiles: true });
+        });
+    });
+
+    private _recentErrors: LogDatum[] = [];
+    private updateRecentErrors = (objs: LogDatum[]) => {
+        let newRecentErrors = limitRecentErrors(objs);
+        // If any changed
+        let prev = new Set(this._recentErrors);
+        let newErrors = new Set(newRecentErrors);
+        function hasAnyChanged() {
+            for (let obj of newRecentErrors) {
+                if (!prev.has(obj)) {
+                    return true;
+                }
+            }
+            for (let obj of prev) {
+                if (!newErrors.has(obj)) {
+                    return true;
+                }
+            }
+            return false;
+        }
+        if (hasAnyChanged()) {
+            this._recentErrors = newRecentErrors;
+            void this.broadcastUpdate(undefined);
+        }
+    };
+    private broadcastUpdate = batchFunction({ delay: NOTIFICATION_BROADCAST_BATCH }, () => {
+        recentErrorsChannel.broadcast(true);
+    });
+
+    private addErrors(objs: LogDatum[]) {
+        for (let obj of objs) {
+            this._recentErrors.push(obj);
+        }
+        this.updateRecentErrors(this._recentErrors);
+    }
+
+    public async onSuppressionChanged() {
+        this.updateRecentErrors(await suppressionList.filterObjsToNonSuppressed(this._recentErrors));
+    }
+
+    private scannedHashes = new Set<string>();
+    private scanNow = runInSerial(async (config: {
+        noLocalFiles?: boolean;
+    }) => {
+        for (let appendable of getAppendables()) {
+            let startTime = Date.now() - VIEW_WINDOW;
+            let endTime = Date.now() + timeInHour * 2;
+            let result = await new FastArchiveAppendableControllerBase().startSynchronizeInternal({
+                range: {
+                    startTime,
+                    endTime,
+                },
+                rootPath: appendable.rootPath,
+                noLocalFiles: config.noLocalFiles,
+            });
+            // Filter again, as new suppressions change the errors
+            this.updateRecentErrors(await suppressionList.filterObjsToNonSuppressed(this._recentErrors));
+            let recentLimit = 0;
+            const applyRecentLimit = () => {
+                if (this._recentErrors.length < MAX_RECENT_ERRORS) return;
+                recentLimit = this._recentErrors[0].time - timeInHour * 2;
+            };
+            applyRecentLimit();
+            for (let file of result.files) {
+                let path: string | undefined = undefined;
+                let size: number | undefined = undefined;
+                try {
+                    // Skip if it is older than even our throttled values!
+                    if (file.startTime < recentLimit) continue;
+                    // Don't skip local files, we only do them once at the start, or on demand when we want to recalc all anyways!
+                    if (!file.nodeId) {
+                        let hash = getFileMetadataHash(file);
+                        if (this.scannedHashes.has(hash)) continue;
+                        this.scannedHashes.add(hash);
+                    }
+
+                    let hash = getFileMetadataHash(file);
+                    path = await urlCache.getURLLocalPath(file.url, hash);
+                    if (!path) continue;
+                    let scanner = await suppressionList.scanForRecentErrors();
+
+                    // Stream decompress the file while reading it
+                    size = await fs.promises.stat(path).then(x => x.size);
+                    if (!size) {
+                        console.error(`Deleting empty cached file ${path} for ${file.url} `);
+                        // NOTE: This means we will repeatedly download empty files, but... that should be fairly fast...
+                        await fs.promises.unlink(path);
+                        continue;
+                    }
+                    let sizeT = size;
+                    let fd = await fs.promises.open(path, "r");
+                    try {
+                        await new Promise<void>(async (resolve, reject) => {
+                            const gunzip = zlib.createGunzip();
+
+                            gunzip.on("data", (chunk: Buffer) => {
+                                void scanner.onData(chunk);
+                            });
+
+                            gunzip.on("end", async () => {
+                                try {
+                                    resolve();
+                                } catch (error) {
+                                    reject(error);
+                                }
+                            });
+
+                            gunzip.on("error", reject);
+
+                            try {
+                                for (let i = 0; i < sizeT; i += READ_CHUNK_SIZE) {
+                                    let chunkSize = Math.min(READ_CHUNK_SIZE, sizeT - i);
+                                    let buffer = Buffer.alloc(chunkSize);
+                                    await fd.read(buffer, 0, chunkSize, i);
+                                    let result = gunzip.write(buffer);
+                                    if (!result) {
+                                        await new Promise(resolve => gunzip.once("drain", resolve));
+                                    }
+                                }
+                                gunzip.end();
+                            } catch (error) {
+                                reject(error);
+                            }
+                        });
+                    } finally {
+                        await fd.close();
+                    }
+                    let newErrors = await scanner.finish();
+                    this.addErrors(newErrors);
+                    applyRecentLimit();
+                } catch (e: any) {
+                    console.error(`Failed to scan file ${file.url}, size is ${size}, error is:\n${e.stack}`);
+                }
+            }
+        }
+    });
+
+
+    public async getRecentErrors(): Promise<LogDatum[]> {
+        await this.initialize();
+        return this._recentErrors;
+    }
+
+    // Rescans all local, and new backblaze
+    public async rescanAllErrorsNow() {
+        await this.scanNow({});
+        return this._recentErrors;
+    }
+}
+const recentErrors = new RecentErrors();
+export const RecentErrorsController = getSyncedController(SocketFunction.register(
+    "RecentErrorsController-8450c626-2a06-4eee-81cb-67d4c2fa8155",
+    recentErrors,
+    () => ({
+        getRecentErrors: {},
+        rescanAllErrorsNow: {},
+    }),
+    () => ({
+        hooks: [assertIsManagementUser],
+    }),
+), {
+    reads: {
+        getRecentErrors: ["recent-errors"],
+    },
+    writes: {
+        rescanAllErrorsNow: ["recent-errors"],
+    },
+});
+
+const recentErrorsChannel = new SocketChannel<true>("recent-errors-eeceb0c8-4086-4ab3-b3ff-fa9fd5282e14");
+
+export const watchRecentErrors = lazy(function watchRecentErrors() {
+    recentErrorsChannel.watch(async () => {
+        // Only 1 function, so just refresh all...
+        RecentErrorsController.refreshAll();
+    });
+});
+
+
+
+
+export const notifyWatchersOfError = batchFunction({
+    delay: LOCAL_BROADCAST_BATCH,
+},
+    async (objs: LogDatum[]) => {
+        objs = await suppressionList.filterObjsToNonSuppressed(objs);
+        errorWatcherBase.broadcast(objs);
+    }
+);
+
+const errorWatcherBase = new SocketChannel<LogDatum[]>("error-watcher-38de08cd-3247-4f75-9ac0-7919b240607d");
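
For reference, the suppression semantics introduced in this file work as follows: a SuppressionEntry's match field is an exact-case substring test against the serialized log line, a "*" in it switches matching to a (slower) regex, and entries whose expiresAt has passed no longer hide matching logs; they only tag them with __matchedOutdatedSuppressionKey. The sketch below is a standalone illustration of that matching rule, not package code; the matchesEntry helper and the sample entry are assumptions made for the example.

    // Standalone TypeScript illustration of the SuppressionEntry matching rules
    // shown in getSuppressEntryChecker above. matchesEntry is a hypothetical
    // helper for this example, not a package export.
    type SuppressionEntry = {
        key: string;
        match: string;          // exact-case substring; "*" acts as a wildcard (converted to a regex)
        comment: string;
        lastUpdateTime: number;
        expiresAt: number;
    };

    function matchesEntry(entry: SuppressionEntry, serializedLog: string): boolean {
        if (entry.match.includes("*")) {
            // Wildcard path: each "*" becomes ".*" and the whole match is run as a regex.
            return new RegExp(entry.match.replaceAll("*", ".*")).test(serializedLog);
        }
        // Plain path: a byte-wise substring search in the package; a string search here.
        return serializedLog.includes(entry.match);
    }

    const entry: SuppressionEntry = {
        key: "noisy-upstream-timeout",
        match: "Request timed out after * ms",
        comment: "Known flaky upstream; revisit after the retry fix ships",
        lastUpdateTime: Date.now(),
        expiresAt: Date.now() + 7 * 24 * 60 * 60 * 1000, // suppress for one week
    };

    console.log(matchesEntry(entry, `{"message":"Request timed out after 3021 ms"}`)); // true
    console.log(matchesEntry(entry, `{"message":"Connection refused"}`));              // false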