querysub 0.324.0 → 0.326.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +3 -2
- package/src/-a-archives/archiveCache.ts +2 -1
- package/src/-a-auth/certs.ts +2 -1
- package/src/-c-identity/IdentityController.ts +1 -0
- package/src/-d-trust/NetworkTrust2.ts +26 -27
- package/src/-e-certs/EdgeCertController.ts +13 -4
- package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts +9 -5
- package/src/4-querysub/Querysub.ts +3 -1
- package/src/diagnostics/logs/FastArchiveAppendable.ts +5 -3
- package/src/diagnostics/logs/FastArchiveViewer.tsx +43 -35
- package/src/diagnostics/logs/LogViewer2.tsx +35 -34
- package/src/diagnostics/logs/TimeRangeSelector.tsx +18 -2
- package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts +171 -34
- package/src/diagnostics/logs/errorNotifications/ErrorSuppressionUI.tsx +13 -7
- package/src/diagnostics/logs/errorNotifications/ErrorWarning.tsx +16 -4
- package/src/diagnostics/logs/errorNotifications/errorWatchEntry.tsx +78 -0
- package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +27 -83
- package/src/diagnostics/managementPages.tsx +2 -1
- package/src/functional/SocketChannel.ts +5 -1
- package/src/library-components/ATag.tsx +1 -0
- package/src/library-components/SyncedController.ts +14 -3
- package/src/library-components/SyncedControllerLoadingIndicator.tsx +3 -2
- package/src/library-components/URLParam.ts +35 -5
- package/src/library-components/icons.tsx +3 -0
- package/src/library-components/niceStringify.ts +1 -1
- package/src/library-components/urlResetGroups.ts +14 -0
- package/src/misc/formatJSX.tsx +7 -1
- package/src/server.ts +2 -1
- package/testEntry2.ts +16 -5
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "querysub",
-  "version": "0.324.0",
+  "version": "0.326.0",
   "main": "index.js",
   "license": "MIT",
   "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",

@@ -37,7 +37,8 @@
     "depend": "yarn --silent depcruise src --include-only \"^src\" --config --output-type dot | dot -T svg > dependency-graph.svg",
     "test": "yarn typenode ./test.ts",
     "test3": "yarn typenode ./src/test/test.tsx --local",
-    "test2": "yarn typenode ./src/4-dom/qreactTest.tsx --local"
+    "test2": "yarn typenode ./src/4-dom/qreactTest.tsx --local",
+    "error-watch": "yarn typenode ./src/diagnostics/logs/errorNotifications/errorWatchEntry.tsx"
   },
   "bin": {
     "deploy": "./bin/deploy.js",
package/src/-a-archives/archiveCache.ts
CHANGED

@@ -12,10 +12,11 @@ import { Args } from "socket-function/src/types";
 import { getArchivesBackblaze } from "./archivesBackBlaze";
 import { formatNumber } from "socket-function/src/formatting/format";
 import { SizeLimiter } from "../diagnostics/SizeLimiter";
+import { isPublic } from "../config";
 
 const SIZE_LIMIT = new SizeLimiter({
     diskRoot: getStorageDir(),
-    maxBytes: 1024 * 1024 * 1024 * 50,
+    maxBytes: isPublic() ? 1024 * 1024 * 1024 * 250 : 1024 * 1024 * 1024 * 50,
     // Anything less than this and we can't even load enough weights models for a single task
     minBytes: 1024 * 1024 * 1024 * 8,
     maxDiskFraction: 0.3,
package/src/-a-auth/certs.ts
CHANGED

@@ -469,7 +469,8 @@ export function decodeNodeId(nodeId: string, allowMissingThreadId?: "allowMissin
         return undefined;
     }
     let parts = locationObj.address.split(".");
-    if
+    // NOTE: We have to only allow localhost addresses on our own domain, as the underlying domain gets stripped when we're looking at the machine ID. If we allowed localhost addresses on other domains, a server could trick us into connecting to it; once the connection is established, it could talk back and we would think it has a localhost machine ID, which is implicitly trusted, which would then give it access to everything.
+    if (nodeId.startsWith(`127-0-0-1.${getDomain()}`) && parts.length === 3 && nodeId.includes(":")) {
         return {
             threadId: "",
             machineId: parts.at(-3) || "",
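To make the new rule concrete, here is a hypothetical sketch of which nodeIds pass it. This helper is not in the package: getDomain() is assumed to return "querysubtest.com" (the domain used in the package's own comments), and the real decodeNodeId derives parts from locationObj.address rather than from the raw nodeId.

```typescript
// Hypothetical illustration of the localhost rule above, not the package's
// decodeNodeId. The real code splits locationObj.address (host without port).
function isImplicitLocalhostNodeId(nodeId: string, domain: string): boolean {
    const parts = nodeId.split(":")[0].split(".");
    return nodeId.startsWith(`127-0-0-1.${domain}`)
        && parts.length === 3
        && nodeId.includes(":");
}

isImplicitLocalhostNodeId("127-0-0-1.querysubtest.com:15358", "querysubtest.com"); // true
isImplicitLocalhostNodeId("127-0-0-1.attacker.example:15358", "querysubtest.com"); // false: wrong domain
```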
package/src/-c-identity/IdentityController.ts
CHANGED

@@ -191,6 +191,7 @@ const changeIdentityOnce = cacheWeak(async function changeIdentityOnce(connectio
     };
     let signature = sign(threadKeyCert, payload);
     await timeoutToError(
+        // NOTE: This timeout has to be small: if we try to connect to a node to send it something time sensitive, such as a PathValue, and the call takes too long, we might end up holding a PathValue which is expired. The threshold is around 60 seconds, so the timeout needs to leave room to call a few different nodes before the PathValue expires.
         10 * 1000,
         IdentityController.nodes[nodeId].changeIdentity(signature, payload),
         () => new Error(`Timeout calling changeIdentity for ${nodeId}`)
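The call above passes a timeout in milliseconds, the pending promise, and an error factory. A minimal sketch of a helper with that shape, assuming socket-function's actual timeoutToError behaves equivalently:

```typescript
// Minimal sketch matching the call site above; the real socket-function
// helper may differ in details. With a 10s timeout and a ~60s PathValue
// lifetime, a caller can try several nodes before the value expires.
async function timeoutToError<T>(ms: number, promise: Promise<T>, makeError: () => Error): Promise<T> {
    let timer: ReturnType<typeof setTimeout> | undefined;
    const timeout = new Promise<never>((_, reject) => {
        timer = setTimeout(() => reject(makeError()), ms);
    });
    try {
        // Whichever settles first wins; the timer is cleared either way.
        return await Promise.race([promise, timeout]);
    } finally {
        clearTimeout(timer);
    }
}
```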
package/src/-d-trust/NetworkTrust2.ts
CHANGED

@@ -1,7 +1,7 @@
 import { measureWrap } from "socket-function/src/profiling/measure";
 import { getIdentityCA, getMachineId, getOwnMachineId } from "../-a-auth/certs";
 import { getArchives } from "../-a-archives/archives";
-import { isNode, throttleFunction, timeInSecond } from "socket-function/src/misc";
+import { isNode, throttleFunction, timeInHour, timeInSecond } from "socket-function/src/misc";
 import { SocketFunctionHook } from "socket-function/SocketFunctionTypes";
 import { SocketFunction } from "socket-function/SocketFunction";
 import { IdentityController_getMachineId } from "../-c-identity/IdentityController";

@@ -19,6 +19,7 @@ import { magenta } from "socket-function/src/formatting/logColors";
 // Cache the untrust list, to prevent bugs from causing too many backend reads (while also keeping
 // bad servers which make requests before their trust is verified from staying broken).
 const UNTRUST_CACHE_TIME = 30 * timeInSecond;
+const TRUSTED_CACHE_RESET_INTERVAL = timeInHour;
 
 const archives = lazy(() => getArchives("trust2/"));
 
@@ -57,6 +58,8 @@ export const isTrusted = measureWrap(async function isTrusted(machineId: string)
     // See the comment in requiresNetworkTrustHook for why clients have to trust all callers.
     if (isClient()) return true;
 
+    await populateTrustedCache();
+
     if (trustedCache.has(machineId)) {
         return true;
     }

@@ -70,31 +73,10 @@ export const isTrusted = measureWrap(async function isTrusted(machineId: string)
         return false;
     }
 
-
-
-
-
-    if (!trustedCachePopulated) {
-        trustedCachePopulated = true;
-        let trustedMachineIds = await archives().find("");
-        lastArchivesTrusted = trustedMachineIds.slice();
-        for (let trustedMachineId of trustedMachineIds) {
-            trustedCache.add(trustedMachineId);
-            // NOTE: We don't load trust certs here, as we need to load them on demand in case the trust changes after our initial startup.
-        }
-    } else {
-        // Checking a single entry is a lot faster (as find is slow)
-        let trusted = await archives().get(machineId);
-        if (trusted) {
-            trustedCache.add(machineId);
-        }
-    }
-    // Always trust ourself
-    trustedCache.add(getOwnMachineId());
-
-    // NOTE: This only happens in the case WE connected to it (ex, "127-0-0-1.querysubtest.com:15358"). It can't look like this if it connected to us, in which case the nodeId will be "client:...", being mostly random, and created by us (UNTIL they prove they have another id). So... I'm not even sure the isPublic check is required? We only connect to nodes we discover through node discovery, which requires backblaze write permissions. But I guess it's fine to be extra careful about it...
-    if (!isPublic()) {
-        trustedCache.add("127-0-0-1");
+    // Checking a single entry is fast, and if we don't trust them they'll be added to the untrusted cache, so it shouldn't slow things down by too much.
+    let trusted = await archives().get(machineId);
+    if (trusted) {
+        trustedCache.add(machineId);
     }
 
     if (!trustedCache.has(machineId)) {

@@ -103,7 +85,24 @@ const isTrustedBase = runInSerial(measureWrap(async function isTrustedBase(machi
     } else {
         return true;
     }
-})
+});
+let populateTrustedCache = lazy(async () => {
+    let trustedMachineIds = await archives().find("");
+    lastArchivesTrusted = trustedMachineIds.slice();
+    for (let trustedMachineId of trustedMachineIds) {
+        trustedCache.add(trustedMachineId);
+    }
+    // Always trust ourself
+    trustedCache.add(getOwnMachineId());
+
+    // NOTE: This only applies to servers that we connect to. We also only allow the machine ID to be this special ID when it's on our domain, and because we use HTTPS when connecting to domains, it must be implicitly trusted if it has a certificate for our domain.
+    trustedCache.add("127-0-0-1");
+
+    setTimeout(() => {
+        trustedCache.clear();
+        populateTrustedCache.reset();
+    }, TRUSTED_CACHE_RESET_INTERVAL);
+});
 
 export async function isNodeTrusted(nodeId: string) {
     let domainName = getNodeIdDomainMaybeUndefined(nodeId);
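The new populateTrustedCache is a lazy async initializer that schedules its own invalidation: the expensive archives().find("") scan runs at most once per TRUSTED_CACHE_RESET_INTERVAL, while every isTrusted call can cheaply await it. A minimal sketch of that pattern, assuming socket-function's lazy exposes a reset() the way the diff uses it:

```typescript
// Minimal sketch of a lazy async initializer with reset(), mirroring how
// populateTrustedCache is used above; socket-function's lazy may differ.
function lazy<T>(factory: () => Promise<T>): (() => Promise<T>) & { reset(): void } {
    let pending: Promise<T> | undefined;
    const fn = (() => (pending ??= factory())) as (() => Promise<T>) & { reset(): void };
    fn.reset = () => { pending = undefined; };
    return fn;
}

// Usage shape: every caller awaits it, but the body runs once per interval.
const populate = lazy(async () => {
    // ...repopulate the trusted cache here...
    setTimeout(() => populate.reset(), 60 * 60 * 1000); // TRUSTED_CACHE_RESET_INTERVAL
});
```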
package/src/-e-certs/EdgeCertController.ts
CHANGED

@@ -129,12 +129,21 @@ export async function publishMachineARecords() {
     let nodeObj = getNodeIdLocation(selfNodeId);
     if (!nodeObj) throw new Error(`Invalid nodeId ${selfNodeId}`);
     let machineAddress = nodeObj.address.split(".").slice(1).join(".");
-    await
-    await setRecord("A", "*." + machineAddress, ip);
+    let prevMachineIP = await getRecords("A", machineAddress);
     let ipDomain = await getIPDomain();
-
+    let promises: Promise<void>[] = [];
+    promises.push(setRecord("A", ipDomain, ip));
 
-
+
+    if (ip === "127.0.0.1" && prevMachineIP.length > 0 && !prevMachineIP.includes("127.0.0.1")) {
+        console.log(yellow(`Not setting A record for ${machineAddress} to ${ip}, as we previously had a public IP. If you want to switch back to 127.0.0.1, manually delete the A records for ${machineAddress}. Port forwarding should allow this to work anyway, and the bootstrapper should be smart enough to try 127-0-0-1 style addresses to allow fast development (ex, if you want to download a large file from the local development server quickly).`));
+    } else {
+        promises.push(setRecord("A", machineAddress, ip));
+        promises.push(setRecord("A", "*." + machineAddress, ip));
+    }
+
+    promises.push(publishEdgeDomain());
+    await Promise.all(promises);
 
     return {
         ip,
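The notable behavior here is the downgrade guard: a machine that previously published a public IP will not silently overwrite its A records with 127.0.0.1. Isolated as a pure decision function (illustrative only, not in the package):

```typescript
// Illustrative helper, not in the package: the guard above, isolated.
// Returns false when publishing would downgrade an existing public A record
// to 127.0.0.1; port forwarding and 127-0-0-1-style addresses keep local
// development working without touching DNS.
function shouldPublishMachineRecords(ip: string, prevMachineIP: string[]): boolean {
    return !(ip === "127.0.0.1" && prevMachineIP.length > 0 && !prevMachineIP.includes("127.0.0.1"));
}
```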
package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts
CHANGED

@@ -699,11 +699,15 @@ class TransactionLocker {
     // where backblaze returns no files? Which it might be doing, as multiple times this code has
     // triggered (without this check), and deleted all of our files...
     let unconfirmedOldFiles2 = unconfirmedOldFiles.filter(a => !doubleCheckLookup.has(a.file) && doubleCheckDataFiles.has(a.file));
-
-
-
-
-
+    if (unconfirmedOldFiles2.length > 0) {
+        console.warn(red(`Deleted ${unconfirmedOldFiles2.length} very old unconfirmed files`), { files: unconfirmedOldFiles2.map(x => x.file) });
+        logNodeStats(`archives|TΔ Delete Old Rejected File`, formatNumber, unconfirmedOldFiles2.length);
+        // At this point the file was already very old when we started reading, so it's not part of the active transaction.
+        for (let file of unconfirmedOldFiles2) {
+            await this.deleteDataFile(file.file, `old unconfirmed file`);
+        }
+    } else {
+        console.warn(`Almost deleted ${unconfirmedOldFiles.length} very old unconfirmed files. This is bad; did we miss their confirmations the first time? If we missed them twice in a row, we might literally delete the database, and need to enter recovery mode to fix it...`, { files: unconfirmedOldFiles });
     }
     }
 }
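The filter on unconfirmedOldFiles2 is the safety core of this change: a file is only deletable if the second listing still shows it as a data file and it still lacks a confirmation entry, guarding against a spurious empty listing wiping the database. The same expression, isolated (illustrative helper; the set shapes are assumptions):

```typescript
// Sketch of the double-check filter above (expression taken from the diff,
// container shapes assumed). A file survives as "safe to delete" only if the
// re-listing shows no confirmation for it AND still shows it as a data file,
// so an empty/failed listing cannot mark everything deletable.
function filesSafeToDelete(
    unconfirmedOldFiles: { file: string }[],
    doubleCheckLookup: Set<string>,     // confirmations seen on re-list
    doubleCheckDataFiles: Set<string>,  // data files seen on re-list
): { file: string }[] {
    return unconfirmedOldFiles.filter(a => !doubleCheckLookup.has(a.file) && doubleCheckDataFiles.has(a.file));
}
```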
package/src/4-querysub/Querysub.ts
CHANGED

@@ -52,6 +52,7 @@ import yargs, { check } from "yargs";
 import { parseArgsFactory } from "../misc/rawParams";
 
 import * as typesafecss from "typesafecss";
+import "../library-components/urlResetGroups";
 
 
 typesafecss.setMeasureBlock(measureBlock);

@@ -72,6 +73,7 @@ let yargObj = parseArgsFactory()
     .option("verbosenetwork", { type: "boolean", desc: "Log all network activity" })
     .option("verboseframework", { type: "boolean", desc: "Log internal SocketFunction framework" })
     .option("nodelay", { type: "boolean", desc: "Don't delay committing functions, even ones that are marked to be delayed." })
+    .option("hot", { type: "boolean", desc: "Force hot reloading to turn on even if public is true." })
     .argv
 ;
 setImmediate(() => {

@@ -724,7 +726,7 @@ export class Querysub {
     }
 
     // Hot reloading on public servers breaks things when we update (as the git pull triggers a hot reload), so... don't do that.
-    if (config.hotReload && !isPublic()) {
+    if (config.hotReload && !isPublic() || yargObj.hot) {
         watchFilesAndTriggerHotReloading();
     }
 
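One subtlety in the new condition: && binds tighter than ||, so --hot bypasses the public check entirely, which matches the flag's description. The equivalent explicit parenthesization:

```typescript
// The new condition, parenthesized explicitly: --hot forces hot reloading
// even when isPublic() is true.
const shouldHotReload = (config.hotReload && !isPublic()) || yargObj.hot;
if (shouldHotReload) {
    watchFilesAndTriggerHotReloading();
}
```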
package/src/diagnostics/logs/FastArchiveAppendable.ts
CHANGED

@@ -348,7 +348,7 @@ export class FastArchiveAppendable<Datum> {
         endTime: number;
     };
     cacheBust: number;
-
+    getWantData?: (file: FileMetadata) => Promise<((posStart: number, posEnd: number, data: Buffer) => boolean) | undefined>;
     onData: (datum: Datum[], file: FileMetadata) => void;
     // Called after onData
     onStats?: (stats: DatumStats, file: FileMetadata) => void;

@@ -365,7 +365,7 @@ export class FastArchiveAppendable<Datum> {
     };
     stopSynchronize: () => void;
 } | "cancelled"> {
-    let {
+    let { onData, onStats } = config;
     // Create unique client sync ID upfront
     let syncId = nextId();
 

@@ -525,12 +525,14 @@ export class FastArchiveAppendable<Datum> {
 
     let scanProgressCount = 0;
 
+    const wantData = await config.getWantData?.(file);
+
     function onParsedData(posStart: number, posEnd: number, buffer: Buffer | "done"): MaybePromise<void> {
         if (buffer !== "done") {
             scanProgressCount++;
         }
         if (buffer !== "done") {
-            if (wantData && !wantData(posStart, posEnd, buffer
+            if (wantData && !wantData(posStart, posEnd, buffer)) {
                 notMatchedSize += (posEnd - posStart);
                 notMatchedCount++;
                 return;
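getWantData gives callers a per-file predicate over raw byte ranges that runs before records are parsed, so cheap byte-level checks can skip records entirely (counted as not matched). A hypothetical caller sketch built from the signatures above; fastArchive and the filter logic are illustrative:

```typescript
// Hypothetical caller sketch: the config shape follows the signatures in the
// diff, while fastArchive and the "ERROR" filter are illustrative. Records
// whose raw bytes fail the predicate never reach parsing or onData.
const endTime = Date.now();
const startTime = endTime - 60 * 60 * 1000; // last hour
const result = await fastArchive.synchronizeData({
    range: { startTime, endTime },
    cacheBust: 0,
    getWantData: async (file) => {
        const needle = Buffer.from("ERROR");
        // Returning undefined would disable pre-filtering for this file.
        return (posStart, posEnd, data) => data.subarray(posStart, posEnd).includes(needle);
    },
    onData: (datums, file) => {
        // Only records whose raw bytes matched arrive here.
    },
});
```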
package/src/diagnostics/logs/FastArchiveViewer.tsx
CHANGED

@@ -31,7 +31,7 @@ const caseInsensitiveParam = new URLParam("caseInsensitive", false);
 export class FastArchiveViewer<T> extends qreact.Component<{
     fastArchives: FastArchiveAppendable<T>[];
     onStart: () => void;
-    getWantData?: () => Promise<((posStart: number, posEnd: number, data: Buffer
+    getWantData?: (file: FileMetadata) => Promise<((posStart: number, posEnd: number, data: Buffer) => boolean) | undefined>;
     onDatums: (source: FastArchiveAppendable<T>, datums: T[], metadata: FileMetadata) => void;
     // Called after onData
     onStats?: (source: FastArchiveAppendable<T>, stats: DatumStats, metadata: FileMetadata) => void;

@@ -103,7 +103,6 @@ export class FastArchiveViewer<T> extends qreact.Component<{
     let fastArchives = Querysub.fastRead(() => this.props.fastArchives);
     let onFinish = Querysub.fastRead(() => this.props.onFinish);
     let getWantData = Querysub.fastRead(() => this.props.getWantData);
-    let wantData = await getWantData?.();
     // Increment sequence number for this new sync attempt
     this.currentSequenceNumber++;
     this.latestSequenceNumber = this.currentSequenceNumber;

@@ -296,35 +295,37 @@ export class FastArchiveViewer<T> extends qreact.Component<{
     const result = await fastArchive.synchronizeData({
         range: timeRange,
         cacheBust: Querysub.fastRead(() => cacheBustParam.value),
-
-
-
-
-
-
-
-
-
-
-
-
+        getWantData: async (file) => {
+            let wantData = await getWantData?.(file);
+            return (posStart: number, posEnd: number, data: Buffer) => {
+                let isLimited = false;
+                for (let i = posStart; i < posEnd && !isLimited; i++) {
+                    if (data[i] === limitedBuffer[0]) {
+                        for (let j = 1; j < limitedBuffer.length; j++) {
+                            if (data[i + j] !== limitedBuffer[j]) {
+                                break;
+                            }
+                            if (j === limitedBuffer.length - 1) {
+                                isLimited = true;
+                                break;
+                            }
                         }
                     }
                 }
-
-
-
-                }
+                if (isLimited) {
+                    this.limitedScanCount++;
+                }
 
-
-
-                matched
-
-
-
-
-
+                // scanMatch is faster than wantData (generally), so run it first
+                let matched = scanMatch(posStart, posEnd, data);
+                if (matched && wantData) {
+                    matched = wantData(posStart, posEnd, data);
+                }
+                if (isLimited && matched) {
+                    this.limitedMatchCount++;
+                }
+                return matched;
+            };
         },
         onData: (datums, file) => {
             if (!isLatestSync()) return;

@@ -570,9 +571,6 @@ export class FastArchiveViewer<T> extends qreact.Component<{
     flavor="large"
     fillWidth
     onKeyUp={this.handleDownload}
-    ref2={() => {
-        void this.handleDownload();
-    }}
     noEnterKeyBlur
     placeholder="Filter terms, ex x | y & z"
 />

@@ -610,13 +608,23 @@ export class FastArchiveViewer<T> extends qreact.Component<{
     </div>}
     {!this.state.finished && <LoaderAurora />}
     {this.limitedScanCount > 0 && (
-        <div className={infoDisplay(60).boldStyle}
-            {
+        <div className={infoDisplay(60).boldStyle.button}
+            onClick={() => {
+                filterParam.value = LOG_LIMIT_FLAG;
+                void this.handleDownload();
+            }}
+        >
+            Click here to see the {formatNumber(this.limitedScanCount)} scanned logs that were rate limited at a file level. We do not track how many were ignored; there might be an unbounded number of missing logs.
         </div>
     )}
-    {this.limitedMatchCount > 0 && (
-        <div className={infoDisplay(0).boldStyle}
-            {
+    {this.limitedMatchCount > 0 && filterParam.value && (
+        <div className={infoDisplay(0).boldStyle.button}
+            onClick={() => {
+                filterParam.value += " & " + LOG_LIMIT_FLAG;
+                void this.handleDownload();
+            }}
+        >
+            Click here to see the {formatNumber(this.limitedMatchCount)} matched logs that were rate limited at a file level. We do not track how many were ignored; there might be an unbounded number of missing logs.
         </div>
     )}
     {this.state.pendingSyncInitializations > 0 && (
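The hand-rolled inner loop above is a plain substring search for limitedBuffer (presumably the raw bytes of LOG_LIMIT_FLAG) within each record's byte range. Under that assumption, an equivalent check using Buffer's built-in search:

```typescript
// Equivalent check using Buffer.indexOf, assuming limitedBuffer holds the raw
// bytes of LOG_LIMIT_FLAG. One behavioral difference: the loop in the diff
// reads data[i + j] past posEnd when a candidate match straddles the range
// end, while this version confines the search to [posStart, posEnd).
function recordIsLimited(posStart: number, posEnd: number, data: Buffer, limitedBuffer: Buffer): boolean {
    return data.subarray(posStart, posEnd).indexOf(limitedBuffer) !== -1;
}
```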
package/src/diagnostics/logs/LogViewer2.tsx
CHANGED

@@ -13,7 +13,7 @@ import { logErrors } from "../../errors";
 import { batchFunction, runInSerial } from "socket-function/src/batching";
 import { Querysub } from "../../4-querysub/QuerysubController";
 import { sort, timeInDay, timeInHour } from "socket-function/src/misc";
-import { FastArchiveViewer } from "./FastArchiveViewer";
+import { FastArchiveViewer, filterParam } from "./FastArchiveViewer";
 import { LogDatum, getLoggers, LOG_LIMIT_FLAG } from "./diskLogger";
 import { ColumnType, Table, TableType } from "../../5-diagnostics/Table";
 import { formatDateJSX } from "../../misc/formatJSX";

@@ -24,7 +24,7 @@ import { ObjectDisplay } from "./ObjectDisplay";
 import { endTime } from "../misc-pages/archiveViewerShared";
 import { ErrorSuppressionUI } from "./errorNotifications/ErrorSuppressionUI";
 import { FileMetadata } from "./FastArchiveController";
-import { SuppressionListController, getSuppressEntryChecker } from "./errorNotifications/ErrorNotificationController";
+import { SuppressionListController, getSuppressEntryChecker, getSuppressionFull } from "./errorNotifications/ErrorNotificationController";
 import { SocketFunction } from "socket-function/SocketFunction";
 
 const RENDER_INTERVAL = 1000;

@@ -56,6 +56,7 @@ export class LogViewer2 extends qreact.Component {
     });
 
     private example: string | undefined = undefined;
+    private operationSequenceNum = 0;
     private datumCount = 0;
     private matchedSize = 0;
     private notMatchedSize = 0;

@@ -100,6 +101,10 @@ export class LogViewer2 extends qreact.Component {
         }
     }
 
+    let suppressionController = SuppressionListController(SocketFunction.browserNodeId());
+    let suppressionList = suppressionController.getSuppressionList();
+    let hasErrorNotifyToggle = errorNotifyToggleURL.value;
+
     const timeRange = getTimeRange();
     return (
         <div className={css.vbox(20).pad2(20).fillBoth}>

@@ -114,6 +119,7 @@ export class LogViewer2 extends qreact.Component {
     let now = Date.now();
     startTimeParam.value = now - timeInDay * 7;
     endTimeParam.value = now + timeInHour * 2;
+    filterParam.value = "";
 }
 this.rerun();
 }}

@@ -148,40 +154,34 @@ export class LogViewer2 extends qreact.Component {
     this.datums = [];
     this.suppressionCounts = new Map();
     this.expiredSuppressionCounts = new Map();
-
-
-
-
-
-
-
-    let checkers = suppressionList.map(x => getSuppressEntryChecker(x));
-    this.suppressionCounts = new Map(suppressionList.map(x => [x.key, 0]));
-    this.expiredSuppressionCounts = new Map(suppressionList.map(x => [x.key, 0]));
-    let updateCounts = batchFunction({ delay: 1000 }, () => {
-        Querysub.commit(() => {
-            this.state.datumsSeqNum++;
-        });
-    });
-    return (posStart, posEnd, data, file) => {
-        for (let checker of checkers) {
-            if (checker.fnc(data, posStart, posEnd)) {
-                if (checker.entry.expiresAt < now) {
-                    let count = this.expiredSuppressionCounts.get(checker.entry.key) || 0;
-                    count++;
-                    this.expiredSuppressionCounts.set(checker.entry.key, count);
-                    void updateCounts(undefined);
-                } else {
-                    let count = this.suppressionCounts.get(checker.entry.key) || 0;
-                    count++;
-                    this.suppressionCounts.set(checker.entry.key, count);
-                    void updateCounts(undefined);
-                    return false;
-                }
+    this.operationSequenceNum++;
+    void (async () => {
+        const currentSequenceNum = this.operationSequenceNum;
+        while (true) {
+            await new Promise(resolve => setTimeout(resolve, 1000));
+            if (this.operationSequenceNum !== currentSequenceNum) {
+                break;
             }
+            Querysub.commit(() => {
+                this.state.datumsSeqNum++;
+            });
         }
-
-
+    })();
+}}
+getWantData={async (file) => {
+    if (!hasErrorNotifyToggle) return undefined;
+    // By defaulting to the synchronous getter, the list will pick up any changes. We also fetch it asynchronously if the list hasn't been populated by the time getWantData is called, and because we assign it back to the variable, the result is cached.
+    suppressionList = suppressionList || await suppressionController.getSuppressionList.promise();
+    let suppressionFull = getSuppressionFull({
+        entries: suppressionList,
+        blockTimeRange: {
+            startTime: file.startTime,
+            endTime: file.endTime,
+        },
+        suppressionCounts: this.suppressionCounts,
+        expiredSuppressionCounts: this.expiredSuppressionCounts,
+    });
+    return suppressionFull;
 }}
 onDatums={(source, datums, file) => {
     this.datumCount += datums.length;

@@ -212,6 +212,7 @@ export class LogViewer2 extends qreact.Component {
     }
 }}
 onFinish={() => {
+    this.operationSequenceNum++;
     Querysub.commit(() => {
         sort(this.datums, x => -(x.time || 0));
         this.state.datumsSeqNum++;
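The rewrite replaces the old batchFunction-based counter updates with a self-cancelling polling loop: each new sync bumps operationSequenceNum, a detached loop re-renders once a second, and onFinish bumps the counter again so the loop exits. The guard pattern as a standalone sketch (hypothetical class, not from the package; the real code ticks this.state.datumsSeqNum inside Querysub.commit):

```typescript
// Standalone sketch of the sequence-number guard used above. Each start()
// bumps the counter; any older loop observes the mismatch on its next tick
// and exits, so at most one loop survives and no cleanup handle is needed.
class PollingGuard {
    private seq = 0;
    start(onTick: () => void, intervalMs = 1000): void {
        const current = ++this.seq;
        void (async () => {
            while (true) {
                await new Promise(resolve => setTimeout(resolve, intervalMs));
                if (this.seq !== current) break; // a newer operation superseded us
                onTick();
            }
        })();
    }
    stop(): void { this.seq++; } // mirrors onFinish bumping operationSequenceNum
}
```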
package/src/diagnostics/logs/TimeRangeSelector.tsx
CHANGED

@@ -82,10 +82,26 @@ export class TimeRangeSelector extends qreact.Component {
     >
         Set to future data
     </Button>
-
+    <Button
+        hue={110} onClick={() => {
+            startTimeParam.value = now - timeInDay;
+            endTimeParam.value = now + timeInHour * 2;
+        }}
+    >
+        Set to last day
+    </Button>
+    <Button
+        hue={110} onClick={() => {
+            startTimeParam.value = now - timeInDay * 7;
+            endTimeParam.value = now + timeInHour * 2;
+        }}
+    >
+        Set to last 7 days
+    </Button>
+    {!!(endTimeParam.value || startTimeParam.value) && <Button
         hue={110} onClick={resetToLastDay}
     >
-        Reset
+            Reset
     </Button>}
     {(!startTimeParam.value || !endTimeParam.value) && <Button
         hue={110}