querysub 0.311.0 → 0.313.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursorrules +1 -1
- package/costsBenefits.txt +4 -1
- package/package.json +3 -2
- package/spec.txt +23 -18
- package/src/-0-hooks/hooks.ts +1 -1
- package/src/-a-archives/archives.ts +16 -3
- package/src/-a-archives/archivesBackBlaze.ts +51 -3
- package/src/-a-archives/archivesLimitedCache.ts +175 -0
- package/src/-a-archives/archivesPrivateFileSystem.ts +299 -0
- package/src/-a-auth/certs.ts +58 -31
- package/src/-b-authorities/cdnAuthority.ts +2 -2
- package/src/-b-authorities/dnsAuthority.ts +3 -2
- package/src/-c-identity/IdentityController.ts +9 -2
- package/src/-d-trust/NetworkTrust2.ts +38 -31
- package/src/-e-certs/EdgeCertController.ts +3 -4
- package/src/-e-certs/certAuthority.ts +1 -2
- package/src/-f-node-discovery/NodeDiscovery.ts +20 -13
- package/src/-g-core-values/NodeCapabilities.ts +6 -1
- package/src/0-path-value-core/NodePathAuthorities.ts +1 -1
- package/src/0-path-value-core/PathValueCommitter.ts +3 -3
- package/src/0-path-value-core/PathValueController.ts +3 -3
- package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts +15 -37
- package/src/0-path-value-core/pathValueCore.ts +4 -3
- package/src/3-path-functions/PathFunctionRunner.ts +2 -2
- package/src/4-dom/qreact.tsx +4 -3
- package/src/4-querysub/Querysub.ts +2 -2
- package/src/4-querysub/QuerysubController.ts +2 -2
- package/src/5-diagnostics/GenericFormat.tsx +1 -0
- package/src/5-diagnostics/Table.tsx +3 -0
- package/src/5-diagnostics/diskValueAudit.ts +2 -1
- package/src/5-diagnostics/nodeMetadata.ts +0 -1
- package/src/deployManager/components/MachineDetailPage.tsx +9 -1
- package/src/deployManager/components/ServiceDetailPage.tsx +10 -1
- package/src/diagnostics/NodeViewer.tsx +3 -4
- package/src/diagnostics/logs/FastArchiveAppendable.ts +748 -0
- package/src/diagnostics/logs/FastArchiveController.ts +524 -0
- package/src/diagnostics/logs/FastArchiveViewer.tsx +863 -0
- package/src/diagnostics/logs/LogViewer2.tsx +349 -0
- package/src/diagnostics/logs/TimeRangeSelector.tsx +94 -0
- package/src/diagnostics/logs/diskLogger.ts +135 -305
- package/src/diagnostics/logs/diskShimConsoleLogs.ts +6 -29
- package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts +577 -0
- package/src/diagnostics/logs/errorNotifications/ErrorSuppressionUI.tsx +225 -0
- package/src/diagnostics/logs/errorNotifications/ErrorWarning.tsx +207 -0
- package/src/diagnostics/logs/importLogsEntry.ts +38 -0
- package/src/diagnostics/logs/injectFileLocationToConsole.ts +7 -17
- package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycles.tsx +0 -0
- package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +151 -0
- package/src/diagnostics/managementPages.tsx +7 -16
- package/src/diagnostics/misc-pages/ComponentSyncStats.tsx +0 -1
- package/src/diagnostics/periodic.ts +5 -0
- package/src/diagnostics/watchdog.ts +2 -2
- package/src/functional/SocketChannel.ts +67 -0
- package/src/library-components/Input.tsx +1 -1
- package/src/library-components/InputLabel.tsx +5 -2
- package/src/misc.ts +111 -0
- package/src/src.d.ts +34 -1
- package/src/user-implementation/userData.ts +4 -3
- package/test.ts +13 -0
- package/testEntry2.ts +29 -0
- package/src/diagnostics/errorLogs/ErrorLogController.ts +0 -535
- package/src/diagnostics/errorLogs/ErrorLogCore.ts +0 -274
- package/src/diagnostics/errorLogs/LogClassifiers.tsx +0 -308
- package/src/diagnostics/errorLogs/LogFilterUI.tsx +0 -84
- package/src/diagnostics/errorLogs/LogNotify.tsx +0 -101
- package/src/diagnostics/errorLogs/LogTimeSelector.tsx +0 -723
- package/src/diagnostics/errorLogs/LogViewer.tsx +0 -757
- package/src/diagnostics/errorLogs/logFiltering.tsx +0 -149
- package/src/diagnostics/logs/DiskLoggerPage.tsx +0 -613
package/src/-d-trust/NetworkTrust2.ts
CHANGED

@@ -1,5 +1,5 @@
 import { measureWrap } from "socket-function/src/profiling/measure";
-import {
+import { getIdentityCA, getMachineId, getOwnMachineId } from "../-a-auth/certs";
 import { getArchives } from "../-a-archives/archives";
 import { isNode, throttleFunction, timeInSecond } from "socket-function/src/misc";
 import { SocketFunctionHook } from "socket-function/SocketFunctionTypes";
@@ -14,12 +14,13 @@ import { devDebugbreak, getDomain, isDevDebugbreak, isPublic, isRecovery } from
 import { formatTime } from "socket-function/src/formatting/format";
 import { runInSerial } from "socket-function/src/batching";
 import { Querysub } from "../4-querysub/QuerysubController";
+import { magenta } from "socket-function/src/formatting/logColors";

 // Cache the untrust list, to prevent bugs from causing too many backend reads (while also allowing
 // bad servers which make request before their trust is verified from staying broken).
 const UNTRUST_CACHE_TIME = 30 * timeInSecond;

-const archives = lazy(() => getArchives("
+const archives = lazy(() => getArchives("trust2/"));

 export const requiresNetworkTrustHook: SocketFunctionHook = async config => {
     // HACK: On the clientside we strip the domain process and machine id, so we can no longer determine
@@ -39,10 +40,11 @@ export const requiresNetworkTrustHook: SocketFunctionHook = async config => {
     if (getNodeIdIP(caller.nodeId) === "127.0.0.1" && isRecovery()) {
         return;
     }
-    let machineId = IdentityController_getMachineId(
+    let machineId = IdentityController_getMachineId(caller);
     let trusted = await isTrusted(machineId);
     if (!trusted) {
         devDebugbreak();
+        let machineId = IdentityController_getMachineId(caller);
         throw new Error(`Calling machine is not trusted. Caller ${machineId} is not trusted by ${SocketFunction.mountedNodeId} to make call ${config.call.classGuid}.${config.call.functionName}. To gain trust add backblaze permissions (see hasBackblazePermissions) or set --nonetwork.`);
     }
 };
@@ -51,7 +53,7 @@ export const assertIsNetworkTrusted = requiresNetworkTrustHook;
 let lastArchivesTrusted: string[] | undefined;
 let trustedCache = new Set<string>();
 let untrustedCache = new Map<string, number>();
-export const isTrusted =
+export const isTrusted = measureWrap(async function isTrusted(machineId: string) {
     // See the comment in requiresNetworkTrustHook for why clients have to trust all callers.
     if (isClient()) return true;

@@ -68,25 +70,31 @@ export const isTrusted = runInSerial(async function isTrusted(machineId: string)
         return false;
     }

-
-
-
-
-
-
-
-
-
-
-
-
-
+    return await isTrustedBase(machineId);
+});
+let trustedCachePopulated = false;
+const isTrustedBase = runInSerial(measureWrap(async function isTrustedBase(machineId: string) {
+    if (!trustedCachePopulated) {
+        trustedCachePopulated = true;
+        let trustedMachineIds = await archives().find("");
+        lastArchivesTrusted = trustedMachineIds.slice();
+        for (let trustedMachineId of trustedMachineIds) {
+            trustedCache.add(trustedMachineId);
+            // NOTE: We don't load trust certs here, as we need to load them on demand in case the trust changes after our initial startup.
+        }
+    } else {
+        // Checking a single entry is a lot faster (as find is slow)
+        let trusted = await archives().get(machineId);
+        if (trusted) {
+            trustedCache.add(machineId);
+        }
     }
+    // Always trust ourself
+    trustedCache.add(getOwnMachineId());

-    // NOTE:
-
-
-        trustedCache.add(trustedMachineId);
+    // NOTE: The only happens in the case WE connected to it (ex, "127-0-0-1.querysubtest.com:15358"). It can't look like this if it connected to us, in which case the nodeId will be "client:...", being mostly random, and created by us (UNTIL they prove they have another id). So... I'm not even sure the isPublic check is required? We only connect to nodes we discover through node discovery, which requires backblaze write permissions. But I guess it's fine to be extra careful about it...
+    if (!isPublic()) {
+        trustedCache.add("127-0-0-1");
     }

     if (!trustedCache.has(machineId)) {
@@ -95,7 +103,7 @@ export const isTrusted = runInSerial(async function isTrusted(machineId: string)
     } else {
         return true;
     }
-});
+}));

 export async function isNodeTrusted(nodeId: string) {
     let domainName = getNodeIdDomainMaybeUndefined(nodeId);
@@ -112,15 +120,14 @@ const loadServerCert = cache(async (machineId: string) => {
         console.warn(`Could not find certificate in archives for ${machineId}`);
         return;
     }
+    console.log(magenta(`Loading certificate for ${machineId}`));
     trustCertificate(certFile);
 });

-const ensureWeAreTrusted = lazy(measureWrap(async () => {
+export const ensureWeAreTrusted = lazy(measureWrap(async () => {
     let machineKeyCert = getIdentityCA();
-    let machineId =
-
-    await isTrusted(machineId);
-    if (!lastArchivesTrusted?.includes(machineId)) {
+    let machineId = getOwnMachineId();
+    if (!await archives().get(machineId)) {
         await archives().set(machineId, machineKeyCert.cert);
     }
 }));
@@ -159,6 +166,7 @@ const TrustedController = SocketFunction.register(
 if (isNode()) {
     // We have to be trusted if we make calls to a trusted endpoint, OR our mounting
     // (really only if we are mounting a trusted endpoint, but we don't actually know that)
+    // ONLY done on received calls, not on calls we made. If we make a call we assume that the server we called is known to us through a trusted route, and therefore, trusted.
     requiresNetworkTrustHook.clientHook = async config => {
         await ensureWeAreTrusted();
     };
@@ -166,10 +174,9 @@ if (isNode()) {
     // Load the remote certificate, in the almost certain case it isn't a real certificate, and is just internal
     SocketFunction.addGlobalClientHook(async config => {
         await measureWrap(async function checkTrust() {
-
-
-
-        ]);
+            // IMPORTANT! We SHOULDN'T need to add our machineId before we connect, as we only check machineId on received calls, so we only need to set it before we make a call (so by the time anyone receives a call, they trust us!)
+            await ensureWeAreTrusted();
+            await loadTrustCerts(config.call.nodeId);
         })();
     });
 }
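The trust check above was split into two layers: a cheap `isTrusted` wrapper, and a serialized `isTrustedBase` that fills `trustedCache` from one full `archives().find("")` listing on the first call, then falls back to single-key `archives().get(machineId)` probes ("Checking a single entry is a lot faster (as find is slow)"). A minimal sketch of that populate-once-then-probe pattern, assuming an archive store with this `find`/`get` shape (the names below are illustrative, not the package's API):

```ts
// Sketch only: an archive that can list all keys (slow) or probe one key (fast).
type Archive = {
    find(prefix: string): Promise<string[]>;
    get(key: string): Promise<unknown>;
};

const trustedCache = new Set<string>();
let populated = false;

async function isTrustedSketch(archive: Archive, machineId: string): Promise<boolean> {
    if (!populated) {
        populated = true;
        // First call pays for one full listing, warming the cache for everyone.
        for (const id of await archive.find("")) trustedCache.add(id);
    } else if (!trustedCache.has(machineId)) {
        // Later calls only probe the single key they care about.
        if (await archive.get(machineId)) trustedCache.add(machineId);
    }
    return trustedCache.has(machineId);
}
```

Wrapping the base lookup in `runInSerial` (as the diff does) keeps concurrent callers from racing the initial full listing.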
package/src/-e-certs/EdgeCertController.ts
CHANGED

@@ -37,7 +37,7 @@ export const getHostedIP = lazy(async () => {
     }
     return await getExternalIP();
 });
-const getIPDomain = lazy(async () => {
+export const getIPDomain = lazy(async () => {
     let ip = await getHostedIP();
     return ip.replaceAll(".", "-") + "." + getDomain();
 });
@@ -56,7 +56,7 @@ export async function getSNICerts(config: {
     [getDomain()]: async (callback) => {
         await EdgeCertController_watchHTTPSKeyCert(callback);
     },
-    [getOwnMachineId()]: async (callback) => {
+    [getOwnMachineId() + "." + getDomain()]: async (callback) => {
         let threadCert = await getThreadKeyCert();
         callback({
             key: threadCert.key,
@@ -273,7 +273,7 @@ async function getHTTPSKeyCertInner(callerIP: string) {
         console.warn(`Tried to serve an edge node on the local domain, but SocketFunction.mount did NOT specify a public ip (ex, { ip: "0.0.0.0" }) AND there are already existing public servers. You can't load balance between real ips and 127.0.0.1! ${existingIPs.join(", ")}. You will need to use 127-0-0-1.${edgeDomain} to access the local server (instead of just ${edgeDomain}).`);
     }
     */
-    console.log(yellow(`Current process is not marked as public, but machine previous had public services. NOT setting ${edgeDomain} to 127.0.0.1, as this would make the public services inaccessible. Assuming the current process is for development, I recommend using
+    console.log(yellow(`Current process is not marked as public, but machine previous had public services. NOT setting ${edgeDomain} to 127.0.0.1, as this would make the public services inaccessible. Assuming the current process is for development, I recommend using 127-0-0-1.${edgeDomain} or 127-0-0-1.${edgeDomain} to access the server.`));
 }
 } else {
     if (existingIPs.includes("127.0.0.1")) {
@@ -289,7 +289,6 @@ async function getHTTPSKeyCertInner(callerIP: string) {
         }
     });
 }
-promises.push(addRecord("A", "noproxy." + edgeDomain, "127.0.0.1"));
 promises.push(addRecord("A", "127-0-0-1." + edgeDomain, "127.0.0.1"));
 // Add records in parallel, so we can wait for DNS propagation in parallel
 await Promise.all(promises);
package/src/-e-certs/certAuthority.ts
CHANGED

@@ -10,7 +10,7 @@ import { formatDateTime, formatTime } from "socket-function/src/formatting/forma
 import { delay } from "socket-function/src/batching";
 import { timeInMinute } from "socket-function/src/misc";

-const archives = lazy(() => getArchives(`
+const archives = lazy(() => getArchives(`https_certs_3/`));
 // Expire EXPIRATION_THRESHOLD% of the way through the certificate's lifetime
 const EXPIRATION_THRESHOLD = 0.4;

@@ -50,7 +50,6 @@ export const getHTTPSKeyCert = cache(async (domain: string): Promise<{ key: stri
 const accountKey = getAccountKey();
 let altDomains: string[] = [];

-// altDomains.push("noproxy." + domain);
 // // NOTE: Allowing local access is just an optimization, not to avoid having to forward ports
 // // (unless you type 127-0-0-1.domain into the browser... then I guess you don't have to forward ports?)
 // altDomains.push("127-0-0-1." + domain);
package/src/-f-node-discovery/NodeDiscovery.ts
CHANGED

@@ -4,7 +4,7 @@ import { getDomain, isDevDebugbreak, isNoNetwork, isPublic } from "../config";
 import { measureBlock } from "socket-function/src/profiling/measure";
 import { isNode, sha256Hash, throttleFunction, timeInMinute, timeInSecond } from "socket-function/src/misc";
 import { errorToUndefinedSilent, ignoreErrors, logErrors, timeoutToUndefinedSilent } from "../errors";
-import { requiresNetworkTrustHook } from "../-d-trust/NetworkTrust2";
+import { ensureWeAreTrusted, requiresNetworkTrustHook } from "../-d-trust/NetworkTrust2";
 import { delay, runInfinitePoll, runInfinitePollCallAtStart } from "socket-function/src/batching";
 import { getNodeId, getNodeIdFromLocation } from "socket-function/src/nodeCache";
 import { lazy } from "socket-function/src/caching";
@@ -17,9 +17,10 @@ import { waitForFirstTimeSync } from "socket-function/time/trueTimeShim";
 import { decodeNodeId, decodeNodeIdAssert } from "../-a-auth/certs";

 import { isDefined } from "../misc";
-import { diskLog } from "../diagnostics/logs/diskLogger";
 import { getBootedEdgeNode } from "../-0-hooks/hooks";
 import { EdgeNodeConfig } from "../4-deploy/edgeNodes";
+import * as certs from "../-a-auth/certs";
+import { logDisk } from "../diagnostics/logs/diskLogger";

 let HEARTBEAT_INTERVAL = timeInMinute * 15;
 // Interval which we check other heartbeats
@@ -44,6 +45,8 @@ let DISK_AUDIT_RATE = timeInMinute * 15;
 // probably is less than that). Which is around 2.5 cents on digital ocean IF we go over
 // our 1TB/month allowance.
 let API_AUDIT_RATE = timeInSecond * 30;
+// BUT, for now, poll less often... because I think it is lagging our 2 core potato digital ocean server.
+API_AUDIT_RATE = timeInMinute * 5;
 let API_AUDIT_COUNT = 12;


@@ -84,10 +87,10 @@ export function getOwnNodeIdAssert(): string {
 }

 export const getOwnThreadId = lazy(() => {
-    return
+    return certs.getOwnThreadId();
 });
 export const getOwnMachineId = lazy(() => {
-    return
+    return certs.getOwnMachineId();
 });

 export function isOwnNodeId(nodeId: string): boolean {
@@ -178,7 +181,7 @@ function addNodeIdBase(nodeId: string) {
 function setNodeIds(nodeIds: string[]) {
     nodeIds = nodeIds.filter(x => x !== SPECIAL_NODE_ID_FOR_UNMOUNTED_NODE);

-
+    logDisk("log", "setNodeIds", { nodeIds });
     // Also try all localhost ports, if we are developing and not in public mode
     if (isNode() && !isPublic() && isDevDebugbreak()) {
         let ports = new Set(nodeIds.map(nodeId => decodeNodeId(nodeId)?.port).filter(isDefined));
@@ -343,7 +346,7 @@ async function runHeartbeatAuditLoop() {
         }
     } else {
         deadCount.delete(nodeId);
-
+        logDisk("log", "Read node heartbeat", { nodeId, lastTime });
     }
 }
 if (pendingDeadCount) {
@@ -353,7 +356,7 @@ async function runHeartbeatAuditLoop() {
 if (removedNodeIds.length > 0) {
     console.log(blue(`Removed ${removedNodeIds.length}/${nodeIds.length} nodes from node list`), { removedNodeIds });
     await syncArchives();
-    await tellEveryoneNodesChanges();
+    await tellEveryoneNodesChanges(`removedNodeIds ${removedNodeIds.join("|")}`);
 }
 });
 }
@@ -471,7 +474,8 @@ export async function onNodeDiscoveryReady() {
 }

 if (isServer()) {
-    setImmediate(() => {
+    setImmediate(async () => {
+
         logErrors(runHeartbeatAuditLoop());
         logErrors(runMemoryAuditLoop());
         // NOTE: We used to wait until we mounted, but... we should be able to find nodes
@@ -517,7 +521,7 @@ if (isServer()) {

 export async function forceRemoveNode(nodeId: string) {
     await archives().del(nodeId);
-    void tellEveryoneNodesChanges();
+    void tellEveryoneNodesChanges(`forceRemoveNode ${nodeId}`);
 }


@@ -528,13 +532,14 @@ export async function nodeDiscoveryShutdown() {
     if (isServer()) {
         await archives().del(getOwnNodeId());
     }
-    void tellEveryoneNodesChanges();
+    void tellEveryoneNodesChanges("nodeDiscoveryShutdown");
 }
-const tellEveryoneNodesChanges = throttleFunction(1000, function tellEveryoneNodesChanges() {
+const tellEveryoneNodesChanges = throttleFunction(1000, function tellEveryoneNodesChanges(reason: string) {
     if (isClient()) return;
+    console.log(red(`Telling everyone nodes changed`));
     for (let nodeId of allNodeIds2) {
         if (isOwnNodeId(nodeId)) continue;
-        ignoreErrors(NodeDiscoveryController.nodes[nodeId].resyncNodes());
+        ignoreErrors(NodeDiscoveryController.nodes[nodeId].resyncNodes(reason));
     }
 });

@@ -544,7 +549,9 @@ class NodeDiscoveryControllerBase {
         console.log(magenta(`Received addNode`), { nodeId });
         addNodeId(nodeId);
     }
-    public async resyncNodes() {
+    public async resyncNodes(reason: string) {
+        let caller = SocketFunction.getCaller();
+        console.log(magenta(`Received resyncNodes from ${caller.nodeId}, reason = ${reason}`));
         await syncArchives();
     }
     public async getAllNodesHash(): Promise<string> {
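`tellEveryoneNodesChanges` now takes a `reason` string that is forwarded through the 1000 ms throttle into each remote `resyncNodes(reason)` call, so the receiving node can log why it was asked to resync. A sketch of the pattern with a hypothetical stand-in throttle (not socket-function's actual `throttleFunction`); note that when calls coalesce inside the window, only the newest reason survives:

```ts
// Trailing-edge throttle that remembers the latest arguments (illustrative stand-in).
function throttle<A extends unknown[]>(ms: number, fn: (...args: A) => void) {
    let lastRun = 0;
    let timer: ReturnType<typeof setTimeout> | undefined;
    let pending: A | undefined;
    return (...args: A) => {
        pending = args;
        if (timer) return; // already scheduled; newest args win
        const wait = Math.max(0, lastRun + ms - Date.now());
        timer = setTimeout(() => {
            timer = undefined;
            lastRun = Date.now();
            fn(...pending!);
        }, wait);
    };
}

const tellEveryone = throttle(1000, (reason: string) => {
    console.log(`Telling everyone nodes changed (${reason})`);
});
tellEveryone("forceRemoveNode node-a");
tellEveryone("nodeDiscoveryShutdown"); // coalesced: only this reason is broadcast
```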
package/src/-g-core-values/NodeCapabilities.ts
CHANGED

@@ -78,7 +78,12 @@ export async function getControllerNodeIdList(
         passedNodeIds.set(nodeId, entryPoint);
     }
 }));
-
+
+    let results = Array.from(passedNodeIds.entries()).map(([nodeId, entryPoint]) => ({ nodeId, entryPoint }));
+    // Ignore the special local IDs, otherwise we'll be returning duplicates. And our caller probably doesn't want the fastest path, they probably just want every path.
+    // TODO: We should detect if the local ID and the remote ID is the same and then always pick the local ID instead. However, I have no idea how to do this as it doesn't include the machine ID, just the port. So how do we know if they're the same? I think we actually have to talk to them and identify their machine IDs, which we need to cache, which then takes longer, et cetera, et cetera.
+    results = results.filter(x => !x.nodeId.startsWith("127-0-0-1."));
+    return results;
 }


package/src/0-path-value-core/NodePathAuthorities.ts
CHANGED

@@ -21,7 +21,6 @@ import { cache, cacheLimited } from "socket-function/src/caching";
 import { IdentityController_getCurrentReconnectNodeIdAssert, IdentityController_getReconnectNodeIdAssert } from "../-c-identity/IdentityController";
 import { getBufferFraction, getBufferInt, getShortNumber } from "../bits";
 import { devDebugbreak, getDomain, isDevDebugbreak } from "../config";
-import { diskLog } from "../diagnostics/logs/diskLogger";
 import { waitForFirstTimeSync } from "socket-function/time/trueTimeShim";

 export const LOCAL_DOMAIN = "LOCAL";
@@ -262,6 +261,7 @@ class NodePathAuthorities {
         this.previouslyNotAvailableNodes.add(nodeId);
         return;
     }
+
     this.previouslyNotAvailableNodes.delete(nodeId);

     time = Date.now() - time;
package/src/0-path-value-core/PathValueCommitter.ts
CHANGED

@@ -21,7 +21,7 @@ import { formatNumber, formatTime } from "socket-function/src/formatting/format"
 import { isClient } from "../config2";
 import { remoteWatcher } from "../1-path-client/RemoteWatcher";
 import { auditLog, isDebugLogEnabled } from "./auditLogs";
-import {
+import { logDisk } from "../diagnostics/logs/diskLogger";

 /*
 - commitWrites <= creating writes
@@ -177,7 +177,7 @@ class PathValueCommitter {
     markArrayAsSplitable(values);
     const { Querysub } = await import("../4-querysub/Querysub");
     let serializedValues = await pathValueSerializer.serialize(values, { compress: Querysub.COMPRESS_NETWORK });
-
+    logDisk("log", "Send PathValues to server", { valueCount: values.length, targetId: otherAuthority, });
     let forwardPromise = PathValueController.nodes[otherAuthority].forwardWrites(
         serializedValues,
         undefined,
@@ -662,4 +662,4 @@ function trackLag(now: number, lag: number) {
 }


-export const pathValueCommitter = new PathValueCommitter();
+export const pathValueCommitter = new PathValueCommitter();
package/src/0-path-value-core/PathValueController.ts
CHANGED

@@ -16,7 +16,7 @@ import debugbreak from "debugbreak";
 import { ClientWatcher } from "../1-path-client/pathValueClientWatcher";
 import { auditLog, isDebugLogEnabled } from "./auditLogs";
 import { debugNodeId } from "../-c-identity/IdentityController";
-import {
+import { logDisk } from "../diagnostics/logs/diskLogger";
 export { pathValueCommitter };

 class PathValueControllerBase {
@@ -86,9 +86,9 @@ class PathValueControllerBase {
         auditLog("RECEIVE VALUE", { path: value.path, time: value.time.time, sourceNodeId });
     }
 }
-
+logDisk("log", "Received PathValues via forwardWrites", { valueCount: values.length, callerId, });
 for (let value of values) {
-
+    logDisk("log", "Received PathValue for path", { path: value.path, time: value.time.time, callerId });
 }

 if (isCoreQuiet) {
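Several of the files in this release (PathValueCommitter, PathValueController, ArchiveLocks2, pathValueCore, PathFunctionRunner, qreact) swap the removed `diskLog` import for `logDisk`, always called as `logDisk(level, message, data?)`. The rewritten diskLogger.ts (+135 -305) is not shown on this page; the following is only a hypothetical, minimal stand-in with the same call shape, appending one JSON line per entry:

```ts
import * as fs from "fs";

type LogLevel = "log" | "warn" | "error";

// Hypothetical stand-in for diskLogger.ts's logDisk; the real implementation
// presumably batches, rotates, and feeds the new FastArchive log viewers.
export function logDisk(level: LogLevel, message: string, data?: object): void {
    const entry = JSON.stringify({ time: Date.now(), level, message, ...data });
    fs.appendFile("disk.log", entry + "\n", err => {
        if (err) console.error("logDisk failed", err);
    });
}
```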
package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts
CHANGED

@@ -12,8 +12,9 @@ import { devDebugbreak } from "../../config";
 import { logErrors } from "../../errors";
 import { saveSnapshot } from "./archiveSnapshots";
 import { getNodeId } from "socket-function/src/nodeCache";
-import { diskLog } from "../../diagnostics/logs/diskLogger";
 import { logNodeStateStats, logNodeStats } from "../../-0-hooks/hooks";
+import { parsePath, toFileNameKVP, parseFileNameKVP } from "../../misc";
+import { logDisk } from "../../diagnostics/logs/diskLogger";

 /** Clean up old files after a while */
 const DEAD_CREATE_THRESHOLD = timeInHour * 12;
@@ -301,14 +302,14 @@ class TransactionLocker {
     private isTransactionValidOverride?: (transaction: Transaction, dataFiles: FileInfo[], rawDataFiles: FileInfo[]) => boolean
 ) { }

-// #region Base File
+// #region Base File Ops
 public getConfirmKey(key: string): string {
     let { dir, name } = parsePath(key);
     return `${dir}confirm_${name}.confirm`;
 }
 public async createConfirm(key: string) {
     let path = this.getConfirmKey(key);
-
+    logDisk("log", "Creating confirmation for ${key}");
     await this.storage.setValue(path, Buffer.from(""));
     return path;
 }
@@ -361,7 +362,7 @@ class TransactionLocker {
     delete: ellipsize(deletes.map(a => debugFileInfo(a.key)).join(","), 50),
 });

-
+logDisk("log", "Writing transaction", {
     name,
     ops: transaction.ops.length,
 });
@@ -483,7 +484,7 @@ class TransactionLocker {
     }
 }

-
+logDisk("log", "Read archive state", {
     rawFilesCount: files.length,
     confirmedCount: currentDataFiles.size,
     rawFiles: files.map(a => a.file),
@@ -502,7 +503,7 @@ class TransactionLocker {
 let result = await tryToRead();
 if (result) {
     let timeToRead = Date.now() - startTime;
-
+    logDisk("log", `Read data state in ${formatTime(timeToRead)}`);
     return result;
 }
 }
@@ -541,7 +542,7 @@ class TransactionLocker {
 let rawLookup = new Set(Array.from(rawDataFiles).map(a => a.file));
 // If any creates are not confirmed, it must not have been applied
 if (transaction.ops.some(a => a.type === "create" && rawLookup.has(a.key) && !confirmedKeys.has(a.key))) {
-
+    logDisk("log", `Transaction not applied (has pending confirmations of creates)`, {
         keys: transaction.ops
             .filter(a => a.type === "create" && rawLookup.has(a.key) && !confirmedKeys.has(a.key))
             .map(a => a.key)
@@ -550,7 +551,7 @@ class TransactionLocker {
 }
 // If any deletes still exist, it must not have been applied
 if (transaction.ops.some(a => a.type === "delete" && confirmedKeys.has(a.key))) {
-
+    logDisk("log", `Transaction not applied (has pending deletes)`, {
         keys: transaction.ops
             .filter(a => a.type === "delete" && confirmedKeys.has(a.key))
             .map(a => a.key)
@@ -563,7 +564,7 @@ class TransactionLocker {
 let createCount = transaction.ops.filter(a => a.type === "create").length;
 let deleteCount = transaction.ops.filter(a => a.type === "delete").length;
 let lockedFiles = transaction.lockedFilesMustEqual?.length;
-
+logDisk("log", `Applying transaction with ${createCount} file creates and ${deleteCount} file deletes. ${lockedFiles !== undefined && `Lock state depends on ${lockedFiles} files` || ""}`, {
     transactions: transaction.ops.map(x => JSON.stringify(x)),
 });
 logNodeStats(`archives|TΔ Apply`, formatNumber, 1);
@@ -589,7 +590,7 @@ class TransactionLocker {
 };
 await Promise.all(list(CONCURRENT_WRITE_COUNT).map(runThread));

-
+logDisk("log", `Applied transaction with ${createCount} file creates and file ${deleteCount} deletes. ${lockedFiles !== undefined && `Lock state depends on ${lockedFiles} files` || ""}`, {
     transactions: transaction.ops.map(x => JSON.stringify(x)),
 });
 }
@@ -655,7 +656,7 @@ class TransactionLocker {
 let threshold = activeT.createTime + this.storage.propagationTime;
 if (Date.now() < threshold) {
     let waitTime = threshold - Date.now();
-
+    logDisk("log", `Waiting ${formatTime(waitTime)} for transaction ${activeT.seqNum} to settle.`);
     await new Promise(resolve => setTimeout(resolve, waitTime));
     return this.getFilesBase();
 }
@@ -759,7 +760,7 @@ class TransactionLocker {
 let dels = transaction.ops.filter(a => a.type === "delete").length;
 let creates = transaction.ops.filter(a => a.type === "create").length;
 let createBytes = transaction.ops.map(a => a.type === "create" && a.value?.length || 0).reduce((a, b) => a + b, 0);
-
+logDisk("log", `Starting transaction with ${creates} file creates and ${dels} file deletes, ${formatNumber(createBytes)}B`, {
     createFilesNames: transaction.ops.filter(a => a.type === "create").map(a => a.key),
     deleteFilesNames: transaction.ops.filter(a => a.type === "delete").map(a => a.key),
 });
@@ -788,7 +789,7 @@ class TransactionLocker {
 let beforeData = await this.getFilesBase();
 if (!this.isTransactionValid(transaction, beforeData.dataFiles, beforeData.rawDataFiles)) {
     logNodeStats(`archives|TΔ Rejected`, formatNumber, 1);
-
+    logDisk("log", `Finished transaction with rejection, ${transaction.ops.length} ops`);
     return "rejected";
 }

@@ -797,33 +798,10 @@ class TransactionLocker {
 let afterData = await this.getFilesBase();
 if (this.wasTransactionApplied(transaction, afterData.dataFiles, afterData.rawDataFiles)) {
     logNodeStats(`archives|TΔ Accepted`, formatNumber, 1);
-
+    logDisk("log", `Finished transaction with ${transaction.ops.length} ops`);
     return "accepted";
 }
 }
 }
 }

-function parsePath(path: string): { dir: string; name: string } {
-    path = path.replaceAll("\\", "/");
-    let lastSlash = path.lastIndexOf("/");
-    if (lastSlash === -1) return { dir: "", name: path };
-    return { dir: path.slice(0, lastSlash + 1), name: path.slice(lastSlash + 1) };
-}
-
-// Loses spaces in keys and values
-function toFileNameKVP(kvp: { [key: string]: string }): string {
-    function s(v: string) {
-        return v.replaceAll(" ", "_");
-    }
-    return " " + Object.entries(kvp).map(([key, value]) => `${s(key)}=${s(value)}`).join(" ") + " ";
-}
-function parseFileNameKVP(fileName: string): { [key: string]: string } {
-    let parts = fileName.trim().split(" ");
-    let obj: { [key: string]: string } = {};
-    for (let part of parts) {
-        let [key, value] = part.split("=");
-        obj[key] = value || key;
-    }
-    return obj;
-}
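The `parsePath`, `toFileNameKVP`, and `parseFileNameKVP` helpers deleted above now come from `../../misc` instead (misc.ts gained 111 lines in this release). Copied from the removed lines, the two KVP helpers round-trip metadata through a file name, at the cost of spaces in keys and values:

```ts
// Loses spaces in keys and values (behavior copied from the removed helpers).
function toFileNameKVP(kvp: { [key: string]: string }): string {
    const s = (v: string) => v.replaceAll(" ", "_");
    return " " + Object.entries(kvp).map(([key, value]) => `${s(key)}=${s(value)}`).join(" ") + " ";
}
function parseFileNameKVP(fileName: string): { [key: string]: string } {
    const obj: { [key: string]: string } = {};
    for (const part of fileName.trim().split(" ")) {
        const [key, value] = part.split("=");
        obj[key] = value || key;
    }
    return obj;
}

// Round trip: spaces become underscores and stay that way.
const name = toFileNameKVP({ seq: "12", state: "pending confirm" });
// name === " seq=12 state=pending_confirm "
console.log(parseFileNameKVP(name)); // { seq: "12", state: "pending_confirm" }
```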
package/src/0-path-value-core/pathValueCore.ts
CHANGED

@@ -29,7 +29,8 @@ import { sha256 } from "js-sha256";
 import { PromiseObj } from "../promise";
 import { ClientWatcher } from "../1-path-client/pathValueClientWatcher";
 import { auditLog, isDebugLogEnabled } from "./auditLogs";
-import {
+import { logDisk } from "../diagnostics/logs/diskLogger";
+

 let yargObj = isNodeTrue() && yargs(process.argv)
     .option("noarchive", { type: "boolean", alias: ["noarchives"], desc: "Don't save writes to disk. Still reads from disk." })
@@ -1260,7 +1261,7 @@ class PathWatcher {
         auditLog("new local WATCH PARENT", { path });
     }
 }
-
+logDisk("log", `New PathValue watches`, {
     newPathsWatched: newPathsWatched.size,
     newParentsWatched: newParentsWatched.size,
 });
@@ -1393,7 +1394,7 @@ class PathWatcher {
 }

 if (fullyUnwatched.paths.length > 0 || fullyUnwatched.parentPaths.length > 0) {
-
+    logDisk("log", `Unwatched PathValue watches`, {
         unwatchedPaths: fullyUnwatched.paths.length,
         unwatchedParents: fullyUnwatched.parentPaths.length,
     });
package/src/3-path-functions/PathFunctionRunner.ts
CHANGED

@@ -18,13 +18,13 @@ import { parseArgs } from "./PathFunctionHelpers";
 import { PERMISSIONS_FUNCTION_ID, getAllDevelopmentModulesIds, getDevelopmentModule, getExportPath, getModuleRelativePath, getSchemaObject } from "./syncSchema";
 import { formatTime } from "socket-function/src/formatting/format";
 import { getControllerNodeIdList, set_debug_getFunctionRunnerShards } from "../-g-core-values/NodeCapabilities";
-import { diskLog } from "../diagnostics/logs/diskLogger";
 import { FilterSelector, Filterable, doesMatch } from "../misc/filterable";
 import { SocketFunction } from "socket-function/SocketFunction";
 import { requiresNetworkTrustHook } from "../-d-trust/NetworkTrust2";
 import { getDomain, isLocal } from "../config";
 import { getGitRefSync, getGitURLSync } from "../4-deploy/git";
 import { DeployProgress } from "../4-deploy/deployFunctions";
+import { logDisk } from "../diagnostics/logs/diskLogger";

 export const functionSchema = rawSchema<{
     [domainName: string]: {
@@ -679,7 +679,7 @@ export class PathFunctionRunner {
     let syncTime = wallTime - evalTime;


-
+    logDisk("log", "Finished FunctionRunner function", {
         ...callPath, argsEncoded: "", functionSpec,
         wallTime, syncTime, evalTime,
         loops: runCount,
package/src/4-dom/qreact.tsx
CHANGED

@@ -2067,7 +2067,7 @@ function updateDOMNodeFields(domNode: DOMNode, vNode: VirtualDOM, prevVNode: Vir
 if (name === "blur") {
     let target = args[0].currentTarget as HTMLElement;
     if (!target.getAttribute("data-blur-on-unmount") && !target.isConnected) {
-
+        logDisk("log", "Ignoring blur for disconnected element. You can use data-blur-on-unmount to re-enable blurs on this element.", target);
         return;
     }
 }
@@ -2376,7 +2376,7 @@ function blurFixOnMouseDownHack(event: MouseEvent) {

 // Looks like we are going to blur, so blur now
 if (selected instanceof HTMLElement && !selected.hasAttribute("data-no-early-blur")) {
-
+    logDisk("log", `Simulating early blur to prevent blur from firing after mousedown. This solves a problem where mousedown changes the UI, and then the blur fires on the wrong element. You can use data-no-early-blur to opt-out of this feature`, selected);
     selected.blur();
 }
 }
@@ -2745,4 +2745,5 @@ function triggerGlobalOnMountWatch(component: QRenderClass) {

 // NOTE: Import Querysub at the end, so we can export qreact before we require it. That way Querysub
 // can statically access qreact.
-import { Querysub } from "../4-querysub/Querysub";
+import { Querysub } from "../4-querysub/Querysub";
+import { logDisk } from "../diagnostics/logs/diskLogger";
package/src/4-querysub/Querysub.ts
CHANGED

@@ -849,11 +849,11 @@ export class Querysub {
 globalThis.remapImportRequestsClientside = globalThis.remapImportRequestsClientside || [];
 globalThis.remapImportRequestsClientside.push(async (args) => {
     try {
-        let key: typeof identityStorageKey = "
+        let key: typeof identityStorageKey = "machineCA_9";
         let storageValueJSON = localStorage.getItem(key);
         if (!storageValueJSON) return args;
         let storageValue = JSON.parse(storageValueJSON) as IdentityStorageType;
-        let machineId = storageValue.domain;
+        let machineId = storageValue.domain.split(".").at(-3) || "";

         let pem = Buffer.from(storageValue.keyB64, "base64");
         let privateKey = exports.extractRawED25519PrivateKey(pem);