querysub 0.312.0 → 0.314.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/.cursorrules +1 -1
  2. package/costsBenefits.txt +4 -1
  3. package/package.json +3 -2
  4. package/spec.txt +23 -18
  5. package/src/-0-hooks/hooks.ts +1 -1
  6. package/src/-a-archives/archives.ts +16 -3
  7. package/src/-a-archives/archivesBackBlaze.ts +51 -3
  8. package/src/-a-archives/archivesLimitedCache.ts +175 -0
  9. package/src/-a-archives/archivesPrivateFileSystem.ts +299 -0
  10. package/src/-a-auth/certs.ts +58 -31
  11. package/src/-b-authorities/cdnAuthority.ts +2 -2
  12. package/src/-b-authorities/dnsAuthority.ts +3 -2
  13. package/src/-c-identity/IdentityController.ts +3 -2
  14. package/src/-d-trust/NetworkTrust2.ts +17 -19
  15. package/src/-e-certs/EdgeCertController.ts +19 -81
  16. package/src/-e-certs/certAuthority.ts +7 -2
  17. package/src/-f-node-discovery/NodeDiscovery.ts +9 -7
  18. package/src/-g-core-values/NodeCapabilities.ts +6 -1
  19. package/src/0-path-value-core/NodePathAuthorities.ts +1 -1
  20. package/src/0-path-value-core/PathValueCommitter.ts +3 -3
  21. package/src/0-path-value-core/PathValueController.ts +3 -3
  22. package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts +15 -37
  23. package/src/0-path-value-core/pathValueCore.ts +4 -3
  24. package/src/3-path-functions/PathFunctionRunner.ts +2 -2
  25. package/src/4-dom/qreact.tsx +4 -3
  26. package/src/4-querysub/Querysub.ts +2 -2
  27. package/src/4-querysub/QuerysubController.ts +2 -2
  28. package/src/5-diagnostics/GenericFormat.tsx +1 -0
  29. package/src/5-diagnostics/Table.tsx +3 -0
  30. package/src/5-diagnostics/diskValueAudit.ts +2 -1
  31. package/src/5-diagnostics/nodeMetadata.ts +0 -1
  32. package/src/deployManager/components/MachineDetailPage.tsx +9 -1
  33. package/src/deployManager/components/ServiceDetailPage.tsx +10 -1
  34. package/src/deployManager/setupMachineMain.ts +8 -1
  35. package/src/diagnostics/NodeViewer.tsx +5 -6
  36. package/src/diagnostics/logs/FastArchiveAppendable.ts +757 -0
  37. package/src/diagnostics/logs/FastArchiveController.ts +524 -0
  38. package/src/diagnostics/logs/FastArchiveViewer.tsx +863 -0
  39. package/src/diagnostics/logs/LogViewer2.tsx +349 -0
  40. package/src/diagnostics/logs/TimeRangeSelector.tsx +94 -0
  41. package/src/diagnostics/logs/diskLogger.ts +135 -305
  42. package/src/diagnostics/logs/diskShimConsoleLogs.ts +6 -29
  43. package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts +577 -0
  44. package/src/diagnostics/logs/errorNotifications/ErrorSuppressionUI.tsx +225 -0
  45. package/src/diagnostics/logs/errorNotifications/ErrorWarning.tsx +207 -0
  46. package/src/diagnostics/logs/importLogsEntry.ts +38 -0
  47. package/src/diagnostics/logs/injectFileLocationToConsole.ts +7 -17
  48. package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycles.tsx +0 -0
  49. package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +153 -0
  50. package/src/diagnostics/managementPages.tsx +7 -16
  51. package/src/diagnostics/misc-pages/ComponentSyncStats.tsx +0 -1
  52. package/src/diagnostics/periodic.ts +5 -0
  53. package/src/diagnostics/watchdog.ts +2 -2
  54. package/src/functional/SocketChannel.ts +67 -0
  55. package/src/library-components/Input.tsx +1 -1
  56. package/src/library-components/InputLabel.tsx +5 -2
  57. package/src/misc.ts +111 -0
  58. package/src/src.d.ts +34 -1
  59. package/src/user-implementation/userData.ts +4 -3
  60. package/test.ts +13 -0
  61. package/testEntry2.ts +29 -0
  62. package/src/diagnostics/errorLogs/ErrorLogController.ts +0 -535
  63. package/src/diagnostics/errorLogs/ErrorLogCore.ts +0 -274
  64. package/src/diagnostics/errorLogs/LogClassifiers.tsx +0 -308
  65. package/src/diagnostics/errorLogs/LogFilterUI.tsx +0 -84
  66. package/src/diagnostics/errorLogs/LogNotify.tsx +0 -101
  67. package/src/diagnostics/errorLogs/LogTimeSelector.tsx +0 -723
  68. package/src/diagnostics/errorLogs/LogViewer.tsx +0 -757
  69. package/src/diagnostics/errorLogs/logFiltering.tsx +0 -149
  70. package/src/diagnostics/logs/DiskLoggerPage.tsx +0 -613
@@ -28,6 +28,7 @@ import { magenta, yellow } from "socket-function/src/formatting/logColors";
  import { timeInMinute, timeInSecond } from "socket-function/src/misc";
  import { nodeDiscoveryShutdown } from "../-f-node-discovery/NodeDiscovery";
  import { shutdown } from "../diagnostics/periodic";
+ import { formatDateTime } from "socket-function/src/formatting/format";

  let publicPort = -1;
 
@@ -37,7 +38,7 @@ export const getHostedIP = lazy(async () => {
  }
  return await getExternalIP();
  });
- const getIPDomain = lazy(async () => {
+ export const getIPDomain = lazy(async () => {
  let ip = await getHostedIP();
  return ip.replaceAll(".", "-") + "." + getDomain();
  });
@@ -56,7 +57,7 @@ export async function getSNICerts(config: {
  [getDomain()]: async (callback) => {
  await EdgeCertController_watchHTTPSKeyCert(callback);
  },
- [getOwnMachineId()]: async (callback) => {
+ [getOwnMachineId() + "." + getDomain()]: async (callback) => {
  let threadCert = await getThreadKeyCert();
  callback({
  key: threadCert.key,
@@ -65,6 +66,8 @@ export async function getSNICerts(config: {
  },
  };

+ await publishEdgeDomain();
+
  return certs;
  }
 
@@ -93,7 +96,7 @@ const certUpdateLoop = lazy(() => {
  logErrors((async () => {
  let firstLoop = true;
  while (true) {
- let curCert = await getCertFromRemote();
+ let curCert = await getHTTPSKeyCert(getDomain());
  if (!curCert) {
  throw new Error(`Internal error, certUpdateLoop called before lastPromise was set`);
  }
@@ -102,14 +105,9 @@ const certUpdateLoop = lazy(() => {
  let expirationTime = +new Date(certObj.validity.notAfter);
  let createTime = +new Date(certObj.validity.notBefore);

- // If 75% of the lifetime has passed, renew the cert
+ // If 75% of the lifetime has passed, getHTTPSKeyCert should have updated it, so wait until that, then get the new cert, and update our watchers (which should be the servers).
  let renewDate = createTime + (expirationTime - createTime) * 0.75;
  let timeToExpire = renewDate - Date.now();
- if (timeToExpire < 0) {
- console.log(`HTTPS certificate is looking too old. Renewing from remote.`);
- getCertFromRemote = createGetCertFromRemote();
- continue;
- }

  if (!firstLoop) {
  for (let callback of callbacks) {
@@ -118,43 +116,19 @@ const certUpdateLoop = lazy(() => {
  }
  firstLoop = false;

+ if (timeToExpire < 0) {
+ console.warn(`getHTTPSKeyCert gave as an almost expired. It is not supposed to do this. It should have updated it by now... Expires on ${formatDateTime(expirationTime)}`);
+ timeToExpire = timeInMinute * 15;
+ }
  // Max timeout a signed integer, but lower is fine too
- timeToExpire = Math.min(timeToExpire, 2 ** 30);
- console.log(`Certicates up to date, renewing on ${new Date(Date.now() + timeToExpire)}`);
+ timeToExpire = Math.min(Math.floor(timeToExpire), 2 ** 30);
  await delay(timeToExpire);
- console.log(`Woke up to renew`);
+ console.log(`Woke up to propagate new certs`);
  }
  })());
  });

- function createGetCertFromRemote() {
- return lazy(async () => {
- return backoffRetryLoop(async () => {
- // Skip the remote call if we have DNS write permissions, to
- // make bootstrapping easier.
- if (await hasDNSWritePermissions()) {
- let ip = SocketFunction.mountedIP;
- if (ip === "0.0.0.0") {
- ip = await getExternalIP();
- }
- return await getHTTPSKeyCertInner(ip);
- }
-
- let edgeNodeId = await getControllerNodeId(EdgeCertController);
- if (!edgeNodeId) {
- throw new Error("No EdgeCertController found");
- }
- return await EdgeCertController.nodes[edgeNodeId].getHTTPSKeyCert();
- });
- });
- }
- let getCertFromRemote = createGetCertFromRemote();

- // NOTE: Only works if the machine is already listening. Needed for some special websocket
- // stuff related to debugging.
- export function debugGetRawEdgeCert() {
- return getCertFromRemote();
- }

  let callbacks: ((newCertPair: { cert: string; key: string }) => void)[] = [];
  async function EdgeCertController_watchHTTPSKeyCert(
@@ -163,7 +137,7 @@ async function EdgeCertController_watchHTTPSKeyCert(
  certUpdateLoop();

  callbacks.push(callback);
- let certPair = await getCertFromRemote();
+ let certPair = await getHTTPSKeyCert(getDomain());
  callback(certPair);
  }
 
@@ -228,24 +202,11 @@ async function checkEdgeDomainsAlive() {
  }
  }

- async function getHTTPSKeyCertInner(callerIP: string) {
- let cert = await getBaseCert();
- // If the cert is 50% expired generate a new one
- let certObj = parseCert(cert.cert);
-
- // Get expiration date
- let expirationTime = +new Date(certObj.validity.notAfter);
- let createTime = +new Date(certObj.validity.notBefore);
-
- // If 50% of the lifetime has passed, renew the cert
- let renewDate = createTime + (expirationTime - createTime) * 0.5;
- if (renewDate < Date.now()) {
- console.log(`HTTPS certificate is looking too old, forcefully renewing`);
- getHTTPSKeyCert.clear(getDomain());
- getBaseCert = createGetBaseCert();
- cert = await getBaseCert();
+ async function publishEdgeDomain() {
+ let callerIP = SocketFunction.mountedIP;
+ if (callerIP === "0.0.0.0") {
+ callerIP = await getExternalIP();
  }
-
  // IMPORTANT! We have to set our A record AFTER we create our cert, otherwise we might wait a while
  // with our A record public while we create our cert.
  runEdgeDomainAliveLoop();
@@ -273,7 +234,7 @@ async function getHTTPSKeyCertInner(callerIP: string) {
  console.warn(`Tried to serve an edge node on the local domain, but SocketFunction.mount did NOT specify a public ip (ex, { ip: "0.0.0.0" }) AND there are already existing public servers. You can't load balance between real ips and 127.0.0.1! ${existingIPs.join(", ")}. You will need to use 127-0-0-1.${edgeDomain} to access the local server (instead of just ${edgeDomain}).`);
  }
  */
- console.log(yellow(`Current process is not marked as public, but machine previous had public services. NOT setting ${edgeDomain} to 127.0.0.1, as this would make the public services inaccessible. Assuming the current process is for development, I recommend using noproxy.${edgeDomain} or 127-0-0-1.${edgeDomain} to access the server.`));
+ console.log(yellow(`Current process is not marked as public, but machine previous had public services. NOT setting ${edgeDomain} to 127.0.0.1, as this would make the public services inaccessible. Assuming the current process is for development, I recommend using 127-0-0-1.${edgeDomain} or 127-0-0-1.${edgeDomain} to access the server.`));
  }
  } else {
  if (existingIPs.includes("127.0.0.1")) {
@@ -289,7 +250,6 @@ async function getHTTPSKeyCertInner(callerIP: string) {
  }
  });
  }
- promises.push(addRecord("A", "noproxy." + edgeDomain, "127.0.0.1"));
  promises.push(addRecord("A", "127-0-0-1." + edgeDomain, "127.0.0.1"));
  // Add records in parallel, so we can wait for DNS propagation in parallel
  await Promise.all(promises);
@@ -297,27 +257,5 @@ async function getHTTPSKeyCertInner(callerIP: string) {
  console.error(`Error updating DNS records, continuing without updating them`, e);
  }
  }
-
- return cert;
  }

- class EdgeCertControllerBase {
- public async getHTTPSKeyCert() {
- // Ensure the a record is subscribed
- const caller = SocketFunction.getCaller();
- let callerIP = getNodeIdIP(caller.nodeId);
- return await getHTTPSKeyCertInner(callerIP);
- }
- }
-
-
- const EdgeCertController = SocketFunction.register(
- "EdgeCertController-694c925b-fb10-4656-aed0-b53a48ded548",
- new EdgeCertControllerBase(),
- () => ({
- getHTTPSKeyCert: {},
- }),
- () => ({
- hooks: [requiresNetworkTrustHook],
- })
- );
@@ -10,7 +10,7 @@ import { formatDateTime, formatTime } from "socket-function/src/formatting/forma
  import { delay } from "socket-function/src/batching";
  import { timeInMinute } from "socket-function/src/misc";

- const archives = lazy(() => getArchives(`https_certs_2/`));
+ const archives = lazy(() => getArchives(`https_certs_3/`));
  // Expire EXPIRATION_THRESHOLD% of the way through the certificate's lifetime
  const EXPIRATION_THRESHOLD = 0.4;
 
@@ -40,6 +40,12 @@ export const getHTTPSKeyCert = cache(async (domain: string): Promise<{ key: stri
  console.log(magenta(`Renewing domain ${domain} (renew target is ${formatDateTime(renewDate)}).`));
  keyCert = undefined;
  }
+ let timeUntilRenew = renewDate - Date.now();
+ setTimeout(() => {
+ console.log(magenta(`Clearing getHTTPSKeyCert to try to renew ${domain} (renew target is ${formatDateTime(renewDate)}).`));
+ getHTTPSKeyCert.clear(domain);
+ void getHTTPSKeyCert(domain);
+ }, Math.min(Math.floor(timeUntilRenew * 1.1), 2 ** 30));
  } else {
  console.log(magenta(`No cert found for domain ${domain}, generating shortly.`));
  }
@@ -50,7 +56,6 @@ export const getHTTPSKeyCert = cache(async (domain: string): Promise<{ key: stri
  const accountKey = getAccountKey();
  let altDomains: string[] = [];

- // altDomains.push("noproxy." + domain);
  // // NOTE: Allowing local access is just an optimization, not to avoid having to forward ports
  // // (unless you type 127-0-0-1.domain into the browser... then I guess you don't have to forward ports?)
  // altDomains.push("127-0-0-1." + domain);
@@ -4,7 +4,7 @@ import { getDomain, isDevDebugbreak, isNoNetwork, isPublic } from "../config";
  import { measureBlock } from "socket-function/src/profiling/measure";
  import { isNode, sha256Hash, throttleFunction, timeInMinute, timeInSecond } from "socket-function/src/misc";
  import { errorToUndefinedSilent, ignoreErrors, logErrors, timeoutToUndefinedSilent } from "../errors";
- import { requiresNetworkTrustHook } from "../-d-trust/NetworkTrust2";
+ import { ensureWeAreTrusted, requiresNetworkTrustHook } from "../-d-trust/NetworkTrust2";
  import { delay, runInfinitePoll, runInfinitePollCallAtStart } from "socket-function/src/batching";
  import { getNodeId, getNodeIdFromLocation } from "socket-function/src/nodeCache";
  import { lazy } from "socket-function/src/caching";
@@ -17,9 +17,10 @@ import { waitForFirstTimeSync } from "socket-function/time/trueTimeShim";
  import { decodeNodeId, decodeNodeIdAssert } from "../-a-auth/certs";

  import { isDefined } from "../misc";
- import { diskLog } from "../diagnostics/logs/diskLogger";
  import { getBootedEdgeNode } from "../-0-hooks/hooks";
  import { EdgeNodeConfig } from "../4-deploy/edgeNodes";
+ import * as certs from "../-a-auth/certs";
+ import { logDisk } from "../diagnostics/logs/diskLogger";

  let HEARTBEAT_INTERVAL = timeInMinute * 15;
  // Interval which we check other heartbeats
@@ -86,10 +87,10 @@ export function getOwnNodeIdAssert(): string {
  }

  export const getOwnThreadId = lazy(() => {
- return decodeNodeIdAssert(getOwnNodeIdAssert()).threadId;
+ return certs.getOwnThreadId();
  });
  export const getOwnMachineId = lazy(() => {
- return decodeNodeIdAssert(getOwnNodeIdAssert()).machineId;
+ return certs.getOwnMachineId();
  });

  export function isOwnNodeId(nodeId: string): boolean {
@@ -180,7 +181,7 @@ function addNodeIdBase(nodeId: string) {
  function setNodeIds(nodeIds: string[]) {
  nodeIds = nodeIds.filter(x => x !== SPECIAL_NODE_ID_FOR_UNMOUNTED_NODE);

- diskLog("setNodeIds", { nodeIds });
+ logDisk("log", "setNodeIds", { nodeIds });
  // Also try all localhost ports, if we are developing and not in public mode
  if (isNode() && !isPublic() && isDevDebugbreak()) {
  let ports = new Set(nodeIds.map(nodeId => decodeNodeId(nodeId)?.port).filter(isDefined));
@@ -345,7 +346,7 @@ async function runHeartbeatAuditLoop() {
  }
  } else {
  deadCount.delete(nodeId);
- diskLog("Read node heartbeat", { nodeId, lastTime });
+ logDisk("log", "Read node heartbeat", { nodeId, lastTime });
  }
  }
  if (pendingDeadCount) {
@@ -473,7 +474,8 @@ export async function onNodeDiscoveryReady() {
  }

  if (isServer()) {
- setImmediate(() => {
+ setImmediate(async () => {
+
  logErrors(runHeartbeatAuditLoop());
  logErrors(runMemoryAuditLoop());
  // NOTE: We used to wait until we mounted, but... we should be able to find nodes
@@ -78,7 +78,12 @@ export async function getControllerNodeIdList(
  passedNodeIds.set(nodeId, entryPoint);
  }
  }));
- return Array.from(passedNodeIds.entries()).map(([nodeId, entryPoint]) => ({ nodeId, entryPoint }));
+
+ let results = Array.from(passedNodeIds.entries()).map(([nodeId, entryPoint]) => ({ nodeId, entryPoint }));
+ // Ignore the special local IDs, otherwise we'll be returning duplicates. And our caller probably doesn't want the fastest path, they probably just want every path.
+ // TODO: We should detect if the local ID and the remote ID is the same and then always pick the local ID instead. However, I have no idea how to do this as it doesn't include the machine ID, just the port. So how do we know if they're the same? I think we actually have to talk to them and identify their machine IDs, which we need to cache, which then takes longer, et cetera, et cetera.
+ results = results.filter(x => !x.nodeId.startsWith("127-0-0-1."));
+ return results;
  }
 
 
@@ -21,7 +21,6 @@ import { cache, cacheLimited } from "socket-function/src/caching";
  import { IdentityController_getCurrentReconnectNodeIdAssert, IdentityController_getReconnectNodeIdAssert } from "../-c-identity/IdentityController";
  import { getBufferFraction, getBufferInt, getShortNumber } from "../bits";
  import { devDebugbreak, getDomain, isDevDebugbreak } from "../config";
- import { diskLog } from "../diagnostics/logs/diskLogger";
  import { waitForFirstTimeSync } from "socket-function/time/trueTimeShim";

  export const LOCAL_DOMAIN = "LOCAL";
@@ -262,6 +261,7 @@ class NodePathAuthorities {
  this.previouslyNotAvailableNodes.add(nodeId);
  return;
  }
+
  this.previouslyNotAvailableNodes.delete(nodeId);

  time = Date.now() - time;
@@ -21,7 +21,7 @@ import { formatNumber, formatTime } from "socket-function/src/formatting/format"
  import { isClient } from "../config2";
  import { remoteWatcher } from "../1-path-client/RemoteWatcher";
  import { auditLog, isDebugLogEnabled } from "./auditLogs";
- import { diskLog } from "../diagnostics/logs/diskLogger";
+ import { logDisk } from "../diagnostics/logs/diskLogger";

  /*
  - commitWrites <= creating writes
@@ -177,7 +177,7 @@ class PathValueCommitter {
  markArrayAsSplitable(values);
  const { Querysub } = await import("../4-querysub/Querysub");
  let serializedValues = await pathValueSerializer.serialize(values, { compress: Querysub.COMPRESS_NETWORK });
- diskLog(`Send PathValues to server`, { valueCount: values.length, targetId: otherAuthority, });
+ logDisk("log", "Send PathValues to server", { valueCount: values.length, targetId: otherAuthority, });
  let forwardPromise = PathValueController.nodes[otherAuthority].forwardWrites(
  serializedValues,
  undefined,
@@ -662,4 +662,4 @@ function trackLag(now: number, lag: number) {
  }


- export const pathValueCommitter = new PathValueCommitter();
+ export const pathValueCommitter = new PathValueCommitter();
@@ -16,7 +16,7 @@ import debugbreak from "debugbreak";
  import { ClientWatcher } from "../1-path-client/pathValueClientWatcher";
  import { auditLog, isDebugLogEnabled } from "./auditLogs";
  import { debugNodeId } from "../-c-identity/IdentityController";
- import { diskLog } from "../diagnostics/logs/diskLogger";
+ import { logDisk } from "../diagnostics/logs/diskLogger";
  export { pathValueCommitter };

  class PathValueControllerBase {
@@ -86,9 +86,9 @@ class PathValueControllerBase {
  auditLog("RECEIVE VALUE", { path: value.path, time: value.time.time, sourceNodeId });
  }
  }
- diskLog(`Received PathValues via forwardWrites`, { valueCount: values.length, callerId, });
+ logDisk("log", "Received PathValues via forwardWrites", { valueCount: values.length, callerId, });
  for (let value of values) {
- diskLog("Received PathValue for path", { path: value.path, time: value.time.time, callerId });
+ logDisk("log", "Received PathValue for path", { path: value.path, time: value.time.time, callerId });
  }

  if (isCoreQuiet) {
@@ -12,8 +12,9 @@ import { devDebugbreak } from "../../config";
  import { logErrors } from "../../errors";
  import { saveSnapshot } from "./archiveSnapshots";
  import { getNodeId } from "socket-function/src/nodeCache";
- import { diskLog } from "../../diagnostics/logs/diskLogger";
  import { logNodeStateStats, logNodeStats } from "../../-0-hooks/hooks";
+ import { parsePath, toFileNameKVP, parseFileNameKVP } from "../../misc";
+ import { logDisk } from "../../diagnostics/logs/diskLogger";

  /** Clean up old files after a while */
  const DEAD_CREATE_THRESHOLD = timeInHour * 12;
@@ -301,14 +302,14 @@ class TransactionLocker {
  private isTransactionValidOverride?: (transaction: Transaction, dataFiles: FileInfo[], rawDataFiles: FileInfo[]) => boolean
  ) { }

- // #region Base File Operations
+ // #region Base File Ops
  public getConfirmKey(key: string): string {
  let { dir, name } = parsePath(key);
  return `${dir}confirm_${name}.confirm`;
  }
  public async createConfirm(key: string) {
  let path = this.getConfirmKey(key);
- diskLog(`Creating confirmation for ${key}`);
+ logDisk("log", "Creating confirmation for ${key}");
  await this.storage.setValue(path, Buffer.from(""));
  return path;
  }
@@ -361,7 +362,7 @@ class TransactionLocker {
  delete: ellipsize(deletes.map(a => debugFileInfo(a.key)).join(","), 50),
  });

- diskLog(`Writing transaction`, {
+ logDisk("log", "Writing transaction", {
  name,
  ops: transaction.ops.length,
  });
@@ -483,7 +484,7 @@ class TransactionLocker {
  }
  }

- diskLog(`Read archive state`, {
+ logDisk("log", "Read archive state", {
  rawFilesCount: files.length,
  confirmedCount: currentDataFiles.size,
  rawFiles: files.map(a => a.file),
@@ -502,7 +503,7 @@ class TransactionLocker {
  let result = await tryToRead();
  if (result) {
  let timeToRead = Date.now() - startTime;
- diskLog(`Read data state in ${formatTime(timeToRead)}`);
+ logDisk("log", `Read data state in ${formatTime(timeToRead)}`);
  return result;
  }
  }
@@ -541,7 +542,7 @@ class TransactionLocker {
  let rawLookup = new Set(Array.from(rawDataFiles).map(a => a.file));
  // If any creates are not confirmed, it must not have been applied
  if (transaction.ops.some(a => a.type === "create" && rawLookup.has(a.key) && !confirmedKeys.has(a.key))) {
- diskLog(`Transaction not applied (has pending confirmations of creates)`, {
+ logDisk("log", `Transaction not applied (has pending confirmations of creates)`, {
  keys: transaction.ops
  .filter(a => a.type === "create" && rawLookup.has(a.key) && !confirmedKeys.has(a.key))
  .map(a => a.key)
@@ -550,7 +551,7 @@ class TransactionLocker {
  }
  // If any deletes still exist, it must not have been applied
  if (transaction.ops.some(a => a.type === "delete" && confirmedKeys.has(a.key))) {
- diskLog(`Transaction not applied (has pending deletes)`, {
+ logDisk("log", `Transaction not applied (has pending deletes)`, {
  keys: transaction.ops
  .filter(a => a.type === "delete" && confirmedKeys.has(a.key))
  .map(a => a.key)
@@ -563,7 +564,7 @@ class TransactionLocker {
  let createCount = transaction.ops.filter(a => a.type === "create").length;
  let deleteCount = transaction.ops.filter(a => a.type === "delete").length;
  let lockedFiles = transaction.lockedFilesMustEqual?.length;
- diskLog(`Applying transaction with ${createCount} file creates and ${deleteCount} file deletes. ${lockedFiles !== undefined && `Lock state depends on ${lockedFiles} files` || ""}`, {
+ logDisk("log", `Applying transaction with ${createCount} file creates and ${deleteCount} file deletes. ${lockedFiles !== undefined && `Lock state depends on ${lockedFiles} files` || ""}`, {
  transactions: transaction.ops.map(x => JSON.stringify(x)),
  });
  logNodeStats(`archives|TΔ Apply`, formatNumber, 1);
@@ -589,7 +590,7 @@ class TransactionLocker {
  };
  await Promise.all(list(CONCURRENT_WRITE_COUNT).map(runThread));

- diskLog(`Applied transaction with ${createCount} file creates and file ${deleteCount} deletes. ${lockedFiles !== undefined && `Lock state depends on ${lockedFiles} files` || ""}`, {
+ logDisk("log", `Applied transaction with ${createCount} file creates and file ${deleteCount} deletes. ${lockedFiles !== undefined && `Lock state depends on ${lockedFiles} files` || ""}`, {
  transactions: transaction.ops.map(x => JSON.stringify(x)),
  });
  }
@@ -655,7 +656,7 @@ class TransactionLocker {
  let threshold = activeT.createTime + this.storage.propagationTime;
  if (Date.now() < threshold) {
  let waitTime = threshold - Date.now();
- diskLog(`Waiting ${formatTime(waitTime)} for transaction ${activeT.seqNum} to settle.`);
+ logDisk("log", `Waiting ${formatTime(waitTime)} for transaction ${activeT.seqNum} to settle.`);
  await new Promise(resolve => setTimeout(resolve, waitTime));
  return this.getFilesBase();
  }
@@ -759,7 +760,7 @@ class TransactionLocker {
  let dels = transaction.ops.filter(a => a.type === "delete").length;
  let creates = transaction.ops.filter(a => a.type === "create").length;
  let createBytes = transaction.ops.map(a => a.type === "create" && a.value?.length || 0).reduce((a, b) => a + b, 0);
- diskLog(`Starting transaction with ${creates} file creates and ${dels} file deletes, ${formatNumber(createBytes)}B`, {
+ logDisk("log", `Starting transaction with ${creates} file creates and ${dels} file deletes, ${formatNumber(createBytes)}B`, {
  createFilesNames: transaction.ops.filter(a => a.type === "create").map(a => a.key),
  deleteFilesNames: transaction.ops.filter(a => a.type === "delete").map(a => a.key),
  });
@@ -788,7 +789,7 @@ class TransactionLocker {
  let beforeData = await this.getFilesBase();
  if (!this.isTransactionValid(transaction, beforeData.dataFiles, beforeData.rawDataFiles)) {
  logNodeStats(`archives|TΔ Rejected`, formatNumber, 1);
- diskLog(`Finished transaction with rejection, ${transaction.ops.length} ops`);
+ logDisk("log", `Finished transaction with rejection, ${transaction.ops.length} ops`);
  return "rejected";
  }
 
@@ -797,33 +798,10 @@ class TransactionLocker {
  let afterData = await this.getFilesBase();
  if (this.wasTransactionApplied(transaction, afterData.dataFiles, afterData.rawDataFiles)) {
  logNodeStats(`archives|TΔ Accepted`, formatNumber, 1);
- diskLog(`Finished transaction with ${transaction.ops.length} ops`);
+ logDisk("log", `Finished transaction with ${transaction.ops.length} ops`);
  return "accepted";
  }
  }
  }
  }

- function parsePath(path: string): { dir: string; name: string } {
- path = path.replaceAll("\\", "/");
- let lastSlash = path.lastIndexOf("/");
- if (lastSlash === -1) return { dir: "", name: path };
- return { dir: path.slice(0, lastSlash + 1), name: path.slice(lastSlash + 1) };
- }
-
- // Loses spaces in keys and values
- function toFileNameKVP(kvp: { [key: string]: string }): string {
- function s(v: string) {
- return v.replaceAll(" ", "_");
- }
- return " " + Object.entries(kvp).map(([key, value]) => `${s(key)}=${s(value)}`).join(" ") + " ";
- }
- function parseFileNameKVP(fileName: string): { [key: string]: string } {
- let parts = fileName.trim().split(" ");
- let obj: { [key: string]: string } = {};
- for (let part of parts) {
- let [key, value] = part.split("=");
- obj[key] = value || key;
- }
- return obj;
- }
@@ -29,7 +29,8 @@ import { sha256 } from "js-sha256";
  import { PromiseObj } from "../promise";
  import { ClientWatcher } from "../1-path-client/pathValueClientWatcher";
  import { auditLog, isDebugLogEnabled } from "./auditLogs";
- import { diskLog } from "../diagnostics/logs/diskLogger";
+ import { logDisk } from "../diagnostics/logs/diskLogger";
+

  let yargObj = isNodeTrue() && yargs(process.argv)
  .option("noarchive", { type: "boolean", alias: ["noarchives"], desc: "Don't save writes to disk. Still reads from disk." })
@@ -1260,7 +1261,7 @@ class PathWatcher {
  auditLog("new local WATCH PARENT", { path });
  }
  }
- diskLog(`New PathValue watches`, {
+ logDisk("log", `New PathValue watches`, {
  newPathsWatched: newPathsWatched.size,
  newParentsWatched: newParentsWatched.size,
  });
@@ -1393,7 +1394,7 @@ class PathWatcher {
  }

  if (fullyUnwatched.paths.length > 0 || fullyUnwatched.parentPaths.length > 0) {
- diskLog(`Unwatched PathValue watches`, {
+ logDisk("log", `Unwatched PathValue watches`, {
  unwatchedPaths: fullyUnwatched.paths.length,
  unwatchedParents: fullyUnwatched.parentPaths.length,
  });
@@ -18,13 +18,13 @@ import { parseArgs } from "./PathFunctionHelpers";
  import { PERMISSIONS_FUNCTION_ID, getAllDevelopmentModulesIds, getDevelopmentModule, getExportPath, getModuleRelativePath, getSchemaObject } from "./syncSchema";
  import { formatTime } from "socket-function/src/formatting/format";
  import { getControllerNodeIdList, set_debug_getFunctionRunnerShards } from "../-g-core-values/NodeCapabilities";
- import { diskLog } from "../diagnostics/logs/diskLogger";
  import { FilterSelector, Filterable, doesMatch } from "../misc/filterable";
  import { SocketFunction } from "socket-function/SocketFunction";
  import { requiresNetworkTrustHook } from "../-d-trust/NetworkTrust2";
  import { getDomain, isLocal } from "../config";
  import { getGitRefSync, getGitURLSync } from "../4-deploy/git";
  import { DeployProgress } from "../4-deploy/deployFunctions";
+ import { logDisk } from "../diagnostics/logs/diskLogger";

  export const functionSchema = rawSchema<{
  [domainName: string]: {
@@ -679,7 +679,7 @@ export class PathFunctionRunner {
  let syncTime = wallTime - evalTime;


- diskLog(`Finished FunctionRunner function`, {
+ logDisk("log", "Finished FunctionRunner function", {
  ...callPath, argsEncoded: "", functionSpec,
  wallTime, syncTime, evalTime,
  loops: runCount,
@@ -2067,7 +2067,7 @@ function updateDOMNodeFields(domNode: DOMNode, vNode: VirtualDOM, prevVNode: Vir
  if (name === "blur") {
  let target = args[0].currentTarget as HTMLElement;
  if (!target.getAttribute("data-blur-on-unmount") && !target.isConnected) {
- console.info("Ignoring blur for disconnected element. You can use data-blur-on-unmount to re-enable blurs on this element.", target);
+ logDisk("log", "Ignoring blur for disconnected element. You can use data-blur-on-unmount to re-enable blurs on this element.", target);
  return;
  }
  }
@@ -2376,7 +2376,7 @@ function blurFixOnMouseDownHack(event: MouseEvent) {
 
  // Looks like we are going to blur, so blur now
  if (selected instanceof HTMLElement && !selected.hasAttribute("data-no-early-blur")) {
- console.info(`Simulating early blur to prevent blur from firing after mousedown. This solves a problem where mousedown changes the UI, and then the blur fires on the wrong element. You can use data-no-early-blur to opt-out of this feature`, selected);
+ logDisk("log", `Simulating early blur to prevent blur from firing after mousedown. This solves a problem where mousedown changes the UI, and then the blur fires on the wrong element. You can use data-no-early-blur to opt-out of this feature`, selected);
  selected.blur();
  }
  }
@@ -2745,4 +2745,5 @@ function triggerGlobalOnMountWatch(component: QRenderClass) {
 
  // NOTE: Import Querysub at the end, so we can export qreact before we require it. That way Querysub
  // can statically access qreact.
- import { Querysub } from "../4-querysub/Querysub";
+ import { Querysub } from "../4-querysub/Querysub";
+ import { logDisk } from "../diagnostics/logs/diskLogger";
@@ -849,11 +849,11 @@ export class Querysub {
  globalThis.remapImportRequestsClientside = globalThis.remapImportRequestsClientside || [];
  globalThis.remapImportRequestsClientside.push(async (args) => {
  try {
- let key: typeof identityStorageKey = "machineCA_6";
+ let key: typeof identityStorageKey = "machineCA_9";
  let storageValueJSON = localStorage.getItem(key);
  if (!storageValueJSON) return args;
  let storageValue = JSON.parse(storageValueJSON) as IdentityStorageType;
- let machineId = storageValue.domain;
+ let machineId = storageValue.domain.split(".").at(-3) || "";

  let pem = Buffer.from(storageValue.keyB64, "base64");
  let privateKey = exports.extractRawED25519PrivateKey(pem);
@@ -35,7 +35,6 @@ import { LoggingClient } from "../0-path-value-core/LoggingClient";
  import * as prediction from "./querysubPrediction";
  import { getCallResultPath } from "./querysubPrediction";
  import { nodePathAuthority, pathValueAuthority2 } from "../0-path-value-core/NodePathAuthorities";
- import { diskLog } from "../diagnostics/logs/diskLogger";
  import { assertIsManagementUser } from "../diagnostics/managementPages";
  import { getBrowserUrlNode } from "../-f-node-discovery/NodeDiscovery";
  setFlag(require, "preact", "allowclient", true);
@@ -44,6 +43,7 @@ import yargs from "yargs";
  import { mergeFilterables, parseFilterable, serializeFilterable } from "../misc/filterable";
  import { isManagementUser, onAllPredictionsFinished } from "../-0-hooks/hooks";
  import { getDomain, isBootstrapOnly, isLocal } from "../config";
+ import { logDisk } from "../diagnostics/logs/diskLogger";

  let yargObj = isNodeTrue() && yargs(process.argv)
  .option("fncfilter", { type: "string", default: "", desc: `Sets the filterable state for function calls, causing them to target specific FunctionRunners. If no FunctionRunner matches, all functions will fail to run. For example: "devtestserver" will match a FunctionRunner that uses the "devtestserver" filter. Merges with the existing filterable state if a client sets it explicitly.` })
@@ -452,7 +452,7 @@ export class QuerysubControllerBase {
  // make the UI look cleaner (instead of showing stale values, it shows nothing)?
  let undefinedValues: PathValue[] = newPathsNotAllowed.map(path => ({ path, value: undefined, canGCValue: true, isTransparent: true, time: epochTime, locks: [], lockCount: 0, valid: true, event: false }));

- diskLog(`Disallowing PathValue watches due to disallowed permissions`, { count: newPathsNotAllowed.length, callerId });
+ logDisk("log", "Disallowing PathValue watches due to disallowed permissions", { count: newPathsNotAllowed.length, callerId });

  ignoreErrors(pathValueSerializer.serialize(undefinedValues, { compress: Querysub.COMPRESS_NETWORK }).then(buffers =>
  PathValueController.nodes[callerId].forwardWrites(
@@ -168,6 +168,7 @@ export function formatValue(value: unknown, formatter: JSXFormatter = "guess", c
  }

  export function toSpaceCase(text: string) {
+ if (text.startsWith("_")) return text;
  return text
  // "camelCase" => "camel Case"
  // "URL" => "URL"