querysub 0.189.0 → 0.191.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,4 @@
1
+ #!/usr/bin/env node
2
+
3
+ require("typenode");
4
+ require("../src/user-implementation/addSuperUser");
package/bin/deploy.js CHANGED
@@ -1,4 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
 
3
+ // Always local, as we want to always use the local code? Might not be needed anymore?
4
+ process.argv.push("--local");
5
+
3
6
  require("typenode");
4
7
  require("../src/4-deploy/deployMain");
@@ -0,0 +1,7 @@
1
+ #!/usr/bin/env node
2
+
3
+ process.argv.push("--nobreak");
4
+ process.argv.push("--public");
5
+
6
+ require("typenode");
7
+ require("../src/3-path-functions/PathFunctionRunnerMain");
package/bin/function.js CHANGED
@@ -1,4 +1,9 @@
1
1
  #!/usr/bin/env node
2
2
 
3
+ process.argv.push("--local");
4
+ // Only dev. If we listen to all functions it will start picking up production function calls, which breaks things (unlike the path value server, where we are unreachable, so nothing breaks).
5
+ process.argv.push("--filter");
6
+ process.argv.push("dev");
7
+
3
8
  require("typenode");
4
- require("../src/3-path-functions/PathFunctionRunnerMain");
9
+ require("../src/3-path-functions/PathFunctionRunnerMain");
@@ -0,0 +1,8 @@
1
+ #!/usr/bin/env node
2
+
3
+ process.argv.push("--watch");
4
+ process.argv.push("--nobreak");
5
+ process.argv.push("--public");
6
+
7
+ require("typenode");
8
+ require("../src/archiveapps/archiveGCEntry");
@@ -0,0 +1,6 @@
1
+ #!/usr/bin/env node
2
+
3
+ process.argv.push("--watch");
4
+
5
+ require("typenode");
6
+ require("../src/archiveapps/archiveGCEntry");
package/bin/gc.js ADDED
@@ -0,0 +1,4 @@
1
+ #!/usr/bin/env node
2
+
3
+ require("typenode");
4
+ require("../src/archiveapps/archiveGCEntry");
@@ -0,0 +1,7 @@
1
+ #!/usr/bin/env node
2
+
3
+ process.argv.push("--nobreak");
4
+ process.argv.push("--public");
5
+
6
+ require("typenode");
7
+ require("../src/archiveapps/archiveJoinEntry");
package/bin/join.js ADDED
@@ -0,0 +1,4 @@
1
+ #!/usr/bin/env node
2
+
3
+ require("typenode");
4
+ require("../src/archiveapps/archiveGCEntry");
package/bin/machine.js ADDED
@@ -0,0 +1,4 @@
1
+ #!/usr/bin/env node
2
+
3
+ require("typenode");
4
+ require("../src/deployManager/machineApplyMain");
@@ -0,0 +1,7 @@
1
+ #!/usr/bin/env node
2
+
3
+ process.argv.push("--nobreak");
4
+ process.argv.push("--public");
5
+
6
+ require("typenode");
7
+ require("../src/server.ts");
package/bin/server.js CHANGED
@@ -1,4 +1,6 @@
1
1
  #!/usr/bin/env node
2
2
 
3
+ process.argv.push("--local");
4
+
3
5
  require("typenode");
4
- require("../src/server.ts");
6
+ require("../src/server.ts");
@@ -0,0 +1,4 @@
1
+ #!/usr/bin/env node
2
+
3
+ require("typenode");
4
+ require("../src/user-implementation/setEmailKey");
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "querysub",
3
- "version": "0.189.0",
3
+ "version": "0.191.0",
4
4
  "main": "index.js",
5
5
  "license": "MIT",
6
6
  "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",
@@ -39,12 +39,24 @@
39
39
  "type": "yarn tsc --noEmit",
40
40
  "depend": "yarn --silent depcruise src --include-only \"^src\" --config --output-type dot | dot -T svg > dependency-graph.svg",
41
41
  "test": "yarn typenode ./src/test/test.tsx --local",
42
- "test2": "yarn typenode ./src/4-dom/qreactTest.tsx --local"
42
+ "test2": "yarn typenode ./src/4-dom/qreactTest.tsx --local",
43
+ "machine-apply": "yarn typenode ./src/deployManager/deployApplyMain.ts"
43
44
  },
44
45
  "bin": {
45
- "querysub-deploy": "./bin/deploy.js",
46
- "querysub-server": "./bin/server.js",
47
- "querysub-function": "./bin/function.js"
46
+ "deploy": "./bin/deploy.js",
47
+ "server": "./bin/server.js",
48
+ "server-public": "./bin/server-public.js",
49
+ "function": "./bin/function.js",
50
+ "function-public": "./bin/function-public.js",
51
+ "machine": "./bin/machine.js",
52
+ "gc": "./bin/gc.js",
53
+ "gc-watch": "./bin/gc-watch.js",
54
+ "gc-watch-public": "./bin/gc-watch-public.js",
55
+ "join": "./bin/join.js",
56
+ "join-public": "./bin/join-public.js",
57
+ "merge": "./bin/merge.js",
58
+ "addsuperuser": "./bin/addsuperuser.js",
59
+ "setemailkey": "./bin/setemailkey.js"
48
60
  },
49
61
  "devDependencies": {
50
62
  "dependency-cruiser": "^12.11.0",
@@ -0,0 +1,46 @@
1
+ import { lazy } from "socket-function/src/caching";
2
+ import { Archives } from "./archives";
3
+
4
+ export type ArchiveJSONT<T> = {
5
+ get(key: string): Promise<T | undefined>;
6
+ set(key: string, value: T): Promise<void>;
7
+ delete(key: string): Promise<void>;
8
+ keys(): Promise<string[]>;
9
+ values(): Promise<T[]>;
10
+ entries(): Promise<[string, T][]>;
11
+ [Symbol.asyncIterator](): AsyncIterator<[string, T]>;
12
+ };
13
+
14
+ export function archiveJSONT<T>(archives: () => Archives): ArchiveJSONT<T> {
15
+ archives = lazy(archives);
16
+ async function get(key: string) {
17
+ let buffer = await archives().get(key);
18
+ if (!buffer) return undefined;
19
+ return JSON.parse(buffer.toString());
20
+ }
21
+ async function set(key: string, value: T) {
22
+ await archives().set(key, Buffer.from(JSON.stringify(value)));
23
+ }
24
+ async function deleteFnc(key: string) {
25
+ await archives().del(key);
26
+ }
27
+ async function keys() {
28
+ return (await archives().find("")).map(value => value.toString());
29
+ }
30
+ async function values() {
31
+ let keysArray = await keys();
32
+ return Promise.all(keysArray.map(key => get(key)));
33
+ }
34
+ async function entries(): Promise<[string, T][]> {
35
+ let keysArray = await keys();
36
+ return Promise.all(keysArray.map(async key => [key, await get(key)]));
37
+ }
38
+ async function* asyncIterator(): AsyncIterator<[string, T]> {
39
+ for (let [key, value] of await entries()) {
40
+ yield [key, value];
41
+ }
42
+ }
43
+ return {
44
+ get, set, delete: deleteFnc, keys, values, entries, [Symbol.asyncIterator]: asyncIterator
45
+ };
46
+ }
@@ -267,7 +267,7 @@ async function syncArchives() {
267
267
  async function runHeartbeatAuditLoop() {
268
268
  await getAllNodeIds();
269
269
  let deadCount = new Map<string, number>();
270
- // 90% of the normal interval, so we don't run at the same tmie as the other audit
270
+ // 90% of the normal interval, so we don't run at the same time as the other audit
271
271
  await runInfinitePollCallAtStart(CHECK_INTERVAL * 0.9, async () => {
272
272
  if (shutdown) return;
273
273
  // Wait a bit longer, to try to prevent all nodes from synchronizing their audit times.
@@ -435,6 +435,7 @@ export function undeleteFromLookup<T>(lookup: { [key: string]: T }, key: string)
435
435
 
436
436
  const syncedSymbol = Symbol.for("syncedSymbol");
437
437
  // HACK: This should probably be somewhere else, but... it is just so useful for PathFunctionRunner...
438
+ /** @deprecated this is not very accurate (it breaks for schema accesses). Only use it for low level places that can't call the more accurate Querysub.isSynced. */
438
439
  export function isSynced(obj: unknown): boolean {
439
440
  // If it is a primitive, then it must be synced!
440
441
  if (!canHaveChildren(obj)) return true;
@@ -20,7 +20,7 @@ import { PermissionsCheck } from "../4-querysub/permissions";
20
20
  import { timeInMinute } from "socket-function/src/misc";
21
21
  import { getDomain, isLocal, isPublic } from "../config";
22
22
  import { publishMachineARecords } from "../-e-certs/EdgeCertController";
23
- import { green } from "socket-function/src/formatting/logColors";
23
+ import { green, magenta } from "socket-function/src/formatting/logColors";
24
24
  import { parseFilterSelector } from "../misc/filterable";
25
25
  import path from "path";
26
26
 
@@ -69,5 +69,9 @@ async function main() {
69
69
  let deployPath = path.resolve("./deploy.ts");
70
70
  await import(deployPath);
71
71
  }
72
+
73
+ if (yargObj.filter) {
74
+ console.log(magenta(`Only running functions that match the filter: ${yargObj.filter}. Use ?setfncfilter=${yargObj.filter} in the browser to trigger functions that match this filter.`));
75
+ }
72
76
  }
73
77
  logErrors(main());
@@ -379,6 +379,7 @@ function nextLocalId() {
379
379
  export type ExternalRenderClass = {
380
380
  data(): QComponent;
381
381
  getParent(): ExternalRenderClass | undefined;
382
+ isDisposed(): boolean;
382
383
  readonly renderWatcher: SyncWatcher;
383
384
  readonly VNodeWatcher: SyncWatcher;
384
385
  readonly mountDOMWatcher: SyncWatcher;
@@ -1719,6 +1720,9 @@ class QRenderClass {
1719
1720
 
1720
1721
  private disposing = false;
1721
1722
  public disposed = false;
1723
+ public isDisposed(): boolean {
1724
+ return this.disposed;
1725
+ }
1722
1726
  private dispose() {
1723
1727
  if (this.disposed) return;
1724
1728
  this.disposing = true;
@@ -15,7 +15,7 @@ import { cache, cacheLimited, lazy } from "socket-function/src/caching";
15
15
  import { getOwnMachineId, getThreadKeyCert, verifyMachineIdForPublicKey } from "../-a-auth/certs";
16
16
  import { getSNICerts, publishMachineARecords } from "../-e-certs/EdgeCertController";
17
17
  import { LOCAL_DOMAIN, nodePathAuthority } from "../0-path-value-core/NodePathAuthorities";
18
- import { debugCoreMode, registerGetCompressNetwork, encodeParentFilter, registerGetCompressDisk } from "../0-path-value-core/pathValueCore";
18
+ import { debugCoreMode, registerGetCompressNetwork, encodeParentFilter, registerGetCompressDisk, authorityStorage } from "../0-path-value-core/pathValueCore";
19
19
  import { clientWatcher, ClientWatcher } from "../1-path-client/pathValueClientWatcher";
20
20
  import { SyncWatcher, proxyWatcher, specialObjectWriteValue, isSynced, PathValueProxyWatcher, atomic, doAtomicWrites, noAtomicSchema, undeleteFromLookup, registerSchemaPrefix, WatcherOptions } from "../2-proxy/PathValueProxyWatcher";
21
21
  import { isInProxyDatabase, rawSchema } from "../2-proxy/pathDatabaseProxyBase";
@@ -285,7 +285,7 @@ export class Querysub {
285
285
  public static createWatcher(watcher: (obj: SyncWatcher) => void, options?: Partial<WatcherOptions<unknown>>): {
286
286
  dispose: () => void;
287
287
  explicitlyTrigger: () => void;
288
- } {
288
+ } & SyncWatcher {
289
289
  return proxyWatcher.createWatcher({
290
290
  debugName: watcher.name,
291
291
  canWrite: true,
@@ -462,9 +462,18 @@ export class Querysub {
462
462
  // onCommitFinished prevents duplicates, as well as only running when we are actually done
463
463
  Querysub.onCommitFinished(async () => {
464
464
  await clientWatcher.waitForTriggerFinished();
465
+ // No idea why this works, but... we do need to wait for promises... twice. Maybe we are waiting for some promise stacks to unwind? But twice? Bizarre
466
+ // - Needed for ReactEditor:updateSelectionRects AND LazyRenderList...
467
+ await Promise.resolve();
468
+ await Promise.resolve();
465
469
  callback();
466
470
  });
467
471
  }
472
+ public static async afterAllRendersFinishedPromise() {
473
+ return new Promise<void>(r => {
474
+ Querysub.afterAllRendersFinished(r);
475
+ });
476
+ }
468
477
 
469
478
  /** Solely for use to prevent local writes from occurring before predictions. We MIGHT make this a framework thing,
470
479
  * or just not bother with it (as after a prediction is run once the code will be loaded allowing it to always
@@ -510,8 +519,10 @@ export class Querysub {
510
519
  return proxyWatcher.inWatcher() && isInProxyDatabase();
511
520
  }
512
521
 
513
- public static isSynced(value: unknown) {
514
- return isSynced(value);
522
+ public static isSynced(value: unknown | (() => unknown)) {
523
+ let path = Querysub.getPath(value);
524
+ if (!path) return true;
525
+ return authorityStorage.isSynced(path);
515
526
  }
516
527
 
517
528
  public static ignoreWatches<T>(code: () => T) {
@@ -42,7 +42,7 @@ export class FullscreenModal extends qreact.Component<{
42
42
  display: "flex",
43
43
  flexDirection: "column",
44
44
  gap: 10,
45
- maxHeight: "calc(100% - 200px)",
45
+ maxHeight: "calc(100vh - 200px)",
46
46
  overflow: "auto",
47
47
  ...this.props.style
48
48
  }}
@@ -95,6 +95,9 @@ export function closeAllModals() {
95
95
  }
96
96
  });
97
97
  }
98
+ export function isShowingModal() {
99
+ return Object.values(data().modals).length > 0;
100
+ }
98
101
 
99
102
 
100
103
  export class ModalHolder extends qreact.Component {
package/src/config.ts CHANGED
@@ -3,6 +3,8 @@ import yargs from "yargs";
3
3
  import debugbreak from "debugbreak";
4
4
  import { MaybePromise } from "socket-function/src/types";
5
5
  import { parseArgsFactory } from "./misc/rawParams";
6
+ import { lazy } from "socket-function/src/caching";
7
+ import fs from "fs";
6
8
 
7
9
  export const serverPort = 11748;
8
10
 
@@ -11,7 +13,6 @@ let yargObj = parseArgsFactory()
11
13
  .option("domain", { type: "string", desc: `Sets the domain` })
12
14
  .option("client", { type: "boolean", desc: `Drops permissions, acting as an unauthenticated node` })
13
15
  .option("authority", { type: "string", desc: `Defines the base paths we are an authority on (the domain is prepended to them). Either a file path to a JSON(AuthorityPath[]), or a base64 representation of the JSON(AuthorityPath[]).` })
14
- .option("debugbreak", { type: "boolean", desc: "Break and show a popup when there are fatal errors" })
15
16
  .option("nobreak", { type: "boolean", desc: "Do not break on errors. Safer to set this than to just not set debugbreak, as some places might break without checking debugbreak, but nobreak works at a level where it is always used." })
16
17
  .option("public", { type: "boolean", desc: "Expose on public ports." })
17
18
  .option("local", { type: "boolean", desc: `If true, uses the local directory instead of the remote git repo. Also hotreloads from disk. Determines the repo to replace through the package.json "repository" property.` })
@@ -20,10 +21,21 @@ let yargObj = parseArgsFactory()
20
21
  .argv
21
22
  ;
22
23
 
23
- if (!isNode()) {
24
- // TODO: Get yargs running in the browser, instead of this hack processing
25
- yargObj.local = process.argv.includes("--local");
26
- }
24
+ let querysubConfig = lazy((): {
25
+ domain?: string;
26
+ } => {
27
+ if (!isNode()) throw new Error("querysubConfig is only available on the server");
28
+ const path = "./querysub.json";
29
+ if (!fs.existsSync(path)) {
30
+ return {};
31
+ }
32
+ try {
33
+ return JSON.parse(fs.readFileSync(path, "utf8"));
34
+ } catch (e) {
35
+ console.error("Error parsing querysub.json", e);
36
+ return {};
37
+ }
38
+ });
27
39
 
28
40
  export function isNoNetwork() {
29
41
  return yargObj.nonetwork;
@@ -33,7 +45,7 @@ export function getDomain() {
33
45
  if (!isNode()) {
34
46
  return location.hostname.split(".").slice(-2).join(".");
35
47
  }
36
- return yargObj.domain || "querysub.com";
48
+ return yargObj.domain || querysubConfig().domain || "querysub.com";
37
49
  }
38
50
 
39
51
  export function baseIsClient() {
@@ -49,13 +61,13 @@ export function devDebugbreak() {
49
61
  debugger;
50
62
  return;
51
63
  }
52
- if (!yargObj.debugbreak) return;
64
+ if (yargObj.nobreak) return;
53
65
  debugbreak(2);
54
66
  debugger;
55
67
  }
56
68
  let debuggedEnabled = false;
57
69
  export function isDevDebugbreak() {
58
- return yargObj.debugbreak || !isNode() && location.hostname.startsWith("127-0-0-1.") || debuggedEnabled;
70
+ return !yargObj.nobreak || !isNode() && location.hostname.startsWith("127-0-0-1.") || debuggedEnabled;
59
71
  }
60
72
  export function enableDebugging() {
61
73
  debuggedEnabled = true;
@@ -0,0 +1,52 @@
1
+ import { SocketFunction } from "socket-function/SocketFunction";
2
+ import { qreact } from "../../src/4-dom/qreact";
3
+ import { DeployController } from "./deploySchema";
4
+ import { css } from "typesafecss";
5
+
6
+ export class DeployPage extends qreact.Component {
7
+ render() {
8
+ let controller = DeployController(SocketFunction.browserNodeId());
9
+ return <div className={css.vbox(10)}>
10
+ <h1>DeployPage</h1>
11
+ <div className={css.whiteSpace("pre-wrap")}>
12
+ {JSON.stringify(controller.getAllInfo(), null, 4)}
13
+ </div>
14
+ </div>;
15
+ }
16
+ }
17
+ /*
18
+
19
+ todonext
20
+
21
+ # Domain config file
22
+ - Have the domain configured via a file in the repo, so we don't have to set it on every single script
23
+ - This also makes it possible to use binary scripts directly in querysub without having to add a script in our package.json to set the domain parameter
24
+ - FORWARD the main scripts
25
+ function
26
+ function-public
27
+ server
28
+ server-public
29
+ deploy
30
+ ... anything that uses node_modules, which is a lot...
31
+ - And remove all --domain arguments (which is a lot as well)
32
+ - ALSO, support setting arbitrary config parameter in this config file?
33
+ - Maybe on a per script basis, giving scripts names.
34
+
35
+ 5) synced controller loading indicator
36
+ - Probably which watches all controllers (on all nodes), for any pending, shown globally
37
+ 6) Validate that our loading indicator works, as well as our notifications on changed configuration, etc
38
+ - Test with errors, repeated crashes, etc.
39
+ 2) Web interface to setup configuration
40
+ - Needs to be able to run with NO PathValueServer, etc, just running locally
41
+ - I think we have a flag to recovery which allows anyone on localhost to be a superuser. Test this, as we might need it...
42
+ - A regular management page
43
+ - Shows servers
44
+ - Shows services
45
+ - Allows changing services (configuration, adding, removing, etc)
46
+ - Both writes to the backblaze file AND explicitly tells each service
47
+ - Use an editor which allows directly editting it as text?
48
+ - Monaco?
49
+ - BUT, which applies a type to it (I think monaco supports this, like with tsconfig.json).
50
+ - Copy the repo from the last changed service config
51
+ - Gives command for our setup script, in case we forget it (basically just instructions)
52
+ */
@@ -0,0 +1,170 @@
1
+ import { isNodeTrue } from "socket-function/src/misc";
2
+ import { nestArchives } from "../-a-archives/archives";
3
+ import { getArchivesBackblaze } from "../-a-archives/archivesBackBlaze";
4
+ import { getDomain } from "../config";
5
+ import { archiveJSONT } from "../-a-archives/archivesJSONT";
6
+ import { SocketFunction } from "socket-function/SocketFunction";
7
+ import { assertIsManagementUser } from "../diagnostics/managementPages";
8
+ import { getCallObj } from "socket-function/src/nodeProxy";
9
+ import { getSyncedController } from "../library-components/SyncedController";
10
+ import { requiresNetworkTrustHook } from "../-d-trust/NetworkTrust2";
11
+
12
+ export type ServiceConfig = {
13
+ // Just a random id to manage the service
14
+ serviceId: string;
15
+ // When parameters update, we restart it (and when info updates, we do not)
16
+ parameters: {
17
+ // MUST be unique, and clean enough to be used as the screen/tmux name, and folder name
18
+ key: string;
19
+
20
+ // Run multiple services, as `${key}-${index}-dply`, each with their own screen and folder
21
+ count: number;
22
+
23
+ repoUrl: string;
24
+ gitRef: string;
25
+ command: string;
26
+ // Allows forcing an update
27
+ poke?: number;
28
+ // If `once` is set, we don't restart it on exit — we only run it once. However we will run it again on boot, so it's not once globally, it's just running it once for now.
29
+ once?: boolean;
30
+
31
+ // Not set by default, so we can setup the configuration before deploying it (or so we can undeploy easily without deleting it)
32
+ deploy?: boolean;
33
+
34
+
35
+ // TODO:
36
+ //rollingWindow?: number
37
+ };
38
+ info: {
39
+ title: string;
40
+ notes: string;
41
+ lastUpdatedTime: number;
42
+ };
43
+ };
44
+ export type MachineConfig = {
45
+ machineId: string;
46
+
47
+ services: Record<string, ServiceConfig>;
48
+ };
49
+ export type MachineInfo = {
50
+ machineId: string;
51
+
52
+ // Used to tell the apply tool to update its configs now
53
+ applyNodeId: string;
54
+
55
+ heartbeat: number;
56
+ /*
57
+ // TODO: ShowMore on each of the infos, so large ones are fine.
58
+ hostnamectl (fallback to hostname)
59
+ getExternalIP()
60
+ lscpu
61
+ id (fallback to whoami)
62
+ */
63
+ info: Record<string, string>;
64
+
65
+ services: Record<string, {
66
+ lastLaunchedTime: number;
67
+ errorFromLastRun: string;
68
+ // Only times launched for the current applyNodeId, but... still very useful.
69
+ totalTimesLaunched: number;
70
+ }>;
71
+ };
72
+
73
+ const machineConfigs = archiveJSONT<MachineConfig>(() => nestArchives("deploy/machine-configs/", getArchivesBackblaze(getDomain())));
74
+ const machineInfos = archiveJSONT<MachineInfo>(() => nestArchives("deploy/machine-heartbeats/", getArchivesBackblaze(getDomain())));
75
+
76
+
77
+
78
+ // Only works if our self id has been registered as applyNodeId (with setMachineInfo).
79
+ let serviceConfigChangeWatchers = new Set<() => Promise<void>>();
80
+
81
+ // NOTE: The callback should block until the changes are applied (possibly throwing). This way the change results can be deployed and when the changer function finishes it will automatically reload them.
82
+ export function onServiceConfigChange(callback: () => Promise<void>): () => void {
83
+ serviceConfigChangeWatchers.add(callback);
84
+ return () => {
85
+ serviceConfigChangeWatchers.delete(callback);
86
+ };
87
+ }
88
+ class OnServiceChangeBase {
89
+ public async onServiceConfigChange() {
90
+ for (let callback of serviceConfigChangeWatchers) {
91
+ await callback();
92
+ }
93
+ }
94
+ }
95
+ const OnServiceChange = SocketFunction.register(
96
+ "on-service-change-aa6b4aaa-c325-4112-b2a8-f81c180016a0",
97
+ () => new OnServiceChangeBase(),
98
+ () => ({
99
+ onServiceConfigChange: {},
100
+ }),
101
+ () => ({
102
+ hooks: [requiresNetworkTrustHook],
103
+ }),
104
+ );
105
+
106
+
107
+ export class DeployControllerBase {
108
+
109
+ public async getAllInfo(): Promise<{ configs: MachineConfig[], infos: MachineInfo[] }> {
110
+ let configs = await machineConfigs.values();
111
+ let infos = await machineInfos.values();
112
+ return { configs, infos };
113
+ }
114
+ public async getMachineInfo(machineId: string): Promise<MachineInfo> {
115
+ let info = await machineInfos.get(machineId);
116
+ if (!info) throw new Error(`MachineInfo not found for ${machineId}`);
117
+ return info;
118
+ }
119
+ public async getMachineConfig(machineId: string): Promise<MachineConfig> {
120
+ let config = await machineConfigs.get(machineId);
121
+ if (!config) throw new Error(`MachineConfig not found for ${machineId}`);
122
+ return config;
123
+ }
124
+ public async setMachineInfo(machineId: string, info: MachineInfo) {
125
+ await machineInfos.set(machineId, info);
126
+ }
127
+ public async setServiceConfig(machineId: string, serviceId: string, config: ServiceConfig | "remove") {
128
+ let machineConfig = await machineConfigs.get(machineId);
129
+ if (!machineConfig) throw new Error(`Machine not found for ${machineId}`);
130
+ if (config === "remove") {
131
+ delete machineConfig.services[serviceId];
132
+ } else {
133
+ machineConfig.services[serviceId] = config;
134
+ }
135
+ await machineConfigs.set(machineId, machineConfig);
136
+ let machineInfo = await machineInfos.get(machineId);
137
+ if (!machineInfo) throw new Error(`MachineInfo not found for ${machineId}`);
138
+ await OnServiceChange.nodes[machineInfo.applyNodeId].onServiceConfigChange();
139
+ }
140
+ }
141
+
142
+
143
+ export const DeployController = getSyncedController(
144
+ SocketFunction.register(
145
+ "deploy-eda94f05-5e4d-4f5a-b1c1-98613fba60b8",
146
+ () => new DeployControllerBase(),
147
+ () => ({
148
+ getAllInfo: {},
149
+ getMachineInfo: {},
150
+ getMachineConfig: {},
151
+ setMachineInfo: {},
152
+ setServiceConfig: {},
153
+ }),
154
+ () => ({
155
+ hooks: [assertIsManagementUser],
156
+ }),
157
+ ),
158
+ {
159
+ writes: {
160
+ setMachineInfo: ["MachineInfo"],
161
+ // NOTE: Also changes the MachineInfo, but telling the machine to redeploy, which should cause the machine info (the service launched times or errors) to update!
162
+ setServiceConfig: ["MachineConfig", "MachineInfo"],
163
+ },
164
+ reads: {
165
+ getAllInfo: ["MachineConfig", "MachineInfo"],
166
+ getMachineInfo: ["MachineInfo"],
167
+ getMachineConfig: ["MachineConfig"],
168
+ }
169
+ }
170
+ );
@@ -0,0 +1,57 @@
1
+ import { measureWrap } from "socket-function/src/profiling/measure";
2
+ import { getOwnMachineId } from "../-a-auth/certs";
3
+ import { getOurNodeId, getOurNodeIdAssert } from "../-f-node-discovery/NodeDiscovery";
4
+ import { Querysub } from "../4-querysub/QuerysubController";
5
+ import { DeployControllerBase, MachineInfo } from "./deploySchema";
6
+ import { runPromise } from "../functional/runCommand";
7
+ import { getExternalIP } from "socket-function/src/networking";
8
+ import { errorToUndefined, errorToUndefinedSilent } from "../errors";
9
+ import { getDomain } from "../config";
10
+ import { formatTime } from "socket-function/src/formatting/format";
11
+ import { sort } from "socket-function/src/misc";
12
+ import { isDefined } from "../misc";
13
+ import { logLoadTime } from "../logModuleLoadTimes";
14
+
15
+
16
+
17
+ const getLiveMachineInfo = measureWrap(async function getLiveMachineInfo() {
18
+ let machineInfo: MachineInfo = {
19
+ machineId: getOwnMachineId(),
20
+ applyNodeId: getOurNodeIdAssert(),
21
+ heartbeat: Date.now(),
22
+ info: {},
23
+ services: {},
24
+ };
25
+
26
+ machineInfo.info.hostnamectl = await errorToUndefinedSilent(runPromise("hostnamectl")) || "";
27
+ machineInfo.info.getExternalIP = await errorToUndefinedSilent(getExternalIP()) || "";
28
+ machineInfo.info.lscpu = await errorToUndefinedSilent(runPromise("lscpu")) || "";
29
+ machineInfo.info.id = await errorToUndefinedSilent(runPromise("id")) || await errorToUndefinedSilent(runPromise("whoami")) || "";
30
+
31
+ // TODO: Populate services via checking tmux for special keywords ("-dply", probably...)
32
+
33
+ return machineInfo;
34
+ });
35
+ const updateMachineInfo = measureWrap(async function updateMachineInfo() {
36
+ let machineInfo = await getLiveMachineInfo();
37
+ console.log("updateMachineInfo", machineInfo);
38
+ await new DeployControllerBase().setMachineInfo(machineInfo.machineId, machineInfo);
39
+ });
40
+
41
+ //todonext
42
+ // The child process will be run with shell, and then we'll watch it (don't use runPromise)
43
+
44
+ function getTotalTime(module: NodeJS.Module, now: number) {
45
+ return (module.evalEndTime || now) - (module.evalStartTime || now);
46
+ }
47
+
48
+
49
+ export async function machineApplyMain() {
50
+ console.log(getDomain());
51
+ process.exit();
52
+ // await Querysub.hostService("machine-apply");
53
+
54
+ // await updateMachineInfo();
55
+ }
56
+
57
+ machineApplyMain().catch(console.error);
@@ -0,0 +1,79 @@
1
+ 5) synced controller loading indicator
2
+ - Probably which watches all controllers (on all nodes), for any pending, shown globally
3
+ 6) Validate that our loading indicator works, as well as our notifications on changed configuration, etc
4
+ - Test with errors, repeated crashes, etc.
5
+
6
+ 2) Web interface to setup configuration
7
+ - Needs to be able to run with NO PathValueServer, etc, just running locally
8
+ - I think we have a flag to recovery which allows anyone on localhost to be a superuser. Test this, as we might need it...
9
+ - A regular management page
10
+ - Shows servers
11
+ - Shows services
12
+ - Allows changing services (configuration, adding, removing, etc)
13
+ - Both writes to the backblaze file AND explicitly tells each service
14
+ - Use an editor which allows directly editing it as text?
15
+ - Monaco?
16
+ - BUT, which applies a type to it (I think monaco supports this, like with tsconfig.json).
17
+ - Copy the repo from the last changed service config
18
+ - Gives command for our setup script, in case we forget it (basically just instructions)
19
+ 3) Configuration apply utility
20
+ - registers with actual machineId, from getOwnMachineId (which doesn't require any permissions, however writing it to backblaze does...)
21
+ - tmux
22
+ - Work on windows as well? For testing?
23
+ - heartbeat machine every once in a while
24
+ - Updating the info as well
25
+ - Every 15 minutes?
26
+ - listener so we can be told to update our configs now
27
+ - otherwise poll every 5 minutes
28
+
29
+ 4) Run apply utility on windows, and test it in the UI, to get the UI working
30
+
31
+
32
+ 4) Always up wrapper
33
+ - Reruns apply utility when it crashes
34
+ - And error log forwarding support, so crashes get logged by the next apply script to run, by writing apply utility crashes to a special file the apply utility reads on startup
35
+ 4.1) Requisition script
36
+ - I guess a nodejs script, with runPromise it should be easy...
37
+ - Given remote IP, SSHes in, copies backblaze config from current machine, clones repo, setups crontab for always up wrapper, and then runs it
38
+ - OH, also installs nodejs, yarn, etc
39
+ - Maybe ansible, although... probably not. Especially because we can just get the AI to write this script anyway. We'll probably just have it copy over a bash script and then run that.
40
+ git
41
+ nodejs
42
+ yarn
43
+ add git permissions
44
+ - Ugh... we need to use github API for this
45
+ clones
46
+ copies backblaze.json
47
+ copy startup.sh
48
+ setup crontab to startup.sh
49
+ run startup.sh
50
+ - We might need to have it use to github API to give the remote machine access to the repo?
51
+ - Test on a new server, that we run temporarily with some test scripts
52
+
53
+ 4.1) Requisition script in "bin", as a .js bootstrapper
54
+ - Verify it works, so we can do
55
+
56
+ 5) Setup on our regular digital ocean server
57
+ - Remove previous startup script and kill existing tmux services
58
+ 6) Verify crash logging works with error notifications (it works if we just apply our console logs shims)
59
+ 7) Quick node removal on process crash or removal
60
+ Detect the nodeId of services (if they have one), and when the service dies, immediately remove "edgenodes/" file, and trigger an update of "edge-nodes-index.json"
61
+ 8) Fix deploy user notification issue, where the refresh button doesn't work?
62
+ 9) Rolling updates
63
+ - Written to service definition
64
+ - Keep previous service alive while we update
65
+ - If we update within that window... keep the oldest one alive, not the newest
66
+ - Notify server, so it can shutdown nicely
67
+ - We have a shutdown process, all servers should run that, for PathValueServers properly flush to disk
68
+ - In HTTP server, notify users, in the same way we notify for hash updates, that they will need to switch servers
69
+ - Also, of course, take ourself out of the HTTP pool
70
+ - Maybe add this to the shutdown process?
71
+ - Disable infinite pollers, and wait for any outstanding to finish
72
+ - Maybe add this to the shutdown process?
73
+
74
+ 10) Reduce the heartbeat interval in NodeDiscovery to every 15 minutes... because our quick node removal should get rid of any latency with removing nodes
75
+ - Test it with adding / removing nodes (on the server)
76
+ - Also, make a huge amount of dead nodes locally, and make sure nothing breaks. It really shouldn't, because we should be checking the nodes all at once, and if a node never responds that's fine... right? (It's just a refused connection, and we don't try again for awhile, and we give up after some time).
77
+ - This should massively reduce our backblaze costs, as our current interval has probably cost us over 100 USD since we wrote it, for no benefit...
78
+
79
+
@@ -127,6 +127,11 @@ export async function registerManagementPages2(config: {
127
127
  componentName: "RequireAuditPage",
128
128
  getModule: () => import("./misc-pages/RequireAuditPage"),
129
129
  });
130
+ inputPages.push({
131
+ title: "Deploy",
132
+ componentName: "DeployPage",
133
+ getModule: () => import("../deployManager/DeployPage"),
134
+ });
130
135
  inputPages.push(...config.pages);
131
136
 
132
137
  // NOTE: We don't store the UI in the database (here, or anywhere else, at least
@@ -1,6 +1,61 @@
1
1
  import child_process from "child_process";
2
2
  import path from "path";
3
+ import { red } from "socket-function/src/formatting/logColors";
3
4
 
5
+ export const runAsync = runPromise;
6
+ export async function runPromise(command: string, config?: { cwd?: string; quiet?: boolean; }) {
7
+ return new Promise<string>((resolve, reject) => {
8
+ const childProc = child_process.spawn(command, {
9
+ shell: true,
10
+ cwd: config?.cwd,
11
+ stdio: ["inherit", "pipe", "pipe"], // stdin: inherit, stdout: pipe, stderr: pipe
12
+ });
13
+
14
+ let stdout = "";
15
+ let stderr = "";
16
+
17
+ // Always collect output
18
+ childProc.stdout?.on("data", (data) => {
19
+ const chunk = data.toString();
20
+ stdout += chunk;
21
+
22
+ // Stream to console unless quiet mode
23
+ if (!config?.quiet) {
24
+ process.stdout.write(chunk);
25
+ }
26
+ });
27
+
28
+ childProc.stderr?.on("data", (data) => {
29
+ const chunk = data.toString();
30
+ stderr += chunk;
31
+ stdout += chunk;
32
+
33
+ // Stream to console unless quiet mode
34
+ if (!config?.quiet) {
35
+ process.stderr.write(red(chunk));
36
+ }
37
+ });
38
+
39
+ childProc.on("error", (err) => {
40
+ reject(err);
41
+ });
42
+
43
+ childProc.on("close", (code) => {
44
+ if (code === 0) {
45
+ resolve(stdout);
46
+ } else {
47
+ let errorMessage = `Process exited with code ${code} for command: ${command}`;
48
+ if (stderr) {
49
+ errorMessage += `\n${stderr}`;
50
+ }
51
+ const error = new Error(errorMessage);
52
+ reject(error);
53
+ }
54
+ });
55
+ });
56
+ }
57
+
58
+ /** @deprecated, use runPromise */
4
59
  export async function runCommand(config: {
5
60
  exe: string;
6
61
  args: string[];
@@ -27,6 +82,7 @@ export async function runCommand(config: {
27
82
  });
28
83
  }
29
84
 
85
+ /** @deprecated, use runPromise */
30
86
  export function runCommandShell(command: string, config?: { cwd?: string; maxBuffer?: number }) {
31
87
  return new Promise<string>((resolve, reject) => {
32
88
  child_process.exec(command, {
@@ -29,7 +29,11 @@ export type ATagProps = (
29
29
  rawLink?: boolean;
30
30
  lightMode?: boolean;
31
31
  noStyles?: boolean;
32
+
32
33
  onRef?: (element: HTMLAnchorElement | null) => void;
34
+
35
+ // On click, do this. Independent of URL behavior (which will still run if they middle click, or copy the link, etc)
36
+ clickOverride?: () => void;
33
37
  }
34
38
  );
35
39
 
@@ -74,6 +78,12 @@ export class ATag extends qreact.Component<ATagProps> {
74
78
  onClick={e => {
75
79
  if (this.props.rawLink) return;
76
80
  if (e.button !== 0) return;
81
+ if (this.props.clickOverride) {
82
+ e.preventDefault();
83
+ e.stopPropagation();
84
+ this.props.clickOverride();
85
+ return;
86
+ }
77
87
  if (this.props.target !== "_blank") {
78
88
  e.preventDefault();
79
89
  let resolvedValues = typeof values === "function" ? values() : values;
@@ -3,6 +3,7 @@ import { isNode, list, nextId } from "socket-function/src/misc";
3
3
  import { css } from "typesafecss";
4
4
  import { Querysub } from "../4-querysub/QuerysubController";
5
5
  import { qreact } from "../4-dom/qreact";
6
+ import { isShowingModal } from "../5-diagnostics/Modal";
6
7
 
7
8
  export type ButtonProps = (
8
9
  preact.JSX.HTMLAttributes<HTMLButtonElement>
@@ -81,6 +82,7 @@ if (!isNode()) {
81
82
  let insideAnims = new Set<Watcher>();
82
83
  let keyUpListener = new Set<() => void>();
83
84
  export function triggerKeyDown(e: KeyboardEvent, forceAmbient = true) {
85
+ if (Querysub.localRead(() => isShowingModal())) return;
84
86
  let isAmbientEvent = (
85
87
  e.target === document.body
86
88
  // Some elements are commonly selected, but shouldn't handling key events
@@ -1,10 +1,11 @@
1
1
  import { atomic, atomicObjectWrite, atomicObjectWriteNoFreeze, doAtomicWrites, proxyWatcher } from "../../src/2-proxy/PathValueProxyWatcher";
2
2
  import { Querysub } from "../../src/4-querysub/Querysub";
3
3
  import { SocketFunction } from "socket-function/SocketFunction";
4
- import { SocketRegistered } from "socket-function/SocketFunctionTypes";
4
+ import { FullCallType, SocketRegistered } from "socket-function/SocketFunctionTypes";
5
5
  import { onHotReload } from "socket-function/hot/HotReloadController";
6
6
  import { cache } from "socket-function/src/caching";
7
7
  import { nextId } from "socket-function/src/misc";
8
+ import { getCallObj } from "socket-function/src/nodeProxy";
8
9
  import { MaybePromise } from "socket-function/src/types";
9
10
 
10
11
  // IMPORTANT! See cacheAsyncSynced if you just want to run promise functions
@@ -37,6 +38,7 @@ let syncedData = Querysub.createLocalSchema<{
37
38
  [argsHash: string]: {
38
39
  promise: Promise<unknown> | undefined;
39
40
  result?: { result: unknown } | { error: Error };
41
+ invalidated?: boolean;
40
42
  }
41
43
  }
42
44
  }
@@ -46,20 +48,58 @@ let syncedData = Querysub.createLocalSchema<{
46
48
  type RemapFunction<T> = T extends (...args: infer Args) => Promise<infer Return>
47
49
  ? {
48
50
  (...args: Args): Return | undefined;
51
+ promise(...args: Args): Promise<Return>;
49
52
  reset(...args: Args): void;
50
53
  resetAll(): void;
54
+
55
+ refresh(...args: Args): void;
56
+ refreshAll(): void;
57
+ isAnyLoading(): boolean;
58
+ setCache(config: { args: Args, result: Return }): void;
51
59
  }
52
60
  : T;
53
- export function getSyncedController<T extends SocketRegistered>(controller: T): {
61
+
62
+ // key =>
63
+ const writeWatchers = new Map<string, {
64
+ controllerId: string;
65
+ fncName: string;
66
+ }[]>();
67
+
68
+ export function getSyncedController<T extends SocketRegistered>(
69
+ controller: T,
70
+ config?: {
71
+ /** When a controller call for a write finishes, we refresh all readers.
72
+ * - Invalidation is global, across all controllers.
73
+ */
74
+ reads?: { [key in keyof T["nodes"][""]]?: string[]; };
75
+ writes?: { [key in keyof T["nodes"][""]]?: string[]; };
76
+ }
77
+ ): {
54
78
  (nodeId: string): {
55
79
  [fnc in keyof T["nodes"][""]]: RemapFunction<T["nodes"][""][fnc]>;
56
80
  } & {
57
81
  resetAll(): void;
82
+ refreshAll(): void;
83
+ isAnyLoading(): boolean;
58
84
  };
59
85
  resetAll(): void;
86
+ refreshAll(): void;
87
+ isAnyLoading(): boolean;
60
88
  } {
61
89
  let id = nextId();
62
90
  controllerIds.add(id);
91
+
92
+ for (let [fncName, keys] of Object.entries(config?.reads ?? {})) {
93
+ for (let key of keys || []) {
94
+ let watcherList = writeWatchers.get(key);
95
+ if (!watcherList) {
96
+ watcherList = [];
97
+ writeWatchers.set(key, watcherList);
98
+ }
99
+ watcherList.push({ controllerId: id, fncName });
100
+ }
101
+ }
102
+
63
103
  let result = cache((nodeId: string) => {
64
104
  SocketFunction.onNextDisconnect(nodeId, () => {
65
105
  Querysub.commitLocal(() => {
@@ -91,36 +131,76 @@ export function getSyncedController<T extends SocketRegistered>(controller: T):
91
131
  });
92
132
  };
93
133
  }
134
+ if (fncNameUntyped === "refreshAll") {
135
+ return () => {
136
+ return Querysub.commitLocal(() => {
137
+ for (let fnc in syncedData()[id][nodeId]) {
138
+ for (let argsHash in syncedData()[id][nodeId][fnc]) {
139
+ syncedData()[id][nodeId][fnc][argsHash].invalidated = true;
140
+ }
141
+ }
142
+ });
143
+ };
144
+ }
145
+ if (fncNameUntyped === "isAnyLoading") {
146
+ return () => {
147
+ return Querysub.commitLocal(() => {
148
+ for (let fnc in syncedData()[id][nodeId]) {
149
+ for (let argsHash in syncedData()[id][nodeId][fnc]) {
150
+ if (atomic(syncedData()[id][nodeId][fnc][argsHash].promise)) {
151
+ return true;
152
+ }
153
+ }
154
+ }
155
+ });
156
+ };
157
+ }
94
158
  let fncName = fncNameUntyped;
95
159
  function call(...args: any[]) {
96
160
  return Querysub.commitLocal(() => {
97
161
  let argsHash = JSON.stringify(args);
98
162
  let obj = syncedData()[id][nodeId][fncName][argsHash];
99
163
 
164
+ let result = atomic(obj.result);
100
165
  let promise = atomic(obj.promise);
101
- if (!promise) {
102
- //console.log("calling", fncName, args);
103
- promise = controller.nodes[nodeId][fncName](...args) as Promise<unknown>;
166
+
167
+ // NOTE: If we are invalidated when the promise is running, nothing happens (as we don't watch invalidated if we are running with a promise). BUT, if the promise isn't running, we will run again, and start running again. In this way we don't queue up a lot if we invalidate a lot, but we do always run again after the invalidation to get the latest result!
168
+ if (!promise && (!result || atomic(obj.invalidated))) {
169
+ let promise = controller.nodes[nodeId][fncName](...args) as Promise<unknown>;
104
170
  doAtomicWrites(() => {
105
171
  obj.promise = promise;
106
172
  });
107
173
  promise.then(
108
174
  result => {
109
- //console.log("result", result);
110
175
  Querysub.commitLocal(() => {
111
176
  obj.result = atomicObjectWriteNoFreeze({ result });
177
+ obj.promise = undefined;
178
+
179
+ let root = syncedData();
180
+ for (let writesTo of config?.writes?.[fncName] || []) {
181
+ for (let watcher of writeWatchers.get(writesTo) || []) {
182
+ for (let nodeId in root[watcher.controllerId]) {
183
+ for (let fnc in root[watcher.controllerId][nodeId]) {
184
+ for (let argsHash in root[watcher.controllerId][nodeId][fnc]) {
185
+ let obj = root[watcher.controllerId][nodeId][fnc][argsHash];
186
+ obj.invalidated = true;
187
+ }
188
+ }
189
+ }
190
+ }
191
+ }
112
192
  });
113
193
  },
114
194
  error => {
115
- //console.log("error", error);
116
195
  Querysub.commitLocal(() => {
117
196
  obj.result = atomicObjectWriteNoFreeze({ error });
197
+ obj.promise = undefined;
118
198
  });
119
199
  }
120
200
  );
121
201
  }
122
202
 
123
- let result = atomic(obj.result);
203
+
124
204
  if (result) {
125
205
  if ("error" in result) {
126
206
  throw result.error;
@@ -131,6 +211,16 @@ export function getSyncedController<T extends SocketRegistered>(controller: T):
131
211
  return undefined;
132
212
  });
133
213
  }
214
+ call.promise = (...args: any[]) => {
215
+ call(...args);
216
+ let argsHash = JSON.stringify(args);
217
+ let promise = atomic(syncedData()[id][nodeId][fncName][argsHash].promise);
218
+ if (!promise) {
219
+ debugger;
220
+ throw new Error(`Impossible, called function, but promise is not found for ${fncName}`);
221
+ }
222
+ return promise;
223
+ };
134
224
  call.reset = (...args: any[]) => {
135
225
  return Querysub.commitLocal(() => {
136
226
  let argsHash = JSON.stringify(args);
@@ -148,7 +238,36 @@ export function getSyncedController<T extends SocketRegistered>(controller: T):
148
238
  delete syncedData()[id][nodeId][fncName];
149
239
  });
150
240
  };
151
-
241
+ call.refresh = (...args: any[]) => {
242
+ return Querysub.commitLocal(() => {
243
+ let argsHash = JSON.stringify(args);
244
+ let obj = syncedData()[id][nodeId][fncName][argsHash];
245
+ obj.invalidated = true;
246
+ });
247
+ };
248
+ call.refreshAll = () => {
249
+ return Querysub.commitLocal(() => {
250
+ for (let argsHash in syncedData()[id][nodeId][fncName]) {
251
+ syncedData()[id][nodeId][fncName][argsHash].invalidated = true;
252
+ }
253
+ });
254
+ };
255
+ call.isAnyLoading = () => {
256
+ return Querysub.commitLocal(() => {
257
+ for (let argsHash in syncedData()[id][nodeId][fncName]) {
258
+ if (atomic(syncedData()[id][nodeId][fncName][argsHash].promise)) {
259
+ return true;
260
+ }
261
+ }
262
+ });
263
+ };
264
+ call.setCache = (config: { args: any[], result: any }) => {
265
+ return Querysub.commitLocal(() => {
266
+ let argsHash = JSON.stringify(config.args);
267
+ syncedData()[id][nodeId][fncName][argsHash].promise = undefined;
268
+ syncedData()[id][nodeId][fncName][argsHash].result = atomicObjectWriteNoFreeze({ result: config.result });
269
+ });
270
+ };
152
271
  return call;
153
272
  },
154
273
  });
@@ -167,5 +286,29 @@ export function getSyncedController<T extends SocketRegistered>(controller: T):
167
286
  }
168
287
  });
169
288
  };
289
+ result.refreshAll = () => {
290
+ return Querysub.commitLocal(() => {
291
+ for (let nodeId in syncedData()[id]) {
292
+ for (let fnc in syncedData()[id][nodeId]) {
293
+ for (let argsHash in syncedData()[id][nodeId][fnc]) {
294
+ syncedData()[id][nodeId][fnc][argsHash].invalidated = true;
295
+ }
296
+ }
297
+ }
298
+ });
299
+ };
300
+ result.isAnyLoading = () => {
301
+ return Querysub.commitLocal(() => {
302
+ for (let nodeId in syncedData()[id]) {
303
+ for (let fnc in syncedData()[id][nodeId]) {
304
+ for (let argsHash in syncedData()[id][nodeId][fnc]) {
305
+ if (atomic(syncedData()[id][nodeId][fnc][argsHash].promise)) {
306
+ return true;
307
+ }
308
+ }
309
+ }
310
+ }
311
+ });
312
+ };
170
313
  return result;
171
314
  }
@@ -0,0 +1,45 @@
1
+ import { sort } from "socket-function/src/misc";
2
+ import { isDefined } from "./misc";
3
+ import { formatTime } from "socket-function/src/formatting/format";
4
+
5
+ export function logLoadTime() {
6
+ let allModules = Object.values(require.cache).filter(isDefined);
7
+ let now = Date.now();
8
+ sort(allModules, x => x?.evalStartTime || now);
9
+ let timePerModule = new Map<NodeJS.Module, number>();
10
+ let moduleStack: NodeJS.Module[] = [];
11
+ let lastTime = allModules[0].evalStartTime || now;
12
+ function emitAtTime(time: number) {
13
+ for (let i = moduleStack.length - 1; i >= 0; i--) {
14
+ let module = moduleStack[i];
15
+ let endTime = module.evalEndTime || now;
16
+ let prevTime = timePerModule.get(module) || 0;
17
+ if (endTime < time) {
18
+ // If it ends, pop it, and account for the partial time
19
+ moduleStack.pop();
20
+ let duration = endTime - lastTime;
21
+ prevTime += duration;
22
+ lastTime = endTime;
23
+ timePerModule.set(module, prevTime);
24
+ } else {
25
+ // Account for the full time
26
+ let duration = time - lastTime;
27
+ prevTime += duration;
28
+ timePerModule.set(module, prevTime);
29
+ break;
30
+ }
31
+ }
32
+ lastTime = time;
33
+ }
34
+ for (let module of allModules) {
35
+ emitAtTime(module.evalStartTime || now);
36
+ moduleStack.push(module);
37
+ }
38
+ emitAtTime(now);
39
+
40
+ let flat = Array.from(timePerModule);
41
+ sort(flat, x => -x[1]);
42
+ for (let [module, time] of flat.slice(0, 100).reverse()) {
43
+ console.log(`${formatTime(time)} ${module.filename}`);
44
+ }
45
+ }