querysub 0.233.0 → 0.235.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "querysub",
3
- "version": "0.233.0",
3
+ "version": "0.235.0",
4
4
  "main": "index.js",
5
5
  "license": "MIT",
6
6
  "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",
@@ -22,7 +22,7 @@
22
22
  "js-sha512": "^0.9.0",
23
23
  "node-forge": "https://github.com/sliftist/forge#e618181b469b07bdc70b968b0391beb8ef5fecd6",
24
24
  "pako": "^2.1.0",
25
- "socket-function": "^0.132.0",
25
+ "socket-function": "^0.133.0",
26
26
  "terser": "^5.31.0",
27
27
  "typesafecss": "^0.22.0",
28
28
  "yaml": "^2.5.0",
@@ -1,35 +1,52 @@
1
- 7) More graceful shutdown of services
2
- - In some way communicate that we should shut down?
3
- - ACTUALLY, we just ctrl+c, which... we might catch right now?
4
- - So we might need to use ctrl+c multiple times, and callback to kill the service?
5
-
1
+ todonext
2
+ Hmm... so... a function that returns... all infinite polls, and states for them, and... a function which can disable infinite polls. Yep...
3
+ - Oh, and in socket-function, so we have to publish the changes...
4
+ Then test it in test.ts
6
5
 
7
6
  7.1) Better infinite poll support in shutdown
8
7
  - Stop all loops
9
8
  - If any are running, wait (with timeout, same as with regular handlers), for them to finish
10
9
 
11
- 4) Kill our testing server
10
+ 4) Destroy our testing digital ocean server
12
11
 
13
12
  5) Setup on our regular digital ocean server
14
13
  - Remove previous startup.sh, and crontab and kill existing tmux services
15
14
  - Setup all the services in the new UI
16
15
  - Copy from the previous startup.sh, running the same services
17
16
  - Changing the UI if anything is extremely annoying, but... I don't see how it would be...
17
+ tmux send-keys -t server1 "cd ~/cyoa && yarn server-public" Enter
18
+ tmux send-keys -t server2 "cd ~/cyoa && yarn server-public" Enter
19
+ tmux send-keys -t fnc "cd ~/cyoa && yarn function-public --verbosecalls" Enter
20
+ tmux send-keys -t http "cd ~/cyoa && yarn cyoa-public --verbosecalls" Enter
21
+ tmux send-keys -t watch "cd ~/cyoa && yarn gc-watch-public" Enter
22
+ tmux send-keys -t join "cd ~/cyoa && yarn join-public" Enter
18
23
  5) Verify the editor works
19
24
 
20
25
  6) Verify PathValueServer gracefully shuts down, without losing any values (because it delays and flushes writes before shutting down, detecting the ctrl+c).
21
26
 
22
27
 
23
- 7) Quick node removal on process crash or removal
24
- Detect the nodeId of services (if they have one), and when the service dies, immediately remove "edgenodes/" file, and trigger an update of "edge-nodes-index.json"
25
- - How? Damn it, I have no idea. Maybe... they can determine their screen from their pid and then write to their screen folder? That seems... the best way, even though it's extremely coupling.
28
+ 7) Quick node removal on process crash OR removal
29
+ - In the service, check our parent folder to see if we are in a screen (/machine-services/git/), and then write our nodeId to /machine-services/nodeId
30
+ - If we see a nodeId when we are removing a screen, or killing the service, then delete that nodeId from the nodeId directory (and call tellEveryoneNodesChanges)
26
31
  7.1) Verify this by killing a lot of services (the function runner?), by just poking it over and over, verifying the nodes are quickly deleted
27
32
 
28
33
 
29
34
 
30
35
  8) REIMPLEMENT yarn do-update functionality, with UI on the configuration page
36
+ OH, maybe... buttons for querysub, button for the main site, and then buttons on each service to update them!
37
+ YES, and then, a button to do all of that on each service, and a button to do that on each service.
38
+ - So a lot of buttons. But only 2 components, one for all, and one for a specific service.
39
+ - Endpoints
40
+ - anyQuerysubUnsaved
41
+ - anyAppUnsaved
42
+ - querysubLatestHash
43
+ - appLatestHash (already have this)
44
+ - publish, add, commit, and push querysub (and update app package.json reference)
45
+ - add, commit and push app
46
+
31
47
  - On services list page, and individual service page
32
48
  - So... maybe the button a component
49
+ - MULTIPLE buttons, to just update the main site, just querysub, or both
33
50
  - Button to update on each service where the repoUrl === the repoUrl of the server
34
51
  - Only from a non-public server
35
52
  - ALSO, button to update all (that match repoUrl)
@@ -1,4 +1,4 @@
1
- import { delay, runInfinitePoll, runInfinitePollCallAtStart } from "socket-function/src/batching";
1
+ import { delay, runInfinitePoll, runInfinitePollCallAtStart, shutdownPolling } from "socket-function/src/batching";
2
2
  import { isNode, timeInMinute } from "socket-function/src/misc";
3
3
  import { logErrors, timeoutToError } from "../errors";
4
4
  import debugbreak from "debugbreak";
@@ -37,31 +37,29 @@ export async function shutdown() {
37
37
  console.log(red("Starting shutdown"));
38
38
  shuttingDown = true;
39
39
  const { authorityStorage } = await import("../0-path-value-core/pathValueCore");
40
- for (let fnc of preshutdownHandlers) {
41
- try {
42
- await timeoutToError(timeInMinute, fnc(), () => new Error(`Preshutdown handler ${fnc.name} timed out`));
43
- } catch (e) {
44
- console.log(`Error on preshutdown handler ${fnc.name}`, e);
45
- }
40
+ try {
41
+ await Promise.allSettled([
42
+ ...preshutdownHandlers,
43
+ ].map(fnc => timeoutToError(timeInMinute, fnc(), () => new Error(`Preshutdown handler ${fnc.name} timed out`))));
44
+ } catch (e) {
45
+ console.log(`Error on preshutdown handlers`, e);
46
46
  }
47
47
  try {
48
- await authorityStorage.onShutdown();
49
- await nodeDiscoveryShutdown();
48
+ await Promise.allSettled([
49
+ function authorityStorageShutdown() { return authorityStorage.onShutdown(); },
50
+ nodeDiscoveryShutdown,
51
+ shutdownPolling,
52
+ ...shutdownHandlers,
53
+ ].map(fnc => timeoutToError(timeInMinute, fnc(), () => new Error(`Shutdown handler ${fnc.name} timed out`))));
50
54
  } catch (e) {
51
55
  console.log("Error on shutdown", e);
52
56
  }
53
- for (let fnc of shutdownHandlers) {
54
- try {
55
- await timeoutToError(timeInMinute, fnc(), () => new Error(`Shutdown handler ${fnc.name} timed out`));
56
- } catch (e) {
57
- console.log(`Error on shutdown handler ${fnc.name}`, e);
58
- }
59
- }
60
57
  // Wait to allow any logged errors to hopefully be written somewhere?
61
58
  await delay(2000);
62
59
  process.exit();
63
60
  }
64
61
 
62
+ // IMPORTANT! Yarn detaches the processes, so they keep running when you ctrl+c, even though the shell shows back up. We can fix this by using `node -r ./node_modules/typenode/index.js ./test.ts`. However, it's probably fine, as we still run the shutdown code, it's just that the manager doesn't know if we've shutdown or not.
65
63
  if (isNode()) {
66
64
  let lineBuffer = "";
67
65
  process.stdin.on("data", data => {