querysub 0.293.0 → 0.295.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "querysub",
-  "version": "0.293.0",
+  "version": "0.295.0",
   "main": "index.js",
   "license": "MIT",
   "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",
@@ -66,7 +66,8 @@ function onLiveHashChange(liveHash: string, refreshThresholdTime: number) {
 if (liveHash === curHash) return;
 let prevHash = curHash;
 // Don't notify the user right away. Hopefully they refresh naturally, and we never have to notify them at all!
-let notifyIntervals = [0.4, 0.75, 1];
+// Also, refresh BEFORE the server dies, not exactly when it is about to die
+let notifyIntervals = [0.4, 0.75, 0.95];
 console.log(blue(`Client liveHash changed ${liveHash}, prev hash: ${prevHash}`));
 // If we are replacing an already existing notification, don't show immediately
 let skipFirst = false;
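
The change above caps the last notify fraction at 0.95 so clients are prompted to refresh before the rolling window actually expires, rather than exactly when it does. As a rough illustration only (this is not code from the package), the fractions could be applied to refreshThresholdTime like this, with the notify callback standing in for whatever notification path onLiveHashChange really uses:

// Illustrative sketch, not querysub code: fire a caller-supplied notify
// callback at each fraction of the refresh window, so the final prompt
// lands at 95% of the window instead of exactly when the server dies.
function scheduleRefreshNotifications(
    refreshThresholdTime: number,
    notify: (message: string) => void,
) {
    const notifyIntervals = [0.4, 0.75, 0.95];
    for (const fraction of notifyIntervals) {
        setTimeout(() => {
            notify(`Server restarting soon, please refresh (${Math.round(fraction * 100)}% of window elapsed)`);
        }, refreshThresholdTime * fraction);
    }
}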
@@ -26,6 +26,7 @@ export class MachineDetailPage extends qreact.Component {
 let relevantServiceConfigs = new Map<string, ServiceConfig>();
 for (let serviceId of serviceList) {
     let serviceConfig = controller.getServiceConfig(serviceId);
+    if (!serviceConfig?.parameters.deploy) continue;
     if (serviceConfig && serviceConfig.machineIds.includes(selectedMachineId)) {
         relevantServiceConfigs.set(serviceId, serviceConfig);
     }
@@ -359,9 +359,9 @@ type ServiceConfig = ${serviceConfigType}
 </div>
 {serviceInfo && (
     <>
-        <div title={formatVeryNiceDateTime(serviceInfo.lastLaunchedTime)}>
+        {!!serviceInfo.lastLaunchedTime && <div title={formatVeryNiceDateTime(serviceInfo.lastLaunchedTime)}>
             Uptime: {formatTime(now - serviceInfo.lastLaunchedTime)}
-        </div>
+        </div>}
         <div>
             Launches: {serviceInfo.totalTimesLaunched}
         </div>
@@ -349,6 +349,7 @@ const runScreenCommand = measureWrap(async function runScreenCommand(config: {
 // REMOVE the nodeId file, so we the node isn't terminated!
 await fs.promises.unlink(nodeIdPath);
 try {
+    console.log(green(`Triggering rolling update notification to ${nodeId} for ${screenName} at ${new Date().toLocaleString()}`));
     await triggerRollingUpdate({
         nodeId,
         time: rollingFinalTime,
@@ -361,7 +362,7 @@ const runScreenCommand = measureWrap(async function runScreenCommand(config: {
 console.log(green(`Renaming screen ${screenName} to ${rollingScreenName} for rolling interval ${config.rollingWindow} at ${new Date().toLocaleString()}`));
 await runPromise(`${prefix}tmux rename-session -t ${screenName} ${rollingScreenName}`);
 rollingScreens.set(rollingScreenName, screenName);
-rollingKeepScreenAlive.set(rollingScreenName, {
+rollingKeepScreenAlive.set(screenName, {
     rollingScreenName,
     originalScreenName: screenName,
     pinnedDuration: config.rollingWindow,
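
This hunk also switches the rollingKeepScreenAlive key from rollingScreenName to the original screenName. Based only on the fields visible in this diff (the ones set here plus pinnedTime, which quickIsOutdated reads further down), the map entries appear to look roughly like the following; this is an inferred sketch, not the package's declared type:

// Inferred from the diff: fields set here (rollingScreenName,
// originalScreenName, pinnedDuration) plus pinnedTime, which quickIsOutdated
// reads below. The real declaration in querysub may differ.
interface RollingScreenInfo {
    rollingScreenName: string;   // renamed tmux session kept alive during the window
    originalScreenName: string;  // the session name before renaming
    pinnedTime: number;          // timestamp (ms) when the screen was pinned
    pinnedDuration: number;      // config.rollingWindow, i.e. how long to keep it alive
}

// After this change the map is keyed by the original screen name.
const rollingKeepScreenAlive = new Map<string, RollingScreenInfo>();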
@@ -448,7 +449,16 @@ done`);
 const killScreen = measureWrap(async function killScreen(config: {
     screenName: string;
 }) {
+    console.log(red(`Killing screen ${config.screenName}`));
     let prefix = getTmuxPrefix();
+    // Try ctrl+c a few times first
+    for (let i = 0; i < 5; i++) {
+        if (!await isScreenRunningProcess(config.screenName)) {
+            break;
+        }
+        await runPromise(`${prefix}tmux send-keys -t ${config.screenName} 'C-c' Enter`);
+        await delay(5000);
+    }
     await runPromise(`${prefix}tmux kill-session -t ${config.screenName}`);
     await removeOldNodeId(config.screenName);
 });
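
The new loop gives the running process up to five Ctrl+C presses, spaced five seconds apart, before falling back to tmux kill-session, so a well-behaved service gets roughly 25 seconds to shut down cleanly. The diff does not show isScreenRunningProcess or delay; a self-contained sketch of how such helpers could look (an assumption, not querysub's implementation) is:

import { exec } from "child_process";
import { promisify } from "util";

const execAsync = promisify(exec);

// Assumed helper: resolve after ms milliseconds.
const delay = (ms: number) => new Promise<void>(resolve => setTimeout(resolve, ms));

// Assumed helper: treat the tmux session as "still running a process" when any
// pane's current command is something other than a plain shell.
// '#{pane_current_command}' is a standard tmux format variable.
async function isScreenRunningProcess(screenName: string): Promise<boolean> {
    const { stdout } = await execAsync(
        `tmux list-panes -t ${screenName} -F '#{pane_current_command}'`
    );
    const shells = new Set(["bash", "sh", "zsh", "fish"]);
    return stdout
        .split("\n")
        .map(line => line.trim())
        .some(cmd => cmd.length > 0 && !shells.has(cmd));
}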
@@ -478,6 +488,13 @@ let lastLaunchedTimePerService = new Map<string, number>();
 
 async function quickIsOutdated() {
     let machineId = getOwnMachineId();
+    // If any rolling are ready to restart, we are outdated
+    for (let [rollingScreenName, rollingInfo] of rollingKeepScreenAlive) {
+        if (rollingInfo.pinnedTime + rollingInfo.pinnedDuration < Date.now()) {
+            console.log(red(`Rolling screen ${rollingScreenName} is finished, doing a full resync`));
+            return true;
+        }
+    }
     let configs = await serviceConfigs.values();
     let relevantConfigs = configs.filter(config => config.machineIds.includes(machineId)).filter(x => x.parameters.deploy);
     let screens = await getScreenState();
@@ -499,7 +516,6 @@ async function quickIsOutdated() {
         if (!fs.existsSync(parameterPath)) return true;
         let prevParameters = await fs.promises.readFile(parameterPath, "utf8");
         if (prevParameters !== newParametersString) return true;
-
     }
 }
 return false;
@@ -629,7 +645,10 @@ const resyncServicesBase = runInSerial(measureWrap(async function resyncServices
         rollingKeepScreenAlive.delete(rollingScreenRemainder);
         rollingInfo = undefined;
     }
-    if (rollingInfo) continue;
+    if (rollingInfo) {
+        console.log(green(`Skipping killing rolling screen ${screenName} because it's still alive at ${new Date().toLocaleString()}. Keeping it alive until ${new Date(rollingInfo.pinnedTime + rollingInfo.pinnedDuration).toLocaleString()}`));
+        continue;
+    }
 }
 await killScreen({
     screenName,
@@ -1,26 +1,8 @@
 Hmm... our cyoa servers are dying very fast... why?
 
-9) Rolling service updates
- - Add rollingWindow to the definition
- - Set for public facing services (but not for scripts)
- - PathValueServer - 10 minutes
- - FunctionRunner - CANNOT use rolling
- - HTTP - 4 hours
- - Boostrapper - NO rolling
- - gc/join - NO rolling
- - Show the rolling time in the update buttons (both list and the save button on the details page), so we know it will be a rolling update
- - Keep the oldest service alive when we update
- - Tracked per serviceId
- - Update the rolling time, so if we remove the rolling window we kill the old service immediately
- - Notify when servers are outdated
- - After min(5 minute, 10% of rolling window size)
- - Use the node registration to know the nodeId to talk to, and allow servers to register to get told when they are outdated (and how long before they are shutdown).
- - In HTTP server, notify users, in the same way we notify for hash updates, that they will need to switch servers
- - Verify this update works with a relatively low rolling update window, ensuring it force refreshes before the server actually restarts.
-
-10) Verify we can using a rolling update with CYOA, and that it'll notify users, then refresh (eventually)
- - I guess set the rollingWindow to 10 minutes for testing.
-
+1) Verify clientside notifications work when we rolling update cyoa
+2) Verify edge nodes are removed when we rolling update cyoa
+3) Update cyoa rolling time back to 4 hours
 
 10) Add RAM total, ram % used, cpu count, CPU %, disk size, disk % used to machine info
 - Show this all in the list page, with nice bars?