qdone 2.0.52-alpha → 2.0.53-alpha

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -130,7 +130,7 @@ async function processMessages(queues, callback, options) {
130
130
  const remainingMemory = Math.max(0, freeMemory - freememThreshold);
131
131
  const freememFactor = Math.min(1, Math.max(0, remainingMemory / memoryThreshold));
132
132
  // Load
133
- const oneMinuteLoad = (0, os_1.loadavg)()[0];
133
+ const oneMinuteLoad = systemMonitor.getLoad();
134
134
  const loadPerCore = oneMinuteLoad / cores;
135
135
  const loadFactor = 1 - Math.min(1, Math.max(0, loadPerCore / 3));
136
136
  const overallFactor = Math.min(latencyFactor, freememFactor, loadFactor);
@@ -70,8 +70,10 @@ async function qrlCacheGet(qname) {
70
70
  // debug({ cmd })
71
71
  const result = await client.send(cmd);
72
72
  // debug('result', result)
73
- if (!result)
73
+ if (!result) {
74
+ qrlCacheInvalidate(qname);
74
75
  throw new client_sqs_1.QueueDoesNotExist(qname);
76
+ }
75
77
  const { QueueUrl: qrl } = result;
76
78
  // debug('getQueueUrl returned', data)
77
79
  qcache.set(qname, qrl);
@@ -84,7 +86,8 @@ exports.qrlCacheGet = qrlCacheGet;
84
86
  // Immediately updates the cache
85
87
  //
86
88
  function qrlCacheSet(qname, qrl) {
87
- qcache.set(qname, qrl);
89
+ if (qrl)
90
+ qcache.set(qname, qrl);
88
91
  // debug('qcache', Object.keys(qcache), 'set', qname, ' => ', qcache[qname])
89
92
  }
90
93
  exports.qrlCacheSet = qrlCacheSet;
@@ -3,13 +3,19 @@
3
3
  * Component to track event loop latency, which can be used as a metric for
4
4
  * backpressure.
5
5
  */
6
+ var __importDefault = (this && this.__importDefault) || function (mod) {
7
+ return (mod && mod.__esModule) ? mod : { "default": mod };
8
+ };
6
9
  Object.defineProperty(exports, "__esModule", { value: true });
7
10
  exports.SystemMonitor = void 0;
11
+ const os_1 = __importDefault(require("os"));
8
12
  class SystemMonitor {
9
13
  constructor(reportCallback, reportSeconds = 1) {
10
14
  this.reportCallback = reportCallback || console.log;
11
15
  this.reportSeconds = reportSeconds;
12
- this.measurements = [];
16
+ this.latencies = [];
17
+ this.oneMinuteLoad = os_1.default.loadavg()[0];
18
+ this.instantaneousLoad = this.oneMinuteLoad;
13
19
  this.measure();
14
20
  this.reportLatency();
15
21
  }
@@ -17,15 +23,19 @@ class SystemMonitor {
17
23
  clearTimeout(this.measureTimeout);
18
24
  const start = new Date();
19
25
  this.measureTimeout = setTimeout(() => {
20
- const latency = new Date() - start;
21
- this.measurements.push(latency);
22
- if (this.measurements.length > 1000)
23
- this.measurements.shift();
26
+ this.measureLatency(start);
27
+ this.measureLoad();
24
28
  this.measure();
25
29
  });
26
30
  }
31
+ measureLatency(start) {
32
+ const latency = new Date() - start;
33
+ this.latencies.push(latency);
34
+ if (this.latencies.length > 1000)
35
+ this.latencies.shift();
36
+ }
27
37
  getLatency() {
28
- return this.measurements.length ? this.measurements.reduce((a, b) => a + b, 0) / this.measurements.length : 0;
38
+ return this.latencies.length ? this.latencies.reduce((a, b) => a + b, 0) / this.latencies.length : 0;
29
39
  }
30
40
  reportLatency() {
31
41
  clearTimeout(this.reportTimeout);
@@ -37,6 +47,45 @@ class SystemMonitor {
37
47
  this.reportLatency();
38
48
  }, this.reportSeconds * 1000);
39
49
  }
50
+ /**
51
+ * Measures load over the last five seconds instead of being averaged over one
52
+ * minute. This lets the scheduler respond much faster to dips in load.
53
+ *
54
+ * Theory:
55
+ *
56
+ * The Linux kernel calculates the moving average something like:
57
+ * A_1 = A_0 * e + A_now (1 - e)
58
+ * Where:
59
+ * - A_now is the number of processes active/waiting
60
+ * - A_1 is the new one-minute load average after the measurement of A_now
61
+ * - A_0 is the previous one-minute average
62
+ * - e is 1884/2048.
63
+ *
64
+ * Solving this for A_now, which we want to access, we get:
65
+ * A_now = (A_1 - A_0 * e) / (1 - e)
66
+ *
67
+ * We use this formula below to extract A_now when we detect a change in A_1.
68
+ *
69
+ * Note: this code assumes that we are observing the average often enough to
70
+ * detect each change. So you have to call it at least every 5 seconds. 1
71
+ * second is better to reduce latency of detecting the change.
72
+ */
73
+ measureLoad() {
74
+ const [newLoad,] = os_1.default.loadavg();
75
+ const previousLoad = this.oneMinuteLoad;
76
+ if (previousLoad !== newLoad) {
77
+ const e = 1884 / 2048; // see include/linux/sched/loadavg.h
78
+ const active = (newLoad - previousLoad * e) / (1 - e);
79
+ // We take the min here so that spikes up in load are averaged out. We
80
+ // care about detecting spikes downward so we can allow more jobs to run.
81
+ this.instantaneousLoad = Math.min(active, newLoad);
82
+ this.oneMinuteLoad = newLoad;
83
+ console.log({ newLoad, previousLoad, active, instantaneousLoad: this.instantaneousLoad, oneMinuteLoad: this.oneMinuteLoad });
84
+ }
85
+ }
86
+ getLoad() {
87
+ return this.instantaneousLoad;
88
+ }
40
89
  shutdown() {
41
90
  clearTimeout(this.measureTimeout);
42
91
  clearTimeout(this.reportTimeout);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "qdone",
3
- "version": "2.0.52-alpha",
3
+ "version": "2.0.53-alpha",
4
4
  "description": "A distributed scheduler for SQS",
5
5
  "type": "module",
6
6
  "main": "./index.js",
package/src/consumer.js CHANGED
@@ -136,7 +136,7 @@ export async function processMessages (queues, callback, options) {
136
136
  const freememFactor = Math.min(1, Math.max(0, remainingMemory / memoryThreshold))
137
137
 
138
138
  // Load
139
- const oneMinuteLoad = loadavg()[0]
139
+ const oneMinuteLoad = systemMonitor.getLoad()
140
140
  const loadPerCore = oneMinuteLoad / cores
141
141
  const loadFactor = 1 - Math.min(1, Math.max(0, loadPerCore / 3))
142
142
 
package/src/qrlCache.js CHANGED
@@ -66,7 +66,10 @@ export async function qrlCacheGet (qname) {
66
66
  // debug({ cmd })
67
67
  const result = await client.send(cmd)
68
68
  // debug('result', result)
69
- if (!result) throw new QueueDoesNotExist(qname)
69
+ if (!result) {
70
+ qrlCacheInvalidate(qname)
71
+ throw new QueueDoesNotExist(qname)
72
+ }
70
73
  const { QueueUrl: qrl } = result
71
74
  // debug('getQueueUrl returned', data)
72
75
  qcache.set(qname, qrl)
@@ -79,7 +82,7 @@ export async function qrlCacheGet (qname) {
79
82
  // Immediately updates the cache
80
83
  //
81
84
  export function qrlCacheSet (qname, qrl) {
82
- qcache.set(qname, qrl)
85
+ if (qrl) qcache.set(qname, qrl)
83
86
  // debug('qcache', Object.keys(qcache), 'set', qname, ' => ', qcache[qname])
84
87
  }
85
88
 
@@ -3,11 +3,15 @@
3
3
  * backpressure.
4
4
  */
5
5
 
6
+ import os from 'os'
7
+
6
8
  export class SystemMonitor {
7
9
  constructor (reportCallback, reportSeconds = 1) {
8
10
  this.reportCallback = reportCallback || console.log
9
11
  this.reportSeconds = reportSeconds
10
- this.measurements = []
12
+ this.latencies = []
13
+ this.oneMinuteLoad = os.loadavg()[0]
14
+ this.instantaneousLoad = this.oneMinuteLoad
11
15
  this.measure()
12
16
  this.reportLatency()
13
17
  }
@@ -16,15 +20,20 @@ export class SystemMonitor {
16
20
  clearTimeout(this.measureTimeout)
17
21
  const start = new Date()
18
22
  this.measureTimeout = setTimeout(() => {
19
- const latency = new Date() - start
20
- this.measurements.push(latency)
21
- if (this.measurements.length > 1000) this.measurements.shift()
23
+ this.measureLatency(start)
24
+ this.measureLoad()
22
25
  this.measure()
23
26
  })
24
27
  }
25
28
 
29
+ measureLatency (start) {
30
+ const latency = new Date() - start
31
+ this.latencies.push(latency)
32
+ if (this.latencies.length > 1000) this.latencies.shift()
33
+ }
34
+
26
35
  getLatency () {
27
- return this.measurements.length ? this.measurements.reduce((a, b) => a + b, 0) / this.measurements.length : 0
36
+ return this.latencies.length ? this.latencies.reduce((a, b) => a + b, 0) / this.latencies.length : 0
28
37
  }
29
38
 
30
39
  reportLatency () {
@@ -37,6 +46,48 @@ export class SystemMonitor {
37
46
  }, this.reportSeconds * 1000)
38
47
  }
39
48
 
49
+ /**
50
+ * Measures load over the last five seconds instead of being averaged over one
51
+ * minute. This lets the scheduler respond much faster to dips in load.
52
+ *
53
+ * Theory:
54
+ *
55
+ * The Linux kernel calculates the moving average something like:
56
+ * A_1 = A_0 * e + A_now (1 - e)
57
+ * Where:
58
+ * - A_now is the number of processes active/waiting
59
+ * - A_1 is the new one-minute load average after the measurement of A_now
60
+ * - A_0 is the previous one-minute average
61
+ * - e is 1884/2048.
62
+ *
63
+ * Solving this for A_now, which we want to access, we get:
64
+ * A_now = (A_1 - A_0 * e) / (1 - e)
65
+ *
66
+ * We use this formula below to extract A_now when we detect a change in A_1.
67
+ *
68
+ * Note: this code assumes that we are observing the average often enough to
69
+ * detect each change. So you have to call it at least every 5 seconds. 1
70
+ * second is better to reduce latency of detecting the change.
71
+ */
72
+
73
+ measureLoad () {
74
+ const [newLoad, ] = os.loadavg()
75
+ const previousLoad = this.oneMinuteLoad
76
+ if (previousLoad !== newLoad) {
77
+ const e = 1884 / 2048 // see include/linux/sched/loadavg.h
78
+ const active = (newLoad - previousLoad * e) / (1 - e)
79
+ // We take the min here so that spikes up in load are averaged out. We
80
+ // care about detecting spikes downward so we can allow more jobs to run.
81
+ this.instantaneousLoad = Math.min(active, newLoad)
82
+ this.oneMinuteLoad = newLoad
83
+ console.log({ newLoad, previousLoad, active, instantaneousLoad: this.instantaneousLoad, oneMinuteLoad: this.oneMinuteLoad })
84
+ }
85
+ }
86
+
87
+ getLoad() {
88
+ return this.instantaneousLoad
89
+ }
90
+
40
91
  shutdown () {
41
92
  clearTimeout(this.measureTimeout)
42
93
  clearTimeout(this.reportTimeout)