qdone 2.1.0 → 2.2.0
This diff reflects the content of publicly available package versions as published to their respective public registries, and is provided for informational purposes only.
- package/LICENSE +2 -2
- package/README.md +1 -4
- package/commonjs/index.js +11 -0
- package/commonjs/src/cache.js +71 -0
- package/commonjs/src/cloudWatch.js +111 -0
- package/commonjs/src/consumer.js +172 -0
- package/commonjs/src/dedup.js +265 -0
- package/commonjs/src/defaults.js +184 -0
- package/commonjs/src/enqueue.js +520 -0
- package/commonjs/src/exponentialBackoff.js +101 -0
- package/commonjs/src/idleQueues.js +333 -0
- package/commonjs/src/monitor.js +86 -0
- package/commonjs/src/qrlCache.js +172 -0
- package/commonjs/src/scheduler/jobExecutor.js +391 -0
- package/commonjs/src/scheduler/queueManager.js +161 -0
- package/commonjs/src/scheduler/systemMonitor.js +94 -0
- package/commonjs/src/sqs.js +98 -0
- package/package.json +16 -12
- package/src/cloudWatch.js +10 -0
- package/src/defaults.js +5 -2
- package/src/idleQueues.js +1 -1
- package/src/monitor.js +7 -1
- package/src/scheduler/jobExecutor.js +10 -2
- package/src/sqs.js +4 -2
- package/src/worker.js +12 -1
package/LICENSE
CHANGED
@@ -1,6 +1,6 @@
 ISC License

-Copyright (c) 2017,
+Copyright (c) 2017-2026, SureDone, Inc.

 Permission to use, copy, modify, and/or distribute this software for any
 purpose with or without fee is hereby granted, provided that the above
@@ -12,4 +12,4 @@ AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
+PERFORMANCE OF THIS SOFTWARE.
package/README.md
CHANGED
@@ -1,9 +1,6 @@
 [](https://www.npmjs.com/package/qdone)
-[](https://coveralls.io/github/suredone/qdone)
-[](https://david-dm.org/suredone/qdone)
+[](https://github.com/suredone/qdone/actions/workflows/build.yaml)
 [](https://standardjs.com)
-[](https://greenkeeper.io/)

 # qdone

package/commonjs/index.js
ADDED
@@ -0,0 +1,11 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.monitor = exports.requestShutdown = exports.processMessages = exports.enqueueBatch = exports.enqueue = void 0;
+var enqueue_js_1 = require("./src/enqueue.js");
+Object.defineProperty(exports, "enqueue", { enumerable: true, get: function () { return enqueue_js_1.enqueue; } });
+Object.defineProperty(exports, "enqueueBatch", { enumerable: true, get: function () { return enqueue_js_1.enqueueBatch; } });
+var consumer_js_1 = require("./src/consumer.js");
+Object.defineProperty(exports, "processMessages", { enumerable: true, get: function () { return consumer_js_1.processMessages; } });
+Object.defineProperty(exports, "requestShutdown", { enumerable: true, get: function () { return consumer_js_1.requestShutdown; } });
+var monitor_js_1 = require("./src/monitor.js");
+Object.defineProperty(exports, "monitor", { enumerable: true, get: function () { return monitor_js_1.monitor; } });
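For orientation, a minimal sketch of requiring the new CommonJS entry point. That require('qdone') resolves to commonjs/index.js is an assumption on my part; the package.json changes are summarized above but not expanded here.

// Illustrative only: assumes the exports map points require() at the commonjs build.
const qdone = require('qdone')

// The entry point above re-exports these five functions from src/:
const { enqueue, enqueueBatch, processMessages, requestShutdown, monitor } = qdone

The signatures of enqueue, enqueueBatch, and monitor live in src/enqueue.js and src/monitor.js, which are listed in the summary above but not shown in this diff.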
package/commonjs/src/cache.js
ADDED
@@ -0,0 +1,71 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.getCacheClient = getCacheClient;
+exports.shutdownCache = shutdownCache;
+exports.getCache = getCache;
+exports.setCache = setCache;
+const ioredis_1 = __importDefault(require("ioredis"));
+const url_1 = require("url");
+const debug_1 = __importDefault(require("debug"));
+const debug = (0, debug_1.default)('qdone:cache');
+class UsageError extends Error {
+}
+let client;
+/**
+ * Internal function to setup redis client. Parses URI to figure out
+ * how to connect.
+ */
+function getCacheClient(opt) {
+    const RedisClass = opt.Redis || ioredis_1.default;
+    if (client) {
+        return client;
+    }
+    else if (opt.cacheUri) {
+        const url = new url_1.URL(opt.cacheUri);
+        if (url.protocol === 'redis:') {
+            client = new RedisClass(url.toString());
+        }
+        else if (url.protocol === 'redis-cluster:') {
+            url.protocol = 'redis:';
+            client = new RedisClass.Cluster([url.toString()], { slotsRefreshInterval: 60 * 1000 });
+        }
+        else {
+            throw new UsageError(`Only redis:// or redis-cluster:// URLs are currently supported. Got: ${url.protocol}`);
+        }
+        return client;
+    }
+    else {
+        throw new UsageError('Caching requires the --cache-uri option');
+    }
+}
+function shutdownCache() {
+    if (client)
+        client.quit();
+    client = undefined;
+}
+/**
+ * Returns a promise for the item. Resolves to false if cache is empty, object
+ * if it is found.
+ */
+async function getCache(key, opt) {
+    const client = getCacheClient(opt);
+    const cacheKey = opt.cachePrefix + key;
+    debug({ action: 'getCache', cacheKey });
+    const result = await client.get(cacheKey);
+    debug({ action: 'getCache got', cacheKey, result });
+    return result ? JSON.parse(result) : undefined;
+}
+/**
+ * Returns a promise for the status. Encodes object as JSON
+ */
+async function setCache(key, value, opt) {
+    const client = getCacheClient(opt);
+    const encoded = JSON.stringify(value);
+    const cacheKey = opt.cachePrefix + key;
+    debug({ action: 'setCache', cacheKey, value });
+    return client.setex(cacheKey, opt.cacheTtlSeconds, encoded);
+}
+debug('loaded');
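A short sketch of how these cache helpers appear intended to be called, based only on the option names visible above (cacheUri, cachePrefix, cacheTtlSeconds); the Redis URL, key, and value are invented for illustration, as is the require path.

const { getCache, setCache, shutdownCache } = require('qdone/commonjs/src/cache.js')

async function demo () {
  // Option keys come straight from the code above; values are examples.
  const opt = { cacheUri: 'redis://localhost:6379', cachePrefix: 'qdone:', cacheTtlSeconds: 60 }
  await setCache('example-key', { hello: 'world' }, opt)  // SETEX under cachePrefix + key
  const value = await getCache('example-key', opt)        // parsed object, or undefined on a miss
  console.log(value)
  shutdownCache()                                         // quit() the shared ioredis client
}

demo()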
package/commonjs/src/cloudWatch.js
ADDED
@@ -0,0 +1,111 @@
+"use strict";
+/**
+ * Functions that deal with CloudWatch
+ */
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.getCloudWatchClient = getCloudWatchClient;
+exports.setCloudWatchClient = setCloudWatchClient;
+exports.putAggregateData = putAggregateData;
+const client_cloudwatch_1 = require("@aws-sdk/client-cloudwatch");
+const debug_1 = __importDefault(require("debug"));
+const debug = (0, debug_1.default)('qdone:cloudWatch');
+/**
+ * Utility function to return an instantiated, shared CloudWatchClient.
+ */
+let client;
+function getCloudWatchClient() {
+    if (client)
+        return client;
+    client = new client_cloudwatch_1.CloudWatchClient();
+    return client;
+}
+/**
+ * Utility function to set the client explicitly, used in testing.
+ */
+function setCloudWatchClient(explicitClient) {
+    client = explicitClient;
+}
+/**
+ * Takes data in the form returned by getAggregageData() and pushes it to
+ * CloudWatch metrics under the given queueName.
+ *
+ * @param queueName {String} - The name of the wildcard queue these metrics are for.
+ * @param total {Object} - returned object from getAggregateData()
+ */
+async function putAggregateData(total, timestamp) {
+    const client = getCloudWatchClient();
+    const now = timestamp || new Date();
+    const input = {
+        Namespace: 'qmonitor',
+        MetricData: [
+            {
+                MetricName: 'totalQueues',
+                Dimensions: [{
+                    Name: 'queueName',
+                    Value: total.queueName
+                }],
+                Timestamp: now,
+                Value: total.totalQueues,
+                Unit: 'Count'
+            },
+            {
+                MetricName: 'contributingQueueCount',
+                Dimensions: [{
+                    Name: 'queueName',
+                    Value: total.queueName
+                }],
+                Timestamp: now,
+                Value: total.contributingQueueNames.length,
+                Unit: 'Count'
+            },
+            {
+                MetricName: 'ApproximateNumberOfMessages',
+                Dimensions: [{
+                    Name: 'queueName',
+                    Value: total.queueName
+                }],
+                Timestamp: now,
+                Value: total.ApproximateNumberOfMessages || 0,
+                Unit: 'Count'
+            },
+            {
+                MetricName: 'ApproximateNumberOfMessagesDelayed',
+                Dimensions: [{
+                    Name: 'queueName',
+                    Value: total.queueName
+                }],
+                Timestamp: now,
+                Value: total.ApproximateNumberOfMessagesDelayed || 0,
+                Unit: 'Count'
+            },
+            {
+                MetricName: 'ApproximateNumberOfMessagesNotVisible',
+                Dimensions: [{
+                    Name: 'queueName',
+                    Value: total.queueName
+                }],
+                Timestamp: now,
+                Value: total.ApproximateNumberOfMessagesNotVisible || 0,
+                Unit: 'Count'
+            },
+            {
+                MetricName: 'ApproximateAgeOfOldestMessage',
+                Dimensions: [{
+                    Name: 'queueName',
+                    Value: total.queueName
+                }],
+                Timestamp: now,
+                Value: total.ApproximateAgeOfOldestMessage || 0,
+                Unit: 'Seconds'
+            }
+        ]
+    };
+    const command = new client_cloudwatch_1.PutMetricDataCommand(input);
+    // debug({ input, command })
+    const response = await client.send(command);
+    debug({ response });
+}
+debug('loaded');
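As a rough illustration of the shape putAggregateData() expects, based only on the fields it reads above; the numbers, queue name, and the stubbed client are invented for the sketch.

const { setCloudWatchClient, putAggregateData } = require('qdone/commonjs/src/cloudWatch.js')

// Stub the shared client so the sketch runs without AWS credentials
// (setCloudWatchClient exists for exactly this kind of substitution in tests).
setCloudWatchClient({ send: async (command) => ({ sent: command.constructor.name }) })

putAggregateData({
  queueName: 'example_*',                         // becomes the queueName dimension
  totalQueues: 12,
  contributingQueueNames: ['example_1', 'example_2'],
  ApproximateNumberOfMessages: 340,
  ApproximateNumberOfMessagesDelayed: 0,
  ApproximateNumberOfMessagesNotVisible: 5,
  ApproximateAgeOfOldestMessage: 42               // seconds
})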
package/commonjs/src/consumer.js
ADDED
@@ -0,0 +1,172 @@
+"use strict";
+/**
+ * Consumer implementation.
+ */
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.requestShutdown = requestShutdown;
+exports.getMessages = getMessages;
+exports.processMessages = processMessages;
+const os_1 = require("os");
+const client_sqs_1 = require("@aws-sdk/client-sqs");
+const chalk_1 = __importDefault(require("chalk"));
+const debug_1 = __importDefault(require("debug"));
+const systemMonitor_js_1 = require("./scheduler/systemMonitor.js");
+const queueManager_js_1 = require("./scheduler/queueManager.js");
+const jobExecutor_js_1 = require("./scheduler/jobExecutor.js");
+const defaults_js_1 = require("./defaults.js");
+const sqs_js_1 = require("./sqs.js");
+const debug = (0, debug_1.default)('qdone:consumer');
+// Global flag for shutdown request
+let shutdownRequested = false;
+const shutdownCallbacks = [];
+async function requestShutdown() {
+    debug('requestShutdown');
+    shutdownRequested = true;
+    for (const callback of shutdownCallbacks) {
+        debug('callback', callback);
+        await callback();
+        // try { callback() } catch (e) { }
+    }
+    debug('requestShutdown done');
+}
+async function getMessages(qrl, opt, maxMessages) {
+    const params = {
+        AttributeNames: ['All'],
+        MaxNumberOfMessages: maxMessages,
+        MessageAttributeNames: ['All'],
+        QueueUrl: qrl,
+        VisibilityTimeout: 120,
+        WaitTimeSeconds: opt.waitTime
+    };
+    const response = await (0, sqs_js_1.getSQSClient)().send(new client_sqs_1.ReceiveMessageCommand(params));
+    // debug('ReceiveMessage response', response)
+    return response.Messages || [];
+}
+//
+// Consumer
+//
+async function processMessages(queues, callback, options) {
+    debug({ options });
+    const opt = (0, defaults_js_1.getOptionsWithDefaults)(options);
+    debug('processMessages', { queues, callback, options, opt, argv: process.argv });
+    let lastLatency = 10;
+    const systemMonitor = new systemMonitor_js_1.SystemMonitor(latency => {
+        const percentDifference = 100 * Math.abs(lastLatency - latency) / lastLatency;
+        if (percentDifference > 10 && opt.verbose) {
+            console.error(chalk_1.default.blue('Latency:', Math.round(latency), 'ms'));
+        }
+        lastLatency = latency;
+    });
+    const jobExecutor = new jobExecutor_js_1.JobExecutor(opt);
+    const queueManager = new queueManager_js_1.QueueManager(opt, queues, 10);
+    const cores = (0, os_1.cpus)().length;
+    // debug({ systemMonitor, jobExecutor, queueManager })
+    // This delay function keeps a timeout reference around so it can be
+    // cancelled at shutdown
+    let delayTimeout;
+    const delay = (ms) => new Promise(resolve => {
+        delayTimeout = setTimeout(resolve, ms);
+    });
+    shutdownCallbacks.push(async () => {
+        clearTimeout(delayTimeout);
+        await queueManager.shutdown();
+        debug({ queueManager: 'done' });
+        await jobExecutor.shutdown();
+        debug({ jobExecutor: 'done' });
+        await systemMonitor.shutdown();
+        debug({ systemMonitor: 'done' });
+    });
+    // Keep track of how many messages could be returned from each queue
+    const activeQrls = new Map();
+    const listeningQrls = new Set();
+    let maxReturnCount = 0;
+    const listen = async (qname, qrl, maxMessages) => {
+        if (opt.verbose) {
+            console.error(chalk_1.default.blue('Listening on: '), qname);
+        }
+        maxReturnCount += maxMessages;
+        try {
+            listeningQrls.add(qrl);
+            const messages = await getMessages(qrl, opt, maxMessages);
+            listeningQrls.delete(qrl);
+            if (!shutdownRequested) {
+                if (messages.length) {
+                    activeQrls.set(qrl, (activeQrls.get(qrl) || 0) + 1);
+                    await jobExecutor.executeJobs(messages, callback, qname, qrl);
+                    const count = activeQrls.get(qrl) - 1;
+                    if (count)
+                        activeQrls.set(qrl, count);
+                    else
+                        activeQrls.delete(qrl);
+                    queueManager.updateIcehouse(qrl, false);
+                }
+                else {
+                    // If we didn't get any, update the icehouse so we can back off
+                    queueManager.updateIcehouse(qrl, true);
+                }
+            }
+            // Max job accounting
+            maxReturnCount -= maxMessages;
+        }
+        catch (e) {
+            // If the queue has been cleaned up, we should back off anyway
+            if (e instanceof client_sqs_1.QueueDoesNotExist) {
+                queueManager.updateIcehouse(qrl, true);
+            }
+            else {
+                throw e;
+            }
+        }
+    };
+    if (opt.verbose) {
+        function printUrls() {
+            console.error({ activeQrls, listeningQrls });
+            if (!shutdownRequested)
+                setTimeout(printUrls, 2000);
+        }
+        printUrls();
+    }
+    while (!shutdownRequested) { // eslint-disable-line
+        // Figure out how we are running
+        const runningJobs = jobExecutor.runningJobCount();
+        const allowedJobs = Math.max(0, opt.maxConcurrentJobs - maxReturnCount - runningJobs);
+        // Latency
+        const maxLatency = 100;
+        const latency = systemMonitor.getLatency() || 10;
+        const latencyFactor = 1 - Math.abs(Math.min(latency / maxLatency, 1)); // 0 if latency is at max, 1 if latency 0
+        // Memory
+        const freeMemory = (0, os_1.freemem)();
+        const totalMemory = (0, os_1.totalmem)();
+        const memoryThreshold = totalMemory * opt.maxMemoryPercent / 100;
+        const freememThreshold = totalMemory - memoryThreshold;
+        const remainingMemory = Math.max(0, freeMemory - freememThreshold);
+        const freememFactor = Math.min(1, Math.max(0, remainingMemory / memoryThreshold));
+        // Load
+        const oneMinuteLoad = systemMonitor.getLoad();
+        const loadPerCore = oneMinuteLoad / cores;
+        const loadFactor = 1 - Math.min(1, Math.max(0, loadPerCore / 3));
+        const overallFactor = Math.min(latencyFactor, freememFactor, loadFactor);
+        const targetJobs = Math.round(allowedJobs * overallFactor);
+        let jobsLeft = targetJobs;
+        if (opt.verbose) {
+            console.error({ maxConcurrentJobs: opt.maxConcurrentJobs, maxReturnCount, runningJobs, allowedJobs, maxLatency, latencyFactor, freememFactor, loadFactor, overallFactor, targetJobs });
+        }
+        for (const { qname, qrl } of queueManager.getPairs()) {
+            // const qcount = jobExecutor.runningJobCountForQueue(qname)
+            // console.log({ evaluating: { qname, qrl, qcount, jobsLeft, activeQrlsHasQrl: activeQrls.has(qrl) } })
+            if (jobsLeft <= 0)
+                break;
+            if (listeningQrls.has(qrl))
+                continue;
+            const maxMessages = Math.min(10, jobsLeft);
+            listen(qname, qrl, maxMessages);
+            jobsLeft -= maxMessages;
+            // debug({ listenedTo: { qname, maxMessages, jobsLeft } })
+        }
+        await delay(300);
+    }
+    debug('after all');
+}
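A minimal sketch of driving this consumer loop. The queue pattern, handler body, and option values are assumptions; the option keys (waitTime, maxConcurrentJobs, maxMemoryPercent, verbose) are the ones the loop above actually reads.

const { processMessages, requestShutdown } = require('qdone/commonjs/index.js')

// Handler shape is inferred from jobExecutor.executeJobs(messages, callback, qname, qrl);
// the exact arguments handed to the callback live in scheduler/jobExecutor.js, not shown here.
async function handler (queue, message) {
  console.log('processing message from', queue, message)
}

// Drain cleanly on SIGTERM: stops the receive loop and runs the registered shutdown callbacks.
process.on('SIGTERM', () => { requestShutdown() })

processMessages(['example-queue-*'], handler, {
  waitTime: 20,            // SQS long-poll seconds, passed to ReceiveMessage
  maxConcurrentJobs: 100,  // ceiling before the latency/memory/load factors throttle it down
  maxMemoryPercent: 70,
  verbose: true
})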
package/commonjs/src/dedup.js
ADDED
@@ -0,0 +1,265 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.getDeduplicationId = getDeduplicationId;
+exports.getCacheKey = getCacheKey;
+exports.addDedupParamsToMessage = addDedupParamsToMessage;
+exports.updateStats = updateStats;
+exports.statMaintenance = statMaintenance;
+exports.dedupShouldEnqueue = dedupShouldEnqueue;
+exports.dedupShouldEnqueueMulti = dedupShouldEnqueueMulti;
+exports.dedupSuccessfullyProcessed = dedupSuccessfullyProcessed;
+exports.dedupSuccessfullyProcessedMulti = dedupSuccessfullyProcessedMulti;
+const crypto_1 = require("crypto");
+const uuid_1 = require("uuid");
+const cache_js_1 = require("./cache.js");
+const debug_1 = __importDefault(require("debug"));
+const debug = (0, debug_1.default)('qdone:dedup');
+/**
+ * Returns a MessageDeduplicationId key appropriate for using with Amazon SQS
+ * for the given message. The passed dedupContent will be returned untouched
+ * if it meets all the requirements for SQS's MessageDeduplicationId,
+ * otherwise disallowed characters will be replaced by `_` and content longer
+ * than 128 characters will be truncated and a hash of the content appended.
+ * @param {String} dedupContent - Content used to construct the deduplication id.
+ * @param {Object} opt - Opt object from getOptionsWithDefaults()
+ * @returns {String} the cache key
+ */
+function getDeduplicationId(dedupContent, opt) {
+    debug({ getDeduplicationId: { dedupContent } });
+    // Don't transmit long keys to redis
+    dedupContent = dedupContent.trim().replace(/[^a-zA-Z0-9!"#$%&'()*+,-./:;<=>?@[\\\]^_`{|}~]/g, '_');
+    const max = 128;
+    const sep = 'sha1::body:';
+    const hash = (0, crypto_1.createHash)('sha1').update(dedupContent).digest('hex');
+    const truncated = dedupContent.slice(0, max - sep.length - 42);
+    const id = `sha1:{${hash}}:body:${truncated}`;
+    return id;
+}
+/**
+ * Returns the cache key given a deduplication id.
+ * @param {String} dedupId - a deduplication id returned from getDeduplicationId
+ * @param opt - Opt object from getOptionsWithDefaults()
+ * @returns the cache key
+ */
+function getCacheKey(dedupId, opt) {
+    const cacheKey = opt.cachePrefix + 'dedup:' + dedupId;
+    debug({ getCacheKey: { cacheKey } });
+    return cacheKey;
+}
+/**
+ * Modifies a message (parameters to SendMessageCommand) to add the parameters
+ * for whatever deduplication options the caller has set.
+ * @param {String} message - parameters to SendMessageCommand
+ * @param {Object} opt - Opt object from getOptionsWithDefaults()
+ * @param {Object} [messageOptions] - optional per message options. We only care about the key deduplicationId.
+ * @returns {Object} the modified parameters/message object
+ */
+function addDedupParamsToMessage(message, opt, messageOptions) {
+    // Either of these means we need to calculate an id
+    if (opt.fifo || opt.externalDedup) {
+        const uuidFunction = opt.uuidFunction || uuid_1.v1;
+        if (opt.deduplicationId)
+            message.MessageDeduplicationId = opt.deduplicationId;
+        if (opt.dedupIdPerMessage)
+            message.MessageDeduplicationId = uuidFunction();
+        if (messageOptions?.deduplicationId)
+            message.MessageDeduplicationId = messageOptions.deduplicationId;
+        // Fallback to using the message body
+        if (!message.MessageDeduplicationId) {
+            message.MessageDeduplicationId = getDeduplicationId(message.MessageBody, opt);
+        }
+        else {
+            // Transform the incoming ID so that it fits SPS spec and is suitable for Redis
+            message.MessageDeduplicationId = getDeduplicationId(message.MessageDeduplicationId, opt);
+        }
+        // Track our own dedup id so we can look it up upon ReceiveMessage
+        if (opt.externalDedup) {
+            message.MessageAttributes = {
+                QdoneDeduplicationId: {
+                    StringValue: message.MessageDeduplicationId,
+                    DataType: 'String'
+                }
+            };
+            // If we are using our own dedup, then we must disable the SQS dedup by
+            // providing a different unique ID. Otherwise SQS will interact with us.
+            if (opt.fifo)
+                message.MessageDeduplicationId = uuidFunction();
+        }
+        // Non fifo can't have this parameter
+        if (!opt.fifo)
+            delete message.MessageDeduplicationId;
+    }
+    return message;
+}
+/**
+ * Updates statistics in redis, of which there are two:
+ * 1. duplicateSet - a set who's members are cache keys and scores are the number of duplicate
+ *    runs prevented by dedup.
+ * 2. expirationSet - a set who's members are cache keys and scores are when the cache key expires
+ * @param {String} cacheKey
+ * @param {Number} duplicates - the number of duplicates, must be at least 1 to gather stats
+ * @param {Number} expireAt - timestamp for when this key's dedupPeriod expires
+ * @param {Object} opt - Opt object from getOptionsWithDefaults()
+ * @param {Object} pipeline - (Optional) redis pipeline you will exec() yourself
+ */
+async function updateStats(cacheKey, duplicates, expireAt, opt, pipeline) {
+    if (duplicates >= 1) {
+        const duplicateSet = opt.cachePrefix + 'dedup-stats:duplicateSet';
+        const expirationSet = opt.cachePrefix + 'dedup-stats:expirationSet';
+        const hadPipeline = !!pipeline;
+        if (!hadPipeline)
+            pipeline = (0, cache_js_1.getCacheClient)(opt).multi();
+        pipeline.zadd(duplicateSet, 'GT', duplicates, cacheKey);
+        pipeline.zadd(expirationSet, 'GT', expireAt, cacheKey);
+        if (!hadPipeline)
+            await pipeline.exec();
+    }
+}
+/**
+ * Removes expired items from stats.
+ */
+async function statMaintenance(opt) {
+    const duplicateSet = opt.cachePrefix + 'dedup-stats:duplicateSet';
+    const expirationSet = opt.cachePrefix + 'dedup-stats:expirationSet';
+    const client = (0, cache_js_1.getCacheClient)(opt);
+    const now = new Date().getTime();
+    // Grab a batch of expired keys
+    debug({ statMaintenance: { aboutToGo: true, expirationSet } });
+    const expiredStats = await client.zrange(expirationSet, '-inf', now, 'BYSCORE');
+    debug({ statMaintenance: { expiredStats } });
+    // And remove them from indexes, main storage
+    if (expiredStats.length) {
+        const result = await client.multi()
+            .zrem(expirationSet, expiredStats)
+            .zrem(duplicateSet, expiredStats)
+            .exec();
+        debug({ statMaintenance: { result } });
+    }
+}
+/**
+ * Determines whether we should enqueue this message or whether it is a duplicate.
+ * Returns true if enqueuing the message would not result in a duplicate.
+ * @param {Object} message - Parameters to SendMessageCommand
+ * @param {Object} opt - Opt object from getOptionsWithDefaults()
+ * @returns {Boolean} true if the message can be enqueued without duplicate, else false
+ */
+async function dedupShouldEnqueue(message, opt) {
+    const client = (0, cache_js_1.getCacheClient)(opt);
+    const dedupId = message?.MessageAttributes?.QdoneDeduplicationId?.StringValue;
+    const cacheKey = getCacheKey(dedupId, opt);
+    const expireAt = new Date().getTime() + opt.dedupPeriod;
+    const copies = await client.incr(cacheKey);
+    debug({ action: 'shouldEnqueue', cacheKey, copies });
+    if (copies === 1) {
+        await client.expireat(cacheKey, expireAt);
+        return true;
+    }
+    if (opt.dedupStats) {
+        const duplicates = copies - 1;
+        await updateStats(cacheKey, duplicates, expireAt, opt);
+    }
+    return false;
+}
+/**
+ * Determines which messages we should enqueue, returning only those that
+ * would not be duplicates.
+ * @param {Array[Object]} messages - Entries array for the SendMessageBatchCommand
+ * @param {Object} opt - Opt object from getOptionsWithDefaults()
+ * @returns {Array[Object]} an array of messages that can be safely enqueued. Could be empty.
+ */
+async function dedupShouldEnqueueMulti(messages, opt) {
+    debug({ dedupShouldEnqueueMulti: { messages, opt } });
+    const expireAt = new Date().getTime() + opt.dedupPeriod;
+    // Increment all
+    const incrPipeline = (0, cache_js_1.getCacheClient)(opt).pipeline();
+    for (const message of messages) {
+        const dedupId = message?.MessageAttributes?.QdoneDeduplicationId?.StringValue;
+        const cacheKey = getCacheKey(dedupId, opt);
+        incrPipeline.incr(cacheKey);
+    }
+    const responses = await incrPipeline.exec();
+    debug({ dedupShouldEnqueueMulti: { messages, responses } });
+    // Interpret responses and expire keys for races we won
+    const expirePipeline = (0, cache_js_1.getCacheClient)(opt).pipeline();
+    const statsPipeline = opt.dedupStats ? (0, cache_js_1.getCacheClient)(opt).pipeline() : undefined;
+    const messagesToEnqueue = [];
+    for (let i = 0; i < messages.length; i++) {
+        const message = messages[i];
+        const [, copies] = responses[i];
+        const dedupId = message?.MessageAttributes?.QdoneDeduplicationId?.StringValue;
+        const cacheKey = getCacheKey(dedupId, opt);
+        if (copies === 1) {
+            messagesToEnqueue.push(message);
+            expirePipeline.expireat(cacheKey, expireAt);
+        }
+        else if (opt.dedupStats) {
+            const duplicates = copies - 1;
+            updateStats(cacheKey, duplicates, expireAt, opt, statsPipeline);
+        }
+    }
+    await expirePipeline.exec();
+    if (opt.dedupStats)
+        await statsPipeline.exec();
+    return messagesToEnqueue;
+}
+/**
+ * Marks a message as processed so that subsequent calls to dedupShouldEnqueue
+ * and dedupShouldEnqueueMulti will allow a message to be enqueued again
+ * without waiting for dedupPeriod to expire.
+ * @param {Object} message - Return value from RecieveMessageCommand
+ * @param {Object} opt - Opt object from getOptionsWithDefaults()
+ * @returns {Number} 1 if a cache key was deleted, otherwise 0
+ */
+async function dedupSuccessfullyProcessed(message, opt) {
+    debug({ dedupSuccessfullyProcessed: { message, opt } });
+    const client = (0, cache_js_1.getCacheClient)(opt);
+    const dedupId = message?.MessageAttributes?.QdoneDeduplicationId?.StringValue;
+    if (dedupId) {
+        const cacheKey = getCacheKey(dedupId, opt);
+        const count = await client.del(cacheKey);
+        // Probabalistic stat maintenance
+        if (opt.dedupStats) {
+            const chance = 1 / 100.0;
+            if (Math.random() < chance)
+                await statMaintenance(opt);
+        }
+        return count;
+    }
+    return 0;
+}
+/**
+ * Marks an array of messages as processed so that subsequent calls to
+ * dedupShouldEnqueue and dedupShouldEnqueueMulti will allow a message to be
+ * enqueued again without waiting for dedupPeriod to expire.
+ * @param {Array[Object]} messages - Return values from RecieveMessageCommand
+ * @param {Object} opt - Opt object from getOptionsWithDefaults()
+ * @returns {Number} number of deleted keys
+ */
+async function dedupSuccessfullyProcessedMulti(messages, opt) {
+    debug({ messages, dedupSuccessfullyProcessedMulti: { messages, opt } });
+    const cacheKeys = [];
+    for (const message of messages) {
+        const dedupId = message?.MessageAttributes?.QdoneDeduplicationId?.StringValue;
+        if (dedupId) {
+            const cacheKey = getCacheKey(dedupId, opt);
+            cacheKeys.push(cacheKey);
+        }
+    }
+    debug({ dedupSuccessfullyProcessedMulti: { cacheKeys } });
+    if (cacheKeys.length) {
+        const numDeleted = await (0, cache_js_1.getCacheClient)(opt).del(cacheKeys);
+        // const numDeleted = results.map(([, val]) => val).reduce((a, b) => a + b, 0)
+        debug({ dedupSuccessfullyProcessedMulti: { cacheKeys, numDeleted } });
+        // Probabalistic stat maintenance
+        if (opt.dedupStats) {
+            const chance = numDeleted / 100.0;
+            if (Math.random() < chance)
+                await statMaintenance(opt);
+        }
+        return numDeleted;
+    }
+    return 0;
+}
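Tying the dedup helpers together, a hedged sketch of the enqueue-side and receive-side calls. The option values and queue URL are invented; the option keys (fifo, externalDedup, dedupPeriod, dedupStats, cacheUri, cachePrefix) are the ones the functions above read.

const { addDedupParamsToMessage, dedupShouldEnqueue, dedupSuccessfullyProcessed } =
  require('qdone/commonjs/src/dedup.js')

async function demo () {
  const opt = {
    fifo: true,
    externalDedup: true,              // keep dedup state in Redis instead of relying on SQS
    dedupPeriod: 6 * 60 * 60 * 1000,  // added directly to Date.getTime() above, so same units
    dedupStats: false,
    cacheUri: 'redis://localhost:6379',
    cachePrefix: 'qdone:'
  }

  // Enqueue side: stamp the message, then only send it if it is not a duplicate.
  const message = addDedupParamsToMessage({ QueueUrl: '...', MessageBody: 'do the thing' }, opt)
  if (await dedupShouldEnqueue(message, opt)) {
    // send with SendMessageCommand here
  }

  // After successful processing (normally on the received message, which carries the same
  // QdoneDeduplicationId attribute), clear the key so the same body can be enqueued again.
  await dedupSuccessfullyProcessed(message, opt)
}

demo()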