qdone 2.1.0 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +2 -2
- package/README.md +1 -4
- package/commonjs/index.js +11 -0
- package/commonjs/src/cache.js +71 -0
- package/commonjs/src/cloudWatch.js +111 -0
- package/commonjs/src/consumer.js +172 -0
- package/commonjs/src/dedup.js +265 -0
- package/commonjs/src/defaults.js +184 -0
- package/commonjs/src/enqueue.js +520 -0
- package/commonjs/src/exponentialBackoff.js +101 -0
- package/commonjs/src/idleQueues.js +333 -0
- package/commonjs/src/monitor.js +86 -0
- package/commonjs/src/qrlCache.js +172 -0
- package/commonjs/src/scheduler/jobExecutor.js +391 -0
- package/commonjs/src/scheduler/queueManager.js +161 -0
- package/commonjs/src/scheduler/systemMonitor.js +94 -0
- package/commonjs/src/sqs.js +98 -0
- package/package.json +16 -12
- package/src/cloudWatch.js +10 -0
- package/src/defaults.js +5 -2
- package/src/idleQueues.js +1 -1
- package/src/monitor.js +7 -1
- package/src/scheduler/jobExecutor.js +10 -2
- package/src/sqs.js +4 -2
- package/src/worker.js +12 -1
|
@@ -0,0 +1,520 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.getDLQParams = getDLQParams;
|
|
7
|
+
exports.getOrCreateDLQ = getOrCreateDLQ;
|
|
8
|
+
exports.getFailParams = getFailParams;
|
|
9
|
+
exports.getOrCreateFailQueue = getOrCreateFailQueue;
|
|
10
|
+
exports.getQueueParams = getQueueParams;
|
|
11
|
+
exports.getOrCreateQueue = getOrCreateQueue;
|
|
12
|
+
exports.getQueueAttributes = getQueueAttributes;
|
|
13
|
+
exports.formatMessage = formatMessage;
|
|
14
|
+
exports.sendMessage = sendMessage;
|
|
15
|
+
exports.sendMessageBatch = sendMessageBatch;
|
|
16
|
+
exports.flushMessages = flushMessages;
|
|
17
|
+
exports.addMessage = addMessage;
|
|
18
|
+
exports.enqueue = enqueue;
|
|
19
|
+
exports.enqueueBatch = enqueueBatch;
|
|
20
|
+
const node_1 = require("@sentry/node");
|
|
21
|
+
const uuid_1 = require("uuid");
|
|
22
|
+
const chalk_1 = __importDefault(require("chalk"));
|
|
23
|
+
const debug_1 = __importDefault(require("debug"));
|
|
24
|
+
const client_sqs_1 = require("@aws-sdk/client-sqs");
|
|
25
|
+
const qrlCache_js_1 = require("./qrlCache.js");
|
|
26
|
+
const sqs_js_1 = require("./sqs.js");
|
|
27
|
+
const dedup_js_1 = require("./dedup.js");
|
|
28
|
+
const defaults_js_1 = require("./defaults.js");
|
|
29
|
+
const exponentialBackoff_js_1 = require("./exponentialBackoff.js");
|
|
30
|
+
const debug = (0, debug_1.default)('qdone:enqueue');
|
|
31
|
+
/**
 * Builds the CreateQueue parameters for a queue's dead letter queue.
 * Returns both the normalized DLQ name and the params object.
 */
function getDLQParams(queue, opt) {
    const dqname = (0, qrlCache_js_1.normalizeDLQName)(queue, opt);
    const params = {
        Attributes: { MessageRetentionPeriod: opt.messageRetentionPeriod + '' },
        QueueName: dqname
    };
    // Optional settings carried over from the caller's options.
    if (opt.tags) params.tags = opt.tags;
    if (opt.fifo) params.Attributes.FifoQueue = 'true';
    return { dqname, params };
}
|
|
43
|
+
/**
 * Resolves the qrl of the dead letter queue for the given queue,
 * creating the DLQ when it does not already exist.
 */
async function getOrCreateDLQ(queue, opt) {
    debug('getOrCreateDLQ(', queue, ')');
    const { dqname, params } = getDLQParams(queue, opt);
    try {
        return await (0, qrlCache_js_1.qrlCacheGet)(dqname);
    }
    catch (err) {
        // Only "queue doesn't exist" means we should create it; rethrow the rest.
        if (!(err instanceof client_sqs_1.QueueDoesNotExist)) throw err;
        const client = (0, sqs_js_1.getSQSClient)();
        const cmd = new client_sqs_1.CreateQueueCommand(params);
        if (opt.verbose) {
            console.error(chalk_1.default.blue('Creating dead letter queue ') + dqname);
        }
        const data = await client.send(cmd);
        debug('createQueue returned', data);
        const dqrl = data.QueueUrl;
        (0, qrlCache_js_1.qrlCacheSet)(dqname, dqrl);
        return dqrl;
    }
}
|
|
66
|
+
/**
 * Returns the parameters needed for creating a failed queue. If DLQ options
 * are set, it makes an API call to get this DLQ's ARN.
 */
async function getFailParams(queue, opt) {
    const fqname = (0, qrlCache_js_1.normalizeFailQueueName)(queue, opt);
    const params = {
        Attributes: { MessageRetentionPeriod: opt.messageRetentionPeriod + '' },
        QueueName: fqname
    };
    if (opt.dlq) {
        // A DLQ was requested: look it up and point a redrive policy at it.
        const dqname = (0, qrlCache_js_1.normalizeDLQName)(queue, opt);
        const dqrl = await (0, qrlCache_js_1.qrlCacheGet)(dqname);
        const dqa = await getQueueAttributes(dqrl);
        debug('dqa', dqa);
        params.Attributes.RedrivePolicy = JSON.stringify({
            deadLetterTargetArn: dqa.Attributes.QueueArn,
            maxReceiveCount: opt.dlqAfter
        });
    }
    if (opt.failDelay) params.Attributes.DelaySeconds = opt.failDelay + '';
    if (opt.tags) params.tags = opt.tags;
    if (opt.fifo) params.Attributes.FifoQueue = 'true';
    return params;
}
|
|
95
|
+
/**
 * Returns the qrl for the failed queue for the given queue. Creates the queue
 * if it does not exist.
 * @param {boolean} doesNotExist - when truthy, skip the cache lookup because
 *   the caller already knows the queue is missing.
 */
async function getOrCreateFailQueue(queue, opt, doesNotExist) {
    debug('getOrCreateFailQueue(', queue, ')');
    const fqname = (0, qrlCache_js_1.normalizeFailQueueName)(queue, opt);
    try {
        // Bail early if the caller knew we didn't have a queue
        if (doesNotExist) throw new client_sqs_1.QueueDoesNotExist(fqname);
        return await (0, qrlCache_js_1.qrlCacheGet)(fqname);
    }
    catch (err) {
        // Only "queue doesn't exist" means we should create it; rethrow the rest.
        if (!(err instanceof client_sqs_1.QueueDoesNotExist)) throw err;
        // Grab params; if the DLQ is also missing, create it first and retry.
        let params;
        try {
            params = await getFailParams(queue, opt);
        }
        catch (e) {
            if (!(opt.dlq && e instanceof client_sqs_1.QueueDoesNotExist)) throw e;
            await getOrCreateDLQ(queue, opt);
            params = await getFailParams(queue, opt);
        }
        // Create our fail queue
        const client = (0, sqs_js_1.getSQSClient)();
        const cmd = new client_sqs_1.CreateQueueCommand(params);
        if (opt.verbose) {
            console.error(chalk_1.default.blue('Creating fail queue ') + fqname);
        }
        const data = await client.send(cmd);
        debug('createQueue returned', data);
        const fqrl = data.QueueUrl;
        (0, qrlCache_js_1.qrlCacheSet)(fqname, fqrl);
        return fqrl;
    }
}
|
|
137
|
+
/**
 * Returns the parameters needed for creating a queue. If fail options
 * are set, it makes an API call to get the fail queue's ARN.
 */
async function getQueueParams(queue, opt) {
    const qname = (0, qrlCache_js_1.normalizeQueueName)(queue, opt);
    // The main queue always redrives to its fail queue after one receive.
    const fqname = (0, qrlCache_js_1.normalizeFailQueueName)(queue, opt);
    const fqrl = await (0, qrlCache_js_1.qrlCacheGet)(fqname, opt);
    const fqa = await getQueueAttributes(fqrl);
    const params = {
        Attributes: {
            MessageRetentionPeriod: opt.messageRetentionPeriod + '',
            RedrivePolicy: JSON.stringify({
                deadLetterTargetArn: fqa.Attributes.QueueArn,
                maxReceiveCount: 1
            })
        },
        QueueName: qname
    };
    if (opt.tags) params.tags = opt.tags;
    if (opt.fifo) params.Attributes.FifoQueue = 'true';
    return params;
}
|
|
162
|
+
/**
 * Returns a qrl for a queue that either exists or does not, creating the
 * queue (and its fail-queue chain) on demand.
 * @param {string} queue - Queue name (will be normalized).
 * @param {object} opt - qdone options.
 * @returns {Promise<string>} The queue URL.
 */
async function getOrCreateQueue(queue, opt) {
    debug('getOrCreateQueue(', queue, ')');
    const qname = (0, qrlCache_js_1.normalizeQueueName)(queue, opt);
    try {
        const qrl = await (0, qrlCache_js_1.qrlCacheGet)(qname);
        return qrl;
    }
    catch (err) {
        // Anything other than queue doesn't exist gets re-thrown
        if (!(err instanceof client_sqs_1.QueueDoesNotExist))
            throw err;
        // Grab params, creating fail queue if needed
        let params;
        try {
            params = await getQueueParams(qname, opt);
        }
        catch (e) {
            // If fail queue doesn't exist, create it
            if (!(e instanceof client_sqs_1.QueueDoesNotExist))
                throw e;
            await getOrCreateFailQueue(qname, opt, true);
            params = await getQueueParams(qname, opt);
        }
        debug({ getOrCreateQueue: { qname, params } });
        // Create our queue
        const client = (0, sqs_js_1.getSQSClient)();
        const cmd = new client_sqs_1.CreateQueueCommand(params);
        // BUGFIX: this verbose message previously said 'Creating fail queue '
        // (copy-pasted from getOrCreateFailQueue) although this call creates
        // the main queue.
        if (opt.verbose)
            console.error(chalk_1.default.blue('Creating queue ') + qname);
        const data = await client.send(cmd);
        debug('AWS createQueue returned', data);
        const qrl = data.QueueUrl;
        (0, qrlCache_js_1.qrlCacheSet)(qname, qrl);
        return qrl;
    }
}
|
|
201
|
+
/**
 * Fetches all SQS attributes for the queue at the given qrl.
 */
async function getQueueAttributes(qrl) {
    debug('getQueueAttributes(', qrl, ')');
    const cmd = new client_sqs_1.GetQueueAttributesCommand({
        AttributeNames: ['All'],
        QueueUrl: qrl
    });
    const data = await (0, sqs_js_1.getSQSClient)().send(cmd);
    debug('GetQueueAttributes returned', data);
    return data;
}
|
|
211
|
+
/**
 * Builds an SQS message object from a body, optional batch Id, and options.
 * FIFO group id, dedup attributes, and delays are applied as configured;
 * a per-message delay overrides the global one.
 */
function formatMessage(body, id, opt, messageOptions) {
    const message = { MessageBody: body };
    if (typeof id !== 'undefined') message.Id = '' + id;
    if (opt.fifo) {
        message.MessageGroupId = messageOptions?.groupId || opt?.groupId;
    }
    (0, dedup_js_1.addDedupParamsToMessage)(message, opt, messageOptions);
    if (opt.delay) message.DelaySeconds = opt.delay;
    if (messageOptions?.delay) message.DelaySeconds = messageOptions.delay;
    return message;
}
|
|
225
|
+
// Retry happens within the context of the send functions
const retryableExceptions = [
    client_sqs_1.RequestThrottled,
    client_sqs_1.KmsThrottled,
    client_sqs_1.QueueDoesNotExist // Queue could temporarily not exist due to eventual consistency, let it retry
];
/**
 * Sends a single command to the queue at qrl, retrying with exponential
 * backoff on throttling / eventual-consistency errors. With external dedup
 * enabled, duplicates are acknowledged (with a synthetic MessageId) without
 * being sent.
 */
async function sendMessage(qrl, queue, command, opt, messageOptions) {
    debug('sendMessage(', qrl, command, ')');
    const uuidFunction = opt.uuidFunction || uuid_1.v1;
    const params = {
        QueueUrl: qrl,
        ...formatMessage(command, null, opt, messageOptions)
    };
    // See if we even have to send it
    if (opt.externalDedup) {
        const shouldEnqueue = await (0, dedup_js_1.dedupShouldEnqueue)(params, opt);
        if (!shouldEnqueue) return { MessageId: uuidFunction() };
    }
    // Send it
    const client = (0, sqs_js_1.getSQSClient)();
    let cmd = new client_sqs_1.SendMessageCommand(params);
    debug({ cmd });
    const backoff = new exponentialBackoff_js_1.ExponentialBackoff(opt.sendRetries);
    const attemptSend = async (attemptNumber) => {
        cmd.input.attemptNumber = attemptNumber;
        const data = await client.send(cmd);
        debug('sendMessage returned', data);
        return data;
    };
    const shouldRetry = async (result, error) => {
        if (!error) return false;
        if (error instanceof client_sqs_1.QueueDoesNotExist) {
            // clear cache in case cache does not reflect reality, then try recreating the queue before sending message again
            const qname = (0, qrlCache_js_1.normalizeQueueName)(queue, opt);
            (0, qrlCache_js_1.qrlCacheInvalidate)(qname);
            params.QueueUrl = await getOrCreateQueue(queue, opt);
            cmd = new client_sqs_1.SendMessageCommand(params);
        }
        for (const exceptionClass of retryableExceptions) {
            if (error instanceof exceptionClass) {
                debug({ sendMessageRetryingBecause: { error, result } });
                return true;
            }
        }
        // If we could not send it, we also need to remove our dedup flag
        await (0, dedup_js_1.dedupSuccessfullyProcessed)(params, opt);
        return false;
    };
    const result = await backoff.run(attemptSend, shouldRetry);
    debug({ sendMessageResult: result });
    return result;
}
|
|
280
|
+
/**
 * Sends a batch of pre-formatted message entries to the queue at qrl,
 * retrying with exponential backoff, recreating missing queues, and (with
 * external dedup enabled) skipping entries that were already enqueued.
 */
async function sendMessageBatch(qrl, queue, messages, opt) {
    debug('sendMessageBatch(', qrl, messages.map(e => Object.assign(Object.assign({}, e), { MessageBody: e.MessageBody.slice(0, 10) + '...' })), ')');
    const params = { Entries: messages, QueueUrl: qrl };
    if (opt.sentryDsn) {
        (0, node_1.addBreadcrumb)({ category: 'sendMessageBatch', message: JSON.stringify({ params }), level: 'debug' });
    }
    debug({ params });
    // See which messages we even have to send
    if (opt.externalDedup) {
        const checks = await Promise.all(params.Entries.map(async (m) => ({ m, shouldEnqueue: await (0, dedup_js_1.dedupShouldEnqueue)(m, opt) })));
        params.Entries = checks.filter(({ shouldEnqueue }) => shouldEnqueue).map(({ m }) => m);
        if (!params.Entries.length) {
            // Everything was a duplicate: report success without calling SQS.
            return {
                Failed: [],
                Successful: checks.map(({ m: { Id: id, MessageAttributes: ma } }) => ({
                    Id: id,
                    MessageId: 'duplicate',
                    QdoneDeduplicationId: ma?.QdoneDeduplicationId?.StringValue
                }))
            };
        }
    }
    // Send them
    const client = (0, sqs_js_1.getSQSClient)();
    let cmd = new client_sqs_1.SendMessageBatchCommand(params);
    debug({ cmd });
    const backoff = new exponentialBackoff_js_1.ExponentialBackoff(opt.sendRetries);
    const attemptSend = async (attemptNumber) => {
        debug({ sendMessageBatchSend: { attemptNumber, params } });
        return client.send(cmd);
    };
    const shouldRetry = async (result, error) => {
        debug({ shouldRetry: { error, result } });
        // Handle failed result of one or more messages in the batch
        if (result && result.Failed && result.Failed.length) {
            for (const failed of result.Failed) {
                // Find corresponding messages
                const original = params.Entries.find((e) => e.Id === failed.Id);
                const info = { failed, original, opt };
                if (opt.sentryDsn) {
                    (0, node_1.addBreadcrumb)({ category: 'sendMessageBatch', message: 'Failed message: ' + JSON.stringify(info), level: 'error' });
                }
                else {
                    console.error(info);
                }
            }
            throw new Error('One or more message failures: ' + JSON.stringify(result.Failed));
        }
        if (error) {
            // Handle a failed result from an overall error on request
            if (opt.sentryDsn) {
                (0, node_1.addBreadcrumb)({ category: 'sendMessageBatch', message: JSON.stringify({ error }), level: 'error' });
            }
            if (error instanceof client_sqs_1.QueueDoesNotExist) {
                // Clear stale cache entry and recreate queue before retrying
                const qname = (0, qrlCache_js_1.normalizeQueueName)(queue, opt);
                (0, qrlCache_js_1.qrlCacheInvalidate)(qname);
                params.QueueUrl = await getOrCreateQueue(queue, opt);
                cmd = new client_sqs_1.SendMessageBatchCommand(params);
            }
            for (const exceptionClass of retryableExceptions) {
                debug({ exceptionClass, retryableExceptions });
                if (error instanceof exceptionClass) {
                    debug({ sendMessageRetryingBecause: { error, result } });
                    return true;
                }
            }
        }
    };
    return backoff.run(attemptSend, shouldRetry);
}
|
|
357
|
+
let requestCount = 0;
//
// Flushes the internal message buffer for qrl.
// Batches are capped at 10 entries and ~256 KiB of serialized payload.
// Returns { numFlushed, results } for all flushed messages.
//
async function flushMessages(qrl, queue, opt, sendBuffer) {
    debug('flushMessages', { qrl, queue, sendBuffer });
    // Track our outgoing messages to map with Failed / Successful returns
    const messagesById = new Map();
    const resultsById = new Map();
    const results = [];
    if (sendBuffer[qrl] && sendBuffer[qrl].length) {
        for (const message of sendBuffer[qrl]) {
            const { Id } = message;
            messagesById.set(Id, message);
            // Pre-prepare results
            const result = { Id };
            resultsById.set(Id, result);
            results.push(result);
        }
    }
    // Flush until empty (iterative form of the original tail recursion)
    let numFlushed = 0;
    while (sendBuffer[qrl] && sendBuffer[qrl].length) {
        // Construct batch until full
        const batch = [];
        let nextSize = JSON.stringify(sendBuffer[qrl][0]).length;
        let totalSize = 0;
        while ((totalSize + nextSize) < 262144 && sendBuffer[qrl].length && batch.length < 10) {
            batch.push(sendBuffer[qrl].shift());
            totalSize += nextSize;
            nextSize = sendBuffer[qrl].length
                ? JSON.stringify(sendBuffer[qrl][0]).length
                : 0;
        }
        // Send batch
        const data = await sendMessageBatch(qrl, queue, batch, opt);
        // Fail if there are any individual message failures
        if (data?.Failed && data?.Failed.length) {
            const err = new Error('One or more message failures: ' + JSON.stringify(data.Failed));
            err.Failed = data.Failed;
            throw err;
        }
        // If we actually managed to flush any of them
        if (batch.length) {
            requestCount += 1;
            if (data?.Successful) {
                for (const { Id, MessageId } of data.Successful) {
                    const result = resultsById.get(Id);
                    const message = messagesById.get(Id);
                    result.MessageId = MessageId;
                    result.Id = Id;
                    if (message?.MessageAttributes?.QdoneDeduplicationId?.StringValue) {
                        result.QdoneDeduplicationId = message?.MessageAttributes?.QdoneDeduplicationId?.StringValue;
                    }
                    if (opt.verbose)
                        console.error(chalk_1.default.blue('Enqueued job ') + MessageId + chalk_1.default.blue(' request ' + requestCount));
                }
            }
            numFlushed += batch.length;
        }
    }
    return { numFlushed, results };
}
|
|
427
|
+
//
// Adds a message to the internal message buffer for the given qrl.
// Automatically flushes if queue has >= 10 messages.
// Returns number of messages flushed.
//
const debugAddMessage = (0, debug_1.default)('qdone:enqueue:addMessage');
async function addMessage(qrl, queue, command, messageIndex, opt, sendBuffer, messageOptions) {
    const message = formatMessage(command, messageIndex, opt, messageOptions);
    (sendBuffer[qrl] = sendBuffer[qrl] || []).push(message);
    debugAddMessage({ location: 'addMessage', messageIndex, sendBuffer });
    return sendBuffer[qrl].length >= 10
        ? flushMessages(qrl, queue, opt, sendBuffer)
        : { numFlushed: 0, results: [] };
}
|
|
443
|
+
/**
 * Enqueue a single command.
 * Creates the queue (and its fail/DLQ chain) if needed, then sends the
 * message.
 * @param {string} queue - Queue name (normalized internally).
 * @param {string} command - Message body to send.
 * @param {object} options - qdone options, merged with defaults.
 * @returns {Promise<object>} The SQS SendMessage API response.
 */
async function enqueue(queue, command, options) {
    debug('enqueue(', { queue, command }, ')');
    const opt = (0, defaults_js_1.getOptionsWithDefaults)(options);
    if (opt.sentryDsn) {
        (0, node_1.setExtra)({ qdoneOperation: 'enqueue', args: { queue, command, opt } });
    }
    try {
        const qrl = await getOrCreateQueue(queue, opt);
        return sendMessage(qrl, queue, command, opt);
    }
    catch (e) {
        // BUGFIX: was console.log — errors belong on stderr, consistent with
        // the rest of this module's console.error usage. Rethrown so callers
        // still see the failure.
        console.error(e);
        throw e;
    }
}
|
|
462
|
+
/**
 * Enqueue many commands formatted as an array of {queue, command,
 * messageOptions} objects. Pre-creates every distinct queue, buffers
 * messages per queue, and flushes in batches of up to 10.
 * @param {Array<object>} pairs - Items of shape {queue, command, messageOptions}.
 * @param {object} options - qdone options, merged with defaults.
 * @returns {Promise<{numFlushed: number, results: Array<object>}>} Total
 *   messages flushed plus a per-message result array.
 */
async function enqueueBatch(pairs, options) {
    debug('enqueueBatch(', pairs, ')');
    const opt = (0, defaults_js_1.getOptionsWithDefaults)(options);
    if (opt.sentryDsn) {
        (0, node_1.setExtra)({ qdoneOperation: 'enqueueBatch', args: { pairs, opt } });
    }
    try {
        const allResults = [];
        // Find unique queues so we can pre-fetch qrls. We do this so that all
        // queues are created prior to going through our flush logic
        const normalizedPairs = pairs.map(({ queue, command, messageOptions }) => ({
            qname: (0, qrlCache_js_1.normalizeQueueName)(queue, opt),
            command,
            messageOptions: (0, defaults_js_1.validateMessageOptions)(messageOptions)
        }));
        const uniqueQnames = new Set(normalizedPairs.map(p => p.qname));
        // Prefetch qrls / create queues in parallel, building a reverse map for cache invalidation
        const createPromises = [];
        const qrlToQname = new Map();
        for (const qname of uniqueQnames) {
            createPromises.push(getOrCreateQueue(qname, opt).then(qrl => qrlToQname.set(qrl, qname)));
        }
        await Promise.all(createPromises);
        // After we've prefetched, all qrls are in cache
        // so go back through the list of pairs and fire off messages
        requestCount = 0;
        const sendBuffer = {};
        let messageIndex = 0;
        let initialFlushTotal = 0;
        for (const { qname, command, messageOptions } of normalizedPairs) {
            const qrl = await getOrCreateQueue(qname, opt);
            const { numFlushed, results } = await addMessage(qrl, qname, command, messageIndex++, opt, sendBuffer, messageOptions);
            initialFlushTotal += numFlushed;
            allResults.push(...results);
        }
        // And flush any remaining messages
        const extraFlushPromises = [];
        for (const qrl in sendBuffer) {
            extraFlushPromises.push(flushMessages(qrl, qrlToQname.get(qrl), opt, sendBuffer));
        }
        let extraFlushTotal = 0;
        for (const { numFlushed, results } of await Promise.all(extraFlushPromises)) {
            allResults.push(...results);
            extraFlushTotal += numFlushed;
        }
        const totalFlushed = initialFlushTotal + extraFlushTotal;
        debug({ initialFlushTotal, extraFlushTotal, totalFlushed });
        return { numFlushed: totalFlushed, results: allResults };
    }
    catch (e) {
        // BUGFIX: was console.log — errors belong on stderr, consistent with
        // the rest of this module's console.error usage. Rethrown so callers
        // still see the failure.
        console.error(e);
        throw e;
    }
}
|
|
520
|
+
debug('loaded');
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Exponential backoff controller.
|
|
4
|
+
* usage:
|
|
5
|
+
* const exp = new ExponentialBackoff()
|
|
6
|
+
* const result = await exp.run(
|
|
7
|
+
* function action (attemptNumber) {
|
|
8
|
+
* console.log(attemptNumber) // 1, 2, 3, ...
|
|
9
|
+
* return axios.post(...)
|
|
10
|
+
* },
|
|
11
|
+
* function shouldRetry (returnValue, error) {
|
|
12
|
+
* if (returnValue && return value.code = 500) return true
|
|
13
|
+
* if (error && error.message === 'Internal Server Error') return true
|
|
14
|
+
* }
|
|
15
|
+
* )
|
|
16
|
+
*/
|
|
17
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
18
|
+
exports.ExponentialBackoff = void 0;
|
|
19
|
+
/**
 * Exponential backoff controller.
 * usage:
 *   const exp = new ExponentialBackoff()
 *   const result = await exp.run(
 *     function action (attemptNumber) {
 *       console.log(attemptNumber) // 1, 2, 3, ...
 *       return axios.post(...)
 *     },
 *     function shouldRetry (returnValue, error) {
 *       if (returnValue && returnValue.code === 500) return true
 *       if (error && error.message === 'Internal Server Error') return true
 *     }
 *   )
 */
class ExponentialBackoff {
    /**
     * Creates various behaviors for backoff.
     * @param {number} maxRetries - Number of times to attempt the action before
     *   throwing an error. Defaults to 3.
     * @param {number} maxJitterPercent - Jitter as a percentage of the delay.
     *   For example, if the exponential delay is 2 seconds, then a jitter of
     *   0.5 could lead to a delay as low as 1 second and as high as 3 seconds,
     *   since 0.5 * 2 = 1. Defaults to 0.5.
     * @param {number} exponentBase - The base for the exponent. Defaults to 2,
     *   which means the delay doubles every attempt.
     * @throws {Error} if any parameter is outside its valid range.
     */
    constructor(maxRetries = 3, maxJitterPercent = 0.5, exponentBase = 2) {
        if (maxRetries < 1)
            throw new Error('maxRetries must be >= 1');
        if (maxJitterPercent < 0.1 || maxJitterPercent > 1)
            throw new Error('maxJitterPercent must be in the interval [0.1, 1]');
        if (exponentBase < 1 || exponentBase > 10)
            throw new Error('exponentBase must be in the range [1, 10]');
        // Explicit radix / Number.* forms (was bare parseInt/parseFloat).
        this.maxRetries = Number.parseInt(maxRetries, 10);
        this.maxJitterPercent = Number.parseFloat(maxJitterPercent);
        this.exponentBase = Number.parseFloat(exponentBase);
        this.attemptNumber = 0;
    }
    /**
     * Calculates how many ms to delay based on the current attempt number.
     * @param {number} attemptNumber - 1-based attempt counter.
     * @returns {number} Delay in milliseconds including random jitter.
     */
    calculateDelayMs(attemptNumber) {
        const secondsRaw = this.exponentBase ** attemptNumber; // 2, 4, 8, 16, ....
        const jitter = this.maxJitterPercent * (Math.random() - 0.5); // [-0.5, 0.5]
        return Math.round(secondsRaw * (1 + jitter) * 1000);
    }
    /**
     * Resolves after a delay set by the current attempt.
     */
    async delay(attemptNumber) {
        const delayMs = this.calculateDelayMs(attemptNumber);
        // reject was unused in the original executor; dropped.
        return new Promise((resolve) => setTimeout(resolve, delayMs));
    }
    /**
     * Call another function repeatedly, retrying with exponential backoff and
     * jitter if not successful.
     * @param {ExponentialBackoff~action} action - Callback that does the action
     *   to be attempted (web request, rpc, database call, etc). Will be called
     *   again after the exponential delay if shouldRetry() returns true.
     * @param {ExponentialBackoff~shouldRetry} shouldRetry - Callback that gets
     *   to look at the return value of action() and any potential exception. If
     *   this returns true then the action will be retried with the appropriate
     *   backoff delay. Defaults to a function that returns true if an exception
     *   is thrown.
     * @throws the last error (or a generic Error for non-throwing failures)
     *   once maxRetries attempts are exhausted.
     */
    async run(action = async (attemptNumber) => undefined, shouldRetry = async (returnValue, error) => !!error) {
        let attemptNumber = 0;
        while (attemptNumber++ < this.maxRetries) {
            try {
                const result = await action(attemptNumber);
                if (await shouldRetry(result, undefined)) {
                    if (attemptNumber >= this.maxRetries)
                        throw new Error('Maximum number of attempts reached');
                    await this.delay(attemptNumber);
                }
                else {
                    return result;
                }
            }
            catch (e) {
                if (await shouldRetry(undefined, e)) {
                    if (attemptNumber >= this.maxRetries)
                        throw e;
                    await this.delay(attemptNumber);
                }
                else {
                    throw e;
                }
            }
        }
    }
}
|
|
101
|
+
exports.ExponentialBackoff = ExponentialBackoff;
|