qdone 1.6.0 → 2.0.0-alpha

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/README.md +9 -1
  2. package/commonjs/index.js +10 -0
  3. package/commonjs/package.json +3 -0
  4. package/commonjs/src/cache.js +142 -0
  5. package/commonjs/src/cloudWatch.js +148 -0
  6. package/commonjs/src/consumer.js +483 -0
  7. package/commonjs/src/defaults.js +107 -0
  8. package/commonjs/src/enqueue.js +498 -0
  9. package/commonjs/src/idleQueues.js +466 -0
  10. package/commonjs/src/qrlCache.js +250 -0
  11. package/commonjs/src/sqs.js +160 -0
  12. package/npm-shrinkwrap.json +17598 -264
  13. package/package.json +41 -29
  14. package/src/bin.js +3 -0
  15. package/src/cache.js +21 -25
  16. package/src/cli.js +269 -181
  17. package/src/cloudWatch.js +97 -0
  18. package/src/consumer.js +346 -0
  19. package/src/defaults.js +114 -0
  20. package/src/enqueue.js +239 -196
  21. package/src/idleQueues.js +242 -223
  22. package/src/monitor.js +53 -0
  23. package/src/qrlCache.js +110 -83
  24. package/src/sentry.js +30 -0
  25. package/src/sqs.js +73 -0
  26. package/src/worker.js +197 -202
  27. package/.DS_Store +0 -0
  28. package/.coveralls.yml +0 -2
  29. package/.travis.yml +0 -17
  30. package/CHANGELOG.md +0 -107
  31. package/dump.rdb +0 -0
  32. package/index.js +0 -6
  33. package/package-lock.json.old +0 -3939
  34. package/qdone +0 -2
  35. package/test/fixtures/test-child-kill-linux.sh +0 -9
  36. package/test/fixtures/test-fifo01-x24.batch +0 -24
  37. package/test/fixtures/test-too-big-1.batch +0 -10
  38. package/test/fixtures/test-unique01-x24.batch +0 -24
  39. package/test/fixtures/test-unique02-x24.batch +0 -24
  40. package/test/fixtures/test-unique24-x24.batch +0 -24
  41. package/test/fixtures/test-unique24-x240.batch +0 -240
  42. package/test/test.cache.js +0 -61
  43. package/test/test.cli.js +0 -1609
package/src/cli.js CHANGED
@@ -1,15 +1,23 @@
1
-
2
- const debug = require('debug')('qdone:cli')
3
- const Q = require('q')
4
- const fs = require('fs')
5
- const readline = require('readline')
6
- const chalk = require('chalk')
7
- const commandLineCommands = require('command-line-commands')
8
- const commandLineArgs = require('command-line-args')
9
- const getUsage = require('command-line-usage')
10
- const uuid = require('uuid')
1
+ /**
2
+ * Command line interface implementation
3
+ */
4
+ import { createReadStream, openSync } from 'node:fs'
5
+ import { createInterface } from 'node:readline'
6
+ import { createRequire } from 'module'
7
+ import getUsage from 'command-line-usage'
8
+ import commandLineCommands from 'command-line-commands'
9
+ import commandLineArgs from 'command-line-args'
10
+ import Debug from 'debug'
11
+ import chalk from 'chalk'
12
+
13
+ import { QueueDoesNotExist } from '@aws-sdk/client-sqs'
14
+ import { defaults, setupAWS, setupVerbose, getOptionsWithDefaults } from './defaults.js'
15
+ import { shutdownCache } from './cache.js'
16
+ import { withSentry } from './sentry.js'
17
+
18
+ const debug = Debug('qdone:cli')
19
+ const require = createRequire(import.meta.url)
11
20
  const packageJson = require('../package.json')
12
-
13
21
  class UsageError extends Error {}
14
22
 
15
23
  const awsUsageHeader = { content: 'AWS SQS Authentication', raw: true, long: true }
@@ -24,40 +32,33 @@ const awsUsageBody = {
24
32
  }
25
33
 
26
34
// Options accepted by every qdone subcommand. `defaultValue` is deliberately
// omitted here (unlike qdone 1.x): actual defaults live in defaults.js and are
// applied later via getOptionsWithDefaults(), so an unset flag can be told
// apart from a user-supplied one. Descriptions interpolate those defaults
// purely for help output.
const globalOptionDefinitions = [
  { name: 'prefix', type: String, description: `Prefix to place at the front of each SQS queue name [default: ${defaults.prefix}]` },
  { name: 'fail-suffix', type: String, description: `Suffix to append to each queue to generate fail queue name [default: ${defaults.failSuffix}]` },
  { name: 'region', type: String, description: `AWS region for Queues [default: ${defaults.region}]` },
  { name: 'quiet', alias: 'q', type: Boolean, description: 'Turn on production logging. Automatically set if stderr is not a tty.' },
  { name: 'verbose', alias: 'v', type: Boolean, description: 'Turn on verbose output. Automatically set if stderr is a tty.' },
  { name: 'version', alias: 'V', type: Boolean, description: 'Show version number' },
  { name: 'cache-uri', type: String, description: 'URL to caching cluster. Only redis://... currently supported.' },
  { name: 'cache-prefix', type: String, description: `Prefix for all keys in cache. [default: ${defaults.cachePrefix}]` },
  { name: 'cache-ttl-seconds', type: Number, description: `Number of seconds to cache GetQueueAttributes calls. [default: ${defaults.cacheTtlSeconds}]` },
  { name: 'help', type: Boolean, description: 'Print full help message.' },
  { name: 'sentry-dsn', type: String, description: 'Optional Sentry DSN to track unhandled errors.' }
]
38
47
 
39
- function setupAWS (options) {
40
- debug('loading aws-sdk')
41
- const AWS = require('aws-sdk')
42
- AWS.config.setPromisesDependency(Q.Promise)
43
- AWS.config.update({ region: options.region })
44
- debug('loaded')
45
- }
46
-
47
- function setupVerbose (options) {
48
- const verbose = options.verbose || (process.stderr.isTTY && !options.quiet)
49
- const quiet = options.quiet || (!process.stderr.isTTY && !options.verbose)
50
- options.verbose = verbose
51
- options.quiet = quiet
52
- }
53
-
54
48
// Options specific to `enqueue` and `enqueue-batch`.
// NOTE(review): the option names 'dql-suffix' and 'dql-after' look like typos
// of 'dlq-...' — their descriptions and the defaults they reference
// (defaults.dlqSuffix / defaults.dlqAfter) both say DLQ. Confirm against
// defaults.js and the enqueue option handling before renaming, since these
// flag names are user-facing CLI interface.
const enqueueOptionDefinitions = [
  { name: 'fifo', alias: 'f', type: Boolean, description: 'Create new queues as FIFOs' },
  { name: 'group-id', alias: 'g', type: String, description: 'FIFO Group ID to use for all messages enqueued in current command. Defaults to a string unique to this invocation.' },
  { name: 'group-id-per-message', type: Boolean, description: 'Use a unique Group ID for every message, even messages in the same batch.' },
  { name: 'deduplication-id', type: String, description: 'A Message Deduplication ID to give SQS when sending a message. Use this option if you are managing retries outside of qdone, and make sure the ID is the same for each retry in the deduplication window. Defaults to a string unique to this invocation.' },
  { name: 'message-retention-period', type: Number, description: `Number of seconds to retain jobs (up to 14 days). [default: ${defaults.messageRetentionPeriod}]` },
  { name: 'delay', alias: 'd', type: Number, description: 'Delays delivery of each message by the given number of seconds (up to 900 seconds, or 15 minutes). Defaults to immediate delivery (no delay).' },
  { name: 'dlq', type: Boolean, description: 'Send messages from the failed queue to a DLQ.' },
  { name: 'dql-suffix', type: String, description: `Suffix to append to each queue to generate DLQ name [default: ${defaults.dlqSuffix}]` }, // FIXME: 'dql' vs 'dlq' — see note above
  { name: 'dql-after', type: String, description: `Drives message to the DLQ after this many failures in the failed queue. [default: ${defaults.dlqAfter}]` }, // FIXME: 'dql' vs 'dlq'; also type String though the value is a count — confirm
  { name: 'tag', type: String, multiple: true, description: 'Adds an AWS tag to queue creation. Use the format Key=Value. Can specify multiple times.' }
]
59
60
 
60
- exports.enqueue = function enqueue (argv) {
61
+ export async function enqueue (argv, testHook) {
61
62
  const optionDefinitions = [].concat(enqueueOptionDefinitions, globalOptionDefinitions)
62
63
  const usageSections = [
63
64
  { content: 'usage: qdone enqueue [options] <queue> <command>', raw: true },
@@ -75,14 +76,80 @@ exports.enqueue = function enqueue (argv) {
75
76
  debug('enqueue argv', argv)
76
77
 
77
78
  // Parse command and options
79
+ let options, queue, command
78
80
  try {
79
- var options = commandLineArgs(optionDefinitions, { argv, partial: true })
81
+ options = commandLineArgs(optionDefinitions, { argv, partial: true })
80
82
  setupVerbose(options)
81
83
  debug('enqueue options', options)
82
84
  if (options.help) return Promise.resolve(console.log(getUsage(usageSections)))
83
85
  if (!options._unknown || options._unknown.length !== 2) throw new UsageError('enqueue requires both <queue> and <command> arguments')
84
- var [queue, command] = options._unknown
86
+ queue = options._unknown[0]
87
+ command = options._unknown[1]
85
88
  debug('queue', queue, 'command', command)
89
+ } catch (err) {
90
+ console.log(getUsage(usageSections.filter(s => !s.long)))
91
+ throw err
92
+ }
93
+
94
+ // Process tags
95
+ if (options.tag && options.tag.length) {
96
+ options.tags = {}
97
+ for (const input of options.tag) {
98
+ debug({ input })
99
+ if (input.indexOf('=') === -1) throw new UsageError('Tags must be separated with the "=" character.')
100
+ const [key, ...rest] = input.split('=')
101
+ const value = rest.join('=')
102
+ debug({ input, key, rest, value, tags: options.tags })
103
+ options.tags[key] = value
104
+ }
105
+ }
106
+
107
+ // Load module after AWS global load
108
+ setupAWS(options)
109
+ const { enqueue: enqueueOriginal } = await import('./enqueue.js')
110
+ const enqueue = testHook || enqueueOriginal
111
+
112
+ // Normal (non batch) enqueue
113
+ const opt = getOptionsWithDefaults(options)
114
+ const result = (
115
+ await withSentry(async () => enqueue(queue, command, opt), opt)
116
+ )
117
+ debug('enqueue returned', result)
118
+ if (options.verbose) console.error(chalk.blue('Enqueued job ') + result.MessageId)
119
+ return result
120
+ }
121
+
122
// Options specific to the `monitor` subcommand.
const monitorOptionDefinitions = [
  { name: 'save', alias: 's', type: Boolean, description: 'Saves data to CloudWatch' }
]

/**
 * CLI entry point for `qdone monitor <queuePattern>`.
 *
 * Parses options, fetches aggregate data for the matching queues, prints it
 * to stdout and, when --save is given, writes it to CloudWatch.
 *
 * @param {string[]} argv - argv tail after the `monitor` command word
 * @param {Function} [testHook] - optional replacement for getAggregateData,
 *   mirroring the testHook accepted by the sibling subcommands (root() already
 *   passes it; the previous signature silently dropped it)
 * @returns {Promise<object>} the aggregate data
 */
export async function monitor (argv, testHook) {
  const optionDefinitions = [].concat(monitorOptionDefinitions, globalOptionDefinitions)
  const usageSections = [
    { content: 'usage: qdone monitor <queuePattern> ', raw: true },
    { content: 'Options', raw: true },
    { optionList: optionDefinitions },
    { content: 'SQS API Call Complexity', raw: true, long: true },
    {
      content: [
        { count: '1 + N', summary: 'one call to resolve the queue names (potentially more calls if there are pages)\none call per queue to get attributes' }
      ],
      long: true
    },
    awsUsageHeader, awsUsageBody
  ]
  debug('monitor argv', argv)

  // Parse command and options
  let options, queue
  try {
    options = commandLineArgs(optionDefinitions, { argv, partial: true })
    setupVerbose(options)
    debug('monitor options', options) // was mislabeled 'enqueue options'
    if (options.help) return Promise.resolve(console.log(getUsage(usageSections)))
    if (!options._unknown || options._unknown.length !== 1) throw new UsageError('monitor requires the <queuePattern> argument')
    queue = options._unknown[0]
    debug('queue', queue)
  } catch (e) {
    console.log(getUsage(usageSections.filter(s => !s.long)))
    return Promise.reject(e)
  }

  // Load module after AWS global load
  setupAWS(options)
  const getAggregateData = testHook || (await import('./monitor.js')).getAggregateData
  const data = await getAggregateData(queue)
  console.log(data)
  if (options.save) {
    // cloudWatch.js is only needed (and only loaded) when saving
    const { putAggregateData } = await import('./cloudWatch.js')
    process.stderr.write('Saving to CloudWatch...')
    await putAggregateData(data)
    process.stderr.write('done\n')
  }
  return data
}
94
171
 
95
- // Normal (non batch) enqueue
96
- return enqueue
97
- .enqueue(queue, command, options)
98
- .then(function (result) {
99
- debug('enqueue returned', result)
100
- if (options.verbose) console.error(chalk.blue('Enqueued job ') + result.MessageId)
101
- return result
172
/**
 * Read a batch file (or stdin when filename is '-') and parse each line into
 * a { queue, command } pair: the first whitespace-delimited token is the
 * queue name, the remainder of the line is the command.
 *
 * Blank and whitespace-only lines are skipped — previously they produced
 * bogus { queue: '', command: '' } pairs — and leading whitespace no longer
 * yields an empty queue name.
 *
 * @param {string} filename - path to the batch file, or '-' for stdin
 * @returns {Promise<Array<{queue: string, command: string}>>}
 */
export async function loadBatchFile (filename) {
  // openSync keeps the 1.x behavior of failing synchronously on a bad path
  const file = filename === '-' ? process.stdin : createReadStream(filename, { fd: openSync(filename, 'r') })
  const pairs = []
  await new Promise((resolve, reject) => {
    debug('file', file.name || 'stdin')
    // Construct (queue, command) pairs from input
    const input = createInterface({ input: file })
    input.on('line', line => {
      const trimmed = line.trim()
      if (trimmed === '') return // skip blank lines
      const queue = trimmed.split(/\s+/)[0]
      const command = trimmed.slice(queue.length).trim()
      pairs.push({ queue, command })
    })
    input.on('error', reject)
    input.on('close', resolve)
  })
  return pairs
}
190
+
191
/**
 * Load several batch files concurrently and concatenate their
 * { queue, command } pairs in filename order.
 *
 * @param {string[]} filenames - batch file paths ('-' allowed for stdin)
 * @returns {Promise<Array<{queue: string, command: string}>>}
 */
export async function loadBatchFiles (filenames) {
  const perFile = await Promise.all(filenames.map(name => loadBatchFile(name)))
  return [].concat(...perFile)
}
104
196
 
105
- exports.enqueueBatch = function enqueueBatch (argv) {
197
+ export async function enqueueBatch (argv, testHook) {
106
198
  const optionDefinitions = [].concat(enqueueOptionDefinitions, globalOptionDefinitions)
107
199
  const usageSections = [
108
200
  { content: 'usage: qdone enqueue-batch [options] <file...>', raw: true },
@@ -121,61 +213,46 @@ exports.enqueueBatch = function enqueueBatch (argv) {
121
213
  debug('enqueue-batch argv', argv)
122
214
 
123
215
  // Parse command and options
124
- let files
216
+ let filenames, options
125
217
  try {
126
- var options = commandLineArgs(optionDefinitions, { argv, partial: true })
218
+ options = commandLineArgs(optionDefinitions, { argv, partial: true })
127
219
  setupVerbose(options)
128
220
  debug('enqueue-batch options', options)
129
221
  if (options.help) return Promise.resolve(console.log(getUsage(usageSections)))
130
222
  if (!options._unknown || options._unknown.length === 0) throw new UsageError('enqueue-batch requres one or more <file> arguments')
131
223
  debug('filenames', options._unknown)
132
- files = options._unknown.map(f => f === '-' ? process.stdin : fs.createReadStream(f, { fd: fs.openSync(f, 'r') }))
224
+ filenames = options._unknown
133
225
  } catch (err) {
134
226
  console.log(getUsage(usageSections.filter(s => !s.long)))
135
- return Promise.reject(err)
227
+ throw err
136
228
  }
137
229
 
138
230
  // Load module after AWS global load
139
231
  setupAWS(options)
140
- const enqueue = require('./enqueue')
141
- const pairs = []
232
+ const { enqueueBatch: enqueueBatchOriginal } = await import('./enqueue.js')
233
+ const enqueueBatch = testHook || enqueueBatchOriginal
142
234
 
143
235
  // Load data and enqueue it
144
- return Promise.all(
145
- files.map(function (file) {
146
- // Construct (queue, command) pairs from input
147
- debug('file', file.name || 'stdin')
148
- const input = readline.createInterface({ input: file })
149
- const deferred = Q.defer()
150
- input.on('line', line => {
151
- const parts = line.split(/\s+/)
152
- const queue = parts[0]
153
- const command = line.slice(queue.length).trim()
154
- pairs.push({ queue, command })
155
- })
156
- input.on('error', deferred.reject)
157
- input.on('close', deferred.resolve)
158
- return deferred.promise
159
- })
236
+ const pairs = await loadBatchFiles(filenames)
237
+ debug('pairs', pairs)
238
+
239
+ // Normal (non batch) enqueue
240
+ const opt = getOptionsWithDefaults(options)
241
+ const result = (
242
+ await withSentry(async () => enqueueBatch(pairs, opt), opt)
160
243
  )
161
- .then(function () {
162
- debug('pairs', pairs)
163
- return enqueue
164
- .enqueueBatch(pairs, options)
165
- .then(function (result) {
166
- debug('enqueueBatch returned', result)
167
- if (options.verbose) console.error(chalk.blue('Enqueued ') + result + chalk.blue(' jobs'))
168
- })
169
- })
244
+ debug('enqueueBatch returned', result)
245
+ if (options.verbose) console.error(chalk.blue('Enqueued ') + result + chalk.blue(' jobs'))
170
246
  }
171
247
 
172
- exports.worker = function worker (argv) {
248
+ export async function worker (argv, testHook) {
173
249
  const optionDefinitions = [
174
250
  { name: 'kill-after', alias: 'k', type: Number, defaultValue: 30, description: 'Kill job after this many seconds [default: 30]' },
175
251
  { name: 'wait-time', alias: 'w', type: Number, defaultValue: 20, description: 'Listen at most this long on each queue [default: 20]' },
176
252
  { name: 'include-failed', type: Boolean, description: 'When using \'*\' do not ignore fail queues.' },
177
253
  { name: 'active-only', type: Boolean, description: 'Listen only to queues with pending messages.' },
178
254
  { name: 'drain', type: Boolean, description: 'Run until no more work is found and quit. NOTE: if used with --wait-time 0, this option will not drain queues.' },
255
+ { name: 'archive', type: Boolean, description: 'Does not run jobs, just prints commands to stdout. Use this flag for draining a queue and recording the commands that were in it.' },
179
256
  { name: 'fifo', alias: 'f', type: Boolean, description: 'Automatically adds .fifo to queue names. Only listens to fifo queues when using \'*\'.' }
180
257
  ].concat(globalOptionDefinitions)
181
258
 
@@ -198,9 +275,9 @@ exports.worker = function worker (argv) {
198
275
  debug('enqueue-batch argv', argv)
199
276
 
200
277
  // Parse command and options
201
- let queues
278
+ let queues, options
202
279
  try {
203
- var options = commandLineArgs(optionDefinitions, { argv, partial: true })
280
+ options = commandLineArgs(optionDefinitions, { argv, partial: true })
204
281
  setupVerbose(options)
205
282
  debug('worker options', options)
206
283
  if (options.help) return Promise.resolve(console.log(getUsage(usageSections)))
@@ -210,17 +287,18 @@ exports.worker = function worker (argv) {
210
287
  debug('queues', queues)
211
288
  } catch (err) {
212
289
  console.log(getUsage(usageSections.filter(s => !s.long)))
213
- return Promise.reject(err)
290
+ throw err
214
291
  }
215
292
 
216
293
  // Load module after AWS global load
217
294
  setupAWS(options)
218
- const worker = require('./worker')
295
+ const { listen: originalListen, requestShutdown } = await import('./worker.js')
296
+ const listen = testHook || originalListen
219
297
 
220
- var jobCount = 0
221
- var jobsSucceeded = 0
222
- var jobsFailed = 0
223
- var shutdownRequested = false
298
+ let jobCount = 0
299
+ let jobsSucceeded = 0
300
+ let jobsFailed = 0
301
+ let shutdownRequested = false
224
302
 
225
303
  function handleShutdown () {
226
304
  // Second signal forces shutdown
@@ -229,7 +307,7 @@ exports.worker = function worker (argv) {
229
307
  process.kill(-process.pid, 'SIGKILL')
230
308
  }
231
309
  shutdownRequested = true
232
- worker.requestShutdown()
310
+ requestShutdown()
233
311
  if (options.verbose) {
234
312
  console.error(chalk.yellow('Shutdown requested. Will stop when current job is done or a second signal is recieved.'))
235
313
  if (process.stdout.isTTY) {
@@ -240,56 +318,59 @@ exports.worker = function worker (argv) {
240
318
  process.on('SIGINT', handleShutdown)
241
319
  process.on('SIGTERM', handleShutdown)
242
320
 
243
- function workLoop () {
321
+ async function workLoop () {
244
322
  if (shutdownRequested) {
245
323
  if (options.verbose) console.error(chalk.blue('Shutting down as requested.'))
246
324
  return Promise.resolve()
247
325
  }
248
- return worker
249
- .listen(queues, options)
250
- .then(function (result) {
251
- debug('listen returned', result)
252
-
253
- // Handle delay in the case we don't have any queues
254
- if (result === 'noQueues') {
255
- const roundDelay = Math.max(1000, options['wait-time'] * 1000)
256
- if (options.verbose) console.error(chalk.yellow('No queues to listen on!'))
257
- if (options.drain) {
258
- console.error(chalk.blue('Shutting down because we are in drain mode and no work is available.'))
259
- return Promise.resolve()
260
- }
261
- console.error(chalk.yellow('Retrying in ' + (roundDelay / 1000) + 's'))
262
- return Q.delay(roundDelay).then(workLoop)
263
- }
264
-
265
- const ranJob = (result.jobsSucceeded + result.jobsFailed) > 0
266
- jobCount += result.jobsSucceeded + result.jobsFailed
267
- jobsFailed += result.jobsFailed
268
- jobsSucceeded += result.jobsSucceeded
269
- // Draining continues to listen as long as there is work
270
- if (options.drain) {
271
- if (ranJob) return workLoop()
272
- if (options.verbose) {
273
- console.error(chalk.blue('Ran ') + jobCount + chalk.blue(' jobs: ') + jobsSucceeded + chalk.blue(' succeeded ') + jobsFailed + chalk.blue(' failed'))
274
- }
275
- // return Promise.resolve(jobCount)
276
- } else {
277
- // If we're not draining, loop forever
278
- // We can go immediately if we just ran a job
279
- if (ranJob) return workLoop()
280
- // Otherwise, we could do backoff logic here to slow down requests when
281
- // work is not happening (at the expense of latency)
282
- // But we won't do that now.
283
- return workLoop()
284
- }
285
- })
326
+ // const result = await listen(queues, options)
327
+ const opt = getOptionsWithDefaults(options)
328
+ const result = (
329
+ await withSentry(async () => listen(queues, opt), opt)
330
+ )
331
+ debug('listen returned', result)
332
+
333
+ // Handle delay in the case we don't have any queues
334
+ if (result === 'noQueues') {
335
+ const roundDelay = Math.max(1000, options['wait-time'] * 1000)
336
+ if (options.verbose) console.error(chalk.yellow('No queues to listen on!'))
337
+ if (options.drain) {
338
+ console.error(chalk.blue('Shutting down because we are in drain mode and no work is available.'))
339
+ return Promise.resolve()
340
+ }
341
+ console.error(chalk.yellow('Retrying in ' + (roundDelay / 1000) + 's'))
342
+ const delay = (ms) => new Promise(resolve => setTimeout(resolve, ms))
343
+ return delay(roundDelay).then(workLoop)
344
+ }
345
+
346
+ const ranJob = (result.jobsSucceeded + result.jobsFailed) > 0
347
+ jobCount += result.jobsSucceeded + result.jobsFailed
348
+ jobsFailed += result.jobsFailed
349
+ jobsSucceeded += result.jobsSucceeded
350
+ // Draining continues to listen as long as there is work
351
+ if (options.drain) {
352
+ if (ranJob) return workLoop()
353
+ if (options.verbose) {
354
+ console.error(chalk.blue('Ran ') + jobCount + chalk.blue(' jobs: ') + jobsSucceeded + chalk.blue(' succeeded ') + jobsFailed + chalk.blue(' failed'))
355
+ }
356
+ // return Promise.resolve(jobCount)
357
+ } else {
358
+ // If we're not draining, loop forever
359
+ // We can go immediately if we just ran a job
360
+ if (ranJob) return workLoop()
361
+ // Otherwise, we could do backoff logic here to slow down requests when
362
+ // work is not happening (at the expense of latency)
363
+ // But we won't do that now.
364
+ return workLoop()
365
+ }
286
366
  }
367
+
287
368
  return workLoop()
288
369
  }
289
370
 
290
- exports.idleQueues = function idleQueues (argv) {
371
+ export async function idleQueues (argv, testHook) {
291
372
  const optionDefinitions = [
292
- { name: 'idle-for', alias: 'o', type: Number, defaultValue: 60, description: 'Minutes of inactivity after which a queue is considered idle. [default: 60]' },
373
+ { name: 'idle-for', alias: 'o', type: Number, defaultValue: defaults.idleFor, description: `Minutes of inactivity after which a queue is considered idle. [default: ${defaults.idleFor}]` },
293
374
  { name: 'delete', type: Boolean, description: 'Delete the queue if it is idle. The fail queue also must be idle unless you use --unpair.' },
294
375
  { name: 'unpair', type: Boolean, description: 'Treat queues and their fail queues as independent. By default they are treated as a unit.' },
295
376
  { name: 'include-failed', type: Boolean, description: 'When using \'*\' do not ignore fail queues. This option only applies if you use --unpair. Otherwise, queues and fail queues are treated as a unit.' }
@@ -324,9 +405,9 @@ exports.idleQueues = function idleQueues (argv) {
324
405
  debug('idleQueues argv', argv)
325
406
 
326
407
  // Parse command and options
327
- let queues
408
+ let queues, options
328
409
  try {
329
- var options = commandLineArgs(optionDefinitions, { argv, partial: true })
410
+ options = commandLineArgs(optionDefinitions, { argv, partial: true })
330
411
  setupVerbose(options)
331
412
  debug('idleQueues options', options)
332
413
  if (options.help) return Promise.resolve(console.log(getUsage(usageSections)))
@@ -342,48 +423,56 @@ exports.idleQueues = function idleQueues (argv) {
342
423
 
343
424
  // Load module after AWS global load
344
425
  setupAWS(options)
345
- const idleQueues = require('./idleQueues')
346
-
347
- return idleQueues
348
- .idleQueues(queues, options)
349
- .then(function (result) {
350
- debug('idleQueues returned', result)
351
- if (result === 'noQueues') return Promise.resolve()
352
- const callsSQS = result.map(a => a.apiCalls.SQS).reduce((a, b) => a + b, 0)
353
- const callsCloudWatch = result.map(a => a.apiCalls.CloudWatch).reduce((a, b) => a + b, 0)
354
- if (options.verbose) console.error(chalk.blue('Used ') + callsSQS + chalk.blue(' SQS and ') + callsCloudWatch + chalk.blue(' CloudWatch API calls.'))
355
- // Print idle queues to stdout
356
- result.filter(a => a.idle).map(a => a.queue).forEach(q => console.log(q))
357
- return result
358
- })
359
- .catch(err => {
360
- if (err.code === 'AWS.SimpleQueueService.NonExistentQueue') {
361
- console.error(chalk.yellow('This error can occur when you run this command immediately after deleting a queue. Wait 60 seconds and try again.'))
362
- return Promise.reject(err)
363
- }
364
- })
426
+ const { idleQueues: idleQueuesOriginal } = await import('./idleQueues.js')
427
+ const idleQueues = testHook || idleQueuesOriginal
428
+ const opt = getOptionsWithDefaults(options)
429
+ try {
430
+ const result = (
431
+ await withSentry(async () => idleQueues(queues, opt), opt)
432
+ )
433
+ debug('idleQueues returned', result)
434
+ if (result === 'noQueues') return Promise.resolve()
435
+ const callsSQS = result.map(a => a.apiCalls.SQS).reduce((a, b) => a + b, 0)
436
+ const callsCloudWatch = result.map(a => a.apiCalls.CloudWatch).reduce((a, b) => a + b, 0)
437
+ if (options.verbose) console.error(chalk.blue('Used ') + callsSQS + chalk.blue(' SQS and ') + callsCloudWatch + chalk.blue(' CloudWatch API calls.'))
438
+
439
+ // Print idle queues to stdout
440
+ result.filter(a => a.idle).map(a => a.queue).forEach(q => console.log(q))
441
+ return result
442
+ } catch (err) {
443
+ if (err instanceof QueueDoesNotExist) {
444
+ console.error(chalk.yellow('This error can occur when you run this command immediately after deleting a queue. Wait 60 seconds and try again.'))
445
+ }
446
+ throw err
447
+ }
365
448
  }
366
449
 
367
- exports.root = function root (originalArgv) {
368
- const validCommands = [null, 'enqueue', 'enqueue-batch', 'worker', 'idle-queues']
450
+ export async function root (originalArgv, testHook) {
451
+ const validCommands = [null, 'enqueue', 'enqueue-batch', 'worker', 'idle-queues', 'monitor']
369
452
  const usageSections = [
370
453
  { content: 'qdone - Command line job queue for SQS', raw: true, long: true },
371
454
  { content: 'usage: qdone [options] <command>', raw: true },
372
455
  { content: 'Commands', raw: true },
373
- { content: [
374
- { name: 'enqueue', summary: 'Enqueue a single command' },
375
- { name: 'enqueue-batch', summary: 'Enqueue multiple commands from stdin or a file' },
376
- { name: 'worker', summary: 'Execute work on one or more queues' },
377
- { name: 'idle-queues', summary: 'Write a list of idle queues to stdout' }
378
- ] },
456
+ {
457
+ content: [
458
+ { name: 'enqueue', summary: 'Enqueue a single command' },
459
+ { name: 'enqueue-batch', summary: 'Enqueue multiple commands from stdin or a file' },
460
+ { name: 'worker', summary: 'Execute work on one or more queues' },
461
+ { name: 'idle-queues', summary: 'Write a list of idle queues to stdout' },
462
+ { name: 'monitor', summary: 'Monitor multiple queues at once' }
463
+ ]
464
+ },
379
465
  { content: 'Global Options', raw: true },
380
466
  { optionList: globalOptionDefinitions },
381
467
  awsUsageHeader, awsUsageBody
382
468
  ]
383
469
 
384
470
  // Parse command and options
471
+ let command, argv
385
472
  try {
386
- var { command, argv } = commandLineCommands(validCommands, originalArgv)
473
+ const parsed = commandLineCommands(validCommands, originalArgv)
474
+ command = parsed.command
475
+ argv = parsed.argv
387
476
  debug('command', command)
388
477
 
389
478
  // Root command
@@ -391,7 +480,7 @@ exports.root = function root (originalArgv) {
391
480
  const options = commandLineArgs(globalOptionDefinitions, { argv: originalArgv })
392
481
  setupVerbose(options)
393
482
  debug('options', options)
394
- if (options.version) return Promise.resolve(console.log(packageJson.version))
483
+ if (options.version) return console.log(packageJson.version)
395
484
  else if (options.help) return Promise.resolve(console.log(getUsage(usageSections)))
396
485
  else console.log(getUsage(usageSections.filter(s => !s.long)))
397
486
  return Promise.resolve()
@@ -403,32 +492,31 @@ exports.root = function root (originalArgv) {
403
492
 
404
493
  // Run child commands
405
494
  if (command === 'enqueue') {
406
- return exports.enqueue(argv)
495
+ return enqueue(argv, testHook)
407
496
  } else if (command === 'enqueue-batch') {
408
- return exports.enqueueBatch(argv)
497
+ return enqueueBatch(argv, testHook)
409
498
  } else if (command === 'worker') {
410
- return exports.worker(argv)
499
+ return worker(argv, testHook)
411
500
  } else if (command === 'idle-queues') {
412
- return exports.idleQueues(argv)
501
+ return idleQueues(argv, testHook)
502
+ } else if (command === 'monitor') {
503
+ return monitor(argv, testHook)
413
504
  }
414
505
  }
415
506
 
416
- exports.run = function run (argv) {
507
/**
 * Top-level CLI runner: dispatches to root(), then releases the cache client
 * so the process can exit. On failure it prints AWS auth help for
 * AccessDenied, logs the error, and rethrows for the caller (bin.js) to
 * handle.
 *
 * @param {string[]} argv - full argv tail for the CLI
 * @param {Function} [testHook] - forwarded to root() for tests
 */
export async function run (argv, testHook) {
  debug('run', argv)
  try {
    await root(argv, testHook)
    // If cache actually is active, it will keep our program from exiting
    // until we disconnect the cache client
    shutdownCache()
  } catch (err) {
    // NOTE(review): 'Code' capitalization — confirm AWS SDK v3 errors expose
    // this property (v3 service exceptions usually carry err.name).
    if (err.Code === 'AccessDenied') console.log(getUsage([awsUsageHeader, awsUsageBody]))
    console.error(chalk.red.bold(err))
    // Guard: a throwable without a string stack (e.g. a thrown non-Error)
    // used to crash here with a TypeError that masked the original failure.
    if (typeof err.stack === 'string') {
      console.error(err.stack.slice(err.stack.indexOf('\n') + 1))
    }
    throw err
  }
}
433
521
 
434
522
  debug('loaded')