qdone 1.7.0 → 2.0.0-alpha

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/worker.js CHANGED
@@ -1,68 +1,86 @@
1
+ /**
2
+ * Implementation for the worker that pulls jobs from queue and executes them.
3
+ */
1
4
 
2
- const Q = require('q')
3
- const childProcess = require('child_process')
4
- const debug = require('debug')('qdone:worker')
5
- const chalk = require('chalk')
6
- const treeKill = require('tree-kill')
7
- const qrlCache = require('./qrlCache')
8
- const cheapIdleCheck = require('./idleQueues').cheapIdleCheck
9
- const AWS = require('aws-sdk')
10
- var shutdownRequested = false
5
+ import {
6
+ ChangeMessageVisibilityCommand,
7
+ ReceiveMessageCommand,
8
+ DeleteMessageCommand
9
+ } from '@aws-sdk/client-sqs'
10
+ import { exec } from 'child_process' // node:child_process
11
+ import treeKill from 'tree-kill'
12
+ import chalk from 'chalk'
13
+ import Debug from 'debug'
11
14
 
12
- exports.requestShutdown = function requestShutdown () {
15
+ import { normalizeQueueName, getQnameUrlPairs } from './qrlCache.js'
16
+ import { getOptionsWithDefaults } from './defaults.js'
17
+ import { cheapIdleCheck } from './idleQueues.js'
18
+ import { getSQSClient } from './sqs.js'
19
+
20
+ const debug = Debug('qdone:worker')
21
+
22
+ // Global flag for shutdown request
23
+ let shutdownRequested = false
24
+
25
+ export function requestShutdown () {
13
26
  shutdownRequested = true
14
27
  }
15
28
 
16
29
  //
17
30
  // Actually run the subprocess job
18
31
  //
19
- function executeJob (job, qname, qrl, options) {
32
+ export async function executeJob (job, qname, qrl, opt) {
20
33
  debug('executeJob', job)
21
34
  const cmd = 'nice ' + job.Body
22
- if (options.verbose) console.error(chalk.blue(' Executing job command:'), cmd)
35
+ if (opt.archive) {
36
+ await getSQSClient().send(new DeleteMessageCommand({
37
+ QueueUrl: qrl,
38
+ ReceiptHandle: job.ReceiptHandle
39
+ }))
40
+ console.log(cmd)
41
+ return { noJobs: 0, jobsSucceeded: 1, jobsFailed: 0 }
42
+ }
43
+ if (opt.verbose) console.error(chalk.blue(' Executing job command:'), cmd)
23
44
 
24
- var jobStart = new Date()
25
- var visibilityTimeout = 30 // this should be the queue timeout
26
- var timeoutExtender
45
+ const jobStart = new Date()
46
+ let visibilityTimeout = 30 // this should be the queue timeout
47
+ let timeoutExtender
27
48
 
28
- function extendTimeout () {
49
+ async function extendTimeout () {
29
50
  debug('extendTimeout')
30
51
  const maxJobRun = 12 * 60 * 60
31
52
  const jobRunTime = ((new Date()) - jobStart) / 1000
32
53
  // Double every time, up to max
33
- visibilityTimeout = Math.min(visibilityTimeout * 2, maxJobRun - jobRunTime, options['kill-after'] - jobRunTime)
34
- if (options.verbose) {
54
+ visibilityTimeout = Math.min(visibilityTimeout * 2, maxJobRun - jobRunTime, opt.killAfter - jobRunTime)
55
+ if (opt.verbose) {
35
56
  console.error(
36
57
  chalk.blue(' Ran for ') + jobRunTime +
37
58
  chalk.blue(' seconds, requesting another ') + visibilityTimeout +
38
59
  chalk.blue(' seconds')
39
60
  )
40
61
  }
41
- const sqs = new AWS.SQS()
42
- sqs
43
- .changeMessageVisibility({
62
+
63
+ try {
64
+ const result = await getSQSClient().send(new ChangeMessageVisibilityCommand({
44
65
  QueueUrl: qrl,
45
66
  ReceiptHandle: job.ReceiptHandle,
46
67
  VisibilityTimeout: visibilityTimeout
47
- })
48
- .promise()
49
- .then(function (result) {
50
- debug('changeMessageVisibility.then returned', result)
51
- if (
52
- jobRunTime + visibilityTimeout >= maxJobRun ||
53
- jobRunTime + visibilityTimeout >= options['kill-after']
54
- ) {
55
- if (options.verbose) console.error(chalk.yellow(' warning: this is our last time extension'))
56
- } else {
57
- // Extend when we get 50% of the way to timeout
58
- timeoutExtender = setTimeout(extendTimeout, visibilityTimeout * 1000 * 0.5)
59
- }
60
- })
61
- .catch(function (err) {
62
- debug('changeMessageVisibility.catch returned', err)
63
- // Rejection means we're ouuta time, whatever, let the job die
64
- if (options.verbose) console.error(chalk.red(' failed to extend job: ') + err)
65
- })
68
+ }))
69
+ debug('ChangeMessageVisibility.then returned', result)
70
+ if (
71
+ jobRunTime + visibilityTimeout >= maxJobRun ||
72
+ jobRunTime + visibilityTimeout >= opt.killAfter
73
+ ) {
74
+ if (opt.verbose) console.error(chalk.yellow(' warning: this is our last time extension'))
75
+ } else {
76
+ // Extend when we get 50% of the way to timeout
77
+ timeoutExtender = setTimeout(extendTimeout, visibilityTimeout * 1000 * 0.5)
78
+ }
79
+ } catch (err) {
80
+ debug('changeMessageVisibility.catch returned', err)
81
+ // Rejection means we're outta time, whatever, let the job die
82
+ if (opt.verbose) console.error(chalk.red(' failed to extend job: ') + err)
83
+ }
66
84
  }
67
85
 
68
86
  // Extend when we get 50% of the way to timeout
@@ -73,84 +91,84 @@ function executeJob (job, qname, qrl, options) {
73
91
  // it does not seem to work for child processes of the shell, so we'll create our
74
92
  // own timeout and use tree-kill to catch all of the child processes.
75
93
 
76
- let child
94
+ let child, sigKillTimeout
77
95
  function killTree () {
78
96
  debug('killTree', child.pid)
79
97
  treeKill(child.pid, 'SIGTERM')
80
98
  setTimeout(function () {
81
- treeKill(child.pid, 'SIGKILL')
99
+ sigKillTimeout = treeKill(child.pid, 'SIGKILL')
82
100
  }, 1000)
83
101
  }
84
- const treeKiller = setTimeout(killTree, options['kill-after'] * 1000)
85
- debug({ treeKiller: options['kill-after'] * 1000, date: Date.now() })
102
+ const treeKiller = setTimeout(killTree, opt.killAfter * 1000)
103
+ debug({ treeKiller: opt.killAfter * 1000, date: Date.now() })
86
104
 
87
- const promise = new Promise(function (resolve, reject) {
88
- child = childProcess.exec(cmd, function (err, stdout, stderr) {
89
- if (err) reject(err, stdout, stderr)
90
- else resolve(stdout, stderr)
105
+ try {
106
+ // Success path for job execution
107
+ const { stdout, stderr } = await new Promise(function (resolve, reject) {
108
+ child = exec(cmd, function (err, stdout, stderr) {
109
+ if (err) {
110
+ err.stdout = stdout
111
+ err.stderr = stderr
112
+ reject(err)
113
+ } else resolve({ stdout, stderr })
114
+ })
91
115
  })
92
- })
93
116
 
94
- return promise
95
- // Q.nfcall(childProcess.exec, cmd, {timeout: options['kill-after'] * 1000})
96
- .then(function (stdout, stderr) {
97
- debug('childProcess.exec.then', Date.now())
98
- clearTimeout(timeoutExtender)
99
- clearTimeout(treeKiller)
100
- if (options.verbose) {
101
- console.error(chalk.green(' SUCCESS'))
102
- if (stdout) console.error(chalk.blue(' stdout: ') + stdout)
103
- if (stderr) console.error(chalk.blue(' stderr: ') + stderr)
104
- console.error(chalk.blue(' cleaning up (removing job) ...'))
105
- }
106
- const sqs = new AWS.SQS()
107
- return sqs
108
- .deleteMessage({
109
- QueueUrl: qrl,
110
- ReceiptHandle: job.ReceiptHandle
111
- })
112
- .promise()
113
- .then(function () {
114
- if (options.verbose) {
115
- console.error(chalk.blue(' done'))
116
- console.error()
117
- }
118
- return Promise.resolve({ noJobs: 0, jobsSucceeded: 1, jobsFailed: 0 })
119
- })
120
- })
121
- .catch((err, stdout, stderr) => {
122
- debug('childProcess.exec.catch')
123
- clearTimeout(timeoutExtender)
124
- clearTimeout(treeKiller)
125
- if (options.verbose) {
126
- console.error(chalk.red(' FAILED'))
127
- if (err.code) console.error(chalk.blue(' code : ') + err.code)
128
- if (err.signal) console.error(chalk.blue(' signal: ') + err.signal)
129
- if (stdout) console.error(chalk.blue(' stdout: ') + stdout)
130
- if (stderr) console.error(chalk.blue(' stderr: ') + stderr)
131
- console.error(chalk.blue(' error : ') + err)
132
- } else {
133
- // Production error logging
134
- console.log(JSON.stringify({
135
- event: 'JOB_FAILED',
136
- timestamp: new Date(),
137
- job: job.MessageId,
138
- command: job.Body,
139
- exitCode: err.code || undefined,
140
- killSignal: err.signal || undefined,
141
- stderr,
142
- stdout,
143
- errorMessage: err.toString().split('\n').slice(1).join('\n').trim() || undefined
144
- }))
145
- }
146
- return Promise.resolve({ noJobs: 0, jobsSucceeded: 0, jobsFailed: 1 })
147
- })
117
+ debug('exec.then', Date.now())
118
+ clearTimeout(timeoutExtender)
119
+ clearTimeout(treeKiller)
120
+ clearTimeout(sigKillTimeout)
121
+ if (opt.verbose) {
122
+ console.error(chalk.green(' SUCCESS'))
123
+ if (stdout) console.error(chalk.blue(' stdout: ') + stdout)
124
+ if (stderr) console.error(chalk.blue(' stderr: ') + stderr)
125
+ console.error(chalk.blue(' cleaning up (removing job) ...'))
126
+ }
127
+ await getSQSClient().send(new DeleteMessageCommand({
128
+ QueueUrl: qrl,
129
+ ReceiptHandle: job.ReceiptHandle
130
+ }))
131
+ if (opt.verbose) {
132
+ console.error(chalk.blue(' done'))
133
+ console.error()
134
+ }
135
+ return { noJobs: 0, jobsSucceeded: 1, jobsFailed: 0 }
136
+ } catch (err) {
137
+ // Fail path for job execution
138
+ debug('exec.catch')
139
+ clearTimeout(timeoutExtender)
140
+ clearTimeout(treeKiller)
141
+ clearTimeout(sigKillTimeout)
142
+ if (opt.verbose) {
143
+ const { code, signal, stdout, stderr } = err
144
+ console.error(chalk.red(' FAILED'))
145
+ if (code) console.error(chalk.blue(' code : ') + code)
146
+ if (signal) console.error(chalk.blue(' signal: ') + signal)
147
+ if (stdout) console.error(chalk.blue(' stdout: ') + stdout)
148
+ if (stderr) console.error(chalk.blue(' stderr: ') + stderr)
149
+ console.error(chalk.blue(' error : ') + err)
150
+ } else {
151
+ // Production error logging
152
+ console.log(JSON.stringify({
153
+ event: 'JOB_FAILED',
154
+ timestamp: new Date(),
155
+ job: job.MessageId,
156
+ command: job.Body,
157
+ exitCode: err.code || err.code || undefined,
158
+ killSignal: err.signal || undefined,
159
+ stderr: err.stderr,
160
+ stdout: err.stdout,
161
+ errorMessage: err.toString().split('\n').slice(1).join('\n').trim() || undefined
162
+ }))
163
+ }
164
+ return { noJobs: 0, jobsSucceeded: 0, jobsFailed: 1 }
165
+ }
148
166
  }
149
167
 
150
168
  //
151
169
  // Pull work off of a single queue
152
170
  //
153
- function pollForJobs (qname, qrl, options) {
171
+ export async function pollForJobs (qname, qrl, opt) {
154
172
  debug('pollForJobs')
155
173
  const params = {
156
174
  AttributeNames: ['All'],
@@ -158,116 +176,93 @@ function pollForJobs (qname, qrl, options) {
158
176
  MessageAttributeNames: ['All'],
159
177
  QueueUrl: qrl,
160
178
  VisibilityTimeout: 30,
161
- WaitTimeSeconds: options['wait-time']
179
+ WaitTimeSeconds: opt.waitTime
180
+ }
181
+ const response = await getSQSClient().send(new ReceiveMessageCommand(params))
182
+ debug('sqs.receiveMessage.then', response)
183
+ if (shutdownRequested) return { noJobs: 0, jobsSucceeded: 0, jobsFailed: 0 }
184
+ if (response.Messages) {
185
+ const job = response.Messages[0]
186
+ if (opt.verbose) console.error(chalk.blue(' Found job ' + job.MessageId))
187
+ return executeJob(job, qname, qrl, opt)
188
+ } else {
189
+ return { noJobs: 1, jobsSucceeded: 0, jobsFailed: 0 }
162
190
  }
163
- const sqs = new AWS.SQS()
164
- return sqs
165
- // .receiveMessage(params, function (err, response) { if (err) throw err })
166
- .receiveMessage(params)
167
- .promise()
168
- .then(function (response) {
169
- debug('sqs.receiveMessage.then', response)
170
- if (shutdownRequested) return Promise.resolve({ noJobs: 0, jobsSucceeded: 0, jobsFailed: 0 })
171
- if (response.Messages) {
172
- const job = response.Messages[0]
173
- if (options.verbose) console.error(chalk.blue(' Found job ' + job.MessageId))
174
- return executeJob(job, qname, qrl, options)
175
- } else {
176
- return Promise.resolve({ noJobs: 1, jobsSucceeded: 0, jobsFailed: 0 })
177
- }
178
- })
179
191
  }
180
192
 
181
193
  //
182
194
  // Resolve queues for listening loop listen
183
195
  //
184
- exports.listen = function listen (queues, options) {
185
- if (options.verbose) console.error(chalk.blue('Resolving queues: ') + queues.join(' '))
186
- const qnames = queues.map(function (queue) { return options.prefix + qrlCache.normalizeQueueName(queue, options) })
187
- debug({ hello: '?' })
188
- return qrlCache
189
- .getQnameUrlPairs(qnames, options)
190
- .then(function (entries) {
191
- // If user only wants active queues, run a cheap idle check
192
- if (options['active-only']) {
193
- debug({ entiresBeforeCheck: entries })
194
- return Promise.all(entries.map(entry =>
195
- cheapIdleCheck(entry.qname, entry.qrl, options)
196
- .then(({ result }) =>
197
- Promise.resolve(
198
- Object.assign(entry, { idle: result.idle })
199
- )
200
- )
201
- ))
202
- } else {
203
- return entries
204
- }
205
- })
206
- .then(function (entries) {
207
- if (options['active-only']) {
208
- // Filter out idle queues
209
- return entries.filter(entry => entry && entry.idle !== true)
210
- } else {
211
- return entries
212
- }
213
- })
214
- .then(function (entries) {
215
- debug('qrlCache.getQnameUrlPairs.then')
216
- if (options.verbose) {
217
- console.error(chalk.blue(' done'))
218
- console.error()
196
+ export async function listen (queues, options) {
197
+ const opt = getOptionsWithDefaults(options)
198
+ debug({ opt, options })
199
+ // Function to listen to all queues in order
200
+ async function oneRound (queues) {
201
+ const stats = { noJobs: 0, jobsSucceeded: 0, jobsFailed: 0 }
202
+ for (const { qname, qrl } of queues) {
203
+ if (shutdownRequested) return stats
204
+ if (opt.verbose) {
205
+ console.error(
206
+ chalk.blue('Looking for work on ') +
207
+ qname.slice(opt.prefix.length) +
208
+ chalk.blue(' (' + qrl + ')')
209
+ )
219
210
  }
211
+ // Aggregate the results
212
+ const { noJobs, jobsSucceeded, jobsFailed } = await pollForJobs(qname, qrl, opt)
213
+ stats.noJobs += noJobs
214
+ stats.jobsFailed += jobsFailed
215
+ stats.jobsSucceeded += jobsSucceeded
216
+ }
217
+ return stats
218
+ }
220
219
 
221
- // Don't listen to fail queues... unless user wants to
222
- entries = entries
223
- .filter(function (entry) {
224
- const suf = options['fail-suffix'] + (options.fifo ? '.fifo' : '')
225
- return options['include-failed'] ? true : entry.qname.slice(-suf.length) !== suf
226
- })
220
+ // Start processing
221
+ if (opt.verbose) console.error(chalk.blue('Resolving queues: ') + queues.join(' '))
222
+ const qnames = queues.map(queue => normalizeQueueName(queue, opt))
223
+ const pairs = await getQnameUrlPairs(qnames, opt)
227
224
 
228
- // Listen to all queues once
229
- function oneRound () {
230
- var result = Q()
231
- entries.forEach(function (entry) {
232
- debug('entries.forEach.funtion')
233
- result = result.then((soFar = { noJobs: 0, jobsSucceeded: 0, jobsFailed: 0 }) => {
234
- debug('soFar', soFar)
235
- // Don't poll the next queue if shutdown was requested
236
- if (shutdownRequested) return Promise.resolve(soFar)
237
- if (options.verbose) {
238
- console.error(
239
- chalk.blue('Looking for work on ') +
240
- entry.qname.slice(options.prefix.length) +
241
- chalk.blue(' (' + entry.qrl + ')')
242
- )
243
- }
244
- // Aggregate the results
245
- return pollForJobs(entry.qname, entry.qrl, options)
246
- .then(({ noJobs, jobsSucceeded, jobsFailed }) => ({
247
- noJobs: soFar.noJobs + noJobs,
248
- jobsSucceeded: soFar.jobsSucceeded + jobsSucceeded,
249
- jobsFailed: soFar.jobsFailed + jobsFailed
250
- }))
251
- })
252
- })
253
- return result
254
- }
225
+ // Figure out which pairs are active
226
+ const activePairs = []
227
+ if (opt.activeOnly) {
228
+ debug({ pairsBeforeCheck: pairs })
229
+ await Promise.all(pairs.map(async pair => {
230
+ const { idle } = await cheapIdleCheck(pair.qname, pair.qrl, opt)
231
+ if (!idle) activePairs.push(pair)
232
+ }))
233
+ }
255
234
 
256
- // But only if we have queues to listen on
257
- if (entries.length) {
258
- if (options.verbose) {
259
- console.error(chalk.blue('Listening to queues (in this order):'))
260
- console.error(entries.map(function (e) {
261
- return ' ' + e.qname.slice(options.prefix.length) + chalk.blue(' - ' + e.qrl)
262
- }).join('\n'))
263
- console.error()
264
- }
265
- return oneRound()
266
- }
235
+ // Finished resolving
236
+ debug('getQnameUrlPairs.then')
237
+ if (opt.verbose) {
238
+ console.error(chalk.blue(' done'))
239
+ console.error()
240
+ }
267
241
 
268
- // Otherwise, let caller know
269
- return Promise.resolve('noQueues')
242
+ // Figure out which queues we want to listen on, choosing between active and
243
+ // all, filtering out failed queues if the user wants that
244
+ const selectedPairs = (opt.activeOnly ? activePairs : pairs)
245
+ .filter(({ qname }) => {
246
+ const suf = opt.failSuffix + (opt.fifo ? '.fifo' : '')
247
+ const isFailQueue = qname.slice(-suf.length) === suf
248
+ const shouldInclude = opt.includeFailed ? true : !isFailQueue
249
+ return shouldInclude
270
250
  })
251
+
252
+ // But only if we have queues to listen on
253
+ if (selectedPairs.length) {
254
+ if (opt.verbose) {
255
+ console.error(chalk.blue('Listening to queues (in this order):'))
256
+ console.error(selectedPairs.map(({ qname, qrl }) =>
257
+ ' ' + qname.slice(opt.prefix.length) + chalk.blue(' - ' + qrl)
258
+ ).join('\n'))
259
+ console.error()
260
+ }
261
+ return oneRound(selectedPairs)
262
+ }
263
+
264
+ // Otherwise, let caller know
265
+ return 'noQueues'
271
266
  }
272
267
 
273
268
  debug('loaded')
package/.coveralls.yml DELETED
@@ -1 +0,0 @@
1
- service_name: travis-ci
package/.travis.yml DELETED
@@ -1,19 +0,0 @@
1
-
2
- language: node_js
3
- services:
4
- - redis-server
5
- node_js:
6
- - '6'
7
- - '8'
8
- - '10'
9
- cache:
10
- directories:
11
- - node_modules
12
- after_success: npm run coverage
13
-
14
- # Trigger a push build on master and greenkeeper branches + PRs build on every branches
15
- # Avoid double build on PRs (See https://github.com/travis-ci/travis-ci/issues/1147)
16
- branches:
17
- only:
18
- - master
19
- - /^greenkeeper.*$/
package/CHANGELOG.md DELETED
@@ -1,121 +0,0 @@
1
- # Changelog
2
-
3
- v.1.7.0
4
- -------
5
-
6
- ### New Features
7
-
8
- #### Added `--deduplication-id` option for enqueue ([#40](https://github.com/suredone/qdone/issues/40))
9
-
10
- `qdone` has always set a deduplication id (using a UUID v1) when sending enqueue calls, but it looks like the aws sdk does not have adequate retry defaults set. This option lets a qdone user retry enqueue operations. For more information please see the [AWS docs for Message Deduplication ID](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html).
11
-
12
- ### Under the hood
13
-
14
- - Updated aws-sdk.
15
- - Updated locked dependencies.
16
-
17
- v.1.6.0
18
- -------
19
-
20
- ### New Features
21
-
22
- #### Caching for SQS `GetQueueAttributes` calls ([#41](https://github.com/suredone/qdone/issues/41))
23
-
24
- After switching our infrastructure to `--active-only` on jobs that have a large number of dynamic queues, we noticed that spend a lot of money on GetQueueAttributes calls. However the state of the active queues is very cacheable, especially if queues tend to have large backlogs, as ours do.
25
-
26
- We added the following options to the `idle-queues`, and `worker` commands to be used in conjunction with `--active-only`:
27
-
28
- - `--cache-url` that takes a `redis://...` or a `redis-cluster://` url [no default]
29
- - `--cache-ttl-seconds` that takes a number of seconds [default `10`]
30
- - `--cache-prefix` that defines a cache key prefix [default `qdone:`]
31
-
32
- The presence of the `--cache-url` option will cause the worker to cache `GetQueueAttributes` for each queue for the specified ttl.
33
-
34
-
35
- v.1.5.0
36
- -------
37
-
38
- ### New Features
39
-
40
- #### Added `--group-id-per-message` option for `enqueue-batch` ([#33](https://github.com/suredone/qdone/issues/33))
41
-
42
- This option creates a new Group ID for every message in a batch, for when you want exactly once delivery, but don't care about message order.
43
-
44
- ### Bug Fixes
45
-
46
- - Fixed ([#35](https://github.com/suredone/qdone/issues/35)) by making `idle-queues` pairing behavior work for FIFO queues as well as normal queues.
47
-
48
-
49
- v.1.4.0
50
- -------
51
-
52
- ### Bug Fixes
53
-
54
- - Fixed ([#25](https://github.com/suredone/qdone/issues/25)) bug on Linux in `worker` where child processes were not getting killed after `--kill-after` timer was reached.
55
-
56
-
57
- v.1.3.0
58
- -------
59
-
60
- ### New Features
61
-
62
- #### FIFO Option ([#18](https://github.com/suredone/qdone/issues/18))
63
-
64
- Added a `--fifo` and `--group-id <string>` option to `equeue` and `enqueue-batch`
65
- - Causes any new queues to be created as FIFO queues
66
- - Causes the `.fifo` suffix to be appended to any queue names that do not explicitly have them
67
- - Causes failed queues to take the form `${name}_failed.fifo`
68
- - Any commands with the same `--group-id` will be worked on in the order they were received by SQS (see [FIFO docs](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html))
69
- - If you don't set `--group-id` it defaults to a unique id per call to `qdone`, so this means messages sent by `enqueue-batch` will always be ordered as you sent them.
70
- - There is NO option to set group id per-message in `enqueue-batch`. Adding this feature in the future will change the format of the batch input file.
71
- - There is NO support right now for Content Deduplication, however a Unique Message Deduplication ID is generated for each command, so retry-able errors should not result in duplicate messages.
72
-
73
- Added a `--fifo` option to `worker`
74
- - Causes the `.fifo` suffix to be appended to any queue names that do not explicitly have them
75
- - When wildcard names are specified (e.g. `test_*` or `*`), worker only listens to queues with a `.fifo` suffix.
76
- - Failed queues are still only included if `--include-failed` is set.
77
- - Regardless of how many workers you have, FIFO commands with the same `--group-id` will only be executed by one worker at a time.
78
- - There is NO support right now for only-once processing using the Receive Request Attempt ID
79
-
80
- #### Only Listen To Active Queues with `--active-only`
81
-
82
- We encountered an occasional production problem where aggressively deleting idle queues can cause the loss of a message that was sent between the idle check and the delete operation. We were using `qdone idle-queues --delete --idle-for 10`, which is much more aggressive than the default of 60 minutes.
83
-
84
- To address this, we are adding an alternate mode of operation to the worker with the new `--active-only` flag for use with wildcard (`*`) queues that does a cheap SQS API call to check whether a queue currently has waiting messages. If so, it's put into the list of queues for the current listening round. This should have the net effect of reducing the number of queues workers have to listen to (similarly to aggresive usage of `qdone idle-queues --delete`) without exposing messages to the delete race condition. For cases where idle queues still must be deleted, we recommend using a longer timeout.
85
-
86
- ### Bug Fixes
87
-
88
- - Fixed ([#29](https://github.com/suredone/qdone/issues/29)) bug in `enqueue-batch` where SQS batches where command lines added up to > 256kb would not be split correctly and loop
89
-
90
- ### Under the hood
91
-
92
- - Increased test coverage related to ([#29](https://github.com/suredone/qdone/issues/29))
93
- - Added test coverage for ([#18](https://github.com/suredone/qdone/issues/18))
94
- - Updated command line args libraries
95
-
96
-
97
- v1.2.0 (January 5, 2018)
98
- ---------------------------
99
-
100
- ### Bug Fixes
101
-
102
- - [#22](https://github.com/suredone/qdone/issues/22) fixes exception deleting failed queues in paired mode when fail queue does not exist
103
-
104
-
105
- v1.1.0 (December 25, 2017)
106
- -----------------------------
107
-
108
- ### New Features
109
-
110
- - Add experimental support for using exports in node. Exports various functions from enqueue and worker for use from node. Doesn't change the public facing interface (which is command line only).
111
-
112
-
113
- v1.0.0 (August 8, 2017)
114
- --------------------------
115
-
116
- ### New Features
117
-
118
- - There is a new command called [`idle-queues`](https://github.com/suredone/qdone#idle-queues-usage) which can identify queues that have had no activity for a specified period of time, and delete them, if desired.
119
- - Qdone's `worker` now [allows a child process to finish running](https://github.com/suredone/qdone#shutdown-behavior) before shutting down in response to a `SIGTERM` or `SIGINT`.
120
- - Queues are now always resolved, and the `--always-resolve` option has been removed.
121
- - Output to non TTYs is less chatty by default, but you can get the previous behavior by using `--verbose`, or silence output in a TTY by using `--quiet`.
package/index.js DELETED
@@ -1,6 +0,0 @@
1
-
2
- module.exports = {
3
- enqueue: require('./src/enqueue'),
4
- worker: require('./src/worker'),
5
- cli: require('./src/cli')
6
- }
package/qdone DELETED
@@ -1,2 +0,0 @@
1
- #!/usr/bin/env node
2
- require('./src/cli.js').run(process.argv.slice(2)).catch(_ => process.exit(1))
@@ -1,9 +0,0 @@
1
- #!/bin/bash
2
- OUTFILE=/tmp/qdone-test-child-kill-linux.out
3
- rm $OUTFILE
4
- _term() {
5
- echo "terminated" > $OUTFILE
6
- exit 1
7
- }
8
- trap _term SIGTERM
9
- for i in 1 2 3; do sleep 1; echo $i; echo $i >> $OUTFILE; done
@@ -1,24 +0,0 @@
1
- test.fifo true
2
- test.fifo true
3
- test.fifo true
4
- test.fifo true
5
- test.fifo true
6
- test.fifo true
7
- test.fifo true
8
- test.fifo true
9
- test.fifo true
10
- test.fifo true
11
- test.fifo true
12
- test.fifo true
13
- test.fifo true
14
- test.fifo true
15
- test.fifo true
16
- test.fifo true
17
- test.fifo true
18
- test.fifo true
19
- test.fifo true
20
- test.fifo true
21
- test.fifo true
22
- test.fifo true
23
- test.fifo true
24
- test.fifo true