shoryuken 2.1.3 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.codeclimate.yml +2 -0
- data/.rubocop.yml +8 -2
- data/.travis.yml +1 -0
- data/CHANGELOG.md +19 -0
- data/README.md +20 -104
- data/Rakefile +0 -1
- data/bin/cli/base.rb +42 -0
- data/bin/cli/sqs.rb +188 -0
- data/bin/shoryuken +47 -9
- data/examples/default_worker.rb +1 -1
- data/lib/shoryuken.rb +75 -55
- data/lib/shoryuken/client.rb +3 -15
- data/lib/shoryuken/default_worker_registry.rb +9 -5
- data/lib/shoryuken/environment_loader.rb +9 -40
- data/lib/shoryuken/fetcher.rb +16 -18
- data/lib/shoryuken/launcher.rb +5 -28
- data/lib/shoryuken/manager.rb +60 -140
- data/lib/shoryuken/message.rb +4 -13
- data/lib/shoryuken/middleware/chain.rb +1 -18
- data/lib/shoryuken/middleware/server/auto_extend_visibility.rb +7 -16
- data/lib/shoryuken/middleware/server/exponential_backoff_retry.rb +25 -21
- data/lib/shoryuken/polling.rb +2 -4
- data/lib/shoryuken/processor.rb +2 -11
- data/lib/shoryuken/queue.rb +1 -3
- data/lib/shoryuken/runner.rb +143 -0
- data/lib/shoryuken/util.rb +0 -8
- data/lib/shoryuken/version.rb +1 -1
- data/lib/shoryuken/worker.rb +1 -1
- data/shoryuken.gemspec +6 -5
- data/spec/integration/launcher_spec.rb +4 -3
- data/spec/shoryuken/client_spec.rb +2 -45
- data/spec/shoryuken/default_worker_registry_spec.rb +12 -10
- data/spec/shoryuken/environment_loader_spec.rb +34 -0
- data/spec/shoryuken/manager_spec.rb +11 -21
- data/spec/shoryuken/middleware/chain_spec.rb +0 -24
- data/spec/shoryuken/middleware/server/auto_extend_visibility_spec.rb +0 -2
- data/spec/shoryuken/middleware/server/exponential_backoff_retry_spec.rb +46 -29
- data/spec/shoryuken/processor_spec.rb +5 -5
- data/spec/shoryuken/{cli_spec.rb → runner_spec.rb} +8 -22
- data/spec/shoryuken_spec.rb +13 -1
- data/spec/spec_helper.rb +3 -8
- metadata +29 -22
- data/lib/shoryuken/aws_config.rb +0 -64
- data/lib/shoryuken/cli.rb +0 -215
- data/lib/shoryuken/sns_arn.rb +0 -27
- data/lib/shoryuken/topic.rb +0 -17
- data/spec/shoryuken/sns_arn_spec.rb +0 -42
- data/spec/shoryuken/topic_spec.rb +0 -32
- data/spec/shoryuken_endpoint.yml +0 -6
data/lib/shoryuken/launcher.rb
CHANGED
@@ -1,42 +1,19 @@
 module Shoryuken
   class Launcher
-    include Celluloid
     include Util

-    trap_exit :actor_died
-
-    attr_accessor :manager
-
     def initialize
-      @
-
-
-      @done = false
-
-      manager.fetcher = Shoryuken::Fetcher.new
-      manager.polling_strategy = Shoryuken.options[:polling_strategy].new(Shoryuken.queues)
+      @manager = Shoryuken::Manager.new(Shoryuken::Fetcher.new,
+                                        Shoryuken.options[:polling_strategy].new(Shoryuken.queues))
     end

     def stop(options = {})
-
-
-
-        manager.async.stop(shutdown: !!options[:shutdown], timeout: Shoryuken.options[:timeout])
-        @condvar.wait
-        manager.terminate
-      end
+      @manager.stop(shutdown: !options[:shutdown].nil?,
+                    timeout: Shoryuken.options[:timeout])
     end

     def run
-
-        manager.async.start
-      end
-    end
-
-    def actor_died(actor, reason)
-      return if @done
-      logger.warn { "Shoryuken died due to the following error, cannot recover, process exiting: #{reason}" }
-      exit 1
+      @manager.start
     end
   end
 end
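In 3.0.0 the Celluloid plumbing (`include Celluloid`, `trap_exit`, `actor_died`, the condition variable) is gone and `Launcher` is a plain object that owns a `Manager`. A minimal sketch of driving it directly, assuming queues, workers, and AWS credentials were already configured the way the CLI (`Shoryuken::Runner`) would do it before this point:

```ruby
require 'shoryuken'

# Assumes Shoryuken.queues, workers, and concurrency were configured elsewhere
# (normally by the CLI from shoryuken.yml).
launcher = Shoryuken::Launcher.new

launcher.run                    # starts the manager's dispatch heartbeat

# ... later, e.g. on INT/TERM ...
launcher.stop(shutdown: true)   # hard shutdown: waits up to Shoryuken.options[:timeout], then kills the pool
```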
data/lib/shoryuken/manager.rb
CHANGED
@@ -1,157 +1,100 @@
-require 'shoryuken/processor'
-require 'shoryuken/fetcher'
-
 module Shoryuken
   class Manager
-    include Celluloid
     include Util

-
-
-
-    exclusive :dispatch
-
-    trap_exit :processor_died
+    BATCH_LIMIT = 10
+    HEARTBEAT_INTERVAL = 0.1

-
+    def initialize(fetcher, polling_strategy)
+      @count = Shoryuken.options.fetch(:concurrency, 25)

-    def initialize(condvar)
-      @count = Shoryuken.options[:concurrency] || 25
       raise(ArgumentError, "Concurrency value #{@count} is invalid, it needs to be a positive number") unless @count > 0
+
       @queues = Shoryuken.queues.dup.uniq
-      @finished = condvar

-      @done = false
+      @done = Concurrent::AtomicBoolean.new(false)
+      @dispatching = Concurrent::AtomicBoolean.new(false)
+
+      @fetcher = fetcher
+      @polling_strategy = polling_strategy
+
+      @heartbeat = Concurrent::TimerTask.new(run_now: true,
+                                             execution_interval: HEARTBEAT_INTERVAL,
+                                             timeout_interval: 60) { dispatch }

-      @
-      @busy_threads = {}
-      @ready_processors = @count.times.map { build_processor }
+      @pool = Concurrent::FixedThreadPool.new(@count, max_queue: @count)
     end

     def start
       logger.info { 'Starting' }

-
+      @heartbeat.execute
     end

     def stop(options = {})
-
-      @done = true
+      @done.make_true

-
-
-
-      end
-
-      fire_event(:shutdown, true)
-
-      logger.info { "Shutting down #{@ready_processors.size} quiet workers" }
-
-      @ready_processors.each do |processor|
-        processor.terminate if processor.alive?
-      end
-      @ready_processors.clear
-
-      return after(0) { @finished.signal } if @busy_processors.empty?
-
-      if options[:shutdown]
-        hard_shutdown_in(options[:timeout])
-      else
-        soft_shutdown(options[:timeout])
-      end
+      if (callback = Shoryuken.stop_callback)
+        logger.info { 'Calling Shoryuken.on_stop block' }
+        callback.call
       end
-    end
-
-    def processor_done(queue, processor)
-      watchdog('Manager#processor_done died') do
-        logger.debug { "Process done for '#{queue}'" }

-
-        @busy_threads.delete(processor.object_id)
+      fire_event(:shutdown, true)

-
-          processor.terminate if processor.alive?
-          return after(0) { @finished.signal } if @busy_processors.empty?
-        else
-          @ready_processors << processor
-          async.dispatch
-        end
-      end
-    end
+      logger.info { 'Shutting down workers' }

-
-      watchdog("Manager#processor_died died") do
-        logger.error { "Process died, reason: #{reason}" }
+      @heartbeat.kill

-
-
-
-
-          return after(0) { @finished.signal } if @busy_processors.empty?
-        else
-          @ready_processors << build_processor
-          async.dispatch
-        end
+      if options[:shutdown]
+        hard_shutdown_in(options[:timeout])
+      else
+        soft_shutdown
       end
     end

-    def
-
+    def processor_done(queue)
+      logger.debug { "Process done for '#{queue}'" }
     end

-
-      return if stopped?
+    private

-
+    def dispatch
+      return if @done.true?
+      return unless @dispatching.make_true

-      if
-
-        dispatch_later
-        return
-      end
+      return if ready.zero?
+      return unless (queue = @polling_strategy.next_queue)

-
-      if queue.nil?
-        logger.debug { 'Pausing fetcher, because all queues are paused' }
-        dispatch_later
-        return
-      end
+      logger.debug { "Ready: #{ready}, Busy: #{busy}, Active Queues: #{@polling_strategy.active_queues}" }

       batched_queue?(queue) ? dispatch_batch(queue) : dispatch_single_messages(queue)
-
-
+    ensure
+      @dispatching.make_false
     end

-
+    def busy
+      @count - ready
+    end

-    def
-      @
-      @_dispatch_timer = nil
-      dispatch
-    end
+    def ready
+      @pool.remaining_capacity
     end

     def assign(queue, sqs_msg)
-
-      logger.debug { "Assigning #{sqs_msg.message_id}" }
-
-      processor = @ready_processors.pop
-      @busy_threads[processor.object_id] = processor.running_thread
-      @busy_processors << processor
+      logger.debug { "Assigning #{sqs_msg.message_id}" }

-
-      end
+      @pool.post { Processor.new(self).process(queue, sqs_msg) }
     end

     def dispatch_batch(queue)
-      batch = fetcher.fetch(queue, BATCH_LIMIT)
-      polling_strategy.messages_found(queue.name, batch.size)
+      batch = @fetcher.fetch(queue, BATCH_LIMIT)
+      @polling_strategy.messages_found(queue.name, batch.size)
       assign(queue.name, patch_batch!(batch))
     end

     def dispatch_single_messages(queue)
-      messages = fetcher.fetch(queue,
-      polling_strategy.messages_found(queue.name, messages.size)
+      messages = @fetcher.fetch(queue, ready)
+      @polling_strategy.messages_found(queue.name, messages.size)
       messages.each { |message| assign(queue.name, message) }
     end

@@ -159,45 +102,22 @@ module Shoryuken
       Shoryuken.worker_registry.batch_receive_messages?(queue.name)
     end

-    def
-
+    def soft_shutdown
+      @pool.shutdown
+      @pool.wait_for_termination
     end

-    def
-
-
-
-    end
+    def hard_shutdown_in(delay)
+      if busy > 0
+        logger.info { "Pausing up to #{delay} seconds to allow workers to finish..." }
+      end

-
-      logger.info { "Waiting for #{@busy_processors.size} busy workers" }
+      @pool.shutdown

-      if @
-        after(delay) { soft_shutdown(delay) }
-      else
-        @finished.signal
-      end
-    end
+      return if @pool.wait_for_termination(delay)

-
-
-      logger.info { "Pausing up to #{delay} seconds to allow workers to finish..." }
-
-      after(delay) do
-        watchdog('Manager#hard_shutdown_in died') do
-          if @busy_processors.size > 0
-            logger.info { "Hard shutting down #{@busy_processors.size} busy workers" }
-
-            @busy_processors.each do |processor|
-              if processor.alive? && t = @busy_threads.delete(processor.object_id)
-                t.raise Shutdown
-              end
-            end
-          end
-
-          @finished.signal
-        end
-      end
+      logger.info { "Hard shutting down #{busy} busy workers" }
+      @pool.kill
     end

     def patch_batch!(sqs_msgs)
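The rewritten `Manager` drops the Celluloid actor pool (`build_processor`, `@busy_threads`, `trap_exit`) in favour of two concurrent-ruby primitives: a `Concurrent::TimerTask` heartbeat that calls `dispatch` every `HEARTBEAT_INTERVAL` seconds, and a bounded `Concurrent::FixedThreadPool` whose `remaining_capacity` stands in for the old "ready processors" count. A standalone sketch of that pattern (illustrative only, not Shoryuken code; the pool size, interval, and placeholder job are assumptions):

```ruby
require 'concurrent'

pool        = Concurrent::FixedThreadPool.new(5, max_queue: 5)
dispatching = Concurrent::AtomicBoolean.new(false)

# Heartbeat: fires every 100 ms and fills whatever capacity the pool has left.
heartbeat = Concurrent::TimerTask.new(run_now: true, execution_interval: 0.1) do
  next unless dispatching.make_true        # only one dispatch pass at a time
  begin
    ready = pool.remaining_capacity        # free slots play the role of "ready" workers
    ready.times { pool.post { sleep 1 } }  # placeholder job
  ensure
    dispatching.make_false
  end
end

heartbeat.execute                              # start dispatching
sleep 2                                        # let it run briefly
heartbeat.kill                                 # stop scheduling new work
pool.shutdown                                  # soft shutdown: drain queued jobs
pool.kill unless pool.wait_for_termination(5)  # hard shutdown after 5 s
```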
data/lib/shoryuken/message.rb
CHANGED
@@ -3,19 +3,10 @@ module Shoryuken
     attr_accessor :client, :queue_url, :queue_name, :data

     def initialize(client, queue, data)
-      self.client
-      self.data
-
-
-        self.queue_url = queue.url
-        self.queue_name = queue.name
-      else
-        # TODO: Remove next major release
-        Shoryuken.logger.warn do
-          '[DEPRECATION] Passing a queue url into Shoryuken::Message is deprecated, please pass the queue itself'
-        end
-        self.queue_url = queue
-      end
+      self.client = client
+      self.data = data
+      self.queue_url = queue.url
+      self.queue_name = queue.name
     end

     def delete
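The deprecation branch is gone: `Shoryuken::Message.new` now always expects a queue object that responds to `url` and `name`, never a bare queue URL string. A hedged sketch of what a caller passes (illustrative only; the raw message is a stand-in for an SQS message struct, and inside the gem the fetcher builds these from its own queue and client):

```ruby
require 'ostruct'

queue   = Shoryuken::Client.queues('default')   # a Shoryuken::Queue: responds to #url and #name
raw_msg = OpenStruct.new(message_id: 'abc-123', body: '{}')  # stand-in for an Aws::SQS::Types::Message

sqs_msg = Shoryuken::Message.new(Shoryuken::Client.sqs, queue, raw_msg)
sqs_msg.queue_name  # => 'default'
```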
data/lib/shoryuken/middleware/chain.rb
CHANGED
@@ -102,32 +102,15 @@ module Shoryuken

     class Entry
       attr_reader :klass
+
       def initialize(klass, *args)
         @klass = klass
         @args = args
-
-        patch_deprecated_middleware!(klass)
       end

       def make_new
         @klass.new(*@args)
       end
-
-      private
-
-      def patch_deprecated_middleware!(klass)
-        if klass.instance_method(:call).arity == 3
-          Shoryuken.logger.warn { "[DEPRECATION] #{klass.name}#call(worker_instance, queue, sqs_msg) is deprecated. Please use #{klass.name}#call(worker_instance, queue, sqs_msg, body)" }
-
-          klass.class_eval do
-            alias_method :deprecated_call, :call
-
-            def call(worker_instance, queue, sqs_msg, body = nil, &block)
-              deprecated_call(worker_instance, queue, sqs_msg, &block)
-            end
-          end
-        end
-      end
     end
   end
 end
data/lib/shoryuken/middleware/server/auto_extend_visibility.rb
CHANGED
@@ -1,5 +1,3 @@
-require 'celluloid/current' unless defined?(Celluloid)
-
 module Shoryuken
   module Middleware
     module Server
@@ -10,32 +8,25 @@ module Shoryuken

         def call(worker, queue, sqs_msg, body)
           if sqs_msg.is_a?(Array)
-            logger.warn { "Auto extend visibility isn't supported for batch workers"
+            logger.warn { "Auto extend visibility isn't supported for batch workers" }
             return yield
           end

           timer = auto_visibility_timer(worker, queue, sqs_msg, body)
-
-
-
-          ensure
-            if timer
-              timer.cancel
-              @visibility_extender.terminate
-            end
-          end
+          yield
+        ensure
+          timer.kill if timer
         end

         private

         class MessageVisibilityExtender
-          include Celluloid
           include Util

           def auto_extend(worker, queue, sqs_msg, body)
             queue_visibility_timeout = Shoryuken::Client.queues(queue).visibility_timeout

-
+            Concurrent::TimerTask.new(execution_interval: queue_visibility_timeout - EXTEND_UPFRONT_SECONDS) do
               begin
                 logger.debug do
                   "Extending message #{worker_name(worker.class, sqs_msg, body)}/#{queue}/#{sqs_msg.message_id} " \
@@ -56,8 +47,8 @@ module Shoryuken

         def auto_visibility_timer(worker, queue, sqs_msg, body)
           return unless worker.class.auto_visibility_timeout?
-
-
+
+          MessageVisibilityExtender.new.auto_extend(worker, queue, sqs_msg, body).tap(&:execute)
         end
       end
     end
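The Celluloid-based extender actor (`timer.cancel`, `@visibility_extender.terminate`) is replaced by a plain `Concurrent::TimerTask` that is started with `execute` and discarded with `kill` in the `ensure` block. A rough standalone sketch of the same idea (not Shoryuken code; the message stub, 30-second timeout, and sleeping job are placeholders):

```ruby
require 'concurrent'

EXTEND_UPFRONT_SECONDS = 5
visibility_timeout     = 30   # placeholder: Shoryuken reads this from the queue's attributes

# Stand-in for the real SQS message; only #change_visibility matters here.
sqs_msg = Object.new
def sqs_msg.change_visibility(visibility_timeout:)
  puts "extending visibility by #{visibility_timeout}s"
end

extender = Concurrent::TimerTask.new(execution_interval: visibility_timeout - EXTEND_UPFRONT_SECONDS) do
  # Push the timeout out again shortly before it would expire.
  sqs_msg.change_visibility(visibility_timeout: visibility_timeout)
end

begin
  extender.execute   # keep extending while the long-running job works
  sleep 60           # placeholder for the worker's perform
ensure
  extender.kill      # stop extending whether the job finished or raised
end
```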
data/lib/shoryuken/middleware/server/exponential_backoff_retry.rb
CHANGED
@@ -6,16 +6,16 @@ module Shoryuken

         def call(worker, queue, sqs_msg, body)
           if sqs_msg.is_a?(Array)
-            logger.warn { "Exponential backoff isn't supported for batch workers"
+            logger.warn { "Exponential backoff isn't supported for batch workers" }
             return yield
           end

           started_at = Time.now
           yield
         rescue
-          retry_intervals =
+          retry_intervals = worker.class.get_shoryuken_options['retry_intervals']

-          if retry_intervals.
+          if retry_intervals.nil? || !handle_failure(sqs_msg, started_at, retry_intervals)
             # Re-raise the exception if the job is not going to be exponential backoff retried.
             # This allows custom middleware (like exception notifiers) to be aware of the unhandled failure.
             raise
@@ -24,28 +24,32 @@ module Shoryuken

         private

-        def
-          return
-
-          attempts =
-
-
-
-
-
-
-
-
-          # We calculate the maximum timeout by subtracting the amount of time since the receipt of the message.
-          #
-          # From the docs: "Amazon SQS restarts the timeout period using the new value."
-          # http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html#AboutVT-extending-message-visibility-timeout
-          max_timeout = 43200 - (Time.now - started_at).ceil - 1
+        def get_interval(retry_intervals, attempts)
+          return retry_intervals.call(attempts) if retry_intervals.respond_to?(:call)
+
+          if attempts <= (retry_intervals = Array(retry_intervals)).size
+            retry_intervals[attempts - 1]
+          else
+            retry_intervals.last
+          end
+        end
+
+        def next_visibility_timeout(interval, started_at)
+          max_timeout = 43_200 - (Time.now - started_at).ceil - 1
           interval = max_timeout if interval > max_timeout
+          interval.to_i
+        end
+
+        def handle_failure(sqs_msg, started_at, retry_intervals)
+          receive_count = sqs_msg.attributes['ApproximateReceiveCount'].to_i

-
+          return false unless (interval = get_interval(retry_intervals, receive_count))
+
+          sqs_msg.change_visibility(visibility_timeout: next_visibility_timeout(interval.to_i, started_at))

           logger.info { "Message #{sqs_msg.message_id} failed, will be retried in #{interval} seconds." }
+
+          true
         end
       end
     end
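For reference, the rewritten middleware splits the old monolithic handler into three private helpers: `get_interval` picks the delay for the current `ApproximateReceiveCount` (a callable, or an array clamped to its last element), `next_visibility_timeout` caps it at SQS's 12-hour visibility ceiling, and `handle_failure` wires them to `change_visibility`. A worked example of the interval math, restating the two helpers above with assumed values:

```ruby
# Same logic as the helpers in the diff, restated standalone for illustration.
def get_interval(retry_intervals, attempts)
  return retry_intervals.call(attempts) if retry_intervals.respond_to?(:call)

  retry_intervals = Array(retry_intervals)
  attempts <= retry_intervals.size ? retry_intervals[attempts - 1] : retry_intervals.last
end

def next_visibility_timeout(interval, started_at)
  max_timeout = 43_200 - (Time.now - started_at).ceil - 1   # 43_200 s = SQS's 12 h maximum
  interval > max_timeout ? max_timeout : interval.to_i
end

intervals = [60, 300, 3600]            # e.g. retry_intervals from shoryuken_options
get_interval(intervals, 1)             # => 60    (first failure)
get_interval(intervals, 3)             # => 3600  (third failure)
get_interval(intervals, 10)            # => 3600  (clamped to the last entry)
get_interval(->(n) { 10 * n }, 4)      # => 40    (callable form)

next_visibility_timeout(3600, Time.now - 120)  # => 3600 (well under the 12 h cap)
```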