activejob 7.0.8 → 7.1.3.2
- checksums.yaml +4 -4
- data/CHANGELOG.md +160 -133
- data/MIT-LICENSE +1 -1
- data/README.md +2 -2
- data/lib/active_job/arguments.rb +28 -31
- data/lib/active_job/base.rb +1 -1
- data/lib/active_job/callbacks.rb +3 -5
- data/lib/active_job/configured_job.rb +4 -0
- data/lib/active_job/core.rb +26 -6
- data/lib/active_job/deprecator.rb +7 -0
- data/lib/active_job/enqueuing.rb +31 -1
- data/lib/active_job/exceptions.rb +48 -5
- data/lib/active_job/execution.rb +5 -1
- data/lib/active_job/gem_version.rb +4 -4
- data/lib/active_job/instrumentation.rb +18 -10
- data/lib/active_job/log_subscriber.rb +80 -8
- data/lib/active_job/queue_adapter.rb +13 -2
- data/lib/active_job/queue_adapters/async_adapter.rb +2 -2
- data/lib/active_job/queue_adapters/backburner_adapter.rb +7 -3
- data/lib/active_job/queue_adapters/delayed_job_adapter.rb +1 -1
- data/lib/active_job/queue_adapters/inline_adapter.rb +1 -1
- data/lib/active_job/queue_adapters/queue_classic_adapter.rb +4 -4
- data/lib/active_job/queue_adapters/resque_adapter.rb +1 -1
- data/lib/active_job/queue_adapters/sidekiq_adapter.rb +42 -14
- data/lib/active_job/queue_adapters/sneakers_adapter.rb +1 -1
- data/lib/active_job/queue_adapters/sucker_punch_adapter.rb +3 -3
- data/lib/active_job/queue_adapters/test_adapter.rb +3 -3
- data/lib/active_job/queue_adapters.rb +8 -7
- data/lib/active_job/queue_priority.rb +18 -1
- data/lib/active_job/railtie.rb +25 -6
- data/lib/active_job/serializers/big_decimal_serializer.rb +22 -0
- data/lib/active_job/serializers/duration_serializer.rb +4 -2
- data/lib/active_job/serializers/time_with_zone_serializer.rb +11 -2
- data/lib/active_job/serializers.rb +7 -3
- data/lib/active_job/test_helper.rb +32 -14
- data/lib/active_job/version.rb +1 -1
- data/lib/active_job.rb +26 -4
- data/lib/rails/generators/job/USAGE +19 -0
- data/lib/rails/generators/job/job_generator.rb +6 -2
- data/lib/rails/generators/job/templates/job.rb.tt +1 -1
- metadata +11 -9
- data/lib/active_job/queue_adapters/que_adapter.rb +0 -61
data/lib/active_job/enqueuing.rb
CHANGED
@@ -9,6 +9,36 @@ module ActiveJob
   # why the adapter was unexpectedly unable to enqueue a job.
   class EnqueueError < StandardError; end
 
+  class << self
+    # Push many jobs onto the queue at once without running enqueue callbacks.
+    # Queue adapters may communicate the enqueue status of each job by setting
+    # successfully_enqueued and/or enqueue_error on the passed-in job instances.
+    def perform_all_later(*jobs)
+      jobs.flatten!
+      jobs.group_by(&:queue_adapter).each do |queue_adapter, adapter_jobs|
+        instrument_enqueue_all(queue_adapter, adapter_jobs) do
+          if queue_adapter.respond_to?(:enqueue_all)
+            queue_adapter.enqueue_all(adapter_jobs)
+          else
+            adapter_jobs.each do |job|
+              job.successfully_enqueued = false
+              if job.scheduled_at
+                queue_adapter.enqueue_at(job, job._scheduled_at_time.to_f)
+              else
+                queue_adapter.enqueue(job)
+              end
+              job.successfully_enqueued = true
+            rescue EnqueueError => e
+              job.enqueue_error = e
+            end
+            adapter_jobs.count(&:successfully_enqueued?)
+          end
+        end
+      end
+      nil
+    end
+  end
+
   module Enqueuing
     extend ActiveSupport::Concern
 
@@ -62,7 +92,7 @@ module ActiveJob
 
       run_callbacks :enqueue do
         if scheduled_at
-          queue_adapter.enqueue_at self, scheduled_at
+          queue_adapter.enqueue_at self, _scheduled_at_time.to_f
         else
           queue_adapter.enqueue self
         end
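Usage note: the perform_all_later API added above can be driven like this (a minimal sketch; WelcomeEmailJob is a hypothetical ActiveJob::Base subclass):

    jobs = users.map { |user| WelcomeEmailJob.new(user) }

    # Enqueue callbacks are skipped; adapters that implement enqueue_all
    # (such as Sidekiq via push_bulk) receive the whole batch at once.
    ActiveJob.perform_all_later(jobs)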
data/lib/active_job/exceptions.rb
CHANGED
@@ -9,6 +9,7 @@ module ActiveJob
 
     included do
       class_attribute :retry_jitter, instance_accessor: false, instance_predicate: false, default: 0.0
+      class_attribute :after_discard_procs, default: []
     end
 
     module ClassMethods
@@ -20,10 +21,13 @@ module ActiveJob
      # You can also pass a block that'll be invoked if the retry attempts fail for custom logic rather than letting
      # the exception bubble up. This block is yielded with the job instance as the first and the error instance as the second parameter.
      #
+      # `retry_on` and `discard_on` handlers are searched from bottom to top, and up the class hierarchy. The handler of the first class for
+      # which <tt>exception.is_a?(klass)</tt> holds true is the one invoked, if any.
+      #
      # ==== Options
      # * <tt>:wait</tt> - Re-enqueues the job with a delay specified either in seconds (default: 3 seconds),
      #   as a computing proc that takes the number of executions so far as an argument, or as a symbol reference of
-      #   <tt>:exponentially_longer</tt>, which applies the wait algorithm of <tt>((executions**4) + (Kernel.rand * (executions**4) * jitter)) + 2</tt>
+      #   <tt>:polynomially_longer</tt>, which applies the wait algorithm of <tt>((executions**4) + (Kernel.rand * (executions**4) * jitter)) + 2</tt>
      #   (first wait ~3s, then ~18s, then ~83s, etc)
      # * <tt>:attempts</tt> - Re-enqueues the job the specified number of times (default: 5 attempts) or a symbol reference of <tt>:unlimited</tt>
      #   to retry the job until it succeeds
@@ -39,11 +43,11 @@ module ActiveJob
      #   retry_on CustomInfrastructureException, wait: 5.minutes, attempts: :unlimited
      #
      #   retry_on ActiveRecord::Deadlocked, wait: 5.seconds, attempts: 3
-      #   retry_on Net::OpenTimeout, Timeout::Error, wait: :exponentially_longer, attempts: 10 # retries at most 10 times for Net::OpenTimeout and Timeout::Error combined
+      #   retry_on Net::OpenTimeout, Timeout::Error, wait: :polynomially_longer, attempts: 10 # retries at most 10 times for Net::OpenTimeout and Timeout::Error combined
      #   # To retry at most 10 times for each individual exception:
-      #   # retry_on Net::OpenTimeout, wait: :exponentially_longer, attempts: 10
+      #   # retry_on Net::OpenTimeout, wait: :polynomially_longer, attempts: 10
      #   # retry_on Net::ReadTimeout, wait: 5.seconds, jitter: 0.30, attempts: 10
-      #   # retry_on Timeout::Error, wait: :exponentially_longer, attempts: 10
+      #   # retry_on Timeout::Error, wait: :polynomially_longer, attempts: 10
      #
      #   retry_on(YetAnotherCustomAppException) do |job, error|
      #     ExceptionNotifier.caught(error)
@@ -56,6 +60,12 @@ module ActiveJob
      #     end
      #   end
      def retry_on(*exceptions, wait: 3.seconds, attempts: 5, queue: nil, priority: nil, jitter: JITTER_DEFAULT)
+        if wait == :exponentially_longer
+          ActiveJob.deprecator.warn(<<~MSG.squish)
+            `wait: :exponentially_longer` will actually wait polynomially longer and is therefore deprecated.
+            Prefer `wait: :polynomially_longer` to avoid confusion and keep the same behavior.
+          MSG
+        end
        rescue_from(*exceptions) do |error|
          executions = executions_for(exceptions)
          if attempts == :unlimited || executions < attempts
@@ -65,8 +75,10 @@ module ActiveJob
              instrument :retry_stopped, error: error do
                yield self, error
              end
+              run_after_discard_procs(error)
            else
              instrument :retry_stopped, error: error
+              run_after_discard_procs(error)
              raise error
            end
          end
@@ -78,6 +90,9 @@ module ActiveJob
      #
      # You can also pass a block that'll be invoked. This block is yielded with the job instance as the first and the error instance as the second parameter.
      #
+      # `retry_on` and `discard_on` handlers are searched from bottom to top, and up the class hierarchy. The handler of the first class for
+      # which <tt>exception.is_a?(klass)</tt> holds true is the one invoked, if any.
+      #
      # ==== Example
      #
      #   class SearchIndexingJob < ActiveJob::Base
@@ -95,9 +110,26 @@ module ActiveJob
        rescue_from(*exceptions) do |error|
          instrument :discard, error: error do
            yield self, error if block_given?
+            run_after_discard_procs(error)
          end
        end
      end
+
+      # A block to run when a job is about to be discarded for any reason.
+      #
+      # ==== Example
+      #
+      #   class WorkJob < ActiveJob::Base
+      #     after_discard do |job, exception|
+      #       ExceptionNotifier.report(exception)
+      #     end
+      #
+      #     ...
+      #
+      #   end
+      def after_discard(&blk)
+        self.after_discard_procs += [blk]
+      end
    end
 
    # Reschedules the job to be re-executed. This is useful in combination with
@@ -136,7 +168,8 @@ module ActiveJob
        jitter = jitter == JITTER_DEFAULT ? self.class.retry_jitter : (jitter || 0.0)
 
        case seconds_or_duration_or_algorithm
-        when :exponentially_longer
+        when :exponentially_longer, :polynomially_longer
+          # This delay uses a polynomial backoff strategy, which was previously misnamed as exponential
          delay = executions**4
          delay_jitter = determine_jitter_for_delay(delay, jitter)
          delay + delay_jitter + 2
@@ -165,5 +198,15 @@ module ActiveJob
          executions
        end
      end
+
+      def run_after_discard_procs(exception)
+        exceptions = []
+        after_discard_procs.each do |blk|
+          instance_exec(self, exception, &blk)
+        rescue StandardError => e
+          exceptions << e
+        end
+        raise exceptions.last unless exceptions.empty?
+      end
  end
end
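Usage note: the after_discard hook and the renamed :polynomially_longer wait strategy introduced above combine like this (a minimal sketch; ImportJob and ReportingService are hypothetical):

    class ImportJob < ApplicationJob
      retry_on Timeout::Error, wait: :polynomially_longer, attempts: 10

      # Runs whenever the job is discarded: via discard_on, after retry_on
      # gives up, or when an unhandled error ends execution.
      after_discard do |job, exception|
        ReportingService.notify(exception, job_id: job.job_id)
      end

      def perform(source)
        # ...
      end
    end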
data/lib/active_job/execution.rb
CHANGED
@@ -51,7 +51,11 @@ module ActiveJob
 
      _perform_job
    rescue Exception => exception
-      rescue_with_handler(exception) || raise
+      handled = rescue_with_handler(exception)
+      return handled if handled
+
+      run_after_discard_procs(exception)
+      raise
    end
 
    def perform(*)
data/lib/active_job/gem_version.rb
CHANGED
@@ -1,16 +1,16 @@
 # frozen_string_literal: true
 
 module ActiveJob
-  # Returns the currently loaded version of Active Job as a <tt>Gem::Version</tt>
+  # Returns the currently loaded version of Active Job as a +Gem::Version+.
  def self.gem_version
    Gem::Version.new VERSION::STRING
  end
 
  module VERSION
    MAJOR = 7
-    MINOR = 0
-    TINY  = 8
-    PRE   = nil
+    MINOR = 1
+    TINY  = 3
+    PRE   = "2"
 
    STRING = [MAJOR, MINOR, TINY, PRE].compact.join(".")
  end
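Usage note: a quick way to confirm the loaded version after upgrading (console sketch):

    ActiveJob.gem_version                             # => #<Gem::Version "7.1.3.2">
    ActiveJob.gem_version >= Gem::Version.new("7.1")  # => true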
data/lib/active_job/instrumentation.rb
CHANGED
@@ -1,6 +1,18 @@
 # frozen_string_literal: true
 
 module ActiveJob
+  class << self
+    private
+      def instrument_enqueue_all(queue_adapter, jobs)
+        payload = { adapter: queue_adapter, jobs: jobs }
+        ActiveSupport::Notifications.instrument("enqueue_all.active_job", payload) do
+          result = yield payload
+          payload[:enqueued_count] = result
+          result
+        end
+      end
+  end
+
  module Instrumentation # :nodoc:
    extend ActiveSupport::Concern
 
@@ -21,19 +33,15 @@ module ActiveJob
    end
 
    def instrument(operation, payload = {}, &block)
-      enhanced_block = ->(event_payload) do
-        value = block.call if block
-
-        if defined?(@_halted_callback_hook_called) && @_halted_callback_hook_called
-          event_payload[:aborted] = true
-          @_halted_callback_hook_called = nil
-        end
+      payload[:job] = self
+      payload[:adapter] = queue_adapter
 
+      ActiveSupport::Notifications.instrument("#{operation}.active_job", payload) do
+        value = block.call if block
+        payload[:aborted] = @_halted_callback_hook_called if defined?(@_halted_callback_hook_called)
+        @_halted_callback_hook_called = nil
        value
      end
-
-      ActiveSupport::Notifications.instrument \
-        "#{operation}.active_job", payload.merge(adapter: queue_adapter, job: self), &enhanced_block
    end
 
    def halted_callback_hook(*)
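Usage note: the new enqueue_all.active_job event emitted above can be consumed with a standard subscriber (a minimal sketch; the log destination is an assumption):

    ActiveSupport::Notifications.subscribe("enqueue_all.active_job") do |event|
      jobs  = event.payload[:jobs]
      count = event.payload[:enqueued_count]
      Rails.logger.info("Bulk enqueue: #{count}/#{jobs.size} jobs accepted by #{event.payload[:adapter].class}")
    end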
data/lib/active_job/log_subscriber.rb
CHANGED
@@ -1,10 +1,11 @@
 # frozen_string_literal: true
 
-require "active_support/core_ext/string/filters"
 require "active_support/log_subscriber"
 
 module ActiveJob
  class LogSubscriber < ActiveSupport::LogSubscriber # :nodoc:
+    class_attribute :backtrace_cleaner, default: ActiveSupport::BacktraceCleaner.new
+
    def enqueue(event)
      job = event.payload[:job]
      ex = event.payload[:exception_object] || job.enqueue_error
@@ -23,6 +24,7 @@ module ActiveJob
        end
      end
    end
+    subscribe_log_level :enqueue, :info
 
    def enqueue_at(event)
      job = event.payload[:job]
@@ -38,17 +40,48 @@ module ActiveJob
        end
      else
        info do
-          "Enqueued #{job.class.name} (Job ID: #{job.job_id}) to #{queue_name(event)} at #{scheduled_at(event)}" + args_info(job)
+          "Enqueued #{job.class.name} (Job ID: #{job.job_id}) to #{queue_name(event)} at #{scheduled_at(event)}" + args_info(job)
+        end
+      end
+    end
+    subscribe_log_level :enqueue_at, :info
+
+    def enqueue_all(event)
+      info do
+        jobs = event.payload[:jobs]
+        adapter = event.payload[:adapter]
+        enqueued_count = event.payload[:enqueued_count]
+
+        if enqueued_count == jobs.size
+          enqueued_jobs_message(adapter, jobs)
+        elsif jobs.any?(&:successfully_enqueued?)
+          enqueued_jobs = jobs.select(&:successfully_enqueued?)
+
+          failed_enqueue_count = jobs.size - enqueued_count
+          if failed_enqueue_count == 0
+            enqueued_jobs_message(adapter, enqueued_jobs)
+          else
+            "#{enqueued_jobs_message(adapter, enqueued_jobs)}. "\
+            "Failed enqueuing #{failed_enqueue_count} #{'job'.pluralize(failed_enqueue_count)}"
+          end
+        else
+          failed_enqueue_count = jobs.size - enqueued_count
+          "Failed enqueuing #{failed_enqueue_count} #{'job'.pluralize(failed_enqueue_count)} "\
+          "to #{ActiveJob.adapter_name(adapter)}"
        end
      end
    end
+    subscribe_log_level :enqueue_all, :info
 
    def perform_start(event)
      info do
        job = event.payload[:job]
-        "Performing #{job.class.name} (Job ID: #{job.job_id}) from #{queue_name(event)} enqueued at #{job.enqueued_at}" + args_info(job)
+        enqueue_info = job.enqueued_at.present? ? " enqueued at #{job.enqueued_at.utc.iso8601(9)}" : ""
+
+        "Performing #{job.class.name} (Job ID: #{job.job_id}) from #{queue_name(event)}" + enqueue_info + args_info(job)
      end
    end
+    subscribe_log_level :perform_start, :info
 
    def perform(event)
      job = event.payload[:job]
@@ -67,6 +100,7 @@ module ActiveJob
        end
      end
    end
+    subscribe_log_level :perform, :info
 
    def enqueue_retry(event)
      job = event.payload[:job]
@@ -75,34 +109,37 @@ module ActiveJob
 
      info do
        if ex
-          "Retrying #{job.class} in #{wait.to_i} seconds, due to a #{ex.class}."
+          "Retrying #{job.class} (Job ID: #{job.job_id}) after #{job.executions} attempts in #{wait.to_i} seconds, due to a #{ex.class} (#{ex.message})."
        else
-          "Retrying #{job.class} in #{wait.to_i} seconds."
+          "Retrying #{job.class} (Job ID: #{job.job_id}) after #{job.executions} attempts in #{wait.to_i} seconds."
        end
      end
    end
+    subscribe_log_level :enqueue_retry, :info
 
    def retry_stopped(event)
      job = event.payload[:job]
      ex = event.payload[:error]
 
      error do
-        "Stopped retrying #{job.class} due to a #{ex.class}, which reoccurred on #{job.executions} attempts."
+        "Stopped retrying #{job.class} (Job ID: #{job.job_id}) due to a #{ex.class} (#{ex.message}), which reoccurred on #{job.executions} attempts."
      end
    end
+    subscribe_log_level :enqueue_retry, :error
 
    def discard(event)
      job = event.payload[:job]
      ex = event.payload[:error]
 
      error do
-        "Discarded #{job.class} due to a #{ex.class}."
+        "Discarded #{job.class} (Job ID: #{job.job_id}) due to a #{ex.class} (#{ex.message})."
      end
    end
+    subscribe_log_level :discard, :error
 
    private
      def queue_name(event)
-        event.payload[:adapter].class.name.demodulize.remove("Adapter") + "(#{event.payload[:job].queue_name})"
+        ActiveJob.adapter_name(event.payload[:adapter]) + "(#{event.payload[:job].queue_name})"
      end
 
      def args_info(job)
@@ -134,6 +171,41 @@ module ActiveJob
      def logger
        ActiveJob::Base.logger
      end
+
+      def info(progname = nil, &block)
+        return unless super
+
+        if ActiveJob.verbose_enqueue_logs
+          log_enqueue_source
+        end
+      end
+
+      def error(progname = nil, &block)
+        return unless super
+
+        if ActiveJob.verbose_enqueue_logs
+          log_enqueue_source
+        end
+      end
+
+      def log_enqueue_source
+        source = extract_enqueue_source_location(caller)
+
+        if source
+          logger.info("↳ #{source}")
+        end
+      end
+
+      def extract_enqueue_source_location(locations)
+        backtrace_cleaner.clean(locations.lazy).first
+      end
+
+      def enqueued_jobs_message(adapter, enqueued_jobs)
+        enqueued_count = enqueued_jobs.size
+        job_classes_counts = enqueued_jobs.map(&:class).tally.sort_by { |_k, v| -v }
+        "Enqueued #{enqueued_count} #{'job'.pluralize(enqueued_count)} to #{ActiveJob.adapter_name(adapter)}"\
+        " (#{job_classes_counts.map { |klass, count| "#{count} #{klass}" }.join(', ')})"
+      end
  end
end
 
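Usage note: the "↳ source" line written by log_enqueue_source above is gated by a new setting; enabling it looks like this (sketch):

    # config/environments/development.rb
    config.active_job.verbose_enqueue_logs = true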
data/lib/active_job/queue_adapter.rb
CHANGED
@@ -3,7 +3,18 @@
 require "active_support/core_ext/string/inflections"
 
 module ActiveJob
-  # The <tt>ActiveJob::QueueAdapter</tt> module is used to load the
+  class << self
+    def adapter_name(adapter) # :nodoc:
+      return adapter.queue_adapter_name if adapter.respond_to?(:queue_adapter_name)
+
+      adapter_class = adapter.is_a?(Module) ? adapter : adapter.class
+      "#{adapter_class.name.demodulize.delete_suffix('Adapter')}"
+    end
+  end
+
+  # = Active Job Queue adapter
+  #
+  # The +ActiveJob::QueueAdapter+ module is used to load the
  # correct adapter. The default queue adapter is the +:async+ queue.
  module QueueAdapter # :nodoc:
    extend ActiveSupport::Concern
@@ -41,7 +52,7 @@ module ActiveJob
          assign_adapter(name_or_adapter.to_s, queue_adapter)
        else
          if queue_adapter?(name_or_adapter)
-            adapter_name = "#{name_or_adapter.class.name.demodulize.remove('Adapter').underscore}"
+            adapter_name = ActiveJob.adapter_name(name_or_adapter).underscore
            assign_adapter(adapter_name, name_or_adapter)
          else
            raise ArgumentError
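Usage note: the new ActiveJob.adapter_name hook lets a custom adapter control how it is named in logs (a minimal sketch; MyGemAdapter and MYGEM_QUEUE are hypothetical):

    class MyGemAdapter
      # Picked up by ActiveJob.adapter_name; without it, the demodulized
      # class name minus the "Adapter" suffix would be used instead.
      def queue_adapter_name
        "MyGem"
      end

      def enqueue(job)
        MYGEM_QUEUE << job.serialize
      end

      def enqueue_at(job, timestamp)
        MYGEM_QUEUE.schedule(job.serialize, at: timestamp)
      end
    end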
data/lib/active_job/queue_adapters/async_adapter.rb
CHANGED
@@ -7,7 +7,7 @@ require "concurrent/utility/processor_counter"
 
 module ActiveJob
  module QueueAdapters
-    #
+    # = Active Job Async adapter
    #
    # The Async adapter runs jobs with an in-process thread pool.
    #
@@ -95,7 +95,7 @@ module ActiveJob
 
      def enqueue_at(job, timestamp, queue_name:)
        delay = timestamp - Time.current.to_f
-        if delay > 0
+        if !immediate && delay > 0
          Concurrent::ScheduledTask.execute(delay, args: [job], executor: executor, &:perform)
        else
          enqueue(job, queue_name: queue_name)
data/lib/active_job/queue_adapters/backburner_adapter.rb
CHANGED
@@ -4,7 +4,7 @@ require "backburner"
 
 module ActiveJob
  module QueueAdapters
-    #
+    # = Backburner adapter for Active Job
    #
    # Backburner is a beanstalkd-powered job queue that can handle a very
    # high volume of jobs. You create background jobs and place them on
@@ -16,12 +16,16 @@ module ActiveJob
    #   Rails.application.config.active_job.queue_adapter = :backburner
    class BackburnerAdapter
      def enqueue(job) # :nodoc:
-        Backburner::Worker.enqueue(JobWrapper, [job.serialize], queue: job.queue_name, pri: job.priority)
+        response = Backburner::Worker.enqueue(JobWrapper, [job.serialize], queue: job.queue_name, pri: job.priority)
+        job.provider_job_id = response[:id] if response.is_a?(Hash)
+        response
      end
 
      def enqueue_at(job, timestamp) # :nodoc:
        delay = timestamp - Time.current.to_f
-        Backburner::Worker.enqueue(JobWrapper, [job.serialize], queue: job.queue_name, pri: job.priority, delay: delay)
+        response = Backburner::Worker.enqueue(JobWrapper, [job.serialize], queue: job.queue_name, pri: job.priority, delay: delay)
+        job.provider_job_id = response[:id] if response.is_a?(Hash)
+        response
      end
 
      class JobWrapper # :nodoc:
data/lib/active_job/queue_adapters/delayed_job_adapter.rb
CHANGED
@@ -5,7 +5,7 @@ require "active_support/core_ext/string/inflections"
 
 module ActiveJob
  module QueueAdapters
-    #
+    # = Delayed Job adapter for Active Job
    #
    # Delayed::Job (or DJ) encapsulates the common pattern of asynchronously
    # executing longer tasks in the background. Although DJ can have many
data/lib/active_job/queue_adapters/queue_classic_adapter.rb
CHANGED
@@ -4,7 +4,7 @@ require "queue_classic"
 
 module ActiveJob
  module QueueAdapters
-    #
+    # = queue_classic adapter for Active Job
    #
    # queue_classic provides a simple interface to a PostgreSQL-backed message
    # queue. queue_classic specializes in concurrent locking and minimizing
@@ -37,10 +37,10 @@ module ActiveJob
        qc_job
      end
 
-      # Builds a <tt>QC::Queue</tt> object to schedule jobs on.
+      # Builds a +QC::Queue+ object to schedule jobs on.
      #
-      # If you have a custom <tt>QC::Queue</tt> subclass you'll need to subclass
-      # <tt>ActiveJob::QueueAdapters::QueueClassicAdapter</tt> and override the
+      # If you have a custom +QC::Queue+ subclass you'll need to subclass
+      # +ActiveJob::QueueAdapters::QueueClassicAdapter+ and override the
      # <tt>build_queue</tt> method.
      def build_queue(queue_name)
        QC::Queue.new(queue_name)
data/lib/active_job/queue_adapters/resque_adapter.rb
CHANGED
@@ -16,7 +16,7 @@ end
 
 module ActiveJob
  module QueueAdapters
-    #
+    # = Resque adapter for Active Job
    #
    # Resque (pronounced like "rescue") is a Redis-backed library for creating
    # background jobs, placing those jobs on multiple queues, and processing
data/lib/active_job/queue_adapters/sidekiq_adapter.rb
CHANGED
@@ -1,14 +1,15 @@
 # frozen_string_literal: true
 
+gem "sidekiq", ">= 4.1.0"
 require "sidekiq"
 
 module ActiveJob
  module QueueAdapters
-    #
+    # = Sidekiq adapter for Active Job
    #
    # Simple, efficient background processing for Ruby. Sidekiq uses threads to
    # handle many jobs at the same time in the same process. It does not
-    # require Rails but will integrate tightly with it to make background
+    # require \Rails but will integrate tightly with it to make background
    # processing dead simple.
    #
    # Read more about Sidekiq {here}[http://sidekiq.org].
@@ -18,21 +19,48 @@ module ActiveJob
    #   Rails.application.config.active_job.queue_adapter = :sidekiq
    class SidekiqAdapter
      def enqueue(job) # :nodoc:
-        # Sidekiq::Client does not support symbols as keys
-        job.provider_job_id = Sidekiq::Client.push \
-          "class"   => JobWrapper,
-          "wrapped" => job.class.to_s,
-          "queue"   => job.queue_name,
-          "args"    => [ job.serialize ]
+        job.provider_job_id = JobWrapper.set(
+          wrapped: job.class,
+          queue: job.queue_name
+        ).perform_async(job.serialize)
      end
 
      def enqueue_at(job, timestamp) # :nodoc:
-        job.provider_job_id = Sidekiq::Client.push \
-          "class"   => JobWrapper,
-          "wrapped" => job.class.to_s,
-          "queue"   => job.queue_name,
-          "at"      => timestamp,
-          "args"    => [ job.serialize ]
+        job.provider_job_id = JobWrapper.set(
+          wrapped: job.class,
+          queue: job.queue_name,
+        ).perform_at(timestamp, job.serialize)
+      end
+
+      def enqueue_all(jobs) # :nodoc:
+        enqueued_count = 0
+        jobs.group_by(&:class).each do |job_class, same_class_jobs|
+          same_class_jobs.group_by(&:queue_name).each do |queue, same_class_and_queue_jobs|
+            immediate_jobs, scheduled_jobs = same_class_and_queue_jobs.partition { |job| job.scheduled_at.nil? }
+
+            if immediate_jobs.any?
+              jids = Sidekiq::Client.push_bulk(
+                "class" => JobWrapper,
+                "wrapped" => job_class,
+                "queue" => queue,
+                "args" => immediate_jobs.map { |job| [job.serialize] },
+              )
+              enqueued_count += jids.compact.size
+            end
+
+            if scheduled_jobs.any?
+              jids = Sidekiq::Client.push_bulk(
+                "class" => JobWrapper,
+                "wrapped" => job_class,
+                "queue" => queue,
+                "args" => scheduled_jobs.map { |job| [job.serialize] },
+                "at" => scheduled_jobs.map { |job| job.scheduled_at&.to_f }
+              )
+              enqueued_count += jids.compact.size
+            end
+          end
+        end
+        enqueued_count
      end
 
      class JobWrapper # :nodoc:
data/lib/active_job/queue_adapters/sneakers_adapter.rb
CHANGED
@@ -5,7 +5,7 @@ require "monitor"
 
 module ActiveJob
  module QueueAdapters
-    #
+    # = Sneakers adapter for Active Job
    #
    # A high-performance RabbitMQ background processing framework for Ruby.
    # Sneakers is being used in production for both I/O and CPU intensive
data/lib/active_job/queue_adapters/sucker_punch_adapter.rb
CHANGED
@@ -4,13 +4,13 @@ require "sucker_punch"
 
 module ActiveJob
  module QueueAdapters
-    #
+    # = Sucker Punch adapter for Active Job
    #
    # Sucker Punch is a single-process Ruby asynchronous processing library.
    # This reduces the cost of hosting on a service like Heroku along
    # with the memory footprint of having to maintain additional jobs if
    # hosting on a dedicated server. All queues can run within a
-    # single application (e.g. Rails, Sinatra, etc.) process.
+    # single application (e.g. \Rails, Sinatra, etc.) process.
    #
    # Read more about Sucker Punch {here}[https://github.com/brandonhilkert/sucker_punch].
    #
@@ -33,7 +33,7 @@ module ActiveJob
          delay = timestamp - Time.current.to_f
          JobWrapper.perform_in delay, job.serialize
        else
-          raise NotImplementedError, "sucker_punch 1.0 does not support `enqueued_at`. Please upgrade to version ~> 2.0.0 to enable this behavior."
+          raise NotImplementedError, "sucker_punch 1.0 does not support `enqueue_at`. Please upgrade to version ~> 2.0.0 to enable this behavior."
        end
      end
 