mission_control-jobs 0.1.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/MIT-LICENSE +20 -0
- data/README.md +244 -0
- data/Rakefile +8 -0
- data/app/assets/config/mission_control_jobs_manifest.js +4 -0
- data/app/assets/stylesheets/mission_control/jobs/application.css +16 -0
- data/app/assets/stylesheets/mission_control/jobs/forms.css +8 -0
- data/app/assets/stylesheets/mission_control/jobs/jobs.css +7 -0
- data/app/controllers/concerns/mission_control/jobs/adapter_features.rb +20 -0
- data/app/controllers/concerns/mission_control/jobs/application_scoped.rb +38 -0
- data/app/controllers/concerns/mission_control/jobs/failed_jobs_bulk_operations.rb +17 -0
- data/app/controllers/concerns/mission_control/jobs/job_filters.rb +18 -0
- data/app/controllers/concerns/mission_control/jobs/job_scoped.rb +16 -0
- data/app/controllers/concerns/mission_control/jobs/not_found_redirections.rb +25 -0
- data/app/controllers/concerns/mission_control/jobs/queue_scoped.rb +12 -0
- data/app/controllers/mission_control/jobs/application_controller.rb +11 -0
- data/app/controllers/mission_control/jobs/bulk_discards_controller.rb +20 -0
- data/app/controllers/mission_control/jobs/bulk_retries_controller.rb +10 -0
- data/app/controllers/mission_control/jobs/discards_controller.rb +13 -0
- data/app/controllers/mission_control/jobs/jobs_controller.rb +37 -0
- data/app/controllers/mission_control/jobs/queues/pauses_controller.rb +15 -0
- data/app/controllers/mission_control/jobs/queues_controller.rb +24 -0
- data/app/controllers/mission_control/jobs/retries_controller.rb +13 -0
- data/app/controllers/mission_control/jobs/workers_controller.rb +18 -0
- data/app/helpers/mission_control/jobs/application_helper.rb +8 -0
- data/app/helpers/mission_control/jobs/dates_helper.rb +19 -0
- data/app/helpers/mission_control/jobs/jobs_helper.rb +63 -0
- data/app/helpers/mission_control/jobs/navigation_helper.rb +51 -0
- data/app/helpers/mission_control/jobs/ui_helper.rb +23 -0
- data/app/javascript/mission_control/jobs/application.js +4 -0
- data/app/javascript/mission_control/jobs/controllers/application.js +9 -0
- data/app/javascript/mission_control/jobs/controllers/form_controller.js +21 -0
- data/app/javascript/mission_control/jobs/controllers/index.js +11 -0
- data/app/javascript/mission_control/jobs/helpers/debounce_helpers.js +9 -0
- data/app/javascript/mission_control/jobs/helpers/index.js +1 -0
- data/app/jobs/mission_control/jobs/application_job.rb +6 -0
- data/app/mailers/mission_control/jobs/application_mailer.rb +8 -0
- data/app/models/mission_control/jobs/application_record.rb +7 -0
- data/app/models/mission_control/jobs/current.rb +3 -0
- data/app/models/mission_control/jobs/page.rb +48 -0
- data/app/models/mission_control/jobs/worker.rb +17 -0
- data/app/views/layouts/mission_control/jobs/_application_selection.html.erb +11 -0
- data/app/views/layouts/mission_control/jobs/_flash.html.erb +9 -0
- data/app/views/layouts/mission_control/jobs/_navigation.html.erb +9 -0
- data/app/views/layouts/mission_control/jobs/application.html.erb +25 -0
- data/app/views/layouts/mission_control/jobs/application_selection/_applications.html.erb +13 -0
- data/app/views/layouts/mission_control/jobs/application_selection/_servers.html.erb +15 -0
- data/app/views/mission_control/jobs/jobs/_error_information.html.erb +19 -0
- data/app/views/mission_control/jobs/jobs/_filters.html.erb +35 -0
- data/app/views/mission_control/jobs/jobs/_general_information.html.erb +54 -0
- data/app/views/mission_control/jobs/jobs/_job.html.erb +13 -0
- data/app/views/mission_control/jobs/jobs/_jobs_page.html.erb +15 -0
- data/app/views/mission_control/jobs/jobs/_raw_data.html.erb +4 -0
- data/app/views/mission_control/jobs/jobs/_title.html.erb +13 -0
- data/app/views/mission_control/jobs/jobs/_toolbar.html.erb +18 -0
- data/app/views/mission_control/jobs/jobs/blocked/_job.html.erb +3 -0
- data/app/views/mission_control/jobs/jobs/failed/_actions.html.erb +5 -0
- data/app/views/mission_control/jobs/jobs/failed/_job.html.erb +7 -0
- data/app/views/mission_control/jobs/jobs/finished/_job.html.erb +2 -0
- data/app/views/mission_control/jobs/jobs/in_progress/_job.html.erb +9 -0
- data/app/views/mission_control/jobs/jobs/index.html.erb +19 -0
- data/app/views/mission_control/jobs/jobs/scheduled/_job.html.erb +7 -0
- data/app/views/mission_control/jobs/jobs/show.html.erb +6 -0
- data/app/views/mission_control/jobs/queues/_actions.html.erb +7 -0
- data/app/views/mission_control/jobs/queues/_job.html.erb +15 -0
- data/app/views/mission_control/jobs/queues/_queue.html.erb +16 -0
- data/app/views/mission_control/jobs/queues/_queue_title.html.erb +17 -0
- data/app/views/mission_control/jobs/queues/index.html.erb +16 -0
- data/app/views/mission_control/jobs/queues/show.html.erb +25 -0
- data/app/views/mission_control/jobs/shared/_pagination_toolbar.html.erb +5 -0
- data/app/views/mission_control/jobs/workers/_configuration.html.erb +6 -0
- data/app/views/mission_control/jobs/workers/_job.html.erb +19 -0
- data/app/views/mission_control/jobs/workers/_jobs.html.erb +20 -0
- data/app/views/mission_control/jobs/workers/_raw_data.html.erb +6 -0
- data/app/views/mission_control/jobs/workers/_title.html.erb +11 -0
- data/app/views/mission_control/jobs/workers/_worker.html.erb +21 -0
- data/app/views/mission_control/jobs/workers/index.html.erb +17 -0
- data/app/views/mission_control/jobs/workers/show.html.erb +7 -0
- data/config/importmap.rb +6 -0
- data/config/routes.rb +33 -0
- data/lib/active_job/errors/invalid_operation.rb +5 -0
- data/lib/active_job/errors/job_not_found_error.rb +14 -0
- data/lib/active_job/errors/query_error.rb +5 -0
- data/lib/active_job/executing.rb +43 -0
- data/lib/active_job/execution_error.rb +8 -0
- data/lib/active_job/failed.rb +11 -0
- data/lib/active_job/job_proxy.rb +26 -0
- data/lib/active_job/jobs_relation.rb +300 -0
- data/lib/active_job/querying.rb +44 -0
- data/lib/active_job/queue.rb +62 -0
- data/lib/active_job/queue_adapters/resque_ext.rb +300 -0
- data/lib/active_job/queue_adapters/solid_queue_ext.rb +294 -0
- data/lib/active_job/queues.rb +29 -0
- data/lib/mission_control/jobs/adapter.rb +108 -0
- data/lib/mission_control/jobs/application.rb +17 -0
- data/lib/mission_control/jobs/applications.rb +8 -0
- data/lib/mission_control/jobs/console/context.rb +11 -0
- data/lib/mission_control/jobs/console/helpers.rb +26 -0
- data/lib/mission_control/jobs/engine.rb +88 -0
- data/lib/mission_control/jobs/errors/incompatible_adapter.rb +2 -0
- data/lib/mission_control/jobs/errors/resource_not_found.rb +2 -0
- data/lib/mission_control/jobs/identified_by_name.rb +18 -0
- data/lib/mission_control/jobs/identified_elements.rb +23 -0
- data/lib/mission_control/jobs/server/serializable.rb +24 -0
- data/lib/mission_control/jobs/server/workers.rb +15 -0
- data/lib/mission_control/jobs/server.rb +26 -0
- data/lib/mission_control/jobs/version.rb +5 -0
- data/lib/mission_control/jobs.rb +19 -0
- data/lib/resque/thread_safe_redis.rb +34 -0
- data/lib/tasks/mission_control/jobs_tasks.rake +4 -0
- metadata +364 -0
# A relation of jobs that can be filtered and acted on.
#
# Relations of jobs are normally fetched via +ActiveJob::Base.jobs+
# or through a given queue (+ActiveJob::Queue#jobs+).
#
# This class offers a fluid interface to query a subset of jobs. For
# example:
#
#   queue = ActiveJob::Base.queues[:default]
#   queue.jobs.limit(10).where(job_class_name: "DummyJob").last
#
# Relations are enumerable, so you can use +Enumerable+ methods on them.
# Notice however that using these methods will imply loading all the relation
# in memory, which could introduce performance concerns.
#
# Internally, +ActiveJob+ will always use paginated queries to the underlying
# queue adapter. The page size can be controlled via +config.active_job.default_page_size+
# (1000 by default).
#
# There are additional performance concerns depending on the configured
# adapter. Please check +ActiveJob::Relation#where+, +ActiveJob::Relation#count+.
class ActiveJob::JobsRelation
  include Enumerable

  # Job statuses a relation can be scoped to (see +#with_status+).
  STATUSES = %i[ pending failed in_progress blocked scheduled finished ]
  # Filters that may be applied in memory when the adapter can't apply
  # them natively (see +#filters+ / +#filtering_needed?+).
  FILTERS = %i[ queue_name job_class_name ]

  PROPERTIES = %i[ queue_name status offset_value limit_value job_class_name worker_id ]
  attr_reader *PROPERTIES, :default_page_size

  delegate :last, :[], :reverse, to: :to_a
  delegate :logger, to: MissionControl::Jobs

  ALL_JOBS_LIMIT = 100_000_000 # When no limit value it defaults to "all jobs"

  def initialize(queue_adapter: ActiveJob::Base.queue_adapter, default_page_size: ActiveJob::Base.default_page_size)
    @queue_adapter = queue_adapter
    @default_page_size = default_page_size

    set_defaults
  end

  # Returns a +ActiveJob::JobsRelation+ with the configured filtering options.
  #
  # === Options
  #
  # * <tt>:job_class_name</tt> - To only include the jobs of a given class.
  #   Depending on the configured queue adapter, this will perform the
  #   filtering in memory, which could introduce performance concerns
  #   for large sets of jobs.
  # * <tt>:queue_name</tt> - To only include the jobs in the provided queue.
  # * <tt>:worker_id</tt> - To only include the jobs processed by the provided worker.
  def where(job_class_name: nil, queue_name: nil, worker_id: nil)
    # Remove nil arguments to avoid overriding parameters when concatenating +where+ clauses
    arguments = { job_class_name: job_class_name, queue_name: queue_name, worker_id: worker_id }.compact.collect { |key, value| [ key, value.to_s ] }.to_h
    clone_with **arguments
  end

  # Returns a new relation scoped to +status+. Unknown statuses are
  # ignored and +self+ is returned unchanged.
  def with_status(status)
    if status.to_sym.in? STATUSES
      clone_with status: status.to_sym
    else
      self
    end
  end

  # Define one scope method (+failed+, +pending+, ...) and one predicate
  # (+failed?+, +pending?+, ...) per supported status.
  STATUSES.each do |status|
    define_method status do
      with_status(status)
    end

    define_method "#{status}?" do
      self.status == status
    end
  end

  # Sets an offset for the jobs-fetching query. The first position is 0.
  def offset(offset)
    clone_with offset_value: offset
  end

  # Sets the max number of jobs to fetch in the query.
  def limit(limit)
    clone_with limit_value: limit
  end

  # Returns the number of jobs in the relation.
  #
  # When filtering jobs, if the adapter doesn't support the filter(s)
  # directly, this will load all the jobs in memory to filter them.
  def count
    if loaded? || filtering_needed?
      to_a.length
    else
      query_count
    end
  end

  alias length count
  alias size count

  def empty?
    count == 0
  end

  # Human-readable description including all the set properties and
  # whether the relation has been loaded in memory.
  def to_s
    properties_with_values = PROPERTIES.collect do |name|
      value = public_send(name)
      "#{name}: #{value}" unless value.nil?
    end.compact.join(", ")
    "<Jobs with [#{properties_with_values}]> (loaded: #{loaded?})"
  end

  alias inspect to_s

  # Enumerates jobs, serving them from the memoized +loaded_jobs+ when the
  # relation was already loaded, and fetching (and memoizing) them otherwise.
  def each(&block)
    loaded_jobs&.each(&block) || load_jobs(&block)
  end

  # Retry all the jobs in the queue.
  #
  # This operation is only valid for sets of failed jobs. It will
  # raise an error +ActiveJob::Errors::InvalidOperation+ otherwise.
  def retry_all
    ensure_failed_status
    queue_adapter.retry_all_jobs(self)
    nil
  end

  # Retry the provided job.
  #
  # This operation is only valid for sets of failed jobs. It will
  # raise an error +ActiveJob::Errors::InvalidOperation+ otherwise.
  def retry_job(job)
    ensure_failed_status
    queue_adapter.retry_job(job, self)
  end

  # Discard all the jobs in the relation.
  def discard_all
    queue_adapter.discard_all_jobs(self)
    nil
  end

  # Discard the provided job.
  def discard_job(job)
    queue_adapter.discard_job(job, self)
  end

  # Find a job by id.
  #
  # Returns nil when not found.
  def find_by_id(job_id)
    queue_adapter.find_job(job_id, self)
  end

  # Find a job by id.
  #
  # Raises +ActiveJob::Errors::JobNotFoundError+ when not found.
  def find_by_id!(job_id)
    queue_adapter.find_job(job_id, self) or raise ActiveJob::Errors::JobNotFoundError.new(job_id, self)
  end

  # Returns an array of jobs class names in the first +from_first+ jobs.
  def job_class_names(from_first: 500)
    first(from_first).collect(&:job_class_name).uniq
  end

  # Clears all memoized state (count, loaded jobs, computed filters) so the
  # next access hits the adapter again. Returns +self+.
  def reload
    @count = nil
    @loaded_jobs = nil
    @filters = nil

    self
  end

  # Yields sub-relations (pages) of up to +of+ jobs, in ascending or
  # descending offset order. Not compatible with an explicit offset/limit
  # (raises +ActiveJob::Errors::InvalidOperation+ via the validation below).
  def in_batches(of: default_page_size, order: :asc, &block)
    validate_looping_in_batches_is_possible

    case order
    when :asc
      in_ascending_batches(of: of, &block)
    when :desc
      in_descending_batches(of: of, &block)
    else
      raise "Unsupported order: #{order}. Valid values: :asc, :desc."
    end
  end

  def paginated?
    offset_value > 0 || limit_value_provided?
  end

  # True when the user provided an explicit limit (the internal
  # "all jobs" sentinel doesn't count as one).
  def limit_value_provided?
    limit_value.present? && limit_value != ActiveJob::JobsRelation::ALL_JOBS_LIMIT
  end

  # True when at least one filter must be applied in memory because the
  # adapter can't handle it natively.
  def filtering_needed?
    filters.any?
  end

  private
    attr_reader :queue_adapter, :loaded_jobs
    attr_writer *PROPERTIES

    def set_defaults
      self.offset_value = 0
      self.limit_value = ALL_JOBS_LIMIT
    end

    # Duplicates the relation, resets its memoized state and assigns the
    # given properties on the copy. Keeps relations immutable to callers.
    def clone_with(**properties)
      dup.reload.tap do |relation|
        properties.each do |key, value|
          relation.send("#{key}=", value)
        end
      end
    end

    def query_count
      @count ||= queue_adapter.jobs_count(self)
    end

    # Fetches all jobs, memoizing them in +@loaded_jobs+ while yielding
    # each one to the caller.
    def load_jobs
      @loaded_jobs = []
      perform_each do |job|
        @loaded_jobs << job
        yield job
      end
    end

    # Pages through the adapter yielding jobs one by one. Stops when the
    # adapter returns an empty page or when +limit_value+ jobs (post
    # in-memory filtering) have been yielded. Note the offset advances by
    # the requested page size, not by the filtered count, so filtered-out
    # jobs are never re-visited.
    def perform_each
      current_offset = offset_value
      pending_count = limit_value || Float::INFINITY

      begin
        limit = [ pending_count, default_page_size ].min
        page = offset(current_offset).limit(limit)
        jobs = queue_adapter.fetch_jobs(page)
        finished = jobs.empty?
        jobs = filter(jobs) if filtering_needed?
        Array(jobs).each { |job| yield job }
        current_offset += limit
        pending_count -= jobs.length
      end until finished || pending_count.zero?
    end

    def loaded?
      !@loaded_jobs.nil?
    end

    # Filtering for not natively supported filters is performed in memory
    def filter(jobs)
      jobs.filter { |job| satisfy_filter?(job) }
    end

    def satisfy_filter?(job)
      filters.all? { |property| public_send(property) == job.public_send(property) }
    end

    # The subset of FILTERS that are set on this relation but not natively
    # supported by the adapter — these must be applied in memory.
    def filters
      @filters ||= FILTERS.select { |property| public_send(property).present? && !queue_adapter.supports_filter?(self, property) }
    end

    def ensure_failed_status
      raise ActiveJob::Errors::InvalidOperation, "This operation can only be performed on failed jobs, but these jobs are #{status}" unless failed?
    end

    def validate_looping_in_batches_is_possible
      raise ActiveJob::Errors::InvalidOperation, "Looping in batches is not compatible with providing offset or limit" if paginated?
    end

    def in_ascending_batches(of:)
      current_offset = 0
      max = count
      begin
        page = offset(current_offset).limit(of)
        current_offset += of
        logger.info page
        yield page
        wait_batch_delay
      end until current_offset >= max
    end

    # Yields pages from the tail of the relation towards the head. The
    # first (lowest-offset) page may be smaller than +of+, which the
    # negative-offset arithmetic below accounts for.
    def in_descending_batches(of:)
      current_offset = count - of

      begin
        limit = current_offset < 0 ? of + current_offset : of
        page = offset([ current_offset, 0 ].max).limit(limit)
        current_offset -= of
        logger.info page
        yield page
        wait_batch_delay
      end until current_offset + of <= 0
    end

    # Optional throttle between bulk-operation batches, configured via
    # +MissionControl::Jobs.delay_between_bulk_operation_batches+.
    def wait_batch_delay
      sleep MissionControl::Jobs.delay_between_bulk_operation_batches if MissionControl::Jobs.delay_between_bulk_operation_batches.to_i > 0
    end
end
# Adds querying capabilities to +ActiveJob+ classes: listing queues and
# building job relations backed by the configured queue adapter.
module ActiveJob::Querying
  extend ActiveSupport::Concern

  included do
    # ActiveJob will use pagination internally when fetching relations of jobs. This
    # parameter sets the max amount of jobs to fetch in each data store query.
    class_attribute :default_page_size, default: 1000
  end

  class_methods do
    # Returns the list of queues.
    #
    # See +ActiveJob::Queues+
    def queues
      ActiveJob::Queues.new(fetch_queues)
    end

    # Returns a fresh +ActiveJob::JobsRelation+ bound to this class's
    # queue adapter and page size.
    def jobs
      ActiveJob::JobsRelation.new(queue_adapter: queue_adapter, default_page_size: default_page_size)
    end

    private
      # Builds one +ActiveJob::Queue+ per queue exposed by the adapter.
      def fetch_queues
        queue_adapter.queues.map do |queue_attributes|
          ActiveJob::Queue.new(queue_attributes[:name], size: queue_attributes[:size], active: queue_attributes[:active], queue_adapter: queue_adapter)
        end
      end
  end

  # The +ActiveJob::Queue+ this job instance belongs to.
  def queue
    self.class.queues[queue_name]
  end

  # Top-level query methods added to `ActiveJob`
  module Root
    def queues
      ActiveJob::Base.queues
    end

    def jobs
      ActiveJob::Base.jobs
    end
  end
end
# A queue of jobs.
#
# Size and active state may be supplied up front (when listing queues in
# bulk) or fetched lazily from the queue adapter and memoized.
class ActiveJob::Queue
  attr_reader :name

  def initialize(name, size: nil, active: nil, queue_adapter: ActiveJob::Base.queue_adapter)
    @name = name
    @queue_adapter = queue_adapter
    @size = size
    @active = active
  end

  # Number of jobs in the queue, fetched from the adapter on first access.
  def size
    @size = queue_adapter.queue_size(name) if @size.nil?
    @size
  end

  alias length size

  # Removes every job from the queue.
  def clear
    queue_adapter.clear_queue(name)
  end

  def empty?
    size.zero?
  end

  def pause
    queue_adapter.pause_queue(name)
  end

  def resume
    queue_adapter.resume_queue(name)
  end

  def paused?
    !active?
  end

  # Whether the queue is currently accepting work; memoized after the
  # first adapter lookup unless provided at construction time.
  def active?
    @active = !queue_adapter.queue_paused?(name) if @active.nil?
    @active
  end

  # Return an +ActiveJob::JobsRelation+ with the pending jobs in the queue.
  def jobs
    ActiveJob::JobsRelation.new(queue_adapter: queue_adapter).pending.where(queue_name: name)
  end

  # Drops memoized size/active state so they are re-fetched. Returns +self+.
  def reload
    @size = nil
    @active = nil
    self
  end

  # URL-safe identifier derived from the queue name.
  def id
    name.parameterize
  end

  alias to_param id

  private
    attr_reader :queue_adapter
end
# Resque implementation of the MissionControl::Jobs adapter interface:
# exposes queues, counts and job-level operations (retry/discard/find)
# on top of Resque's Redis data structures.
module ActiveJob::QueueAdapters::ResqueExt
  include MissionControl::Jobs::Adapter

  def initialize(redis = Resque.redis)
    super()
    @redis = redis
  end

  # Runs the block with this adapter's redis connection set as the
  # per-thread Resque connection override.
  def activating(&block)
    Resque.with_per_thread_redis_override(redis, &block)
  end

  # Returns an array of { name:, active:, size: } hashes, one per queue.
  # Pause flags and sizes are fetched in a single Redis MULTI round trip;
  # the futures returned inside the transaction are resolved afterwards
  # via +#value+.
  def queues
    queues = queue_names
    active_statuses = []
    counts = []

    redis.multi do |multi|
      queues.each do |queue_name|
        active_statuses << multi.mget("pause:queue:#{queue_name}", "pause:all")
        counts << multi.llen("queue:#{queue_name}")
      end
    end

    queues.collect.with_index do |queue_name, index|
      # Active when neither the per-queue nor the global pause key is set.
      { name: queue_name, active: active_statuses[index].value.compact.empty?, size: counts[index].value }
    end
  end

  def queue_size(queue_name)
    Resque.size queue_name
  end

  def clear_queue(queue_name)
    Resque.remove_queue(queue_name)
  end

  def pause_queue(queue_name)
    ResquePauseHelper.pause(queue_name)
  end

  def resume_queue(queue_name)
    ResquePauseHelper.unpause(queue_name)
  end

  def queue_paused?(queue_name)
    ResquePauseHelper.paused?(queue_name)
  end

  # Only pending-job relations support native filtering (by queue name);
  # everything else is filtered in memory by the relation.
  def supported_filters(jobs_relation)
    if jobs_relation.pending? then [ :queue_name ]
    else []
    end
  end

  def jobs_count(jobs_relation)
    resque_jobs_for(jobs_relation).count
  end

  def fetch_jobs(jobs_relation)
    resque_jobs_for(jobs_relation).all
  end

  def retry_all_jobs(jobs_relation)
    resque_jobs_for(jobs_relation).retry_all
  end

  def retry_job(job, jobs_relation)
    resque_jobs_for(jobs_relation).retry_job(job)
  end

  def discard_all_jobs(jobs_relation)
    resque_jobs_for(jobs_relation).discard_all
  end

  def discard_job(job, jobs_relation)
    resque_jobs_for(jobs_relation).discard(job)
  end

  def find_job(job_id, jobs_relation)
    resque_jobs_for(jobs_relation).find_job(job_id)
  end

  private
    attr_reader :redis

    def queue_names
      Resque.queues
    end

    def resque_jobs_for(jobs_relation)
      ResqueJobs.new(jobs_relation, redis: redis)
    end

    # Internal facade translating a +jobs_relation+ into concrete Resque /
    # Redis operations. Instances are cheap, one-shot wrappers.
    class ResqueJobs
      attr_reader :jobs_relation

      delegate :default_page_size, :paginated?, :limit_value_provided?, to: :jobs_relation

      def initialize(jobs_relation, redis:)
        @jobs_relation = jobs_relation
        @redis = redis
      end

      def count
        if paginated?
          count_fetched_jobs # no direct way of counting jobs
        else
          direct_jobs_count
        end
      end

      # All jobs in the relation, deserialized into +ActiveJob::JobProxy+
      # instances. Non-hash entries returned by Resque are skipped.
      def all
        @all ||= fetch_resque_jobs.collect.with_index { |resque_job, index| deserialize_resque_job(resque_job, index) if resque_job.is_a?(Hash) }.compact
      end

      # Retries every job, batching for large sets. The one-by-one path
      # iterates in reverse so positions of not-yet-processed jobs stay valid.
      def retry_all
        if use_batches?
          retry_all_in_batches
        else
          retry_jobs(jobs_relation.to_a.reverse)
        end
      end

      def retry_job(job)
        # Not named just +retry+ because it collides with reserved Ruby keyword.
        resque_requeue_and_discard(job)
      end

      def discard_all
        if jobs_relation.failed? && targeting_all_jobs?
          clear_failed_queue
        else
          discard_all_one_by_one
        end
      end

      # Removes a single job from its Redis list: overwrite the entry at the
      # job's position with a sentinel, then LREM that sentinel. Both steps
      # run in one MULTI so the list can't shift in between.
      def discard(job)
        redis.multi do |multi|
          multi.lset(queue_redis_key, job.position, SENTINEL)
          multi.lrem(queue_redis_key, 1, SENTINEL)
        end
      rescue Redis::CommandError => error
        handle_resque_job_error(job, error)
      end

      def find_job(job_id)
        jobs_by_id[job_id]
      end

      private
        attr_reader :redis

        SENTINEL = "" # See +Resque::Datastore#remove_from_failed_queue+

        # Redis transactions severely speed up operations, specially when the network latency is high.
        # We limit the transaction size because large batches can result in redis timeout errors.
        MAX_REDIS_TRANSACTION_SIZE = 100

        def targeting_all_jobs?
          !paginated? && !jobs_relation.filtering_needed?
        end

        # Failed jobs live in Resque's failure backend; pending jobs are read
        # from their queue list. A blank queue name also falls through to the
        # failed backend (a queue name is mandatory otherwise — see below).
        def fetch_resque_jobs
          if jobs_relation.failed? || jobs_relation.queue_name.blank?
            fetch_failed_resque_jobs
          else
            fetch_queue_resque_jobs
          end
        end

        def fetch_failed_resque_jobs
          Array.wrap(Resque::Failure.all(jobs_relation.offset_value, jobs_relation.limit_value))
        end

        def fetch_queue_resque_jobs
          unless jobs_relation.queue_name.present?
            raise ActiveJob::Errors::QueryError, "This adapter requires a queue name unless fetching failed jobs"
          end
          Array.wrap(Resque.peek(jobs_relation.queue_name, jobs_relation.offset_value, jobs_relation.limit_value))
        end

        # Builds a JobProxy from a raw Resque job hash. +position+ records the
        # job's absolute index in its Redis list, needed later by LSET-based
        # discard/requeue.
        def deserialize_resque_job(resque_job_hash, index)
          args_hash = resque_job_hash.dig("payload", "args") || resque_job_hash.dig("args")
          ActiveJob::JobProxy.new(args_hash&.first).tap do |job|
            job.last_execution_error = execution_error_from_resque_job(resque_job_hash)
            job.raw_data = resque_job_hash
            job.position = jobs_relation.offset_value + index
            job.failed_at = resque_job_hash["failed_at"]&.to_datetime
            job.status = job.failed_at.present? ? :failed : :pending
          end
        end

        def execution_error_from_resque_job(resque_job_hash)
          if resque_job_hash["exception"].present?
            ActiveJob::ExecutionError.new \
              error_class: resque_job_hash["exception"],
              message: resque_job_hash["error"],
              backtrace: resque_job_hash["backtrace"]
          end
        end

        def direct_jobs_count
          jobs_relation.failed? ? failed_jobs_count : pending_jobs_count
        end

        # Sums sizes of all queues, or just the targeted queue when the
        # relation is scoped to one.
        def pending_jobs_count
          Resque.queue_sizes.inject(0) do |sum, (queue_name, queue_size)|
            if jobs_relation.queue_name.blank? || jobs_relation.queue_name == queue_name
              sum + queue_size
            else
              sum
            end
          end
        end

        def failed_jobs_count
          Resque.data_store.num_failed
        end

        def count_fetched_jobs
          all.size
        end

        def queue_redis_key
          jobs_relation.failed? ? "failed" : "queue:#{jobs_relation.queue_name}"
        end

        def clear_failed_queue
          Resque::Failure.clear("failed")
        end

        def retry_jobs(jobs)
          in_transactional_jobs_batches(jobs) do |jobs_batch|
            jobs_batch.each { |job| retry_job(job) }
          end
        end

        # Slices +jobs+ into bounded groups, wrapping each group's commands
        # in a Redis MULTI to cut round trips without risking timeouts.
        def in_transactional_jobs_batches(jobs)
          jobs.each_slice(MAX_REDIS_TRANSACTION_SIZE) do |jobs_batch|
            redis.multi do |multi|
              yield jobs_batch
            end
          end
        end

        def use_batches?
          !jobs_relation.limit_value_provided? && jobs_relation.count > default_page_size
        end

        def retry_all_in_batches
          jobs_relation.in_batches(order: :desc, &:retry_all)
        end

        def resque_requeue_and_discard(job)
          requeue(job)
          discard(job)
        end

        # Marks the stored failed-job entry as retried and enqueues a fresh
        # Resque job with the original class and arguments.
        def requeue(job)
          resque_job = job.raw_data
          resque_job["retried_at"] = Time.now.strftime("%Y/%m/%d %H:%M:%S")

          redis.lset(queue_redis_key, job.position, resque_job)
          Resque::Job.create(resque_job["queue"], resque_job["payload"]["class"], *resque_job["payload"]["args"])
        rescue Redis::CommandError => error
          handle_resque_job_error(job, error)
        end

        def discard_all_one_by_one
          if use_batches?
            discard_all_in_batches
          else
            discard_jobs(jobs_relation.to_a.reverse)
          end
        end

        def discard_jobs(jobs)
          in_transactional_jobs_batches(jobs) do |jobs_batch|
            jobs_batch.each { |job| discard(job) }
          end
        end

        def discard_all_in_batches
          jobs_relation.in_batches(order: :desc, &:discard_all)
        end

        def jobs_by_id
          @jobs_by_id ||= all.index_by(&:job_id)
        end

        # A "no such key" LSET error means the job is gone from the list
        # (already processed or removed); surface that as not-found.
        def handle_resque_job_error(job, error)
          if error.message =~/no such key/i
            raise ActiveJob::Errors::JobNotFoundError.new(job, jobs_relation)
          else
            raise error
          end
        end
    end
end