inst-jobs 2.2.1 → 2.4.1
- checksums.yaml +4 -4
- data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
- data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
- data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
- data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
- data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
- data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
- data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
- data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
- data/db/migrate/20210812210128_add_singleton_column.rb +203 -0
- data/exe/inst_jobs +3 -2
- data/lib/delayed/backend/active_record.rb +187 -159
- data/lib/delayed/backend/base.rb +80 -69
- data/lib/delayed/batch.rb +11 -9
- data/lib/delayed/cli.rb +98 -84
- data/lib/delayed/core_ext/kernel.rb +4 -2
- data/lib/delayed/daemon.rb +70 -74
- data/lib/delayed/job_tracking.rb +26 -25
- data/lib/delayed/lifecycle.rb +27 -24
- data/lib/delayed/log_tailer.rb +17 -17
- data/lib/delayed/logging.rb +13 -16
- data/lib/delayed/message_sending.rb +42 -51
- data/lib/delayed/performable_method.rb +5 -7
- data/lib/delayed/periodic.rb +66 -65
- data/lib/delayed/plugin.rb +2 -4
- data/lib/delayed/pool.rb +198 -192
- data/lib/delayed/server/helpers.rb +6 -6
- data/lib/delayed/server.rb +51 -54
- data/lib/delayed/settings.rb +93 -81
- data/lib/delayed/testing.rb +21 -22
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/in_process.rb +21 -17
- data/lib/delayed/work_queue/parent_process/client.rb +55 -53
- data/lib/delayed/work_queue/parent_process/server.rb +215 -209
- data/lib/delayed/work_queue/parent_process.rb +52 -53
- data/lib/delayed/worker/consul_health_check.rb +21 -19
- data/lib/delayed/worker/health_check.rb +21 -12
- data/lib/delayed/worker/null_health_check.rb +3 -1
- data/lib/delayed/worker/process_helper.rb +8 -9
- data/lib/delayed/worker.rb +271 -261
- data/lib/delayed/yaml_extensions.rb +12 -10
- data/lib/delayed_job.rb +37 -37
- data/lib/inst-jobs.rb +1 -1
- data/spec/active_record_job_spec.rb +142 -138
- data/spec/delayed/cli_spec.rb +7 -7
- data/spec/delayed/daemon_spec.rb +8 -8
- data/spec/delayed/message_sending_spec.rb +8 -9
- data/spec/delayed/periodic_spec.rb +13 -12
- data/spec/delayed/server_spec.rb +38 -38
- data/spec/delayed/settings_spec.rb +26 -25
- data/spec/delayed/work_queue/in_process_spec.rb +7 -7
- data/spec/delayed/work_queue/parent_process/client_spec.rb +16 -12
- data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
- data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
- data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
- data/spec/delayed/worker/health_check_spec.rb +51 -49
- data/spec/delayed/worker_spec.rb +28 -25
- data/spec/gemfiles/52.gemfile +5 -3
- data/spec/gemfiles/52.gemfile.lock +240 -0
- data/spec/gemfiles/60.gemfile +5 -3
- data/spec/gemfiles/60.gemfile.lock +246 -0
- data/spec/gemfiles/61.gemfile +9 -0
- data/spec/sample_jobs.rb +45 -15
- data/spec/shared/delayed_batch.rb +74 -67
- data/spec/shared/delayed_method.rb +143 -102
- data/spec/shared/performable_method.rb +39 -38
- data/spec/shared/shared_backend.rb +537 -437
- data/spec/shared/testing.rb +14 -14
- data/spec/shared/worker.rb +156 -148
- data/spec/shared_jobs_specs.rb +13 -13
- data/spec/spec_helper.rb +43 -51
- metadata +101 -70
- data/lib/delayed/backend/redis/bulk_update.lua +0 -50
- data/lib/delayed/backend/redis/destroy_job.lua +0 -2
- data/lib/delayed/backend/redis/enqueue.lua +0 -29
- data/lib/delayed/backend/redis/fail_job.lua +0 -5
- data/lib/delayed/backend/redis/find_available.lua +0 -3
- data/lib/delayed/backend/redis/functions.rb +0 -59
- data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
- data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
- data/lib/delayed/backend/redis/job.rb +0 -535
- data/lib/delayed/backend/redis/set_running.lua +0 -5
- data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
- data/spec/gemfiles/42.gemfile +0 -7
- data/spec/gemfiles/50.gemfile +0 -7
- data/spec/gemfiles/51.gemfile +0 -7
- data/spec/redis_job_spec.rb +0 -148
data/lib/delayed/backend/base.rb
CHANGED
@@ -12,7 +12,7 @@ module Delayed
 end

 module Base
-ON_HOLD_LOCKED_BY =
+ON_HOLD_LOCKED_BY = "on hold"
 ON_HOLD_COUNT = 50

 def self.included(base)
@@ -22,9 +22,7 @@ module Delayed
 end

 module ClassMethods
-attr_accessor :batches
-attr_accessor :batch_enqueue_args
-attr_accessor :default_priority
+attr_accessor :batches, :batch_enqueue_args, :default_priority

 # Add a job to the queue
 # The first argument should be an object that respond_to?(:perform)
@@ -32,29 +30,37 @@ module Delayed
 # :priority, :run_at, :queue, :strand, :singleton
 # Example: Delayed::Job.enqueue(object, priority: 0, run_at: time, queue: queue)
 def enqueue(object,
-
-
-
-
-
-
-
-
-
+priority: default_priority,
+run_at: nil,
+expires_at: nil,
+queue: Delayed::Settings.queue,
+strand: nil,
+singleton: nil,
+n_strand: nil,
+max_attempts: Delayed::Settings.max_attempts,
+**kwargs)

 unless object.respond_to?(:perform)
-raise ArgumentError,
+raise ArgumentError, "Cannot enqueue items which do not respond to perform"
 end

+strand ||= singleton if Settings.infer_strand_from_singleton
+
 kwargs = Settings.default_job_options.merge(kwargs)
 kwargs[:payload_object] = object
 kwargs[:priority] = priority
 kwargs[:run_at] = run_at if run_at
 kwargs[:strand] = strand
 kwargs[:max_attempts] = max_attempts
-
+if defined?(Marginalia) && Marginalia::Comment.components
+kwargs[:source] =
+Marginalia::Comment.construct_comment
+end
 kwargs[:expires_at] = expires_at
 kwargs[:queue] = queue
+kwargs[:singleton] = singleton
+
+raise ArgumentError, "Only one of strand or n_strand can be used" if strand && n_strand

 # If two parameters are given to n_strand, the first param is used
 # as the strand name for looking up the Setting, while the second
@@ -80,18 +86,15 @@ module Delayed
 end

 if singleton
-
-job = self.create_singleton(**kwargs)
+job = create(**kwargs)
 elsif batches && strand.nil? && run_at.nil?
 batch_enqueue_args = kwargs.slice(*self.batch_enqueue_args)
 batches[batch_enqueue_args] << kwargs
 return true
 else
-if kwargs[:on_conflict]
-
-
-end
-job = self.create(**kwargs)
+raise ArgumentError, "on_conflict can only be provided with singleton" if kwargs[:on_conflict]
+
+job = create(**kwargs)
 end

 JobTracking.job_created(job)
@@ -122,10 +125,10 @@ module Delayed

 def check_priorities(min_priority, max_priority)
 if min_priority && min_priority < Delayed::MIN_PRIORITY
-raise
+raise ArgumentError, "min_priority #{min_priority} can't be less than #{Delayed::MIN_PRIORITY}"
 end
-if max_priority && max_priority > Delayed::MAX_PRIORITY
-raise
+if max_priority && max_priority > Delayed::MAX_PRIORITY # rubocop:disable Style/GuardClause
+raise ArgumentError, "max_priority #{max_priority} can't be greater than #{Delayed::MAX_PRIORITY}"
 end
 end

@@ -138,13 +141,19 @@ module Delayed

 def processes_locked_locally(name: nil)
 name ||= Socket.gethostname rescue x
-running_jobs.select
+local_jobs = running_jobs.select do |job|
+job.locked_by.start_with?("#{name}:")
+end
+local_jobs.map { |job| job.locked_by.split(":").last.to_i }
 end

 def unlock_orphaned_prefetched_jobs
 horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
-orphaned_jobs = running_jobs.select
+orphaned_jobs = running_jobs.select do |job|
+job.locked_by.start_with?("prefetch:") && job.locked_at < horizon
+end
 return 0 if orphaned_jobs.empty?
+
 unlock(orphaned_jobs)
 end

@@ -158,13 +167,14 @@ module Delayed
 regex = Regexp.new("^#{Regexp.escape(name)}:#{pid_regex}$")
 unlocked_jobs = 0
 running = false if pid
-
+running_jobs.each do |job|
 next unless job.locked_by =~ regex
+
 unless pid
 job_pid = $1.to_i
 running = Process.kill(0, job_pid) rescue false
 end
-
+unless running
 unlocked_jobs += 1
 job.reschedule("process died")
 end
@@ -176,14 +186,14 @@ module Delayed
 def failed?
 failed_at
 end
-
+alias failed failed?

 def expired?
 expires_at && (self.class.db_time_now >= expires_at)
 end

 def inferred_max_attempts
-
+max_attempts || Delayed::Settings.max_attempts
 end

 # Reschedule the job in the future (when a job fails).
@@ -191,22 +201,22 @@ module Delayed
 def reschedule(error = nil, time = nil)
 begin
 obj = payload_object
-return_code = obj.on_failure(error) if obj
+return_code = obj.on_failure(error) if obj.respond_to?(:on_failure)
 rescue
 # don't allow a failed deserialization to prevent rescheduling
 end

 self.attempts += 1 unless return_code == :unlock

-if self.attempts >=
+if self.attempts >= inferred_max_attempts
 permanent_failure error || "max attempts reached"
 elsif expired?
 permanent_failure error || "job has expired"
 else
-time ||=
+time ||= reschedule_at
 self.run_at = time
-
-
+unlock
+save!
 end
 end

@@ -214,26 +224,24 @@ module Delayed
 begin
 # notify the payload_object of a permanent failure
 obj = payload_object
-obj.on_permanent_failure(error) if obj
+obj.on_permanent_failure(error) if obj.respond_to?(:on_permanent_failure)
 rescue
 # don't allow a failed deserialization to prevent destroying the job
 end

 # optionally destroy the object
 destroy_self = true
-if Delayed::Worker.on_max_failures
-destroy_self = Delayed::Worker.on_max_failures.call(self, error)
-end
+destroy_self = Delayed::Worker.on_max_failures.call(self, error) if Delayed::Worker.on_max_failures

 if destroy_self
-
+destroy
 else
-
+fail!
 end
 end

 def payload_object
-@payload_object ||= deserialize(self[
+@payload_object ||= deserialize(self["handler"].untaint)
 end

 def name
@@ -249,7 +257,7 @@ module Delayed

 def full_name
 obj = payload_object rescue nil
-if obj
+if obj.respond_to?(:full_name)
 obj.full_name
 else
 name
@@ -258,14 +266,14 @@ module Delayed

 def payload_object=(object)
 @payload_object = object
-self[
-self[
-
-
-
-
-
-
+self["handler"] = object.to_yaml
+self["tag"] = if object.respond_to?(:tag)
+object.tag
+elsif object.is_a?(Module)
+"#{object}.perform"
+else
+"#{object.class}#perform"
+end
 end

 # Moved into its own method so that new_relic can trace it.
@@ -292,15 +300,16 @@ module Delayed
 end

 def locked?
-!!(
+!!(locked_at || locked_by)
 end

 def reschedule_at
-new_time = self.class.db_time_now + (attempts
+new_time = self.class.db_time_now + (attempts**4) + 5
 begin
 if payload_object.respond_to?(:reschedule_at)
 new_time = payload_object.reschedule_at(
-
+self.class.db_time_now, attempts
+)
 end
 rescue
 # TODO: just swallow errors from reschedule_at ?
@@ -312,25 +321,26 @@ module Delayed
 self.locked_by = ON_HOLD_LOCKED_BY
 self.locked_at = self.class.db_time_now
 self.attempts = ON_HOLD_COUNT
-
+save!
 end

 def unhold!
 self.locked_by = nil
 self.locked_at = nil
 self.attempts = 0
-self.run_at = [self.class.db_time_now,
+self.run_at = [self.class.db_time_now, run_at].max
 self.failed_at = nil
-
+save!
 end

 def on_hold?
-
+locked_by == "on hold" && locked_at && self.attempts == ON_HOLD_COUNT
 end

-
+private

-
+PARSE_OBJECT_FROM_YAML = %r{!ruby/\w+:([^\s]+)}.freeze
+private_constant :PARSE_OBJECT_FROM_YAML

 def deserialize(source)
 handler = nil
@@ -344,13 +354,13 @@ module Delayed
 return handler if handler.respond_to?(:perform)

 raise DeserializationError,
-
+"Job failed to load: Unknown handler. Try to manually require the appropriate file."
 rescue TypeError, LoadError, NameError => e
 raise DeserializationError,
-
+"Job failed to load: #{e.message}. Try to manually require the required file."
 rescue Psych::SyntaxError => e
-
-
+raise DeserializationError,
+"YAML parsing error: #{e.message}. Probably not recoverable."
 end

 def _yaml_deserialize(source)
@@ -358,12 +368,13 @@ module Delayed
 end

 def attempt_to_load_from_source(source)
-
-
-
+return unless (md = PARSE_OBJECT_FROM_YAML.match(source))
+
+md[1].constantize
 end

-
+public
+
 def initialize_defaults
 self.queue ||= Delayed::Settings.queue
 self.run_at ||= self.class.db_time_now
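
The hunks above rework Delayed::Job.enqueue into explicit keyword arguments and add singleton and Marginalia support. A minimal usage sketch under those assumptions (MyJob, the priority value, and the singleton key are hypothetical examples; any object responding to #perform can be enqueued, and only one of strand/n_strand may be passed):

  # Hypothetical payload class -- anything that responds to #perform works.
  class MyJob
    def perform
      # do the actual work here
    end
  end

  # Keyword-style enqueue, matching the signature shown in the diff.
  Delayed::Job.enqueue(MyJob.new, priority: 10, queue: "default")

  # Singleton jobs pass a de-duplication key; per the new ArgumentError above,
  # on_conflict may only be combined with singleton.
  Delayed::Job.enqueue(MyJob.new, singleton: "my_job/unique-key")
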
data/lib/delayed/batch.rb
CHANGED
@@ -2,11 +2,11 @@

 module Delayed
 module Batch
-
+PerformableBatch = Struct.new(:mode, :items) do
 def initialize(mode, items)
 raise "unsupported mode" unless mode == :serial
-
-
+
+super
 end

 def display_name
@@ -25,14 +25,16 @@ module Delayed
 end

 class << self
-def serial_batch(opts = {})
-prepare_batches(:serial, opts)
+def serial_batch(opts = {}, &block)
+prepare_batches(:serial, opts, &block)
 end

 private
+
 def prepare_batches(mode, opts)
 raise "nested batching is not supported" if Delayed::Job.batches
-
+
+Delayed::Job.batches = Hash.new { |h, k| h[k] = Set.new }
 batch_enqueue_args = [:queue]
 batch_enqueue_args << :priority unless opts[:priority]
 Delayed::Job.batch_enqueue_args = batch_enqueue_args
@@ -42,9 +44,9 @@ module Delayed
 Delayed::Job.batches = nil
 batch_args = opts.slice(:priority)
 batches.each do |enqueue_args, batch|
-if batch.size
-
-
+next if batch.size.zero?
+
+if batch.size == 1
 args = batch.first.merge(batch_args)
 payload_object = args.delete(:payload_object)
 Delayed::Job.enqueue(payload_object, **args)
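
Batching works through Delayed::Job.batches (see the enqueue branch in base.rb above): while a serial_batch block is open, plain enqueues with no strand or run_at are collected and flushed together when the block returns. A sketch, reusing the hypothetical MyJob payload from the earlier example:

  # Both enqueues below are coalesced into Delayed::Job.batches and
  # submitted as one batch when the block exits.
  Delayed::Batch.serial_batch do
    Delayed::Job.enqueue(MyJob.new)
    Delayed::Job.enqueue(MyJob.new)
  end
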
data/lib/delayed/cli.rb
CHANGED
@@ -1,111 +1,125 @@
 # frozen_string_literal: true

-require
+require "optparse"

 module Delayed
-class CLI
-
-
-
+class CLI
+class << self
+attr_accessor :instance
+end

-
+attr_reader :config

-
-
+def initialize(args = ARGV)
+self.class.instance = self

-
-
-
-
-
-
-
-
-
-
+@args = args
+# config that will be applied on Settings and passed to the created Pool
+@config = {}
+# CLI options that will be kept to this class
+@options = {
+config_file: Settings.default_worker_config_name,
+pid_folder: Settings.expand_rails_path("tmp/pids"),
+tail_logs: true # only in FG mode
+}
+end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+def run
+parse_cli_options!
+load_and_apply_config!
+
+command = @args.shift
+case command
+when "start"
+exit 1 if daemon.status(print: :alive) == :running
+daemon.daemonize!
+start
+when "stop"
+daemon.stop(kill: @options[:kill])
+when "run"
+start
+when "status"
+if daemon.status
+exit 0
+else
+exit 1
+end
+when "restart"
+daemon.stop(kill: @options[:kill])
+daemon.daemonize!
+start
+when nil
+puts option_parser.to_s
 else
-
+raise("Unknown command: #{command.inspect}")
 end
-when 'restart'
-daemon.stop(kill: @options[:kill])
-daemon.daemonize!
-start
-when nil
-puts option_parser.to_s
-else
-raise("Unknown command: #{command.inspect}")
 end
-end

-
-
-
-
+def parse_cli_options!
+option_parser.parse!(@args)
+@options
+end

-
+protected

-
-
-
-
+def load_and_apply_config!
+@config = Settings.worker_config(@options[:config_file])
+Settings.apply_worker_config!(@config)
+end

-
-
-
-
+def option_parser
+@option_parser ||= OptionParser.new do |opts|
+opts.banner = "Usage #{$0} <command> <options>"
+opts.separator %(\nWhere <command> is one of:
 start      start the jobs daemon
 stop       stop the jobs daemon
 run        start and run in the foreground
 restart    stop and then start the jobs daemon
 status     show daemon status
-
-
-
-
-
-
-
-
-
+)
+
+opts.separator "\n<options>"
+opts.on("-c", "--config [CONFIG_PATH]", "Use alternate config file (default #{@options[:config_file]})") do |c|
+@options[:config_file] = c
+end
+opts.on("-p", "--pid [PID_PATH]",
+"Use alternate folder for PID files (default #{@options[:pid_folder]})") do |p|
+@options[:pid_folder] = p
+end
+opts.on("--no-tail", "Don't tail the logs (only affects non-daemon mode)") { @options[:tail_logs] = false }
+opts.on("--with-prejudice", "When stopping, interrupt jobs in progress, instead of letting them drain") do
+@options[:kill] ||= true
+end
+opts.on("--with-extreme-prejudice",
+"When stopping, immediately kill jobs in progress, instead of letting them drain") do
+@options[:kill] = 9
+end
+opts.on_tail("-h", "--help", "Show this message") do
+puts opts
+exit
+end
+end
 end
-end

-
-
-
+def daemon
+@daemon ||= Delayed::Daemon.new(@options[:pid_folder])
+end

-
-
-
-
-
+def start
+load_rails
+tail_rails_log unless daemon.daemonized?
+Delayed::Pool.new(@config).start
+end

-
-
-
-
+def load_rails
+require(Settings.expand_rails_path("config/environment.rb"))
+Dir.chdir(Rails.root)
+end
+
+def tail_rails_log
+return unless @options[:tail_logs]

-
-
-Delayed::LogTailer.new.run
+Delayed::LogTailer.new.run
+end
 end
 end
-end
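
The CLI rewrite keeps the same commands (start, stop, run, status, restart) but splits option parsing, config loading, and daemon control into separate methods. A sketch of driving it programmatically, based only on the constructor and #run shown above (the bundled inst_jobs executable presumably does the equivalent with ARGV):

  require "delayed/cli"

  # "run" starts a Delayed::Pool in the foreground; --no-tail skips log tailing.
  Delayed::CLI.new(["run", "--no-tail"]).run
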
data/lib/delayed/core_ext/kernel.rb
CHANGED
@@ -1,9 +1,11 @@
+# frozen_string_literal: true
+
 module Kernel
-def sender(
+def sender(idx = 0)
 frame_self = nil
 # 3. one for the block, one for this method, one for the method calling this
 # method, and _then_ we get to the self for who sent the message we want
-RubyVM::DebugInspector.open { |dc| frame_self = dc.frame_self(3 +
+RubyVM::DebugInspector.open { |dc| frame_self = dc.frame_self(3 + idx) }
 frame_self
 end
 end
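
Kernel#sender now accepts an optional frame offset. An illustrative sketch of what it returns (Widget and notify are hypothetical; the frame arithmetic follows the comment in the diff, and RubyVM::DebugInspector must be available):

  class Widget
    def notify
      # Returns the `self` of whatever object invoked #notify; pass an
      # integer offset to look further up the call stack.
      caller_object = sender
      puts "notified by #{caller_object.class}"
    end
  end
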