inst-jobs 2.2.1 → 2.4.1

Files changed (98)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +203 -0
  22. data/exe/inst_jobs +3 -2
  23. data/lib/delayed/backend/active_record.rb +187 -159
  24. data/lib/delayed/backend/base.rb +80 -69
  25. data/lib/delayed/batch.rb +11 -9
  26. data/lib/delayed/cli.rb +98 -84
  27. data/lib/delayed/core_ext/kernel.rb +4 -2
  28. data/lib/delayed/daemon.rb +70 -74
  29. data/lib/delayed/job_tracking.rb +26 -25
  30. data/lib/delayed/lifecycle.rb +27 -24
  31. data/lib/delayed/log_tailer.rb +17 -17
  32. data/lib/delayed/logging.rb +13 -16
  33. data/lib/delayed/message_sending.rb +42 -51
  34. data/lib/delayed/performable_method.rb +5 -7
  35. data/lib/delayed/periodic.rb +66 -65
  36. data/lib/delayed/plugin.rb +2 -4
  37. data/lib/delayed/pool.rb +198 -192
  38. data/lib/delayed/server/helpers.rb +6 -6
  39. data/lib/delayed/server.rb +51 -54
  40. data/lib/delayed/settings.rb +93 -81
  41. data/lib/delayed/testing.rb +21 -22
  42. data/lib/delayed/version.rb +1 -1
  43. data/lib/delayed/work_queue/in_process.rb +21 -17
  44. data/lib/delayed/work_queue/parent_process/client.rb +55 -53
  45. data/lib/delayed/work_queue/parent_process/server.rb +215 -209
  46. data/lib/delayed/work_queue/parent_process.rb +52 -53
  47. data/lib/delayed/worker/consul_health_check.rb +21 -19
  48. data/lib/delayed/worker/health_check.rb +21 -12
  49. data/lib/delayed/worker/null_health_check.rb +3 -1
  50. data/lib/delayed/worker/process_helper.rb +8 -9
  51. data/lib/delayed/worker.rb +271 -261
  52. data/lib/delayed/yaml_extensions.rb +12 -10
  53. data/lib/delayed_job.rb +37 -37
  54. data/lib/inst-jobs.rb +1 -1
  55. data/spec/active_record_job_spec.rb +142 -138
  56. data/spec/delayed/cli_spec.rb +7 -7
  57. data/spec/delayed/daemon_spec.rb +8 -8
  58. data/spec/delayed/message_sending_spec.rb +8 -9
  59. data/spec/delayed/periodic_spec.rb +13 -12
  60. data/spec/delayed/server_spec.rb +38 -38
  61. data/spec/delayed/settings_spec.rb +26 -25
  62. data/spec/delayed/work_queue/in_process_spec.rb +7 -7
  63. data/spec/delayed/work_queue/parent_process/client_spec.rb +16 -12
  64. data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
  65. data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
  66. data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
  67. data/spec/delayed/worker/health_check_spec.rb +51 -49
  68. data/spec/delayed/worker_spec.rb +28 -25
  69. data/spec/gemfiles/52.gemfile +5 -3
  70. data/spec/gemfiles/52.gemfile.lock +240 -0
  71. data/spec/gemfiles/60.gemfile +5 -3
  72. data/spec/gemfiles/60.gemfile.lock +246 -0
  73. data/spec/gemfiles/61.gemfile +9 -0
  74. data/spec/sample_jobs.rb +45 -15
  75. data/spec/shared/delayed_batch.rb +74 -67
  76. data/spec/shared/delayed_method.rb +143 -102
  77. data/spec/shared/performable_method.rb +39 -38
  78. data/spec/shared/shared_backend.rb +537 -437
  79. data/spec/shared/testing.rb +14 -14
  80. data/spec/shared/worker.rb +156 -148
  81. data/spec/shared_jobs_specs.rb +13 -13
  82. data/spec/spec_helper.rb +43 -51
  83. metadata +101 -70
  84. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  85. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  86. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  87. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  88. data/lib/delayed/backend/redis/find_available.lua +0 -3
  89. data/lib/delayed/backend/redis/functions.rb +0 -59
  90. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  91. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  92. data/lib/delayed/backend/redis/job.rb +0 -535
  93. data/lib/delayed/backend/redis/set_running.lua +0 -5
  94. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  95. data/spec/gemfiles/42.gemfile +0 -7
  96. data/spec/gemfiles/50.gemfile +0 -7
  97. data/spec/gemfiles/51.gemfile +0 -7
  98. data/spec/redis_job_spec.rb +0 -148
data/lib/delayed/backend/base.rb CHANGED
@@ -12,7 +12,7 @@ module Delayed
     end
 
     module Base
-      ON_HOLD_LOCKED_BY = 'on hold'
+      ON_HOLD_LOCKED_BY = "on hold"
      ON_HOLD_COUNT = 50
 
      def self.included(base)
@@ -22,9 +22,7 @@ module Delayed
      end
 
      module ClassMethods
-        attr_accessor :batches
-        attr_accessor :batch_enqueue_args
-        attr_accessor :default_priority
+        attr_accessor :batches, :batch_enqueue_args, :default_priority
 
        # Add a job to the queue
        # The first argument should be an object that respond_to?(:perform)
@@ -32,29 +30,37 @@ module Delayed
        # :priority, :run_at, :queue, :strand, :singleton
        # Example: Delayed::Job.enqueue(object, priority: 0, run_at: time, queue: queue)
        def enqueue(object,
-                  priority: default_priority,
-                  run_at: nil,
-                  expires_at: nil,
-                  queue: Delayed::Settings.queue,
-                  strand: nil,
-                  singleton: nil,
-                  n_strand: nil,
-                  max_attempts: Delayed::Settings.max_attempts,
-                  **kwargs)
+                    priority: default_priority,
+                    run_at: nil,
+                    expires_at: nil,
+                    queue: Delayed::Settings.queue,
+                    strand: nil,
+                    singleton: nil,
+                    n_strand: nil,
+                    max_attempts: Delayed::Settings.max_attempts,
+                    **kwargs)
 
          unless object.respond_to?(:perform)
-            raise ArgumentError, 'Cannot enqueue items which do not respond to perform'
+            raise ArgumentError, "Cannot enqueue items which do not respond to perform"
          end
 
+          strand ||= singleton if Settings.infer_strand_from_singleton
+
          kwargs = Settings.default_job_options.merge(kwargs)
          kwargs[:payload_object] = object
          kwargs[:priority] = priority
          kwargs[:run_at] = run_at if run_at
          kwargs[:strand] = strand
          kwargs[:max_attempts] = max_attempts
-          kwargs[:source] = Marginalia::Comment.construct_comment if defined?(Marginalia) && Marginalia::Comment.components
+          if defined?(Marginalia) && Marginalia::Comment.components
+            kwargs[:source] =
+              Marginalia::Comment.construct_comment
+          end
          kwargs[:expires_at] = expires_at
          kwargs[:queue] = queue
+          kwargs[:singleton] = singleton
+
+          raise ArgumentError, "Only one of strand or n_strand can be used" if strand && n_strand
 
          # If two parameters are given to n_strand, the first param is used
          # as the strand name for looking up the Setting, while the second
@@ -80,18 +86,15 @@ module Delayed
          end
 
          if singleton
-            kwargs[:strand] = singleton
-            job = self.create_singleton(**kwargs)
+            job = create(**kwargs)
          elsif batches && strand.nil? && run_at.nil?
            batch_enqueue_args = kwargs.slice(*self.batch_enqueue_args)
            batches[batch_enqueue_args] << kwargs
            return true
          else
-            if kwargs[:on_conflict].present?
-              Delayed::Logging.logger.warn("[DELAYED_JOB] WARNING: providing 'on_conflict' as an option to a non-singleton job will have no effect. Discarding.")
-              kwargs.delete(:on_conflict)
-            end
-            job = self.create(**kwargs)
+            raise ArgumentError, "on_conflict can only be provided with singleton" if kwargs[:on_conflict]
+
+            job = create(**kwargs)
          end
 
          JobTracking.job_created(job)
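
The enqueue path in 2.4 tightens its keyword contract: singleton is passed straight through to create, strand and n_strand are mutually exclusive, and on_conflict is only accepted together with singleton (previously it was discarded with a warning). A minimal usage sketch against this API, assuming a configured ActiveRecord backend; the ReportJob class and the on_conflict value are illustrative, not from the gem:

    # Any payload just needs to respond to #perform.
    class ReportJob
      def perform; end
    end

    # Plain enqueue, mirroring the documented example above.
    Delayed::Job.enqueue(ReportJob.new, priority: 0, queue: "reports")

    # Singleton enqueue: the singleton key is now stored on the job itself
    # (kwargs[:singleton]) instead of being folded into strand.
    Delayed::Job.enqueue(ReportJob.new, singleton: "report:42")

    begin
      Delayed::Job.enqueue(ReportJob.new, strand: "a", n_strand: "b")
    rescue ArgumentError
      # "Only one of strand or n_strand can be used"
    end

    begin
      Delayed::Job.enqueue(ReportJob.new, on_conflict: :overwrite) # no singleton given
    rescue ArgumentError
      # "on_conflict can only be provided with singleton"
    end
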
@@ -122,10 +125,10 @@ module Delayed
 
        def check_priorities(min_priority, max_priority)
          if min_priority && min_priority < Delayed::MIN_PRIORITY
-            raise(ArgumentError, "min_priority #{min_priority} can't be less than #{Delayed::MIN_PRIORITY}")
+            raise ArgumentError, "min_priority #{min_priority} can't be less than #{Delayed::MIN_PRIORITY}"
          end
-          if max_priority && max_priority > Delayed::MAX_PRIORITY
-            raise(ArgumentError, "max_priority #{max_priority} can't be greater than #{Delayed::MAX_PRIORITY}")
+          if max_priority && max_priority > Delayed::MAX_PRIORITY # rubocop:disable Style/GuardClause
+            raise ArgumentError, "max_priority #{max_priority} can't be greater than #{Delayed::MAX_PRIORITY}"
          end
        end
 
@@ -138,13 +141,19 @@ module Delayed
 
        def processes_locked_locally(name: nil)
          name ||= Socket.gethostname rescue x
-          running_jobs.select{|job| job.locked_by.start_with?("#{name}:")}.map{|job| job.locked_by.split(':').last.to_i}
+          local_jobs = running_jobs.select do |job|
+            job.locked_by.start_with?("#{name}:")
+          end
+          local_jobs.map { |job| job.locked_by.split(":").last.to_i }
        end
 
        def unlock_orphaned_prefetched_jobs
          horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
-          orphaned_jobs = running_jobs.select { |job| job.locked_by.start_with?('prefetch:') && job.locked_at < horizon }
+          orphaned_jobs = running_jobs.select do |job|
+            job.locked_by.start_with?("prefetch:") && job.locked_at < horizon
+          end
          return 0 if orphaned_jobs.empty?
+
          unlock(orphaned_jobs)
        end
 
@@ -158,13 +167,14 @@ module Delayed
          regex = Regexp.new("^#{Regexp.escape(name)}:#{pid_regex}$")
          unlocked_jobs = 0
          running = false if pid
-          self.running_jobs.each do |job|
+          running_jobs.each do |job|
            next unless job.locked_by =~ regex
+
            unless pid
              job_pid = $1.to_i
              running = Process.kill(0, job_pid) rescue false
            end
-            if !running
+            unless running
              unlocked_jobs += 1
              job.reschedule("process died")
            end
@@ -176,14 +186,14 @@ module Delayed
      def failed?
        failed_at
      end
-      alias_method :failed, :failed?
+      alias failed failed?
 
      def expired?
        expires_at && (self.class.db_time_now >= expires_at)
      end
 
      def inferred_max_attempts
-        self.max_attempts || Delayed::Settings.max_attempts
+        max_attempts || Delayed::Settings.max_attempts
      end
 
      # Reschedule the job in the future (when a job fails).
@@ -191,22 +201,22 @@ module Delayed
      def reschedule(error = nil, time = nil)
        begin
          obj = payload_object
-          return_code = obj.on_failure(error) if obj && obj.respond_to?(:on_failure)
+          return_code = obj.on_failure(error) if obj.respond_to?(:on_failure)
        rescue
          # don't allow a failed deserialization to prevent rescheduling
        end
 
        self.attempts += 1 unless return_code == :unlock
 
-        if self.attempts >= self.inferred_max_attempts
+        if self.attempts >= inferred_max_attempts
          permanent_failure error || "max attempts reached"
        elsif expired?
          permanent_failure error || "job has expired"
        else
-          time ||= self.reschedule_at
+          time ||= reschedule_at
          self.run_at = time
-          self.unlock
-          self.save!
+          unlock
+          save!
        end
      end
 
@@ -214,26 +224,24 @@ module Delayed
        begin
          # notify the payload_object of a permanent failure
          obj = payload_object
-          obj.on_permanent_failure(error) if obj && obj.respond_to?(:on_permanent_failure)
+          obj.on_permanent_failure(error) if obj.respond_to?(:on_permanent_failure)
        rescue
          # don't allow a failed deserialization to prevent destroying the job
        end
 
        # optionally destroy the object
        destroy_self = true
-        if Delayed::Worker.on_max_failures
-          destroy_self = Delayed::Worker.on_max_failures.call(self, error)
-        end
+        destroy_self = Delayed::Worker.on_max_failures.call(self, error) if Delayed::Worker.on_max_failures
 
        if destroy_self
-          self.destroy
+          destroy
        else
-          self.fail!
+          fail!
        end
      end
 
      def payload_object
-        @payload_object ||= deserialize(self['handler'].untaint)
+        @payload_object ||= deserialize(self["handler"].untaint)
      end
 
      def name
@@ -249,7 +257,7 @@ module Delayed
 
      def full_name
        obj = payload_object rescue nil
-        if obj && obj.respond_to?(:full_name)
+        if obj.respond_to?(:full_name)
          obj.full_name
        else
          name
@@ -258,14 +266,14 @@ module Delayed
      end
      def payload_object=(object)
        @payload_object = object
-        self['handler'] = object.to_yaml
-        self['tag'] = if object.respond_to?(:tag)
-                        object.tag
-                      elsif object.is_a?(Module)
-                        "#{object}.perform"
-                      else
-                        "#{object.class}#perform"
-                      end
+        self["handler"] = object.to_yaml
+        self["tag"] = if object.respond_to?(:tag)
+                        object.tag
+                      elsif object.is_a?(Module)
+                        "#{object}.perform"
+                      else
+                        "#{object.class}#perform"
+                      end
      end
 
      # Moved into its own method so that new_relic can trace it.
@@ -292,15 +300,16 @@ module Delayed
      end
 
      def locked?
-        !!(self.locked_at || self.locked_by)
+        !!(locked_at || locked_by)
      end
 
      def reschedule_at
-        new_time = self.class.db_time_now + (attempts ** 4) + 5
+        new_time = self.class.db_time_now + (attempts**4) + 5
        begin
          if payload_object.respond_to?(:reschedule_at)
            new_time = payload_object.reschedule_at(
-              self.class.db_time_now, attempts)
+              self.class.db_time_now, attempts
+            )
          end
        rescue
          # TODO: just swallow errors from reschedule_at ?
@@ -312,25 +321,26 @@ module Delayed
        self.locked_by = ON_HOLD_LOCKED_BY
        self.locked_at = self.class.db_time_now
        self.attempts = ON_HOLD_COUNT
-        self.save!
+        save!
      end
 
      def unhold!
        self.locked_by = nil
        self.locked_at = nil
        self.attempts = 0
-        self.run_at = [self.class.db_time_now, self.run_at].max
+        self.run_at = [self.class.db_time_now, run_at].max
        self.failed_at = nil
-        self.save!
+        save!
      end
 
      def on_hold?
-        self.locked_by == 'on hold' && self.locked_at && self.attempts == ON_HOLD_COUNT
+        locked_by == "on hold" && locked_at && self.attempts == ON_HOLD_COUNT
      end
 
-    private
+      private
 
-      ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/
+      PARSE_OBJECT_FROM_YAML = %r{!ruby/\w+:([^\s]+)}.freeze
+      private_constant :PARSE_OBJECT_FROM_YAML
 
      def deserialize(source)
        handler = nil
@@ -344,13 +354,13 @@ module Delayed
        return handler if handler.respond_to?(:perform)
 
        raise DeserializationError,
-          'Job failed to load: Unknown handler. Try to manually require the appropriate file.'
+              "Job failed to load: Unknown handler. Try to manually require the appropriate file."
      rescue TypeError, LoadError, NameError => e
        raise DeserializationError,
-          "Job failed to load: #{e.message}. Try to manually require the required file."
+              "Job failed to load: #{e.message}. Try to manually require the required file."
      rescue Psych::SyntaxError => e
-        raise DeserializationError,
-          "YAML parsing error: #{e.message}. Probably not recoverable."
+        raise DeserializationError,
+              "YAML parsing error: #{e.message}. Probably not recoverable."
      end
 
      def _yaml_deserialize(source)
@@ -358,12 +368,13 @@ module Delayed
      end
 
      def attempt_to_load_from_source(source)
-        if md = ParseObjectFromYaml.match(source)
-          md[1].constantize
-        end
+        return unless (md = PARSE_OBJECT_FROM_YAML.match(source))
+
+        md[1].constantize
      end
 
-    public
+      public
+
      def initialize_defaults
        self.queue ||= Delayed::Settings.queue
        self.run_at ||= self.class.db_time_now
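
The retry backoff itself is untouched by this refactor: reschedule_at still schedules the next attempt at db_time_now + attempts**4 + 5 seconds unless the payload object supplies its own reschedule_at. A quick worked example of the default curve:

    # Delay, in seconds, added before attempts 1 through 5
    (1..5).map { |attempts| (attempts**4) + 5 }
    # => [6, 21, 86, 261, 630]
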
data/lib/delayed/batch.rb CHANGED
@@ -2,11 +2,11 @@
 
 module Delayed
   module Batch
-    class PerformableBatch < Struct.new(:mode, :items)
+    PerformableBatch = Struct.new(:mode, :items) do
      def initialize(mode, items)
        raise "unsupported mode" unless mode == :serial
-        self.mode = mode
-        self.items = items
+
+        super
      end
 
      def display_name
@@ -25,14 +25,16 @@ module Delayed
    end
 
    class << self
-      def serial_batch(opts = {})
-        prepare_batches(:serial, opts){ yield }
+      def serial_batch(opts = {}, &block)
+        prepare_batches(:serial, opts, &block)
      end
 
      private
+
      def prepare_batches(mode, opts)
        raise "nested batching is not supported" if Delayed::Job.batches
-        Delayed::Job.batches = Hash.new { |h,k| h[k] = Set.new }
+
+        Delayed::Job.batches = Hash.new { |h, k| h[k] = Set.new }
        batch_enqueue_args = [:queue]
        batch_enqueue_args << :priority unless opts[:priority]
        Delayed::Job.batch_enqueue_args = batch_enqueue_args
@@ -42,9 +44,9 @@ module Delayed
        Delayed::Job.batches = nil
        batch_args = opts.slice(:priority)
        batches.each do |enqueue_args, batch|
-          if batch.size == 0
-            next
-          elsif batch.size == 1
+          next if batch.size.zero?
+
+          if batch.size == 1
            args = batch.first.merge(batch_args)
            payload_object = args.delete(:payload_object)
            Delayed::Job.enqueue(payload_object, **args)
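
For context on what serial_batch gates: while the block runs, Delayed::Job.batches collects enqueues (those without strand or run_at, per the enqueue branch in base.rb above), and on exit they are flushed as a single batch, or as a plain job when only one was collected. A hedged sketch; the payload struct is invented for illustration:

    NotifyUser = Struct.new(:user_id) do
      def perform; end
    end

    Delayed::Batch.serial_batch do
      # collected into Delayed::Job.batches rather than written immediately
      Delayed::Job.enqueue(NotifyUser.new(1))
      Delayed::Job.enqueue(NotifyUser.new(2))
    end
    # on block exit the collected jobs run serially inside one batch job
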
data/lib/delayed/cli.rb CHANGED
@@ -1,111 +1,125 @@
 # frozen_string_literal: true
 
-require 'optparse'
+require "optparse"
 
 module Delayed
-class CLI
-  class << self
-    attr_accessor :instance
-  end
+  class CLI
+    class << self
+      attr_accessor :instance
+    end
 
-  attr_reader :config
+    attr_reader :config
 
-  def initialize(args = ARGV)
-    self.class.instance = self
+    def initialize(args = ARGV)
+      self.class.instance = self
 
-    @args = args
-    # config that will be applied on Settings and passed to the created Pool
-    @config = {}
-    # CLI options that will be kept to this class
-    @options = {
-      :config_file => Settings.default_worker_config_name,
-      :pid_folder => Settings.expand_rails_path("tmp/pids"),
-      :tail_logs => true, # only in FG mode
-    }
-  end
+      @args = args
+      # config that will be applied on Settings and passed to the created Pool
+      @config = {}
+      # CLI options that will be kept to this class
+      @options = {
+        config_file: Settings.default_worker_config_name,
+        pid_folder: Settings.expand_rails_path("tmp/pids"),
+        tail_logs: true # only in FG mode
+      }
+    end
 
-  def run
-    parse_cli_options!
-    load_and_apply_config!
-
-    command = @args.shift
-    case command
-    when 'start'
-      exit 1 if daemon.status(print: :alive) == :running
-      daemon.daemonize!
-      start
-    when 'stop'
-      daemon.stop(kill: @options[:kill])
-    when 'run'
-      start
-    when 'status'
-      if daemon.status
-        exit 0
+    def run
+      parse_cli_options!
+      load_and_apply_config!
+
+      command = @args.shift
+      case command
+      when "start"
+        exit 1 if daemon.status(print: :alive) == :running
+        daemon.daemonize!
+        start
+      when "stop"
+        daemon.stop(kill: @options[:kill])
+      when "run"
+        start
+      when "status"
+        if daemon.status
+          exit 0
+        else
+          exit 1
+        end
+      when "restart"
+        daemon.stop(kill: @options[:kill])
+        daemon.daemonize!
+        start
+      when nil
+        puts option_parser.to_s
       else
-        exit 1
+        raise("Unknown command: #{command.inspect}")
      end
-    when 'restart'
-      daemon.stop(kill: @options[:kill])
-      daemon.daemonize!
-      start
-    when nil
-      puts option_parser.to_s
-    else
-      raise("Unknown command: #{command.inspect}")
    end
-  end
 
-  def parse_cli_options!
-    option_parser.parse!(@args)
-    @options
-  end
+    def parse_cli_options!
+      option_parser.parse!(@args)
+      @options
+    end
 
-  protected
+    protected
 
-  def load_and_apply_config!
-    @config = Settings.worker_config(@options[:config_file])
-    Settings.apply_worker_config!(@config)
-  end
+    def load_and_apply_config!
+      @config = Settings.worker_config(@options[:config_file])
+      Settings.apply_worker_config!(@config)
+    end
 
-  def option_parser
-    @option_parser ||= OptionParser.new do |opts|
-      opts.banner = "Usage #{$0} <command> <options>"
-      opts.separator %{\nWhere <command> is one of:
+    def option_parser
+      @option_parser ||= OptionParser.new do |opts|
+        opts.banner = "Usage #{$0} <command> <options>"
+        opts.separator %(\nWhere <command> is one of:
  start      start the jobs daemon
  stop       stop the jobs daemon
  run        start and run in the foreground
  restart    stop and then start the jobs daemon
  status     show daemon status
-}
-
-      opts.separator "\n<options>"
-      opts.on("-c", "--config [CONFIG_PATH]", "Use alternate config file (default #{@options[:config_file]})") { |c| @options[:config_file] = c }
-      opts.on("-p", "--pid", "Use alternate folder for PID files (default #{@options[:pid_folder]})") { |p| @options[:pid_folder] = p }
-      opts.on("--no-tail", "Don't tail the logs (only affects non-daemon mode)") { @options[:tail_logs] = false }
-      opts.on("--with-prejudice", "When stopping, interrupt jobs in progress, instead of letting them drain") { @options[:kill] ||= true }
-      opts.on("--with-extreme-prejudice", "When stopping, immediately kill jobs in progress, instead of letting them drain") { @options[:kill] = 9 }
-      opts.on_tail("-h", "--help", "Show this message") { puts opts; exit }
+)
+
+        opts.separator "\n<options>"
+        opts.on("-c", "--config [CONFIG_PATH]", "Use alternate config file (default #{@options[:config_file]})") do |c|
+          @options[:config_file] = c
+        end
+        opts.on("-p", "--pid [PID_PATH]",
+                "Use alternate folder for PID files (default #{@options[:pid_folder]})") do |p|
+          @options[:pid_folder] = p
+        end
+        opts.on("--no-tail", "Don't tail the logs (only affects non-daemon mode)") { @options[:tail_logs] = false }
+        opts.on("--with-prejudice", "When stopping, interrupt jobs in progress, instead of letting them drain") do
+          @options[:kill] ||= true
+        end
+        opts.on("--with-extreme-prejudice",
+                "When stopping, immediately kill jobs in progress, instead of letting them drain") do
+          @options[:kill] = 9
+        end
+        opts.on_tail("-h", "--help", "Show this message") do
+          puts opts
+          exit
+        end
+      end
    end
-  end
 
-  def daemon
-    @daemon ||= Delayed::Daemon.new(@options[:pid_folder])
-  end
+    def daemon
+      @daemon ||= Delayed::Daemon.new(@options[:pid_folder])
+    end
 
-  def start
-    load_rails
-    tail_rails_log unless daemon.daemonized?
-    Delayed::Pool.new(@config).start
-  end
+    def start
+      load_rails
+      tail_rails_log unless daemon.daemonized?
+      Delayed::Pool.new(@config).start
+    end
 
-  def load_rails
-    require(Settings.expand_rails_path("config/environment.rb"))
-    Dir.chdir(Rails.root)
-  end
+    def load_rails
+      require(Settings.expand_rails_path("config/environment.rb"))
+      Dir.chdir(Rails.root)
+    end
+
+    def tail_rails_log
+      return unless @options[:tail_logs]
 
-  def tail_rails_log
-    return if !@options[:tail_logs]
-    Delayed::LogTailer.new.run
+      Delayed::LogTailer.new.run
+    end
  end
 end
-end
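
The CLI keeps the same surface after the re-indent: subcommands start, stop, run, status and restart, plus the -c/--config, -p/--pid, --no-tail and --with(-extreme)-prejudice flags shown above. A hedged sketch of driving it programmatically, much as the bundled inst_jobs executable is expected to; the require path mirrors the file layout listed here and the config path is illustrative:

    require "delayed/cli"

    # Roughly equivalent to `inst_jobs run --no-tail -c config/delayed_jobs.yml`
    # in the foreground; "start" would daemonize via Delayed::Daemon instead.
    Delayed::CLI.new(["run", "--no-tail", "-c", "config/delayed_jobs.yml"]).run
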
data/lib/delayed/core_ext/kernel.rb CHANGED
@@ -1,9 +1,11 @@
+# frozen_string_literal: true
+
 module Kernel
-  def sender(i = 0)
+  def sender(idx = 0)
    frame_self = nil
    # 3. one for the block, one for this method, one for the method calling this
    # method, and _then_ we get to the self for who sent the message we want
-    RubyVM::DebugInspector.open { |dc| frame_self = dc.frame_self(3 + i) }
+    RubyVM::DebugInspector.open { |dc| frame_self = dc.frame_self(3 + idx) }
    frame_self
  end
 end
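
For reference, Kernel#sender (its parameter renamed from i to idx) uses RubyVM::DebugInspector to answer the self of the frame a fixed number of levels up the stack: past the inspector block, sender itself and the method that called sender, landing on whoever sent that message. A hedged illustration, assuming the debug_inspector gem and this core extension are loaded; the classes are invented:

    class Inbox
      def deliver
        sender # => the object whose method called #deliver
      end
    end

    class Courier
      def drop_off(inbox)
        inbox.deliver
      end
    end

    Courier.new.drop_off(Inbox.new) # => the Courier instance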