inst-jobs 2.3.2 → 2.4.2

Files changed (95)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +203 -0
  22. data/exe/inst_jobs +3 -2
  23. data/lib/delayed/backend/active_record.rb +182 -148
  24. data/lib/delayed/backend/base.rb +79 -74
  25. data/lib/delayed/batch.rb +11 -9
  26. data/lib/delayed/cli.rb +98 -84
  27. data/lib/delayed/core_ext/kernel.rb +4 -2
  28. data/lib/delayed/daemon.rb +70 -74
  29. data/lib/delayed/job_tracking.rb +26 -25
  30. data/lib/delayed/lifecycle.rb +27 -24
  31. data/lib/delayed/log_tailer.rb +17 -17
  32. data/lib/delayed/logging.rb +13 -16
  33. data/lib/delayed/message_sending.rb +42 -51
  34. data/lib/delayed/performable_method.rb +5 -7
  35. data/lib/delayed/periodic.rb +66 -65
  36. data/lib/delayed/plugin.rb +2 -4
  37. data/lib/delayed/pool.rb +198 -193
  38. data/lib/delayed/server/helpers.rb +6 -6
  39. data/lib/delayed/server.rb +51 -54
  40. data/lib/delayed/settings.rb +93 -81
  41. data/lib/delayed/testing.rb +21 -22
  42. data/lib/delayed/version.rb +1 -1
  43. data/lib/delayed/work_queue/in_process.rb +21 -18
  44. data/lib/delayed/work_queue/parent_process/client.rb +54 -55
  45. data/lib/delayed/work_queue/parent_process/server.rb +215 -209
  46. data/lib/delayed/work_queue/parent_process.rb +52 -53
  47. data/lib/delayed/worker/consul_health_check.rb +21 -19
  48. data/lib/delayed/worker/health_check.rb +21 -12
  49. data/lib/delayed/worker/null_health_check.rb +3 -1
  50. data/lib/delayed/worker/process_helper.rb +8 -9
  51. data/lib/delayed/worker.rb +271 -265
  52. data/lib/delayed/yaml_extensions.rb +12 -10
  53. data/lib/delayed_job.rb +37 -38
  54. data/lib/inst-jobs.rb +1 -1
  55. data/spec/active_record_job_spec.rb +128 -135
  56. data/spec/delayed/cli_spec.rb +7 -7
  57. data/spec/delayed/daemon_spec.rb +8 -8
  58. data/spec/delayed/message_sending_spec.rb +16 -9
  59. data/spec/delayed/periodic_spec.rb +13 -12
  60. data/spec/delayed/server_spec.rb +38 -38
  61. data/spec/delayed/settings_spec.rb +26 -25
  62. data/spec/delayed/work_queue/in_process_spec.rb +7 -7
  63. data/spec/delayed/work_queue/parent_process/client_spec.rb +15 -11
  64. data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
  65. data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
  66. data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
  67. data/spec/delayed/worker/health_check_spec.rb +51 -49
  68. data/spec/delayed/worker_spec.rb +28 -25
  69. data/spec/gemfiles/52.gemfile +5 -3
  70. data/spec/gemfiles/52.gemfile.lock +240 -0
  71. data/spec/gemfiles/60.gemfile +5 -3
  72. data/spec/gemfiles/60.gemfile.lock +1 -1
  73. data/spec/gemfiles/61.gemfile +5 -3
  74. data/spec/sample_jobs.rb +45 -15
  75. data/spec/shared/delayed_batch.rb +74 -67
  76. data/spec/shared/delayed_method.rb +143 -102
  77. data/spec/shared/performable_method.rb +39 -38
  78. data/spec/shared/shared_backend.rb +534 -441
  79. data/spec/shared/testing.rb +14 -14
  80. data/spec/shared/worker.rb +155 -147
  81. data/spec/shared_jobs_specs.rb +13 -13
  82. data/spec/spec_helper.rb +43 -40
  83. metadata +75 -56
  84. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  85. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  86. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  87. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  88. data/lib/delayed/backend/redis/find_available.lua +0 -3
  89. data/lib/delayed/backend/redis/functions.rb +0 -59
  90. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  91. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  92. data/lib/delayed/backend/redis/job.rb +0 -528
  93. data/lib/delayed/backend/redis/set_running.lua +0 -5
  94. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  95. data/spec/redis_job_spec.rb +0 -148
@@ -12,7 +12,7 @@ module Delayed
  end
 
  module Base
- ON_HOLD_LOCKED_BY = 'on hold'
+ ON_HOLD_LOCKED_BY = "on hold"
  ON_HOLD_COUNT = 50
 
  def self.included(base)
@@ -22,9 +22,7 @@ module Delayed
  end
 
  module ClassMethods
- attr_accessor :batches
- attr_accessor :batch_enqueue_args
- attr_accessor :default_priority
+ attr_accessor :batches, :batch_enqueue_args, :default_priority
 
  # Add a job to the queue
  # The first argument should be an object that respond_to?(:perform)
@@ -32,35 +30,37 @@ module Delayed
  # :priority, :run_at, :queue, :strand, :singleton
  # Example: Delayed::Job.enqueue(object, priority: 0, run_at: time, queue: queue)
  def enqueue(object,
- priority: default_priority,
- run_at: nil,
- expires_at: nil,
- queue: Delayed::Settings.queue,
- strand: nil,
- singleton: nil,
- n_strand: nil,
- max_attempts: Delayed::Settings.max_attempts,
- **kwargs)
+ priority: default_priority,
+ run_at: nil,
+ expires_at: nil,
+ queue: Delayed::Settings.queue,
+ strand: nil,
+ singleton: nil,
+ n_strand: nil,
+ max_attempts: Delayed::Settings.max_attempts,
+ **kwargs)
 
  unless object.respond_to?(:perform)
- raise ArgumentError, 'Cannot enqueue items which do not respond to perform'
+ raise ArgumentError, "Cannot enqueue items which do not respond to perform"
  end
 
+ strand ||= singleton if Settings.infer_strand_from_singleton
+
  kwargs = Settings.default_job_options.merge(kwargs)
  kwargs[:payload_object] = object
  kwargs[:priority] = priority
  kwargs[:run_at] = run_at if run_at
  kwargs[:strand] = strand
  kwargs[:max_attempts] = max_attempts
- kwargs[:source] = Marginalia::Comment.construct_comment if defined?(Marginalia) && Marginalia::Comment.components
+ if defined?(Marginalia) && Marginalia::Comment.components
+ kwargs[:source] =
+ Marginalia::Comment.construct_comment
+ end
  kwargs[:expires_at] = expires_at
  kwargs[:queue] = queue
+ kwargs[:singleton] = singleton
 
- strand_args = 0
- strand_args += 1 if strand
- strand_args += 1 if n_strand
- strand_args += 1 if singleton
- raise ArgumentError, "Only one of strand, n_strand, or singleton can be used" if strand_args > 1
+ raise ArgumentError, "Only one of strand or n_strand can be used" if strand && n_strand
 
  # If two parameters are given to n_strand, the first param is used
  # as the strand name for looking up the Setting, while the second
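
The n_strand comment in the context above is easier to see with a concrete call. A minimal sketch, assuming a hypothetical job class and tenant id; only the shape of the n_strand: keyword comes from this diff:

    # Hypothetical job class used for illustration only.
    class ExternalApiJob
      def perform; end
    end

    account_id = 42 # illustrative tenant id

    # Two-element n_strand: the first element is the name used to look up the
    # concurrency Setting; together the elements name the concrete strand.
    Delayed::Job.enqueue(ExternalApiJob.new, n_strand: ["external_api", account_id])

    # Single-value form: the same name serves both purposes.
    Delayed::Job.enqueue(ExternalApiJob.new, n_strand: "external_api")
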
@@ -86,18 +86,15 @@ module Delayed
  end
 
  if singleton
- kwargs[:strand] = singleton
- job = self.create_singleton(**kwargs)
+ job = create(**kwargs)
  elsif batches && strand.nil? && run_at.nil?
  batch_enqueue_args = kwargs.slice(*self.batch_enqueue_args)
  batches[batch_enqueue_args] << kwargs
  return true
  else
- if kwargs[:on_conflict].present?
- Delayed::Logging.logger.warn("[DELAYED_JOB] WARNING: providing 'on_conflict' as an option to a non-singleton job will have no effect. Discarding.")
- kwargs.delete(:on_conflict)
- end
- job = self.create(**kwargs)
+ raise ArgumentError, "on_conflict can only be provided with singleton" if kwargs[:on_conflict]
+
+ job = create(**kwargs)
  end
 
  JobTracking.job_created(job)
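
A sketch of how the reworked singleton branch above is called. The payload class, key, and on_conflict value are illustrative; the accepted on_conflict values live in the ActiveRecord backend, not in this hunk:

    # Hypothetical payload used for illustration only.
    class RefreshCacheJob
      def perform; end
    end

    account_id = 42 # illustrative

    # 2.4 stores the singleton key in its own column (see the add_singleton_column
    # migration in the file list) instead of squatting on strand, so the singleton
    # branch is now a plain create(**kwargs). on_conflict is only accepted together
    # with singleton:; :use_earliest here is an assumed, illustrative value.
    Delayed::Job.enqueue(RefreshCacheJob.new,
                         singleton: "refresh_cache:#{account_id}",
                         on_conflict: :use_earliest)
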
@@ -128,10 +125,10 @@ module Delayed
 
  def check_priorities(min_priority, max_priority)
  if min_priority && min_priority < Delayed::MIN_PRIORITY
- raise(ArgumentError, "min_priority #{min_priority} can't be less than #{Delayed::MIN_PRIORITY}")
+ raise ArgumentError, "min_priority #{min_priority} can't be less than #{Delayed::MIN_PRIORITY}"
  end
- if max_priority && max_priority > Delayed::MAX_PRIORITY
- raise(ArgumentError, "max_priority #{max_priority} can't be greater than #{Delayed::MAX_PRIORITY}")
+ if max_priority && max_priority > Delayed::MAX_PRIORITY # rubocop:disable Style/GuardClause
+ raise ArgumentError, "max_priority #{max_priority} can't be greater than #{Delayed::MAX_PRIORITY}"
  end
  end
 
@@ -144,13 +141,19 @@ module Delayed
 
  def processes_locked_locally(name: nil)
  name ||= Socket.gethostname rescue x
- running_jobs.select{|job| job.locked_by.start_with?("#{name}:")}.map{|job| job.locked_by.split(':').last.to_i}
+ local_jobs = running_jobs.select do |job|
+ job.locked_by.start_with?("#{name}:")
+ end
+ local_jobs.map { |job| job.locked_by.split(":").last.to_i }
  end
 
  def unlock_orphaned_prefetched_jobs
  horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
- orphaned_jobs = running_jobs.select { |job| job.locked_by.start_with?('prefetch:') && job.locked_at < horizon }
+ orphaned_jobs = running_jobs.select do |job|
+ job.locked_by.start_with?("prefetch:") && job.locked_at < horizon
+ end
  return 0 if orphaned_jobs.empty?
+
  unlock(orphaned_jobs)
  end
 
@@ -164,13 +167,14 @@ module Delayed
  regex = Regexp.new("^#{Regexp.escape(name)}:#{pid_regex}$")
  unlocked_jobs = 0
  running = false if pid
- self.running_jobs.each do |job|
+ running_jobs.each do |job|
  next unless job.locked_by =~ regex
+
  unless pid
  job_pid = $1.to_i
  running = Process.kill(0, job_pid) rescue false
  end
- if !running
+ unless running
  unlocked_jobs += 1
  job.reschedule("process died")
  end
@@ -182,14 +186,14 @@ module Delayed
  def failed?
  failed_at
  end
- alias_method :failed, :failed?
+ alias failed failed?
 
  def expired?
  expires_at && (self.class.db_time_now >= expires_at)
  end
 
  def inferred_max_attempts
- self.max_attempts || Delayed::Settings.max_attempts
+ max_attempts || Delayed::Settings.max_attempts
  end
 
  # Reschedule the job in the future (when a job fails).
@@ -197,22 +201,22 @@ module Delayed
  def reschedule(error = nil, time = nil)
  begin
  obj = payload_object
- return_code = obj.on_failure(error) if obj && obj.respond_to?(:on_failure)
+ return_code = obj.on_failure(error) if obj.respond_to?(:on_failure)
  rescue
  # don't allow a failed deserialization to prevent rescheduling
  end
 
  self.attempts += 1 unless return_code == :unlock
 
- if self.attempts >= self.inferred_max_attempts
+ if self.attempts >= inferred_max_attempts
  permanent_failure error || "max attempts reached"
  elsif expired?
  permanent_failure error || "job has expired"
  else
- time ||= self.reschedule_at
+ time ||= reschedule_at
  self.run_at = time
- self.unlock
- self.save!
+ unlock
+ save!
  end
  end
 
@@ -220,26 +224,24 @@ module Delayed
  begin
  # notify the payload_object of a permanent failure
  obj = payload_object
- obj.on_permanent_failure(error) if obj && obj.respond_to?(:on_permanent_failure)
+ obj.on_permanent_failure(error) if obj.respond_to?(:on_permanent_failure)
  rescue
  # don't allow a failed deserialization to prevent destroying the job
  end
 
  # optionally destroy the object
  destroy_self = true
- if Delayed::Worker.on_max_failures
- destroy_self = Delayed::Worker.on_max_failures.call(self, error)
- end
+ destroy_self = Delayed::Worker.on_max_failures.call(self, error) if Delayed::Worker.on_max_failures
 
  if destroy_self
- self.destroy
+ destroy
  else
- self.fail!
+ fail!
  end
  end
 
  def payload_object
- @payload_object ||= deserialize(self['handler'].untaint)
+ @payload_object ||= deserialize(self["handler"].untaint)
  end
 
  def name
@@ -255,7 +257,7 @@ module Delayed
 
  def full_name
  obj = payload_object rescue nil
- if obj && obj.respond_to?(:full_name)
+ if obj.respond_to?(:full_name)
  obj.full_name
  else
  name
@@ -264,14 +266,14 @@ module Delayed
 
  def payload_object=(object)
  @payload_object = object
- self['handler'] = object.to_yaml
- self['tag'] = if object.respond_to?(:tag)
- object.tag
- elsif object.is_a?(Module)
- "#{object}.perform"
- else
- "#{object.class}#perform"
- end
+ self["handler"] = object.to_yaml
+ self["tag"] = if object.respond_to?(:tag)
+ object.tag
+ elsif object.is_a?(Module)
+ "#{object}.perform"
+ else
+ "#{object.class}#perform"
+ end
  end
 
  # Moved into its own method so that new_relic can trace it.
@@ -298,15 +300,16 @@ module Delayed
  end
 
  def locked?
- !!(self.locked_at || self.locked_by)
+ !!(locked_at || locked_by)
  end
 
  def reschedule_at
- new_time = self.class.db_time_now + (attempts ** 4) + 5
+ new_time = self.class.db_time_now + (attempts**4) + 5
  begin
  if payload_object.respond_to?(:reschedule_at)
  new_time = payload_object.reschedule_at(
- self.class.db_time_now, attempts)
+ self.class.db_time_now, attempts
+ )
  end
  rescue
  # TODO: just swallow errors from reschedule_at ?
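
The backoff in the hunk above (db_time_now + attempts**4 + 5 seconds) is only the default; a payload object can supply its own reschedule_at hook. A minimal sketch with a hypothetical job class:

    # Hypothetical payload class; reschedule_at above prefers this hook over the
    # default attempts**4 + 5 backoff.
    class FlakyApiJob
      def perform
        # ... call an unreliable API ...
      end

      # Receives the database clock and the attempt count; here, simple linear backoff.
      def reschedule_at(current_time, attempts)
        current_time + (attempts * 60)
      end
    end
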
@@ -318,25 +321,26 @@ module Delayed
  self.locked_by = ON_HOLD_LOCKED_BY
  self.locked_at = self.class.db_time_now
  self.attempts = ON_HOLD_COUNT
- self.save!
+ save!
  end
 
  def unhold!
  self.locked_by = nil
  self.locked_at = nil
  self.attempts = 0
- self.run_at = [self.class.db_time_now, self.run_at].max
+ self.run_at = [self.class.db_time_now, run_at].max
  self.failed_at = nil
- self.save!
+ save!
  end
 
  def on_hold?
- self.locked_by == 'on hold' && self.locked_at && self.attempts == ON_HOLD_COUNT
+ locked_by == "on hold" && locked_at && self.attempts == ON_HOLD_COUNT
  end
 
- private
+ private
 
- ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/
+ PARSE_OBJECT_FROM_YAML = %r{!ruby/\w+:([^\s]+)}.freeze
+ private_constant :PARSE_OBJECT_FROM_YAML
 
  def deserialize(source)
  handler = nil
@@ -350,13 +354,13 @@ module Delayed
  return handler if handler.respond_to?(:perform)
 
  raise DeserializationError,
- 'Job failed to load: Unknown handler. Try to manually require the appropriate file.'
+ "Job failed to load: Unknown handler. Try to manually require the appropriate file."
  rescue TypeError, LoadError, NameError => e
  raise DeserializationError,
- "Job failed to load: #{e.message}. Try to manually require the required file."
+ "Job failed to load: #{e.message}. Try to manually require the required file."
  rescue Psych::SyntaxError => e
- raise DeserializationError,
- "YAML parsing error: #{e.message}. Probably not recoverable."
+ raise DeserializationError,
+ "YAML parsing error: #{e.message}. Probably not recoverable."
  end
 
  def _yaml_deserialize(source)
@@ -364,12 +368,13 @@ module Delayed
  end
 
  def attempt_to_load_from_source(source)
- if md = ParseObjectFromYaml.match(source)
- md[1].constantize
- end
+ return unless (md = PARSE_OBJECT_FROM_YAML.match(source))
+
+ md[1].constantize
  end
 
- public
+ public
+
  def initialize_defaults
  self.queue ||= Delayed::Settings.queue
  self.run_at ||= self.class.db_time_now
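
The failure-handling hunks in this file call optional hooks on the payload object (on_failure in reschedule, on_permanent_failure in permanent_failure, plus the Delayed::Worker.on_max_failures callback). A sketch of a hypothetical payload class wiring the per-object hooks up; only the hook names and call sites come from the hunks above:

    # Hypothetical payload class; logger calls are illustrative.
    class ImportJob
      def perform
        # ... do the work ...
      end

      # Called from reschedule() before the attempt counter is bumped;
      # returning :unlock leaves the attempt count untouched.
      def on_failure(error)
        Rails.logger.warn("import failed, will retry: #{error}")
      end

      # Called from permanent_failure() when max attempts are exhausted
      # or the job has expired.
      def on_permanent_failure(error)
        Rails.logger.error("import permanently failed: #{error}")
      end
    end
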
data/lib/delayed/batch.rb CHANGED
@@ -2,11 +2,11 @@
 
  module Delayed
  module Batch
- class PerformableBatch < Struct.new(:mode, :items)
+ PerformableBatch = Struct.new(:mode, :items) do
  def initialize(mode, items)
  raise "unsupported mode" unless mode == :serial
- self.mode = mode
- self.items = items
+
+ super
  end
 
  def display_name
@@ -25,14 +25,16 @@ module Delayed
  end
 
  class << self
- def serial_batch(opts = {})
- prepare_batches(:serial, opts){ yield }
+ def serial_batch(opts = {}, &block)
+ prepare_batches(:serial, opts, &block)
  end
 
  private
+
  def prepare_batches(mode, opts)
  raise "nested batching is not supported" if Delayed::Job.batches
- Delayed::Job.batches = Hash.new { |h,k| h[k] = Set.new }
+
+ Delayed::Job.batches = Hash.new { |h, k| h[k] = Set.new }
  batch_enqueue_args = [:queue]
  batch_enqueue_args << :priority unless opts[:priority]
  Delayed::Job.batch_enqueue_args = batch_enqueue_args
@@ -42,9 +44,9 @@ module Delayed
  Delayed::Job.batches = nil
  batch_args = opts.slice(:priority)
  batches.each do |enqueue_args, batch|
- if batch.size == 0
- next
- elsif batch.size == 1
+ next if batch.size.zero?
+
+ if batch.size == 1
  args = batch.first.merge(batch_args)
  payload_object = args.delete(:payload_object)
  Delayed::Job.enqueue(payload_object, **args)
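
A sketch of the batching entry point after this change; the job class is hypothetical, and the collection behaviour is the `elsif batches && strand.nil? && run_at.nil?` branch of enqueue shown earlier:

    # Hypothetical mailer job used for illustration only.
    class MailerJob
      def initialize(user_id)
        @user_id = user_id
      end

      def perform; end
    end

    # Enqueues inside the block are collected per queue (and per priority, unless an
    # explicit priority is passed here) and flushed as batch jobs when the block returns.
    Delayed::Batch.serial_batch(priority: 20) do
      [1, 2, 3].each { |id| Delayed::Job.enqueue(MailerJob.new(id)) }
    end
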
data/lib/delayed/cli.rb CHANGED
@@ -1,111 +1,125 @@
  # frozen_string_literal: true
 
- require 'optparse'
+ require "optparse"
 
  module Delayed
- class CLI
- class << self
- attr_accessor :instance
- end
+ class CLI
+ class << self
+ attr_accessor :instance
+ end
 
- attr_reader :config
+ attr_reader :config
 
- def initialize(args = ARGV)
- self.class.instance = self
+ def initialize(args = ARGV)
+ self.class.instance = self
 
- @args = args
- # config that will be applied on Settings and passed to the created Pool
- @config = {}
- # CLI options that will be kept to this class
- @options = {
- :config_file => Settings.default_worker_config_name,
- :pid_folder => Settings.expand_rails_path("tmp/pids"),
- :tail_logs => true, # only in FG mode
- }
- end
+ @args = args
+ # config that will be applied on Settings and passed to the created Pool
+ @config = {}
+ # CLI options that will be kept to this class
+ @options = {
+ config_file: Settings.default_worker_config_name,
+ pid_folder: Settings.expand_rails_path("tmp/pids"),
+ tail_logs: true # only in FG mode
+ }
+ end
 
- def run
- parse_cli_options!
- load_and_apply_config!
-
- command = @args.shift
- case command
- when 'start'
- exit 1 if daemon.status(print: :alive) == :running
- daemon.daemonize!
- start
- when 'stop'
- daemon.stop(kill: @options[:kill])
- when 'run'
- start
- when 'status'
- if daemon.status
- exit 0
+ def run
+ parse_cli_options!
+ load_and_apply_config!
+
+ command = @args.shift
+ case command
+ when "start"
+ exit 1 if daemon.status(print: :alive) == :running
+ daemon.daemonize!
+ start
+ when "stop"
+ daemon.stop(kill: @options[:kill])
+ when "run"
+ start
+ when "status"
+ if daemon.status
+ exit 0
+ else
+ exit 1
+ end
+ when "restart"
+ daemon.stop(kill: @options[:kill])
+ daemon.daemonize!
+ start
+ when nil
+ puts option_parser.to_s
  else
- exit 1
+ raise("Unknown command: #{command.inspect}")
  end
- when 'restart'
- daemon.stop(kill: @options[:kill])
- daemon.daemonize!
- start
- when nil
- puts option_parser.to_s
- else
- raise("Unknown command: #{command.inspect}")
  end
- end
 
- def parse_cli_options!
- option_parser.parse!(@args)
- @options
- end
+ def parse_cli_options!
+ option_parser.parse!(@args)
+ @options
+ end
 
- protected
+ protected
 
- def load_and_apply_config!
- @config = Settings.worker_config(@options[:config_file])
- Settings.apply_worker_config!(@config)
- end
+ def load_and_apply_config!
+ @config = Settings.worker_config(@options[:config_file])
+ Settings.apply_worker_config!(@config)
+ end
 
- def option_parser
- @option_parser ||= OptionParser.new do |opts|
- opts.banner = "Usage #{$0} <command> <options>"
- opts.separator %{\nWhere <command> is one of:
+ def option_parser
+ @option_parser ||= OptionParser.new do |opts|
+ opts.banner = "Usage #{$0} <command> <options>"
+ opts.separator %(\nWhere <command> is one of:
  start start the jobs daemon
  stop stop the jobs daemon
  run start and run in the foreground
  restart stop and then start the jobs daemon
  status show daemon status
- }
-
- opts.separator "\n<options>"
- opts.on("-c", "--config [CONFIG_PATH]", "Use alternate config file (default #{@options[:config_file]})") { |c| @options[:config_file] = c }
- opts.on("-p", "--pid", "Use alternate folder for PID files (default #{@options[:pid_folder]})") { |p| @options[:pid_folder] = p }
- opts.on("--no-tail", "Don't tail the logs (only affects non-daemon mode)") { @options[:tail_logs] = false }
- opts.on("--with-prejudice", "When stopping, interrupt jobs in progress, instead of letting them drain") { @options[:kill] ||= true }
- opts.on("--with-extreme-prejudice", "When stopping, immediately kill jobs in progress, instead of letting them drain") { @options[:kill] = 9 }
- opts.on_tail("-h", "--help", "Show this message") { puts opts; exit }
+ )
+
+ opts.separator "\n<options>"
+ opts.on("-c", "--config [CONFIG_PATH]", "Use alternate config file (default #{@options[:config_file]})") do |c|
+ @options[:config_file] = c
+ end
+ opts.on("-p", "--pid [PID_PATH]",
+ "Use alternate folder for PID files (default #{@options[:pid_folder]})") do |p|
+ @options[:pid_folder] = p
+ end
+ opts.on("--no-tail", "Don't tail the logs (only affects non-daemon mode)") { @options[:tail_logs] = false }
+ opts.on("--with-prejudice", "When stopping, interrupt jobs in progress, instead of letting them drain") do
+ @options[:kill] ||= true
+ end
+ opts.on("--with-extreme-prejudice",
+ "When stopping, immediately kill jobs in progress, instead of letting them drain") do
+ @options[:kill] = 9
+ end
+ opts.on_tail("-h", "--help", "Show this message") do
+ puts opts
+ exit
+ end
+ end
  end
- end
 
- def daemon
- @daemon ||= Delayed::Daemon.new(@options[:pid_folder])
- end
+ def daemon
+ @daemon ||= Delayed::Daemon.new(@options[:pid_folder])
+ end
 
- def start
- load_rails
- tail_rails_log unless daemon.daemonized?
- Delayed::Pool.new(@config).start
- end
+ def start
+ load_rails
+ tail_rails_log unless daemon.daemonized?
+ Delayed::Pool.new(@config).start
+ end
 
- def load_rails
- require(Settings.expand_rails_path("config/environment.rb"))
- Dir.chdir(Rails.root)
- end
+ def load_rails
+ require(Settings.expand_rails_path("config/environment.rb"))
+ Dir.chdir(Rails.root)
+ end
+
+ def tail_rails_log
+ return unless @options[:tail_logs]
 
- def tail_rails_log
- return if !@options[:tail_logs]
- Delayed::LogTailer.new.run
+ Delayed::LogTailer.new.run
+ end
  end
  end
- end
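
The reformatted command handling above is easiest to read alongside how the class is driven. A minimal sketch; exe/inst_jobs normally passes ARGV, and the literal argument array here is illustrative:

    # Roughly what `inst_jobs run --no-tail` does: parse the flags, load the worker
    # config via Settings, then start a Delayed::Pool in the foreground without
    # tailing the Rails log.
    Delayed::CLI.new(["run", "--no-tail"]).run
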