inst-jobs 2.0.0 → 3.0.0

Files changed (98)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
  22. data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
  23. data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
  24. data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
  25. data/exe/inst_jobs +3 -2
  26. data/lib/delayed/backend/active_record.rb +211 -168
  27. data/lib/delayed/backend/base.rb +110 -72
  28. data/lib/delayed/batch.rb +11 -9
  29. data/lib/delayed/cli.rb +98 -84
  30. data/lib/delayed/core_ext/kernel.rb +4 -2
  31. data/lib/delayed/daemon.rb +70 -74
  32. data/lib/delayed/job_tracking.rb +26 -25
  33. data/lib/delayed/lifecycle.rb +27 -23
  34. data/lib/delayed/log_tailer.rb +17 -17
  35. data/lib/delayed/logging.rb +13 -16
  36. data/lib/delayed/message_sending.rb +43 -52
  37. data/lib/delayed/performable_method.rb +6 -8
  38. data/lib/delayed/periodic.rb +72 -68
  39. data/lib/delayed/plugin.rb +2 -4
  40. data/lib/delayed/pool.rb +205 -168
  41. data/lib/delayed/server/helpers.rb +6 -6
  42. data/lib/delayed/server.rb +51 -54
  43. data/lib/delayed/settings.rb +94 -81
  44. data/lib/delayed/testing.rb +21 -22
  45. data/lib/delayed/version.rb +1 -1
  46. data/lib/delayed/work_queue/in_process.rb +21 -17
  47. data/lib/delayed/work_queue/parent_process/client.rb +55 -53
  48. data/lib/delayed/work_queue/parent_process/server.rb +245 -207
  49. data/lib/delayed/work_queue/parent_process.rb +52 -53
  50. data/lib/delayed/worker/consul_health_check.rb +32 -33
  51. data/lib/delayed/worker/health_check.rb +34 -26
  52. data/lib/delayed/worker/null_health_check.rb +3 -1
  53. data/lib/delayed/worker/process_helper.rb +8 -9
  54. data/lib/delayed/worker.rb +272 -241
  55. data/lib/delayed/yaml_extensions.rb +12 -10
  56. data/lib/delayed_job.rb +37 -37
  57. data/lib/inst-jobs.rb +1 -1
  58. data/spec/active_record_job_spec.rb +143 -139
  59. data/spec/delayed/cli_spec.rb +7 -7
  60. data/spec/delayed/daemon_spec.rb +10 -9
  61. data/spec/delayed/message_sending_spec.rb +16 -9
  62. data/spec/delayed/periodic_spec.rb +14 -21
  63. data/spec/delayed/server_spec.rb +38 -38
  64. data/spec/delayed/settings_spec.rb +26 -25
  65. data/spec/delayed/work_queue/in_process_spec.rb +7 -8
  66. data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
  67. data/spec/delayed/work_queue/parent_process/server_spec.rb +117 -41
  68. data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
  69. data/spec/delayed/worker/consul_health_check_spec.rb +37 -50
  70. data/spec/delayed/worker/health_check_spec.rb +60 -52
  71. data/spec/delayed/worker_spec.rb +44 -21
  72. data/spec/sample_jobs.rb +45 -15
  73. data/spec/shared/delayed_batch.rb +74 -67
  74. data/spec/shared/delayed_method.rb +143 -102
  75. data/spec/shared/performable_method.rb +39 -38
  76. data/spec/shared/shared_backend.rb +550 -437
  77. data/spec/shared/testing.rb +14 -14
  78. data/spec/shared/worker.rb +156 -148
  79. data/spec/shared_jobs_specs.rb +13 -13
  80. data/spec/spec_helper.rb +53 -55
  81. metadata +148 -82
  82. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  83. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  84. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  85. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  86. data/lib/delayed/backend/redis/find_available.lua +0 -3
  87. data/lib/delayed/backend/redis/functions.rb +0 -59
  88. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  89. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  90. data/lib/delayed/backend/redis/job.rb +0 -535
  91. data/lib/delayed/backend/redis/set_running.lua +0 -5
  92. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  93. data/spec/gemfiles/42.gemfile +0 -7
  94. data/spec/gemfiles/50.gemfile +0 -7
  95. data/spec/gemfiles/51.gemfile +0 -7
  96. data/spec/gemfiles/52.gemfile +0 -7
  97. data/spec/gemfiles/60.gemfile +0 -7
  98. data/spec/redis_job_spec.rb +0 -148
data/lib/delayed/backend/base.rb CHANGED
@@ -12,7 +12,7 @@ module Delayed
  end
 
  module Base
- ON_HOLD_LOCKED_BY = 'on hold'
+ ON_HOLD_LOCKED_BY = "on hold"
  ON_HOLD_COUNT = 50
 
  def self.included(base)
@@ -22,9 +22,7 @@ module Delayed
  end
 
  module ClassMethods
- attr_accessor :batches
- attr_accessor :batch_enqueue_args
- attr_accessor :default_priority
+ attr_accessor :batches, :batch_enqueue_args, :default_priority
 
  # Add a job to the queue
  # The first argument should be an object that respond_to?(:perform)
@@ -32,29 +30,37 @@ module Delayed
  # :priority, :run_at, :queue, :strand, :singleton
  # Example: Delayed::Job.enqueue(object, priority: 0, run_at: time, queue: queue)
  def enqueue(object,
- priority: default_priority,
- run_at: nil,
- expires_at: nil,
- queue: Delayed::Settings.queue,
- strand: nil,
- singleton: nil,
- n_strand: nil,
- max_attempts: Delayed::Settings.max_attempts,
- **kwargs)
+ priority: default_priority,
+ run_at: nil,
+ expires_at: nil,
+ queue: Delayed::Settings.queue,
+ strand: nil,
+ singleton: nil,
+ n_strand: nil,
+ max_attempts: Delayed::Settings.max_attempts,
+ **kwargs)
 
  unless object.respond_to?(:perform)
- raise ArgumentError, 'Cannot enqueue items which do not respond to perform'
+ raise ArgumentError, "Cannot enqueue items which do not respond to perform"
  end
 
+ strand ||= singleton if Settings.infer_strand_from_singleton
+
  kwargs = Settings.default_job_options.merge(kwargs)
  kwargs[:payload_object] = object
  kwargs[:priority] = priority
  kwargs[:run_at] = run_at if run_at
  kwargs[:strand] = strand
  kwargs[:max_attempts] = max_attempts
- kwargs[:source] = Marginalia::Comment.construct_comment if defined?(Marginalia) && Marginalia::Comment.components
+ if defined?(Marginalia) && Marginalia::Comment.components
+ kwargs[:source] =
+ Marginalia::Comment.construct_comment
+ end
  kwargs[:expires_at] = expires_at
  kwargs[:queue] = queue
+ kwargs[:singleton] = singleton
+
+ raise ArgumentError, "Only one of strand or n_strand can be used" if strand && n_strand
 
  # If two parameters are given to n_strand, the first param is used
  # as the strand name for looking up the Setting, while the second
@@ -80,14 +86,15 @@ module Delayed
  end
 
  if singleton
- kwargs[:strand] = singleton
- job = self.create_singleton(**kwargs)
+ job = create(**kwargs)
  elsif batches && strand.nil? && run_at.nil?
  batch_enqueue_args = kwargs.slice(*self.batch_enqueue_args)
  batches[batch_enqueue_args] << kwargs
  return true
  else
- job = self.create(**kwargs)
+ raise ArgumentError, "on_conflict can only be provided with singleton" if kwargs[:on_conflict]
+
+ job = create(**kwargs)
  end
 
  JobTracking.job_created(job)
@@ -118,10 +125,10 @@ module Delayed
 
  def check_priorities(min_priority, max_priority)
  if min_priority && min_priority < Delayed::MIN_PRIORITY
- raise(ArgumentError, "min_priority #{min_priority} can't be less than #{Delayed::MIN_PRIORITY}")
+ raise ArgumentError, "min_priority #{min_priority} can't be less than #{Delayed::MIN_PRIORITY}"
  end
- if max_priority && max_priority > Delayed::MAX_PRIORITY
- raise(ArgumentError, "max_priority #{max_priority} can't be greater than #{Delayed::MAX_PRIORITY}")
+ if max_priority && max_priority > Delayed::MAX_PRIORITY # rubocop:disable Style/GuardClause
+ raise ArgumentError, "max_priority #{max_priority} can't be greater than #{Delayed::MAX_PRIORITY}"
  end
  end
 
@@ -134,13 +141,19 @@ module Delayed
 
  def processes_locked_locally(name: nil)
  name ||= Socket.gethostname rescue x
- running_jobs.select{|job| job.locked_by.start_with?("#{name}:")}.map{|job| job.locked_by.split(':').last.to_i}
+ local_jobs = running_jobs.select do |job|
+ job.locked_by.start_with?("#{name}:")
+ end
+ local_jobs.map { |job| job.locked_by.split(":").last.to_i }
  end
 
  def unlock_orphaned_prefetched_jobs
- horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
- orphaned_jobs = running_jobs.select { |job| job.locked_by.start_with?('prefetch:') && job.locked_at < horizon }
+ horizon = db_time_now - (Settings.parent_process[:prefetched_jobs_timeout] * 4)
+ orphaned_jobs = running_jobs.select do |job|
+ job.locked_by.start_with?("prefetch:") && job.locked_at < horizon
+ end
  return 0 if orphaned_jobs.empty?
+
  unlock(orphaned_jobs)
  end
 
@@ -153,16 +166,36 @@ module Delayed
  pid_regex = pid || '(\d+)'
  regex = Regexp.new("^#{Regexp.escape(name)}:#{pid_regex}$")
  unlocked_jobs = 0
+ escaped_name = name.gsub("\\", "\\\\")
+ .gsub("%", "\\%")
+ .gsub("_", "\\_")
+ locked_by_like = "#{escaped_name}:%"
  running = false if pid
- self.running_jobs.each do |job|
- next unless job.locked_by =~ regex
- unless pid
- job_pid = $1.to_i
- running = Process.kill(0, job_pid) rescue false
- end
- if !running
- unlocked_jobs += 1
- job.reschedule("process died")
+ jobs = running_jobs.limit(100)
+ jobs = pid ? jobs.where(locked_by: "#{name}:#{pid}") : jobs.where("locked_by LIKE ?", locked_by_like)
+ ignores = []
+ loop do
+ batch_scope = ignores.empty? ? jobs : jobs.where.not(id: ignores)
+ batch = batch_scope.to_a
+ break if batch.empty?
+
+ batch.each do |job|
+ unless job.locked_by =~ regex
+ ignores << job.id
+ next
+ end
+
+ unless pid
+ job_pid = $1.to_i
+ running = Process.kill(0, job_pid) rescue false
+ end
+
+ if running
+ ignores << job.id
+ else
+ unlocked_jobs += 1
+ job.reschedule("process died")
+ end
  end
  end
  unlocked_jobs
@@ -172,33 +205,37 @@ module Delayed
  def failed?
  failed_at
  end
- alias_method :failed, :failed?
+ alias failed failed?
 
  def expired?
  expires_at && (self.class.db_time_now >= expires_at)
  end
 
+ def inferred_max_attempts
+ max_attempts || Delayed::Settings.max_attempts
+ end
+
  # Reschedule the job in the future (when a job fails).
  # Uses an exponential scale depending on the number of failed attempts.
  def reschedule(error = nil, time = nil)
  begin
  obj = payload_object
- return_code = obj.on_failure(error) if obj && obj.respond_to?(:on_failure)
+ return_code = obj.on_failure(error) if obj.respond_to?(:on_failure)
  rescue
  # don't allow a failed deserialization to prevent rescheduling
  end
 
  self.attempts += 1 unless return_code == :unlock
 
- if self.attempts >= (self.max_attempts || Delayed::Settings.max_attempts)
+ if self.attempts >= inferred_max_attempts
  permanent_failure error || "max attempts reached"
  elsif expired?
  permanent_failure error || "job has expired"
  else
- time ||= self.reschedule_at
+ time ||= reschedule_at
  self.run_at = time
- self.unlock
- self.save!
+ unlock
+ save!
  end
  end
 
@@ -206,26 +243,24 @@ module Delayed
  begin
  # notify the payload_object of a permanent failure
  obj = payload_object
- obj.on_permanent_failure(error) if obj && obj.respond_to?(:on_permanent_failure)
+ obj.on_permanent_failure(error) if obj.respond_to?(:on_permanent_failure)
  rescue
  # don't allow a failed deserialization to prevent destroying the job
  end
 
  # optionally destroy the object
  destroy_self = true
- if Delayed::Worker.on_max_failures
- destroy_self = Delayed::Worker.on_max_failures.call(self, error)
- end
+ destroy_self = Delayed::Worker.on_max_failures.call(self, error) if Delayed::Worker.on_max_failures
 
  if destroy_self
- self.destroy
+ destroy
  else
- self.fail!
+ fail!
  end
  end
 
  def payload_object
- @payload_object ||= deserialize(self['handler'].untaint)
+ @payload_object ||= deserialize(self["handler"].untaint)
  end
 
  def name
@@ -241,7 +276,7 @@ module Delayed
 
  def full_name
  obj = payload_object rescue nil
- if obj && obj.respond_to?(:full_name)
+ if obj.respond_to?(:full_name)
  obj.full_name
  else
  name
@@ -250,14 +285,14 @@ module Delayed
 
  def payload_object=(object)
  @payload_object = object
- self['handler'] = object.to_yaml
- self['tag'] = if object.respond_to?(:tag)
- object.tag
- elsif object.is_a?(Module)
- "#{object}.perform"
- else
- "#{object.class}#perform"
- end
+ self["handler"] = object.to_yaml
+ self["tag"] = if object.respond_to?(:tag)
+ object.tag
+ elsif object.is_a?(Module)
+ "#{object}.perform"
+ else
+ "#{object.class}#perform"
+ end
  end
 
  # Moved into its own method so that new_relic can trace it.
@@ -284,15 +319,16 @@ module Delayed
  end
 
  def locked?
- !!(self.locked_at || self.locked_by)
+ !!(locked_at || locked_by)
  end
 
  def reschedule_at
- new_time = self.class.db_time_now + (attempts ** 4) + 5
+ new_time = self.class.db_time_now + (attempts**4) + 5
  begin
  if payload_object.respond_to?(:reschedule_at)
  new_time = payload_object.reschedule_at(
- self.class.db_time_now, attempts)
+ self.class.db_time_now, attempts
+ )
  end
  rescue
  # TODO: just swallow errors from reschedule_at ?
@@ -304,25 +340,26 @@ module Delayed
  self.locked_by = ON_HOLD_LOCKED_BY
  self.locked_at = self.class.db_time_now
  self.attempts = ON_HOLD_COUNT
- self.save!
+ save!
  end
 
  def unhold!
  self.locked_by = nil
  self.locked_at = nil
  self.attempts = 0
- self.run_at = [self.class.db_time_now, self.run_at].max
+ self.run_at = [self.class.db_time_now, run_at].max
  self.failed_at = nil
- self.save!
+ save!
  end
 
  def on_hold?
- self.locked_by == 'on hold' && self.locked_at && self.attempts == ON_HOLD_COUNT
+ locked_by == "on hold" && locked_at && self.attempts == ON_HOLD_COUNT
  end
 
- private
+ private
 
- ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/
+ PARSE_OBJECT_FROM_YAML = %r{!ruby/\w+:([^\s]+)}.freeze
+ private_constant :PARSE_OBJECT_FROM_YAML
 
  def deserialize(source)
  handler = nil
@@ -336,13 +373,13 @@ module Delayed
  return handler if handler.respond_to?(:perform)
 
  raise DeserializationError,
- 'Job failed to load: Unknown handler. Try to manually require the appropriate file.'
+ "Job failed to load: Unknown handler. Try to manually require the appropriate file."
  rescue TypeError, LoadError, NameError => e
  raise DeserializationError,
- "Job failed to load: #{e.message}. Try to manually require the required file."
+ "Job failed to load: #{e.message}. Try to manually require the required file."
  rescue Psych::SyntaxError => e
- raise DeserializationError,
- "YAML parsing error: #{e.message}. Probably not recoverable."
+ raise DeserializationError,
+ "YAML parsing error: #{e.message}. Probably not recoverable."
  end
 
  def _yaml_deserialize(source)
@@ -350,12 +387,13 @@ module Delayed
  end
 
  def attempt_to_load_from_source(source)
- if md = ParseObjectFromYaml.match(source)
- md[1].constantize
- end
+ return unless (md = PARSE_OBJECT_FROM_YAML.match(source))
+
+ md[1].constantize
  end
 
- public
+ public
+
  def initialize_defaults
  self.queue ||= Delayed::Settings.queue
  self.run_at ||= self.class.db_time_now
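Note on the enqueue changes above: singleton jobs now go through the normal create path (with kwargs[:singleton] passed through), the strand can be inferred from the singleton name when Settings.infer_strand_from_singleton is enabled, and combining strand with n_strand now raises. A minimal usage sketch against the 3.0 signature; the GradeRollup class and the strand names are illustrative, not part of the gem:

# Any object that responds to #perform can be enqueued.
class GradeRollup
  def perform
    # ... recompute something expensive ...
  end
end

# Singleton: collapse duplicate pending jobs onto a single queue entry.
Delayed::Job.enqueue(GradeRollup.new, singleton: "grade-rollup")

# n_strand: spread work across a configurable number of named strands.
Delayed::Job.enqueue(GradeRollup.new, n_strand: "grade-rollup")

# Raises ArgumentError in 3.0: strand and n_strand are mutually exclusive.
Delayed::Job.enqueue(GradeRollup.new, strand: "a", n_strand: "b")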
data/lib/delayed/batch.rb CHANGED
@@ -2,11 +2,11 @@
 
  module Delayed
  module Batch
- class PerformableBatch < Struct.new(:mode, :items)
+ PerformableBatch = Struct.new(:mode, :items) do
  def initialize(mode, items)
  raise "unsupported mode" unless mode == :serial
- self.mode = mode
- self.items = items
+
+ super
  end
 
  def display_name
@@ -25,14 +25,16 @@ module Delayed
  end
 
  class << self
- def serial_batch(opts = {})
- prepare_batches(:serial, opts){ yield }
+ def serial_batch(opts = {}, &block)
+ prepare_batches(:serial, opts, &block)
  end
 
  private
+
  def prepare_batches(mode, opts)
  raise "nested batching is not supported" if Delayed::Job.batches
- Delayed::Job.batches = Hash.new { |h,k| h[k] = Set.new }
+
+ Delayed::Job.batches = Hash.new { |h, k| h[k] = Set.new }
  batch_enqueue_args = [:queue]
  batch_enqueue_args << :priority unless opts[:priority]
  Delayed::Job.batch_enqueue_args = batch_enqueue_args
@@ -42,9 +44,9 @@ module Delayed
  Delayed::Job.batches = nil
  batch_args = opts.slice(:priority)
  batches.each do |enqueue_args, batch|
- if batch.size == 0
- next
- elsif batch.size == 1
+ next if batch.size.zero?
+
+ if batch.size == 1
  args = batch.first.merge(batch_args)
  payload_object = args.delete(:payload_object)
  Delayed::Job.enqueue(payload_object, **args)
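For context, serial_batch now forwards its block explicitly but behaves as before: compatible enqueues made inside the block are collected and emitted as one batch job instead of individual rows (only when no strand or run_at is given, per the enqueue branch above). A small sketch, assuming the delay() API from delayed/message_sending; Mailer and recipients are hypothetical:

Delayed::Batch.serial_batch(priority: 20) do
  # Each delay call is captured into the batch rather than enqueued individually.
  recipients.each { |user| Mailer.new(user).delay.deliver_digest }
end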
data/lib/delayed/cli.rb CHANGED
@@ -1,111 +1,125 @@
  # frozen_string_literal: true
 
- require 'optparse'
+ require "optparse"
 
  module Delayed
- class CLI
- class << self
- attr_accessor :instance
- end
+ class CLI
+ class << self
+ attr_accessor :instance
+ end
 
- attr_reader :config
+ attr_reader :config
 
- def initialize(args = ARGV)
- self.class.instance = self
+ def initialize(args = ARGV)
+ self.class.instance = self
 
- @args = args
- # config that will be applied on Settings and passed to the created Pool
- @config = {}
- # CLI options that will be kept to this class
- @options = {
- :config_file => Settings.default_worker_config_name,
- :pid_folder => Settings.expand_rails_path("tmp/pids"),
- :tail_logs => true, # only in FG mode
- }
- end
+ @args = args
+ # config that will be applied on Settings and passed to the created Pool
+ @config = {}
+ # CLI options that will be kept to this class
+ @options = {
+ config_file: Settings.default_worker_config_name,
+ pid_folder: Settings.expand_rails_path("tmp/pids"),
+ tail_logs: true # only in FG mode
+ }
+ end
 
- def run
- parse_cli_options!
- load_and_apply_config!
-
- command = @args.shift
- case command
- when 'start'
- exit 1 if daemon.status(print: :alive) == :running
- daemon.daemonize!
- start
- when 'stop'
- daemon.stop(kill: @options[:kill])
- when 'run'
- start
- when 'status'
- if daemon.status
- exit 0
+ def run
+ parse_cli_options!
+ load_and_apply_config!
+
+ command = @args.shift
+ case command
+ when "start"
+ exit 1 if daemon.status(print: :alive) == :running
+ daemon.daemonize!
+ start
+ when "stop"
+ daemon.stop(kill: @options[:kill])
+ when "run"
+ start
+ when "status"
+ if daemon.status
+ exit 0
+ else
+ exit 1
+ end
+ when "restart"
+ daemon.stop(kill: @options[:kill])
+ daemon.daemonize!
+ start
+ when nil
+ puts option_parser.to_s
  else
- exit 1
+ raise("Unknown command: #{command.inspect}")
  end
- when 'restart'
- daemon.stop(kill: @options[:kill])
- daemon.daemonize!
- start
- when nil
- puts option_parser.to_s
- else
- raise("Unknown command: #{command.inspect}")
  end
- end
 
- def parse_cli_options!
- option_parser.parse!(@args)
- @options
- end
+ def parse_cli_options!
+ option_parser.parse!(@args)
+ @options
+ end
 
- protected
+ protected
 
- def load_and_apply_config!
- @config = Settings.worker_config(@options[:config_file])
- Settings.apply_worker_config!(@config)
- end
+ def load_and_apply_config!
+ @config = Settings.worker_config(@options[:config_file])
+ Settings.apply_worker_config!(@config)
+ end
 
- def option_parser
- @option_parser ||= OptionParser.new do |opts|
- opts.banner = "Usage #{$0} <command> <options>"
- opts.separator %{\nWhere <command> is one of:
+ def option_parser
+ @option_parser ||= OptionParser.new do |opts|
+ opts.banner = "Usage #{$0} <command> <options>"
+ opts.separator %(\nWhere <command> is one of:
  start start the jobs daemon
  stop stop the jobs daemon
  run start and run in the foreground
  restart stop and then start the jobs daemon
  status show daemon status
- }
-
- opts.separator "\n<options>"
- opts.on("-c", "--config [CONFIG_PATH]", "Use alternate config file (default #{@options[:config_file]})") { |c| @options[:config_file] = c }
- opts.on("-p", "--pid", "Use alternate folder for PID files (default #{@options[:pid_folder]})") { |p| @options[:pid_folder] = p }
- opts.on("--no-tail", "Don't tail the logs (only affects non-daemon mode)") { @options[:tail_logs] = false }
- opts.on("--with-prejudice", "When stopping, interrupt jobs in progress, instead of letting them drain") { @options[:kill] ||= true }
- opts.on("--with-extreme-prejudice", "When stopping, immediately kill jobs in progress, instead of letting them drain") { @options[:kill] = 9 }
- opts.on_tail("-h", "--help", "Show this message") { puts opts; exit }
+ )
+
+ opts.separator "\n<options>"
+ opts.on("-c", "--config [CONFIG_PATH]", "Use alternate config file (default #{@options[:config_file]})") do |c|
+ @options[:config_file] = c
+ end
+ opts.on("-p", "--pid [PID_PATH]",
+ "Use alternate folder for PID files (default #{@options[:pid_folder]})") do |p|
+ @options[:pid_folder] = p
+ end
+ opts.on("--no-tail", "Don't tail the logs (only affects non-daemon mode)") { @options[:tail_logs] = false }
+ opts.on("--with-prejudice", "When stopping, interrupt jobs in progress, instead of letting them drain") do
+ @options[:kill] ||= true
+ end
+ opts.on("--with-extreme-prejudice",
+ "When stopping, immediately kill jobs in progress, instead of letting them drain") do
+ @options[:kill] = 9
+ end
+ opts.on_tail("-h", "--help", "Show this message") do
+ puts opts
+ exit
+ end
+ end
  end
- end
 
- def daemon
- @daemon ||= Delayed::Daemon.new(@options[:pid_folder])
- end
+ def daemon
+ @daemon ||= Delayed::Daemon.new(@options[:pid_folder])
+ end
 
- def start
- load_rails
- tail_rails_log unless daemon.daemonized?
- Delayed::Pool.new(@config).start
- end
+ def start
+ load_rails
+ tail_rails_log unless daemon.daemonized?
+ Delayed::Pool.new(@config).start
+ end
 
- def load_rails
- require(Settings.expand_rails_path("config/environment.rb"))
- Dir.chdir(Rails.root)
- end
+ def load_rails
+ require(Settings.expand_rails_path("config/environment.rb"))
+ Dir.chdir(Rails.root)
+ end
+
+ def tail_rails_log
+ return unless @options[:tail_logs]
 
- def tail_rails_log
- return if !@options[:tail_logs]
- Delayed::LogTailer.new.run
+ Delayed::LogTailer.new.run
+ end
  end
  end
- end
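The rewritten Delayed::CLI keeps the same command surface (start, stop, run, status, restart) and options; only formatting and option-parser style changed. A sketch of driving it programmatically, roughly what the exe/inst_jobs binstub does; the config path here is illustrative:

require "delayed/cli"

# Equivalent to: inst_jobs run --config config/delayed_jobs.yml --no-tail
Delayed::CLI.new(["run", "--config", "config/delayed_jobs.yml", "--no-tail"]).run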
data/lib/delayed/core_ext/kernel.rb CHANGED
@@ -1,9 +1,11 @@
+ # frozen_string_literal: true
+
  module Kernel
- def sender(i = 0)
+ def sender(idx = 0)
  frame_self = nil
  # 3. one for the block, one for this method, one for the method calling this
  # method, and _then_ we get to the self for who sent the message we want
- RubyVM::DebugInspector.open { |dc| frame_self = dc.frame_self(3 + idx) }
+ RubyVM::DebugInspector.open { |dc| frame_self = dc.frame_self(3 + idx) }
  frame_self
  end
  end
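Kernel#sender is unchanged apart from the renamed parameter; it still uses RubyVM::DebugInspector (from the debug_inspector gem) to return the self of the frame that called the current method. A toy illustration under the assumption that inst-jobs' core_ext above is loaded; Greeter and Caller are made up:

require "debug_inspector" # provides RubyVM::DebugInspector

class Greeter
  def hello
    sender # the object whose method invoked #hello
  end
end

class Caller
  def call_it
    Greeter.new.hello # should return this Caller instance
  end
end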