inst-jobs 2.3.3 → 2.4.0

Files changed (95)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +3 -3
  21. data/db/migrate/20210812210128_add_singleton_column.rb +203 -0
  22. data/exe/inst_jobs +3 -2
  23. data/lib/delayed/backend/active_record.rb +182 -148
  24. data/lib/delayed/backend/base.rb +79 -74
  25. data/lib/delayed/batch.rb +11 -9
  26. data/lib/delayed/cli.rb +98 -84
  27. data/lib/delayed/core_ext/kernel.rb +4 -2
  28. data/lib/delayed/daemon.rb +70 -74
  29. data/lib/delayed/job_tracking.rb +26 -25
  30. data/lib/delayed/lifecycle.rb +27 -24
  31. data/lib/delayed/log_tailer.rb +17 -17
  32. data/lib/delayed/logging.rb +13 -16
  33. data/lib/delayed/message_sending.rb +42 -51
  34. data/lib/delayed/performable_method.rb +5 -7
  35. data/lib/delayed/periodic.rb +66 -65
  36. data/lib/delayed/plugin.rb +2 -4
  37. data/lib/delayed/pool.rb +198 -193
  38. data/lib/delayed/server/helpers.rb +6 -6
  39. data/lib/delayed/server.rb +51 -54
  40. data/lib/delayed/settings.rb +93 -81
  41. data/lib/delayed/testing.rb +21 -22
  42. data/lib/delayed/version.rb +1 -1
  43. data/lib/delayed/work_queue/in_process.rb +21 -18
  44. data/lib/delayed/work_queue/parent_process/client.rb +54 -55
  45. data/lib/delayed/work_queue/parent_process/server.rb +215 -209
  46. data/lib/delayed/work_queue/parent_process.rb +52 -53
  47. data/lib/delayed/worker/consul_health_check.rb +21 -19
  48. data/lib/delayed/worker/health_check.rb +21 -12
  49. data/lib/delayed/worker/null_health_check.rb +3 -1
  50. data/lib/delayed/worker/process_helper.rb +8 -9
  51. data/lib/delayed/worker.rb +271 -265
  52. data/lib/delayed/yaml_extensions.rb +12 -10
  53. data/lib/delayed_job.rb +37 -38
  54. data/lib/inst-jobs.rb +1 -1
  55. data/spec/active_record_job_spec.rb +128 -135
  56. data/spec/delayed/cli_spec.rb +7 -7
  57. data/spec/delayed/daemon_spec.rb +8 -8
  58. data/spec/delayed/message_sending_spec.rb +8 -9
  59. data/spec/delayed/periodic_spec.rb +13 -12
  60. data/spec/delayed/server_spec.rb +38 -38
  61. data/spec/delayed/settings_spec.rb +26 -25
  62. data/spec/delayed/work_queue/in_process_spec.rb +7 -7
  63. data/spec/delayed/work_queue/parent_process/client_spec.rb +15 -11
  64. data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
  65. data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
  66. data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
  67. data/spec/delayed/worker/health_check_spec.rb +51 -49
  68. data/spec/delayed/worker_spec.rb +28 -25
  69. data/spec/gemfiles/52.gemfile +5 -3
  70. data/spec/gemfiles/52.gemfile.lock +240 -0
  71. data/spec/gemfiles/60.gemfile +5 -3
  72. data/spec/gemfiles/60.gemfile.lock +1 -1
  73. data/spec/gemfiles/61.gemfile +5 -3
  74. data/spec/sample_jobs.rb +45 -15
  75. data/spec/shared/delayed_batch.rb +74 -67
  76. data/spec/shared/delayed_method.rb +143 -102
  77. data/spec/shared/performable_method.rb +39 -38
  78. data/spec/shared/shared_backend.rb +517 -441
  79. data/spec/shared/testing.rb +14 -14
  80. data/spec/shared/worker.rb +155 -147
  81. data/spec/shared_jobs_specs.rb +13 -13
  82. data/spec/spec_helper.rb +43 -40
  83. metadata +74 -56
  84. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  85. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  86. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  87. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  88. data/lib/delayed/backend/redis/find_available.lua +0 -3
  89. data/lib/delayed/backend/redis/functions.rb +0 -59
  90. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  91. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  92. data/lib/delayed/backend/redis/job.rb +0 -528
  93. data/lib/delayed/backend/redis/set_running.lua +0 -5
  94. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  95. data/spec/redis_job_spec.rb +0 -148
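The biggest addition in the list above is data/db/migrate/20210812210128_add_singleton_column.rb, which backs the singleton scheduling that the periodic-job rework below relies on; the Redis backend and its Lua scripts are dropped in the same release. Below is a minimal sketch of a singleton enqueue from application code. The NightlyReport class and the singleton key are hypothetical; the singleton: and on_conflict: options are the ones exercised by periodic.rb in this diff.

# Hypothetical payload: anything responding to #perform can be enqueued.
class NightlyReport
  def perform
    # ... do the nightly work ...
  end

  def display_name
    "nightly report"
  end
end

# Keep at most one queued copy of this job, keyed by the singleton string.
# :patient is the conflict strategy periodic.rb passes in this release.
Delayed::Job.enqueue(
  NightlyReport.new,
  max_attempts: 1,
  singleton: "reports:nightly",
  on_conflict: :patient
)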
data/lib/delayed/performable_method.rb CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 module Delayed
-  class PerformableMethod < Struct.new(:object, :method, :args, :kwargs, :fail_cb, :permanent_fail_cb, :sender)
+  PerformableMethod = Struct.new(:object, :method, :args, :kwargs, :fail_cb, :permanent_fail_cb, :sender) do # rubocop:disable Lint/StructNewOverride
     def initialize(object, method, args: [], kwargs: {}, on_failure: nil, on_permanent_failure: nil, sender: nil)
       raise NoMethodError, "undefined method `#{method}' for #{object.inspect}" unless object.respond_to?(method, true)
 
@@ -41,12 +41,10 @@ module Delayed
         else
           object.send(method, *args, **kwargs)
         end
+      elsif kwargs.empty?
+        object.public_send(method, *args)
       else
-        if kwargs.empty?
-          object.public_send(method, *args)
-        else
-          object.public_send(method, *args, **kwargs)
-        end
+        object.public_send(method, *args, **kwargs)
       end
     end
 
@@ -74,7 +72,7 @@ module Delayed
     def full_name
       obj_name = object.is_a?(ActiveRecord::Base) ? "#{object.class}.find(#{object.id}).#{method}" : display_name
       kgs = kwargs || {}
-      kwargs_str = kgs.map { |(k, v)| ", #{k}: #{deep_de_ar_ize(v)}"}.join("")
+      kwargs_str = kgs.map { |(k, v)| ", #{k}: #{deep_de_ar_ize(v)}" }.join
       "#{obj_name}(#{args.map { |a| deep_de_ar_ize(a) }.join(', ')}#{kwargs_str})"
     end
   end
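For reference, a minimal sketch of how this Struct is built and invoked, using only the initialize signature and the perform/full_name methods visible in the hunks above; the String receiver and arguments are illustrative.

# Wrap a method call so it can be serialized into a job and invoked later.
pm = Delayed::PerformableMethod.new(
  "hello world",            # receiver the method will be called on
  :sub,                     # method name; initialize raises NoMethodError if the receiver lacks it
  args: ["world", "there"]
)

pm.perform     # => "hello there"
pm.full_name   # human-readable description of the wrapped call, used in job listings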
data/lib/delayed/periodic.rb CHANGED
@@ -1,85 +1,86 @@
 # frozen_string_literal: true
 
-require 'fugit'
+require "fugit"
 
 module Delayed
-class Periodic
-  attr_reader :name, :cron
+  class Periodic
+    attr_reader :name, :cron
 
-  def encode_with(coder)
-    coder.scalar("!ruby/Delayed::Periodic", @name)
-  end
+    def encode_with(coder)
+      coder.scalar("!ruby/Delayed::Periodic", @name)
+    end
 
-  cattr_accessor :scheduled, :overrides
-  self.scheduled = {}
-  self.overrides = {}
+    cattr_accessor :scheduled, :overrides
+    self.scheduled = {}
+    self.overrides = {}
 
-  def self.add_overrides(overrides)
-    overrides.each do |name, cron_line|
-      # throws error if the line is malformed
-      Fugit.do_parse_cron(cron_line)
+    def self.add_overrides(overrides)
+      overrides.each do |_name, cron_line|
+        # throws error if the line is malformed
+        Fugit.do_parse_cron(cron_line)
+      end
+      self.overrides.merge!(overrides)
     end
-    self.overrides.merge!(overrides)
-  end
 
-  def self.cron(job_name, cron_line, job_args = {}, &block)
-    raise ArgumentError, "job #{job_name} already scheduled!" if self.scheduled[job_name]
-    cron_line = overrides[job_name] || cron_line
-    self.scheduled[job_name] = self.new(job_name, cron_line, job_args, block)
-  end
+    def self.cron(job_name, cron_line, job_args = {}, &block)
+      raise ArgumentError, "job #{job_name} already scheduled!" if scheduled[job_name]
 
-  def self.audit_queue
-    # we used to queue up a job in a strand here, and perform the audit inside that job
-    # however, now that we're using singletons for scheduling periodic jobs,
-    # it's fine to just do the audit in-line here without risk of creating duplicates
-    perform_audit!
-  end
+      cron_line = overrides[job_name] || cron_line
+      scheduled[job_name] = new(job_name, cron_line, job_args, block)
+    end
 
-  # make sure all periodic jobs are scheduled for their next run in the job queue
-  # this auditing should run on the strand
-  def self.perform_audit!
-    self.scheduled.each { |name, periodic| periodic.enqueue }
-  end
+    def self.audit_queue
+      # we used to queue up a job in a strand here, and perform the audit inside that job
+      # however, now that we're using singletons for scheduling periodic jobs,
+      # it's fine to just do the audit in-line here without risk of creating duplicates
+      perform_audit!
+    end
 
-  def initialize(name, cron_line, job_args, block)
-    @name = name
-    @cron = Fugit.do_parse_cron(cron_line)
-    @job_args = { :priority => Delayed::LOW_PRIORITY }.merge(job_args.symbolize_keys)
-    @block = block
-  end
+    # make sure all periodic jobs are scheduled for their next run in the job queue
+    # this auditing should run on the strand
+    def self.perform_audit!
+      scheduled.each { |_name, periodic| periodic.enqueue }
+    end
 
-  def enqueue
-    Delayed::Job.enqueue(self, **enqueue_args)
-  end
+    def initialize(name, cron_line, job_args, block)
+      @name = name
+      @cron = Fugit.do_parse_cron(cron_line)
+      @job_args = { priority: Delayed::LOW_PRIORITY }.merge(job_args.symbolize_keys)
+      @block = block
+    end
 
-  def enqueue_args
-    inferred_args = {
-      max_attempts: 1,
-      run_at: @cron.next_time(Delayed::Periodic.now).utc.to_time,
-      singleton: tag,
-      on_conflict: :patient
-    }
-    @job_args.merge(inferred_args)
-  end
+    def enqueue
+      Delayed::Job.enqueue(self, **enqueue_args)
+    end
 
-  def perform
-    @block.call()
-  ensure
-    begin
-      enqueue
-    rescue
-      # double fail! the auditor will have to catch this.
-      Rails.logger.error "Failure enqueueing periodic job! #{@name} #{$!.inspect}"
+    def enqueue_args
+      inferred_args = {
+        max_attempts: 1,
+        run_at: @cron.next_time(Delayed::Periodic.now).utc.to_time,
+        singleton: tag,
+        on_conflict: :patient
+      }
+      @job_args.merge(inferred_args)
     end
-  end
 
-  def tag
-    "periodic: #{@name}"
-  end
-  alias_method :display_name, :tag
+    def perform
+      @block.call
+    ensure
+      begin
+        enqueue
+      rescue
+        # double fail! the auditor will have to catch this.
+        Rails.logger.error "Failure enqueueing periodic job! #{@name} #{$!.inspect}"
+      end
+    end
+
+    def tag
+      "periodic: #{@name}"
+    end
+    alias display_name tag
 
-  def self.now
-    Time.zone.now
+    def self.now
+      Time.zone.now
+    end
   end
 end
-end
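For context, a minimal usage sketch of the API reworked above; the job name and cron expressions are examples, not taken from the gem. Each registered job re-enqueues itself as a singleton with on_conflict: :patient, which is what the enqueue_args change above sets up.

# Overrides are read at registration time, so install them before the cron blocks run.
Delayed::Periodic.add_overrides("reports:nightly" => "15 3 * * *")

Delayed::Periodic.cron("reports:nightly", "5 2 * * *", priority: Delayed::LOW_PRIORITY) do
  NightlyReport.new.perform # hypothetical payload from the earlier sketch
end

# The pool's auditor calls this to make sure every registered job has its next run queued.
Delayed::Periodic.audit_queue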
data/lib/delayed/plugin.rb CHANGED
@@ -1,6 +1,6 @@
 # frozen_string_literal: true
 
-require 'active_support/core_ext/class/attribute'
+require "active_support/core_ext/class/attribute"
 
 module Delayed
   class Plugin
@@ -11,9 +11,7 @@ module Delayed
     end
 
     def self.inject!
-      unless @injected
-        self.callback_block.call(Delayed::Worker.lifecycle) if self.callback_block
-      end
+      callback_block&.call(Delayed::Worker.lifecycle) unless @injected
       @injected = true
     end
 
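A short sketch of the hook that the simplified inject! drives, assuming the delayed_job-style callbacks class method and a :perform lifecycle event (neither is shown in this hunk); the timing plugin itself is hypothetical.

# Hypothetical plugin: log how long each job takes, via the worker lifecycle.
class TimingPlugin < Delayed::Plugin
  callbacks do |lifecycle|
    lifecycle.around(:perform) do |worker, job, &block|
      started = Time.now
      result = block.call(worker, job)
      Rails.logger.info("job #{job.id} ran in #{Time.now - started}s")
      result
    end
  end
end

# inject! runs the stored callback block once against Delayed::Worker.lifecycle.
TimingPlugin.inject!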
data/lib/delayed/pool.rb CHANGED
@@ -1,248 +1,254 @@
 # frozen_string_literal: true
 
 module Delayed
-class Pool
-  include Delayed::Logging
+  class Pool
+    include Delayed::Logging
 
-  mattr_accessor :on_fork
-  self.on_fork = ->{ }
+    mattr_accessor :on_fork
+    self.on_fork = -> {}
 
-  SIGNALS = %i{INT TERM QUIT}
-  POOL_SLEEP_PERIOD = 5
+    SIGNALS = %i[INT TERM QUIT].freeze
+    POOL_SLEEP_PERIOD = 5
 
-  attr_reader :workers
+    attr_reader :workers
 
-  def initialize(*args)
-    if args.first.is_a?(Hash)
-      @config = args.first
-    else
-      warn "Calling Delayed::Pool.new directly is deprecated. Use `Delayed::CLI.new.run()` instead."
+    def initialize(*args)
+      if args.first.is_a?(Hash)
+        @config = args.first
+      else
+        warn "Calling Delayed::Pool.new directly is deprecated. Use `Delayed::CLI.new.run()` instead."
+      end
+      @workers = {}
+      @signal_queue = []
+      @self_pipe = IO.pipe
     end
-    @workers = {}
-    @signal_queue = []
-    @self_pipe = IO.pipe
-  end
 
-  def run
-    warn "Delayed::Pool#run is deprecated and will be removed. Use `Delayed::CLI.new.run()` instead."
-    Delayed::CLI.new.run()
-  end
+    def run
+      warn "Delayed::Pool#run is deprecated and will be removed. Use `Delayed::CLI.new.run()` instead."
+      Delayed::CLI.new.run
+    end
 
-  def start
-    say "Started job master", :info
-    SIGNALS.each { |sig| trap(sig) { @signal_queue << sig; wake_up } }
-    $0 = procname
-    # fork to handle unlocking (to prevent polluting the parent with worker objects)
-    unlock_pid = fork_with_reconnects do
-      unlock_orphaned_jobs
+    def start
+      say "Started job master", :info
+      SIGNALS.each do |sig|
+        trap(sig) do
+          @signal_queue << sig
+          wake_up
+        end
+      end
+      $0 = procname
+      # fork to handle unlocking (to prevent polluting the parent with worker objects)
+      unlock_pid = fork_with_reconnects do
+        unlock_orphaned_jobs
+      end
+      Process.wait unlock_pid
+
+      spawn_periodic_auditor
+      spawn_abandoned_job_cleanup
+      spawn_all_workers
+      say "Workers spawned"
+      join
+      say "Shutting down"
+      stop
+      reap_all_children
+    rescue Exception => e # rubocop:disable Lint/RescueException
+      say "Job master died with error: #{e.inspect}\n#{e.backtrace.join("\n")}", :fatal
+      raise
    end
-    Process.wait unlock_pid
-
-    spawn_periodic_auditor
-    spawn_abandoned_job_cleanup
-    spawn_all_workers
-    say "Workers spawned"
-    join
-    say "Shutting down"
-    stop
-    reap_all_children
-  rescue Exception => e
-    say "Job master died with error: #{e.inspect}\n#{e.backtrace.join("\n")}", :fatal
-    raise
-  end
 
-  protected
+    protected
 
-  def procname
-    "delayed_jobs_pool#{Settings.pool_procname_suffix}"
-  end
+    def procname
+      "delayed_jobs_pool#{Settings.pool_procname_suffix}"
+    end
 
-  def unlock_orphaned_jobs(worker = nil, pid = nil)
-    return if Settings.disable_automatic_orphan_unlocking
+    def unlock_orphaned_jobs(_worker = nil, pid = nil)
+      return if Settings.disable_automatic_orphan_unlocking
 
-    unlocked_jobs = Delayed::Job.unlock_orphaned_jobs(pid)
-    say "Unlocked #{unlocked_jobs} orphaned jobs" if unlocked_jobs > 0
-    ActiveRecord::Base.connection_handler.clear_all_connections! unless Rails.env.test?
-  end
+      unlocked_jobs = Delayed::Job.unlock_orphaned_jobs(pid)
+      say "Unlocked #{unlocked_jobs} orphaned jobs" if unlocked_jobs.positive?
+      ActiveRecord::Base.connection_handler.clear_all_connections! unless Rails.env.test?
+    end
 
-  def spawn_all_workers
-    ActiveRecord::Base.connection_handler.clear_all_connections!
+    def spawn_all_workers
+      ActiveRecord::Base.connection_handler.clear_all_connections!
 
-    if @config[:work_queue] == 'parent_process'
-      @work_queue = WorkQueue::ParentProcess.new
-      spawn_work_queue
-    end
+      if @config[:work_queue] == "parent_process"
+        @work_queue = WorkQueue::ParentProcess.new
+        spawn_work_queue
+      end
 
-    @config[:workers].each do |worker_config|
-      (worker_config[:workers] || 1).times { spawn_worker(worker_config) }
+      @config[:workers].each do |worker_config|
+        (worker_config[:workers] || 1).times { spawn_worker(worker_config) }
+      end
     end
-  end
 
-  def spawn_work_queue
-    parent_pid = Process.pid
-    pid = fork_with_reconnects do
-      $0 = "delayed_jobs_work_queue#{Settings.pool_procname_suffix}"
-      @work_queue.server(parent_pid: parent_pid).run
+    def spawn_work_queue
+      parent_pid = Process.pid
+      pid = fork_with_reconnects do
+        $0 = "delayed_jobs_work_queue#{Settings.pool_procname_suffix}"
+        @work_queue.server(parent_pid: parent_pid).run
+      end
+      workers[pid] = :work_queue
     end
-    workers[pid] = :work_queue
-  end
 
-  def spawn_worker(worker_config)
-    if worker_config[:periodic]
-      return # backwards compat
-    else
+    def spawn_worker(worker_config)
+      return if worker_config[:periodic] # backwards compat
+
       worker_config[:parent_pid] = Process.pid
       worker_config[:work_queue] = @work_queue.client if @work_queue
      worker = Delayed::Worker.new(worker_config)
-    end
 
-    pid = fork_with_reconnects do
-      worker.start
+      pid = fork_with_reconnects do
+        worker.start
+      end
+      workers[pid] = worker
    end
-    workers[pid] = worker
-  end
 
-  # child processes need to reconnect so they don't accidentally share redis or
-  # db connections with the parent
-  def fork_with_reconnects
-    fork do
-      @self_pipe.each(&:close) # sub-processes don't need to wake us up; keep their FDs clean
-      Pool.on_fork.()
-      Delayed::Job.reconnect!
-      yield
+    # child processes need to reconnect so they don't accidentally share redis or
+    # db connections with the parent
+    def fork_with_reconnects
+      fork do
+        @self_pipe.each(&:close) # sub-processes don't need to wake us up; keep their FDs clean
+        Pool.on_fork.call
+        Delayed::Job.reconnect!
+        yield
+      end
    end
-  end
 
-  def spawn_abandoned_job_cleanup
-    return if Settings.disable_abandoned_job_cleanup
-    cleanup_interval_in_minutes = 60
-    @abandoned_cleanup_thread = Thread.new do
-      # every hour (staggered by process)
-      # check for dead jobs and cull them.
-      # Will actually be more often based on the
-      # number of worker nodes in the pool. This will actually
-      # be a max of N times per hour where N is the number of workers,
-      # but they won't overrun each other because the health check
-      # takes an advisory lock internally
-      sleep(rand(cleanup_interval_in_minutes * 60))
-      loop do
-        schedule_abandoned_job_cleanup
-        sleep(cleanup_interval_in_minutes * 60)
+    def spawn_abandoned_job_cleanup
+      return if Settings.disable_abandoned_job_cleanup
+
+      cleanup_interval_in_minutes = 60
+      @abandoned_cleanup_thread = Thread.new do
+        # every hour (staggered by process)
+        # check for dead jobs and cull them.
+        # Will actually be more often based on the
+        # number of worker nodes in the pool. This will actually
+        # be a max of N times per hour where N is the number of workers,
+        # but they won't overrun each other because the health check
+        # takes an advisory lock internally
+        sleep(rand(cleanup_interval_in_minutes * 60))
+        loop do
+          schedule_abandoned_job_cleanup
+          sleep(cleanup_interval_in_minutes * 60)
+        end
      end
    end
-  end
 
-  def schedule_abandoned_job_cleanup
-    pid = fork_with_reconnects do
-      # we want to avoid db connections in the main pool process
-      $0 = "delayed_abandoned_job_cleanup"
-      Delayed::Worker::HealthCheck.reschedule_abandoned_jobs
+    def schedule_abandoned_job_cleanup
+      pid = fork_with_reconnects do
+        # we want to avoid db connections in the main pool process
+        $0 = "delayed_abandoned_job_cleanup"
+        Delayed::Worker::HealthCheck.reschedule_abandoned_jobs
+      end
+      workers[pid] = :abandoned_job_cleanup
    end
-    workers[pid] = :abandoned_job_cleanup
-  end
 
-  def spawn_periodic_auditor
-    return if Settings.disable_periodic_jobs
+    def spawn_periodic_auditor
+      return if Settings.disable_periodic_jobs
 
-    @periodic_thread = Thread.new do
-      # schedule the initial audit immediately on startup
-      schedule_periodic_audit
-      # initial sleep is randomized, for some staggering in the audit calls
-      # since job processors are usually all restarted at the same time
-      sleep(rand(15 * 60))
-      loop do
+      @periodic_thread = Thread.new do
+        # schedule the initial audit immediately on startup
        schedule_periodic_audit
-        sleep(15 * 60)
+        # initial sleep is randomized, for some staggering in the audit calls
+        # since job processors are usually all restarted at the same time
+        sleep(rand(15 * 60))
+        loop do
+          schedule_periodic_audit
+          sleep(15 * 60)
+        end
      end
    end
-  end
 
-  def schedule_periodic_audit
-    pid = fork_with_reconnects do
-      # we want to avoid db connections in the main pool process
-      $0 = "delayed_periodic_audit_scheduler"
-      Delayed::Periodic.audit_queue
+    def schedule_periodic_audit
+      pid = fork_with_reconnects do
+        # we want to avoid db connections in the main pool process
+        $0 = "delayed_periodic_audit_scheduler"
+        Delayed::Periodic.audit_queue
+      end
+      workers[pid] = :periodic_audit
    end
-    workers[pid] = :periodic_audit
-  end
 
-  def join
-    loop do
-      maintain_children
-      case sig = @signal_queue.shift
-      when nil
-        pool_sleep
-      when :QUIT
-        break
-      when :TERM, :INT
-        stop(graceful: false) if Settings.kill_workers_on_exit
-        break
-      else
-        logger.warn("Unexpected signal received: #{sig}")
+    def join
+      loop do
+        maintain_children
+        case sig = @signal_queue.shift
+        when nil
+          pool_sleep
+        when :QUIT
+          break
+        when :TERM, :INT
+          stop(graceful: false) if Settings.kill_workers_on_exit
+          break
+        else
+          logger.warn("Unexpected signal received: #{sig}")
+        end
      end
    end
-  end
 
-  def pool_sleep
-    IO.select([@self_pipe[0]], nil, nil, POOL_SLEEP_PERIOD)
-    @self_pipe[0].read_nonblock(11, exception: false)
-  end
+    def pool_sleep
+      IO.select([@self_pipe[0]], nil, nil, POOL_SLEEP_PERIOD)
+      @self_pipe[0].read_nonblock(11, exception: false)
+    end
 
-  def stop(graceful: true, timeout: Settings.slow_exit_timeout)
-    signal_for_children = graceful ? :QUIT : :TERM
-    if Settings.kill_workers_on_exit
-      limit = Time.now + timeout
-      until @workers.empty? || Time.now >= limit
+    def stop(graceful: true, timeout: Settings.slow_exit_timeout)
+      signal_for_children = graceful ? :QUIT : :TERM
+      if Settings.kill_workers_on_exit
+        limit = Time.zone.now + timeout
+        until @workers.empty? || Time.zone.now >= limit
+          signal_all_children(signal_for_children)
+          # Give our children some time to process the signal before checking if
+          # they've exited
+          sleep(0.5)
+          reap_all_children.each { |pid| @workers.delete(pid) }
+        end
+
+        # We really want to give the workers every oportunity to clean up after
+        # themselves before murdering them.
+        stop(graceful: false, timeout: 2) if graceful
+        signal_all_children(:KILL)
+      else
        signal_all_children(signal_for_children)
-        # Give our children some time to process the signal before checking if
-        # they've exited
-        sleep(0.5)
-        reap_all_children.each { |pid| @workers.delete(pid) }
      end
+    end
 
-      # We really want to give the workers every oportunity to clean up after
-      # themselves before murdering them.
-      stop(graceful: false, timeout: 2) if graceful
-      signal_all_children(:KILL)
-    else
-      signal_all_children(signal_for_children)
+    def signal_all_children(signal)
+      workers.each_key { |pid| signal_child(signal, pid) }
    end
-  end
 
-  def signal_all_children(signal)
-    workers.keys.each { |pid| signal_child(signal, pid) }
-  end
+    def signal_child(signal, pid)
+      Process.kill(signal, pid)
+    rescue Erron::ESRCH
+      workers.delete(pid)
+    end
 
-  def signal_child(signal, pid)
-    Process.kill(signal, pid)
-  rescue Erron::ESRCH
-    workers.delete(pid)
-  end
+    # Respawn all children that have exited since we last checked
+    def maintain_children
+      reap_all_children.each do |pid|
+        respawn_child(pid)
+      end
+    end
+
+    # Reaps processes that have exited or just returns if none have exited
+    #
+    # @return Array An array of child pids that have exited
+    def reap_all_children
+      exited_pids = []
+      loop do
+        pid = Process.wait(-1, Process::WNOHANG)
+        break unless pid
 
-  # Respawn all children that have exited since we last checked
-  def maintain_children
-    reap_all_children.each do |pid|
-      respawn_child(pid)
+        exited_pids << pid
+      rescue Errno::ECHILD
+        break
+      end
+      exited_pids
    end
-  end
 
-  # Reaps processes that have exited or just returns if none have exited
-  #
-  # @return Array An array of child pids that have exited
-  def reap_all_children
-    exited_pids = []
-    begin
-      pid = Process.wait(-1, Process::WNOHANG)
-      break unless pid
-      exited_pids << pid
-    rescue Errno::ECHILD
-      break
-    end while true # I hate doing loops this way but it's the only way to make the rescue work
-    exited_pids
-  end
+    def respawn_child(child)
+      return unless workers.include?(child)
 
-  def respawn_child(child)
-    if workers.include?(child)
      worker = workers.delete(child)
      case worker
      when :periodic_audit
@@ -262,10 +268,9 @@ class Pool
        spawn_worker(worker.config)
      end
    end
-  end
 
-  def wake_up
-    @self_pipe[1].write_nonblock('.', exception: false)
+    def wake_up
+      @self_pipe[1].write_nonblock(".", exception: false)
+    end
  end
 end
-end
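The pool above leans on two classic Unix patterns: a self-pipe so signal handlers can wake the sleeping master loop, and non-blocking Process.wait(-1, WNOHANG) to reap exited children (the loop-with-rescue form that replaces the old begin...end while true). Below is a standalone sketch of just those two mechanisms; it is illustrative, not inst-jobs code.

# Minimal master-process skeleton: self-pipe wakeup plus WNOHANG child reaping.
self_pipe = IO.pipe
signals   = []

%i[INT TERM QUIT].each do |sig|
  trap(sig) do
    signals << sig
    self_pipe[1].write_nonblock(".", exception: false) # wake the sleeping loop
  end
end

3.times { fork { sleep rand(3) } } # stand-in "workers"

reap_all_children = lambda do
  exited = []
  loop do
    pid = Process.wait(-1, Process::WNOHANG)
    break unless pid # nil: children exist but none have exited yet

    exited << pid
  rescue Errno::ECHILD
    break # no children left at all
  end
  exited
end

loop do
  reap_all_children.call.each { |pid| puts "child #{pid} exited" }
  break if signals.any?

  # Sleep until a signal writes to the pipe, or the timeout passes.
  IO.select([self_pipe[0]], nil, nil, 5)
  self_pipe[0].read_nonblock(11, exception: false) # drain the wakeup byte(s)
end

puts "got #{signals.first}, shutting down"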