inst-jobs 2.0.0 → 3.0.0

Files changed (98)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
  22. data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
  23. data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
  24. data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
  25. data/exe/inst_jobs +3 -2
  26. data/lib/delayed/backend/active_record.rb +211 -168
  27. data/lib/delayed/backend/base.rb +110 -72
  28. data/lib/delayed/batch.rb +11 -9
  29. data/lib/delayed/cli.rb +98 -84
  30. data/lib/delayed/core_ext/kernel.rb +4 -2
  31. data/lib/delayed/daemon.rb +70 -74
  32. data/lib/delayed/job_tracking.rb +26 -25
  33. data/lib/delayed/lifecycle.rb +27 -23
  34. data/lib/delayed/log_tailer.rb +17 -17
  35. data/lib/delayed/logging.rb +13 -16
  36. data/lib/delayed/message_sending.rb +43 -52
  37. data/lib/delayed/performable_method.rb +6 -8
  38. data/lib/delayed/periodic.rb +72 -68
  39. data/lib/delayed/plugin.rb +2 -4
  40. data/lib/delayed/pool.rb +205 -168
  41. data/lib/delayed/server/helpers.rb +6 -6
  42. data/lib/delayed/server.rb +51 -54
  43. data/lib/delayed/settings.rb +94 -81
  44. data/lib/delayed/testing.rb +21 -22
  45. data/lib/delayed/version.rb +1 -1
  46. data/lib/delayed/work_queue/in_process.rb +21 -17
  47. data/lib/delayed/work_queue/parent_process/client.rb +55 -53
  48. data/lib/delayed/work_queue/parent_process/server.rb +245 -207
  49. data/lib/delayed/work_queue/parent_process.rb +52 -53
  50. data/lib/delayed/worker/consul_health_check.rb +32 -33
  51. data/lib/delayed/worker/health_check.rb +34 -26
  52. data/lib/delayed/worker/null_health_check.rb +3 -1
  53. data/lib/delayed/worker/process_helper.rb +8 -9
  54. data/lib/delayed/worker.rb +272 -241
  55. data/lib/delayed/yaml_extensions.rb +12 -10
  56. data/lib/delayed_job.rb +37 -37
  57. data/lib/inst-jobs.rb +1 -1
  58. data/spec/active_record_job_spec.rb +143 -139
  59. data/spec/delayed/cli_spec.rb +7 -7
  60. data/spec/delayed/daemon_spec.rb +10 -9
  61. data/spec/delayed/message_sending_spec.rb +16 -9
  62. data/spec/delayed/periodic_spec.rb +14 -21
  63. data/spec/delayed/server_spec.rb +38 -38
  64. data/spec/delayed/settings_spec.rb +26 -25
  65. data/spec/delayed/work_queue/in_process_spec.rb +7 -8
  66. data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
  67. data/spec/delayed/work_queue/parent_process/server_spec.rb +117 -41
  68. data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
  69. data/spec/delayed/worker/consul_health_check_spec.rb +37 -50
  70. data/spec/delayed/worker/health_check_spec.rb +60 -52
  71. data/spec/delayed/worker_spec.rb +44 -21
  72. data/spec/sample_jobs.rb +45 -15
  73. data/spec/shared/delayed_batch.rb +74 -67
  74. data/spec/shared/delayed_method.rb +143 -102
  75. data/spec/shared/performable_method.rb +39 -38
  76. data/spec/shared/shared_backend.rb +550 -437
  77. data/spec/shared/testing.rb +14 -14
  78. data/spec/shared/worker.rb +156 -148
  79. data/spec/shared_jobs_specs.rb +13 -13
  80. data/spec/spec_helper.rb +53 -55
  81. metadata +148 -82
  82. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  83. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  84. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  85. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  86. data/lib/delayed/backend/redis/find_available.lua +0 -3
  87. data/lib/delayed/backend/redis/functions.rb +0 -59
  88. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  89. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  90. data/lib/delayed/backend/redis/job.rb +0 -535
  91. data/lib/delayed/backend/redis/set_running.lua +0 -5
  92. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  93. data/spec/gemfiles/42.gemfile +0 -7
  94. data/spec/gemfiles/50.gemfile +0 -7
  95. data/spec/gemfiles/51.gemfile +0 -7
  96. data/spec/gemfiles/52.gemfile +0 -7
  97. data/spec/gemfiles/60.gemfile +0 -7
  98. data/spec/redis_job_spec.rb +0 -148
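
The headline changes of 3.0.0 are visible in the file list itself: the entire Redis backend (data/lib/delayed/backend/redis/*, redis_job_spec.rb, the per-Rails gemfiles) is removed, and the new 2021 migrations (add_singleton_column and friends) give the ActiveRecord backend first-class singleton jobs. As a hedged sketch of what those columns enable (the singleton:/on_conflict: option names are taken from the periodic.rb diff below; NightlyRollup is a hypothetical payload object, not code from this gem):

    # Hedged usage sketch only. A payload object responds to #perform, per the
    # usual delayed_job contract; the singleton string is arbitrary.
    class NightlyRollup
      def perform
        # ... expensive, idempotent work ...
      end
    end

    Delayed::Job.enqueue(NightlyRollup.new,
                         singleton: "reports:nightly_rollup",
                         on_conflict: :patient)

on_conflict: :patient is the same setting the periodic scheduler itself uses for its singleton jobs in the periodic.rb diff below.
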
data/lib/delayed/periodic.rb CHANGED
@@ -1,88 +1,92 @@
 # frozen_string_literal: true
 
-require 'fugit'
+require "fugit"
 
 module Delayed
-class Periodic
-  attr_reader :name, :cron
+  class Periodic
+    attr_reader :name, :cron
 
-  def encode_with(coder)
-    coder.scalar("!ruby/Delayed::Periodic", @name)
-  end
+    def encode_with(coder)
+      coder.scalar("!ruby/Delayed::Periodic", @name)
+    end
 
-  cattr_accessor :scheduled, :overrides
-  self.scheduled = {}
-  self.overrides = {}
+    cattr_accessor :scheduled, :overrides
+    self.scheduled = {}
+    self.overrides = {}
 
-  def self.add_overrides(overrides)
-    overrides.each do |name, cron_line|
-      # throws error if the line is malformed
-      Fugit.do_parse_cron(cron_line)
+    def self.add_overrides(overrides)
+      overrides.each do |_name, cron_line|
+        # throws error if the line is malformed
+        Fugit.do_parse_cron(cron_line)
+      end
+      self.overrides.merge!(overrides)
     end
-    self.overrides.merge!(overrides)
-  end
 
-  def self.cron(job_name, cron_line, job_args = {}, &block)
-    raise ArgumentError, "job #{job_name} already scheduled!" if self.scheduled[job_name]
-    cron_line = overrides[job_name] || cron_line
-    self.scheduled[job_name] = self.new(job_name, cron_line, job_args, block)
-  end
+    def self.cron(job_name, cron_line, job_args = {}, &block)
+      raise ArgumentError, "job #{job_name} already scheduled!" if scheduled[job_name]
 
-  def self.audit_queue
-    # we used to queue up a job in a strand here, and perform the audit inside that job
-    # however, now that we're using singletons for scheduling periodic jobs,
-    # it's fine to just do the audit in-line here without risk of creating duplicates
-    perform_audit!
-  end
+      cron_line = overrides[job_name] || cron_line
+      scheduled[job_name] = new(job_name, cron_line, job_args, block)
+    end
 
-  # make sure all periodic jobs are scheduled for their next run in the job queue
-  # this auditing should run on the strand
-  def self.perform_audit!
-    self.scheduled.each { |name, periodic| periodic.enqueue }
-  end
+    def self.audit_queue
+      # we used to queue up a job in a strand here, and perform the audit inside that job
+      # however, now that we're using singletons for scheduling periodic jobs,
+      # it's fine to just do the audit in-line here without risk of creating duplicates
+      Delayed::Job.transaction do
+        # for db performance reasons, we only need one process doing this at a time
+        # so if we can't get an advisory lock, just abort. we'll try again soon
+        return unless Delayed::Job.attempt_advisory_lock("Delayed::Periodic#audit_queue")
 
-  def initialize(name, cron_line, job_args, block)
-    @name = name
-    @cron = Fugit.do_parse_cron(cron_line)
-    @job_args = { :priority => Delayed::LOW_PRIORITY }.merge(job_args.symbolize_keys)
-    @block = block
-  end
+        perform_audit!
+      end
+    end
 
-  def enqueue
-    Delayed::Job.enqueue(self, **enqueue_args)
-  end
+    # make sure all periodic jobs are scheduled for their next run in the job queue
+    # this auditing should run on the strand
+    def self.perform_audit!
+      scheduled.each { |_name, periodic| periodic.enqueue }
+    end
 
-  def enqueue_args
-    inferred_args = {
-      max_attempts: 1,
-      run_at: @cron.next_time(Delayed::Periodic.now).utc.to_time,
-      singleton: (@job_args[:singleton] == false ? nil : tag),
-      # yes, checking for whether it is actually the boolean literal false,
-      # which means the consuming code really does not want this job to be
-      # a singleton at all.
-      on_conflict: :patient
-    }
-    @job_args.merge(inferred_args)
-  end
+    def initialize(name, cron_line, job_args, block)
+      @name = name
+      @cron = Fugit.do_parse_cron(cron_line)
+      @job_args = { priority: Delayed::LOW_PRIORITY }.merge(job_args.symbolize_keys)
+      @block = block
+    end
 
-  def perform
-    @block.call()
-  ensure
-    begin
-      enqueue
-    rescue
-      # double fail! the auditor will have to catch this.
-      Rails.logger.error "Failure enqueueing periodic job! #{@name} #{$!.inspect}"
+    def enqueue
+      Delayed::Job.enqueue(self, **enqueue_args)
     end
-  end
 
-  def tag
-    "periodic: #{@name}"
-  end
-  alias_method :display_name, :tag
+    def enqueue_args
+      inferred_args = {
+        max_attempts: 1,
+        run_at: @cron.next_time(Delayed::Periodic.now).utc.to_time,
+        singleton: tag,
+        on_conflict: :patient
+      }
+      @job_args.merge(inferred_args)
+    end
 
-  def self.now
-    Time.zone.now
+    def perform
+      @block.call
+    ensure
+      begin
+        enqueue
+      rescue
+        # double fail! the auditor will have to catch this.
+        Rails.logger.error "Failure enqueueing periodic job! #{@name} #{$!.inspect}"
+      end
+    end
+
+    def tag
+      "periodic: #{@name}"
+    end
+    alias display_name tag
+
+    def self.now
+      Time.zone.now
+    end
   end
 end
-end
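
Two behavioural changes fall out of this rewrite: audit_queue now runs inside a transaction and bails out unless it can take a database advisory lock, so only one process audits at a time, and enqueue_args no longer honours singleton: false, so every periodic job is enqueued as a singleton tagged "periodic: <name>" with on_conflict: :patient. For context, a minimal usage sketch of the class-level API shown above (the job name, schedule, and block body are illustrative, not from this gem):

    # Define a periodic job; the block is what Periodic#perform will call.
    Delayed::Periodic.cron("reports:nightly_rollup", "30 4 * * *",
                           priority: Delayed::LOW_PRIORITY) do
      Report.nightly_rollup! # hypothetical application code
    end

    # A deployment can swap a schedule by name; the cron line is validated
    # with Fugit.do_parse_cron before being applied.
    Delayed::Periodic.add_overrides("reports:nightly_rollup" => "0 6 * * *")
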
data/lib/delayed/plugin.rb CHANGED
@@ -1,6 +1,6 @@
 # frozen_string_literal: true
 
-require 'active_support/core_ext/class/attribute'
+require "active_support/core_ext/class/attribute"
 
 module Delayed
   class Plugin
@@ -11,9 +11,7 @@ module Delayed
     end
 
     def self.inject!
-      unless @injected
-        self.callback_block.call(Delayed::Worker.lifecycle) if self.callback_block
-      end
+      callback_block&.call(Delayed::Worker.lifecycle) unless @injected
       @injected = true
     end
 
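
inject! keeps its once-per-process guard (@injected) but collapses the old nil check and conditional into a single safe-navigation call. A hedged sketch of the consumer side, assuming the conventional callbacks class method defined just above this hunk and an around(:perform) hook on Delayed::Worker.lifecycle yielding (worker, job), as in upstream delayed_job; the plugin class itself is hypothetical:

    class JobTimingPlugin < Delayed::Plugin
      callbacks do |lifecycle|
        lifecycle.around(:perform) do |worker, job, &block|
          started = Time.now
          begin
            block.call(worker, job)
          ensure
            Rails.logger.info("delayed job #{job.id} took #{Time.now - started}s")
          end
        end
      end
    end

    # inject! (shown above) runs the stored block against Delayed::Worker.lifecycle
    # exactly once per process.
    JobTimingPlugin.inject!
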
data/lib/delayed/pool.rb CHANGED
@@ -1,222 +1,260 @@
 # frozen_string_literal: true
 
 module Delayed
-class Pool
-  include Delayed::Logging
+  class Pool
+    include Delayed::Logging
 
-  mattr_accessor :on_fork
-  self.on_fork = ->{ }
+    mattr_accessor :on_fork
+    self.on_fork = -> {}
 
-  SIGNALS = %i{INT TERM QUIT}
-  POOL_SLEEP_PERIOD = 5
+    SIGNALS = %i[INT TERM QUIT].freeze
+    POOL_SLEEP_PERIOD = 5
 
-  attr_reader :workers
+    attr_reader :workers
 
-  def initialize(*args)
-    if args.first.is_a?(Hash)
-      @config = args.first
-    else
-      warn "Calling Delayed::Pool.new directly is deprecated. Use `Delayed::CLI.new.run()` instead."
+    def initialize(*args)
+      if args.first.is_a?(Hash)
+        @config = args.first
+      else
+        warn "Calling Delayed::Pool.new directly is deprecated. Use `Delayed::CLI.new.run()` instead."
+      end
+      @workers = {}
+      @signal_queue = []
+      @self_pipe = IO.pipe
     end
-    @workers = {}
-    @signal_queue = []
-    @self_pipe = IO.pipe
-  end
 
-  def run
-    warn "Delayed::Pool#run is deprecated and will be removed. Use `Delayed::CLI.new.run()` instead."
-    Delayed::CLI.new.run()
-  end
+    def run
+      warn "Delayed::Pool#run is deprecated and will be removed. Use `Delayed::CLI.new.run()` instead."
+      Delayed::CLI.new.run
+    end
 
-  def start
-    say "Started job master", :info
-    SIGNALS.each { |sig| trap(sig) { @signal_queue << sig; wake_up } }
-    $0 = procname
-    # fork to handle unlocking (to prevent polluting the parent with worker objects)
-    unlock_pid = fork_with_reconnects do
-      unlock_orphaned_jobs
-    end
-    Process.wait unlock_pid
-
-    spawn_periodic_auditor
-    spawn_all_workers
-    say "Workers spawned"
-    join
-    say "Shutting down"
-    stop
-    reap_all_children
-  rescue Exception => e
-    say "Job master died with error: #{e.inspect}\n#{e.backtrace.join("\n")}", :fatal
-    raise
-  end
+    def start
+      say "Started job master", :info
+      SIGNALS.each do |sig|
+        trap(sig) do
+          @signal_queue << sig
+          wake_up
+        end
+      end
+      $0 = procname
+      # fork to handle unlocking (to prevent polluting the parent with worker objects)
+      unlock_pid = fork_with_reconnects do
+        unlock_orphaned_jobs
+      end
+      Process.wait unlock_pid
+
+      spawn_periodic_auditor
+      spawn_abandoned_job_cleanup
+      spawn_all_workers
+      say "Workers spawned"
+      join
+      say "Shutting down"
+      stop
+      reap_all_children
+    rescue Exception => e # rubocop:disable Lint/RescueException
+      say "Job master died with error: #{e.inspect}\n#{e.backtrace.join("\n")}", :fatal
+      raise
+    end
 
-  protected
+    protected
 
-  def procname
-    "delayed_jobs_pool#{Settings.pool_procname_suffix}"
-  end
+    def procname
+      "delayed_jobs_pool#{Settings.pool_procname_suffix}"
+    end
 
-  def unlock_orphaned_jobs(worker = nil, pid = nil)
-    return if Settings.disable_automatic_orphan_unlocking
+    def unlock_orphaned_jobs(_worker = nil, pid = nil)
+      return if Settings.disable_automatic_orphan_unlocking
 
-    unlocked_jobs = Delayed::Job.unlock_orphaned_jobs(pid)
-    say "Unlocked #{unlocked_jobs} orphaned jobs" if unlocked_jobs > 0
-    ActiveRecord::Base.connection_handler.clear_all_connections! unless Rails.env.test?
-  end
+      unlocked_jobs = Delayed::Job.unlock_orphaned_jobs(pid)
+      say "Unlocked #{unlocked_jobs} orphaned jobs" if unlocked_jobs.positive?
+      ActiveRecord::Base.connection_handler.clear_all_connections! unless Rails.env.test?
+    end
 
-  def spawn_all_workers
-    ActiveRecord::Base.connection_handler.clear_all_connections!
+    def spawn_all_workers
+      ActiveRecord::Base.connection_handler.clear_all_connections!
 
-    if @config[:work_queue] == 'parent_process'
-      @work_queue = WorkQueue::ParentProcess.new
-      spawn_work_queue
-    end
+      if @config[:work_queue] == "parent_process"
+        @work_queue = WorkQueue::ParentProcess.new
+        spawn_work_queue
+      end
 
-    @config[:workers].each do |worker_config|
-      (worker_config[:workers] || 1).times { spawn_worker(worker_config) }
+      @config[:workers].each do |worker_config|
+        (worker_config[:workers] || 1).times { spawn_worker(worker_config) }
+      end
     end
-  end
 
-  def spawn_work_queue
-    parent_pid = Process.pid
-    pid = fork_with_reconnects do
-      $0 = "delayed_jobs_work_queue#{Settings.pool_procname_suffix}"
-      @work_queue.server(parent_pid: parent_pid).run
+    def spawn_work_queue
+      parent_pid = Process.pid
+      pid = fork_with_reconnects do
+        $0 = "delayed_jobs_work_queue#{Settings.pool_procname_suffix}"
+        @work_queue.server(parent_pid: parent_pid).run
+      end
+      workers[pid] = :work_queue
     end
-    workers[pid] = :work_queue
-  end
 
-  def spawn_worker(worker_config)
-    if worker_config[:periodic]
-      return # backwards compat
-    else
+    def spawn_worker(worker_config)
+      return if worker_config[:periodic] # backwards compat
+
       worker_config[:parent_pid] = Process.pid
       worker_config[:work_queue] = @work_queue.client if @work_queue
       worker = Delayed::Worker.new(worker_config)
+
+      pid = fork_with_reconnects do
+        worker.start
+      end
+      workers[pid] = worker
     end
 
-    pid = fork_with_reconnects do
-      worker.start
+    # child processes need to reconnect so they don't accidentally share redis or
+    # db connections with the parent
+    def fork_with_reconnects
+      fork do
+        @self_pipe.each(&:close) # sub-processes don't need to wake us up; keep their FDs clean
+        Pool.on_fork.call
+        Delayed::Job.reconnect!
+        yield
+      end
     end
-    workers[pid] = worker
-  end
 
-  # child processes need to reconnect so they don't accidentally share redis or
-  # db connections with the parent
-  def fork_with_reconnects
-    fork do
-      Pool.on_fork.()
-      Delayed::Job.reconnect!
-      yield
+    def spawn_abandoned_job_cleanup
+      return if Settings.disable_abandoned_job_cleanup
+
+      cleanup_interval_in_minutes = 60
+      @abandoned_cleanup_thread = Thread.new do
+        # every hour (staggered by process)
+        # check for dead jobs and cull them.
+        # Will actually be more often based on the
+        # number of worker nodes in the pool. This will actually
+        # be a max of N times per hour where N is the number of workers,
+        # but they won't overrun each other because the health check
+        # takes an advisory lock internally
+        sleep(rand(cleanup_interval_in_minutes * 60))
+        loop do
+          schedule_abandoned_job_cleanup
+          sleep(cleanup_interval_in_minutes * 60)
+        end
+      end
     end
-  end
 
-  def spawn_periodic_auditor
-    return if Settings.disable_periodic_jobs
+    def schedule_abandoned_job_cleanup
+      pid = fork_with_reconnects do
+        # we want to avoid db connections in the main pool process
+        $0 = "delayed_abandoned_job_cleanup"
+        Delayed::Worker::HealthCheck.reschedule_abandoned_jobs
+      end
+      workers[pid] = :abandoned_job_cleanup
+    end
 
-    @periodic_thread = Thread.new do
-      # schedule the initial audit immediately on startup
-      schedule_periodic_audit
-      # initial sleep is randomized, for some staggering in the audit calls
-      # since job processors are usually all restarted at the same time
-      sleep(rand(15 * 60))
-      loop do
+    def spawn_periodic_auditor
+      return if Settings.disable_periodic_jobs
+
+      @periodic_thread = Thread.new do
+        # schedule the initial audit immediately on startup
         schedule_periodic_audit
-        sleep(15 * 60)
+        # initial sleep is randomized, for some staggering in the audit calls
+        # since job processors are usually all restarted at the same time
+        sleep(rand(15 * 60))
+        loop do
+          schedule_periodic_audit
+          sleep(15 * 60)
+        end
       end
     end
-  end
 
-  def schedule_periodic_audit
-    pid = fork_with_reconnects do
-      # we want to avoid db connections in the main pool process
-      $0 = "delayed_periodic_audit_scheduler"
-      Delayed::Periodic.audit_queue
+    def schedule_periodic_audit
+      pid = fork_with_reconnects do
+        # we want to avoid db connections in the main pool process
+        $0 = "delayed_periodic_audit_scheduler"
+        Delayed::Periodic.audit_queue
+      end
+      workers[pid] = :periodic_audit
     end
-    workers[pid] = :periodic_audit
-  end
 
-  def join
-    loop do
-      maintain_children
-      case sig = @signal_queue.shift
-      when nil
-        pool_sleep
-      when :QUIT
-        break
-      when :TERM, :INT
-        stop(graceful: false) if Settings.kill_workers_on_exit
-        break
-      else
-        logger.warn("Unexpected signal received: #{sig}")
+    def join
+      loop do
+        maintain_children
+        case sig = @signal_queue.shift
+        when nil
+          pool_sleep
+        when :QUIT
+          break
+        when :TERM, :INT
+          stop(graceful: false) if Settings.kill_workers_on_exit
+          break
+        else
+          logger.warn("Unexpected signal received: #{sig}")
+        end
       end
     end
-  end
 
-  def pool_sleep
-    IO.select([@self_pipe[0]], nil, nil, POOL_SLEEP_PERIOD)
-    @self_pipe[0].read_nonblock(11, exception: false)
-  end
+    def pool_sleep
+      @self_pipe[0].wait_readable(POOL_SLEEP_PERIOD)
+      @self_pipe[0].read_nonblock(11, exception: false)
+    end
+
+    def stop(graceful: true, timeout: Settings.slow_exit_timeout)
+      signal_for_children = graceful ? :QUIT : :TERM
+      if Settings.kill_workers_on_exit
+        limit = Time.zone.now + timeout
+        until @workers.empty? || Time.zone.now >= limit
+          signal_all_children(signal_for_children)
+          # Give our children some time to process the signal before checking if
+          # they've exited
+          sleep(0.5)
+          reap_all_children.each { |pid| @workers.delete(pid) }
+        end
 
-  def stop(graceful: true, timeout: Settings.slow_exit_timeout)
-    signal_for_children = graceful ? :QUIT : :TERM
-    if Settings.kill_workers_on_exit
-      limit = Time.now + timeout
-      until @workers.empty? || Time.now >= limit
+        # We really want to give the workers every oportunity to clean up after
+        # themselves before murdering them.
+        stop(graceful: false, timeout: 2) if graceful
+        signal_all_children(:KILL)
+      else
         signal_all_children(signal_for_children)
-        # Give our children some time to process the signal before checking if
-        # they've exited
-        sleep(0.5)
-        reap_all_children.each { |pid| @workers.delete(pid) }
       end
+    end
 
-      # We really want to give the workers every oportunity to clean up after
-      # themselves before murdering them.
-      stop(graceful: false, timeout: 2) if graceful
-      signal_all_children(:KILL)
-    else
-      signal_all_children(signal_for_children)
+    def signal_all_children(signal)
+      workers.each_key { |pid| signal_child(signal, pid) }
     end
-  end
 
-  def signal_all_children(signal)
-    workers.keys.each { |pid| signal_child(signal, pid) }
-  end
+    def signal_child(signal, pid)
+      Process.kill(signal, pid)
+    rescue Erron::ESRCH
+      workers.delete(pid)
+    end
 
-  def signal_child(signal, pid)
-    Process.kill(signal, pid)
-  rescue Erron::ESRCH
-    workers.delete(pid)
-  end
+    # Respawn all children that have exited since we last checked
+    def maintain_children
+      reap_all_children.each do |pid|
+        respawn_child(pid)
+      end
+    end
+
+    # Reaps processes that have exited or just returns if none have exited
+    #
+    # @return Array An array of child pids that have exited
+    def reap_all_children
+      exited_pids = []
+      loop do
+        pid = Process.wait(-1, Process::WNOHANG)
+        break unless pid
 
-  # Respawn all children that have exited since we last checked
-  def maintain_children
-    reap_all_children.each do |pid|
-      respawn_child(pid)
+        exited_pids << pid
+      rescue Errno::ECHILD
+        break
+      end
+      exited_pids
     end
-  end
 
-  # Reaps processes that have exited or just returns if none have exited
-  #
-  # @return Array An array of child pids that have exited
-  def reap_all_children
-    exited_pids = []
-    begin
-      pid = Process.wait(-1, Process::WNOHANG)
-      break unless pid
-      exited_pids << pid
-    rescue Errno::ECHILD
-      break
-    end while true # I hate doing loops this way but it's the only way to make the rescue work
-    exited_pids
-  end
+    def respawn_child(child)
+      return unless workers.include?(child)
 
-  def respawn_child(child)
-    if workers.include?(child)
       worker = workers.delete(child)
       case worker
       when :periodic_audit
         say "ran auditor: #{worker}"
+      when :abandoned_job_cleanup
+        say "ran cleanup: #{worker}"
       when :work_queue
         say "work queue exited, restarting", :info
         spawn_work_queue
@@ -230,10 +268,9 @@ class Pool
         spawn_worker(worker.config)
       end
     end
-  end
 
-  def wake_up
-    @self_pipe[1].write_nonblock('.', exception: false)
+    def wake_up
+      @self_pipe[1].write_nonblock(".", exception: false)
+    end
   end
 end
-end
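
Most of the churn above is re-indentation and quote style, but several real changes stand out: SIGNALS is now frozen, forked children close the parent's self-pipe, stop compares against Time.zone.now, pool_sleep waits on the self-pipe with IO#wait_readable instead of IO.select, and a new abandoned-job cleanup child periodically runs Delayed::Worker::HealthCheck.reschedule_abandoned_jobs (roughly hourly, staggered per process, serialized by an advisory lock inside the health check). The wake-up machinery is the classic self-pipe trick; a stripped-down, standalone illustration of the same pattern (not inst-jobs code):

    require "io/wait"

    reader, writer = IO.pipe
    signal_queue = []

    %i[INT TERM QUIT].each do |sig|
      trap(sig) do
        signal_queue << sig                          # defer real handling to the main loop
        writer.write_nonblock(".", exception: false) # wake the sleeping loop immediately
      end
    end

    loop do
      reader.wait_readable(5)                      # sleep at most 5s, or until a signal arrives
      reader.read_nonblock(11, exception: false)   # drain any wake-up bytes
      break if %i[QUIT TERM INT].include?(signal_queue.shift)
    end
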
data/lib/delayed/server/helpers.rb CHANGED
@@ -8,21 +8,21 @@ module Delayed
       end
 
       def url_path(*path_parts)
-        [path_prefix, path_parts].join('/').squeeze('/')
+        [path_prefix, path_parts].join("/").squeeze("/")
      end
 
       def path_prefix
-        request.env['SCRIPT_NAME']
+        request.env["SCRIPT_NAME"]
       end
 
       def render_javascript_env
         {
           Routes: {
             root: path_prefix,
-            running: url_path('running'),
-            tags: url_path('tags'),
-            jobs: url_path('jobs'),
-            bulkUpdate: url_path('bulk_update'),
+            running: url_path("running"),
+            tags: url_path("tags"),
+            jobs: url_path("jobs"),
+            bulkUpdate: url_path("bulk_update")
           }
         }.to_json
       end
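
Only the quote style changed in this helper, but the url_path behaviour it preserves is worth spelling out: the mount point comes from Rack's SCRIPT_NAME, and join plus squeeze("/") collapses any doubled slashes. A standalone illustration (the prefix value is hypothetical):

    # Array#join flattens the nested path_parts array, then squeeze("/") removes
    # the duplicate slash the join would otherwise leave behind.
    path_prefix = "/delayed_jobs/"
    path_parts  = ["jobs"]

    [path_prefix, path_parts].join("/").squeeze("/")
    # => "/delayed_jobs/jobs"
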