inst-jobs 2.3.1 → 2.4.8

Sign up to get free protection for your applications and to get access to all the features.
Files changed (94) hide show
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
  22. data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
  23. data/exe/inst_jobs +3 -2
  24. data/lib/delayed/backend/active_record.rb +204 -150
  25. data/lib/delayed/backend/base.rb +107 -77
  26. data/lib/delayed/batch.rb +11 -9
  27. data/lib/delayed/cli.rb +98 -84
  28. data/lib/delayed/core_ext/kernel.rb +4 -2
  29. data/lib/delayed/daemon.rb +70 -74
  30. data/lib/delayed/job_tracking.rb +26 -25
  31. data/lib/delayed/lifecycle.rb +27 -24
  32. data/lib/delayed/log_tailer.rb +17 -17
  33. data/lib/delayed/logging.rb +13 -16
  34. data/lib/delayed/message_sending.rb +43 -52
  35. data/lib/delayed/performable_method.rb +6 -8
  36. data/lib/delayed/periodic.rb +72 -65
  37. data/lib/delayed/plugin.rb +2 -4
  38. data/lib/delayed/pool.rb +198 -192
  39. data/lib/delayed/server/helpers.rb +6 -6
  40. data/lib/delayed/server.rb +51 -54
  41. data/lib/delayed/settings.rb +93 -81
  42. data/lib/delayed/testing.rb +21 -22
  43. data/lib/delayed/version.rb +1 -1
  44. data/lib/delayed/work_queue/in_process.rb +21 -17
  45. data/lib/delayed/work_queue/parent_process/client.rb +55 -53
  46. data/lib/delayed/work_queue/parent_process/server.rb +219 -208
  47. data/lib/delayed/work_queue/parent_process.rb +52 -53
  48. data/lib/delayed/worker/consul_health_check.rb +21 -19
  49. data/lib/delayed/worker/health_check.rb +29 -22
  50. data/lib/delayed/worker/null_health_check.rb +3 -1
  51. data/lib/delayed/worker/process_helper.rb +8 -9
  52. data/lib/delayed/worker.rb +271 -261
  53. data/lib/delayed/yaml_extensions.rb +12 -10
  54. data/lib/delayed_job.rb +37 -38
  55. data/lib/inst-jobs.rb +1 -1
  56. data/spec/active_record_job_spec.rb +129 -136
  57. data/spec/delayed/cli_spec.rb +7 -7
  58. data/spec/delayed/daemon_spec.rb +8 -8
  59. data/spec/delayed/message_sending_spec.rb +16 -9
  60. data/spec/delayed/periodic_spec.rb +13 -12
  61. data/spec/delayed/server_spec.rb +38 -38
  62. data/spec/delayed/settings_spec.rb +26 -25
  63. data/spec/delayed/work_queue/in_process_spec.rb +7 -7
  64. data/spec/delayed/work_queue/parent_process/client_spec.rb +16 -12
  65. data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
  66. data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
  67. data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
  68. data/spec/delayed/worker/health_check_spec.rb +60 -52
  69. data/spec/delayed/worker_spec.rb +28 -25
  70. data/spec/sample_jobs.rb +45 -15
  71. data/spec/shared/delayed_batch.rb +74 -67
  72. data/spec/shared/delayed_method.rb +143 -102
  73. data/spec/shared/performable_method.rb +39 -38
  74. data/spec/shared/shared_backend.rb +550 -437
  75. data/spec/shared/testing.rb +14 -14
  76. data/spec/shared/worker.rb +155 -147
  77. data/spec/shared_jobs_specs.rb +13 -13
  78. data/spec/spec_helper.rb +46 -41
  79. metadata +79 -53
  80. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  81. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  82. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  83. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  84. data/lib/delayed/backend/redis/find_available.lua +0 -3
  85. data/lib/delayed/backend/redis/functions.rb +0 -59
  86. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  87. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  88. data/lib/delayed/backend/redis/job.rb +0 -528
  89. data/lib/delayed/backend/redis/set_running.lua +0 -5
  90. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  91. data/spec/gemfiles/52.gemfile +0 -7
  92. data/spec/gemfiles/60.gemfile +0 -7
  93. data/spec/gemfiles/61.gemfile +0 -7
  94. data/spec/redis_job_spec.rb +0 -148
@@ -1,85 +1,92 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require 'fugit'
3
+ require "fugit"
4
4
 
5
5
  module Delayed
6
- class Periodic
7
- attr_reader :name, :cron
6
+ class Periodic
7
+ attr_reader :name, :cron
8
8
 
9
- def encode_with(coder)
10
- coder.scalar("!ruby/Delayed::Periodic", @name)
11
- end
9
+ def encode_with(coder)
10
+ coder.scalar("!ruby/Delayed::Periodic", @name)
11
+ end
12
12
 
13
- cattr_accessor :scheduled, :overrides
14
- self.scheduled = {}
15
- self.overrides = {}
13
+ cattr_accessor :scheduled, :overrides
14
+ self.scheduled = {}
15
+ self.overrides = {}
16
16
 
17
- def self.add_overrides(overrides)
18
- overrides.each do |name, cron_line|
19
- # throws error if the line is malformed
20
- Fugit.do_parse_cron(cron_line)
17
+ def self.add_overrides(overrides)
18
+ overrides.each do |_name, cron_line|
19
+ # throws error if the line is malformed
20
+ Fugit.do_parse_cron(cron_line)
21
+ end
22
+ self.overrides.merge!(overrides)
21
23
  end
22
- self.overrides.merge!(overrides)
23
- end
24
24
 
25
- def self.cron(job_name, cron_line, job_args = {}, &block)
26
- raise ArgumentError, "job #{job_name} already scheduled!" if self.scheduled[job_name]
27
- cron_line = overrides[job_name] || cron_line
28
- self.scheduled[job_name] = self.new(job_name, cron_line, job_args, block)
29
- end
25
+ def self.cron(job_name, cron_line, job_args = {}, &block)
26
+ raise ArgumentError, "job #{job_name} already scheduled!" if scheduled[job_name]
30
27
 
31
- def self.audit_queue
32
- # we used to queue up a job in a strand here, and perform the audit inside that job
33
- # however, now that we're using singletons for scheduling periodic jobs,
34
- # it's fine to just do the audit in-line here without risk of creating duplicates
35
- perform_audit!
36
- end
28
+ cron_line = overrides[job_name] || cron_line
29
+ scheduled[job_name] = new(job_name, cron_line, job_args, block)
30
+ end
37
31
 
38
- # make sure all periodic jobs are scheduled for their next run in the job queue
39
- # this auditing should run on the strand
40
- def self.perform_audit!
41
- self.scheduled.each { |name, periodic| periodic.enqueue }
42
- end
32
+ def self.audit_queue
33
+ # we used to queue up a job in a strand here, and perform the audit inside that job
34
+ # however, now that we're using singletons for scheduling periodic jobs,
35
+ # it's fine to just do the audit in-line here without risk of creating duplicates
36
+ Delayed::Job.transaction do
37
+ # for db performance reasons, we only need one process doing this at a time
38
+ # so if we can't get an advisory lock, just abort. we'll try again soon
39
+ return unless Delayed::Job.attempt_advisory_lock("Delayed::Periodic#audit_queue")
43
40
 
44
- def initialize(name, cron_line, job_args, block)
45
- @name = name
46
- @cron = Fugit.do_parse_cron(cron_line)
47
- @job_args = { :priority => Delayed::LOW_PRIORITY }.merge(job_args.symbolize_keys)
48
- @block = block
49
- end
41
+ perform_audit!
42
+ end
43
+ end
50
44
 
51
- def enqueue
52
- Delayed::Job.enqueue(self, **enqueue_args)
53
- end
45
+ # make sure all periodic jobs are scheduled for their next run in the job queue
46
+ # this auditing should run on the strand
47
+ def self.perform_audit!
48
+ scheduled.each { |_name, periodic| periodic.enqueue }
49
+ end
54
50
 
55
- def enqueue_args
56
- inferred_args = {
57
- max_attempts: 1,
58
- run_at: @cron.next_time(Delayed::Periodic.now).utc.to_time,
59
- singleton: tag,
60
- on_conflict: :patient
61
- }
62
- @job_args.merge(inferred_args)
63
- end
51
+ def initialize(name, cron_line, job_args, block)
52
+ @name = name
53
+ @cron = Fugit.do_parse_cron(cron_line)
54
+ @job_args = { priority: Delayed::LOW_PRIORITY }.merge(job_args.symbolize_keys)
55
+ @block = block
56
+ end
64
57
 
65
- def perform
66
- @block.call()
67
- ensure
68
- begin
69
- enqueue
70
- rescue
71
- # double fail! the auditor will have to catch this.
72
- Rails.logger.error "Failure enqueueing periodic job! #{@name} #{$!.inspect}"
58
+ def enqueue
59
+ Delayed::Job.enqueue(self, **enqueue_args)
73
60
  end
74
- end
75
61
 
76
- def tag
77
- "periodic: #{@name}"
78
- end
79
- alias_method :display_name, :tag
62
+ def enqueue_args
63
+ inferred_args = {
64
+ max_attempts: 1,
65
+ run_at: @cron.next_time(Delayed::Periodic.now).utc.to_time,
66
+ singleton: tag,
67
+ on_conflict: :patient
68
+ }
69
+ @job_args.merge(inferred_args)
70
+ end
80
71
 
81
- def self.now
82
- Time.zone.now
72
+ def perform
73
+ @block.call
74
+ ensure
75
+ begin
76
+ enqueue
77
+ rescue
78
+ # double fail! the auditor will have to catch this.
79
+ Rails.logger.error "Failure enqueueing periodic job! #{@name} #{$!.inspect}"
80
+ end
81
+ end
82
+
83
+ def tag
84
+ "periodic: #{@name}"
85
+ end
86
+ alias display_name tag
87
+
88
+ def self.now
89
+ Time.zone.now
90
+ end
83
91
  end
84
92
  end
85
- end
@@ -1,6 +1,6 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require 'active_support/core_ext/class/attribute'
3
+ require "active_support/core_ext/class/attribute"
4
4
 
5
5
  module Delayed
6
6
  class Plugin
@@ -11,9 +11,7 @@ module Delayed
11
11
  end
12
12
 
13
13
  def self.inject!
14
- unless @injected
15
- self.callback_block.call(Delayed::Worker.lifecycle) if self.callback_block
16
- end
14
+ callback_block&.call(Delayed::Worker.lifecycle) unless @injected
17
15
  @injected = true
18
16
  end
19
17
 
data/lib/delayed/pool.rb CHANGED
@@ -1,247 +1,254 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Delayed
4
- class Pool
5
- include Delayed::Logging
4
+ class Pool
5
+ include Delayed::Logging
6
6
 
7
- mattr_accessor :on_fork
8
- self.on_fork = ->{ }
7
+ mattr_accessor :on_fork
8
+ self.on_fork = -> {}
9
9
 
10
- SIGNALS = %i{INT TERM QUIT}
11
- POOL_SLEEP_PERIOD = 5
10
+ SIGNALS = %i[INT TERM QUIT].freeze
11
+ POOL_SLEEP_PERIOD = 5
12
12
 
13
- attr_reader :workers
13
+ attr_reader :workers
14
14
 
15
- def initialize(*args)
16
- if args.first.is_a?(Hash)
17
- @config = args.first
18
- else
19
- warn "Calling Delayed::Pool.new directly is deprecated. Use `Delayed::CLI.new.run()` instead."
15
+ def initialize(*args)
16
+ if args.first.is_a?(Hash)
17
+ @config = args.first
18
+ else
19
+ warn "Calling Delayed::Pool.new directly is deprecated. Use `Delayed::CLI.new.run()` instead."
20
+ end
21
+ @workers = {}
22
+ @signal_queue = []
23
+ @self_pipe = IO.pipe
20
24
  end
21
- @workers = {}
22
- @signal_queue = []
23
- @self_pipe = IO.pipe
24
- end
25
25
 
26
- def run
27
- warn "Delayed::Pool#run is deprecated and will be removed. Use `Delayed::CLI.new.run()` instead."
28
- Delayed::CLI.new.run()
29
- end
26
+ def run
27
+ warn "Delayed::Pool#run is deprecated and will be removed. Use `Delayed::CLI.new.run()` instead."
28
+ Delayed::CLI.new.run
29
+ end
30
30
 
31
- def start
32
- say "Started job master", :info
33
- SIGNALS.each { |sig| trap(sig) { @signal_queue << sig; wake_up } }
34
- $0 = procname
35
- # fork to handle unlocking (to prevent polluting the parent with worker objects)
36
- unlock_pid = fork_with_reconnects do
37
- unlock_orphaned_jobs
31
+ def start
32
+ say "Started job master", :info
33
+ SIGNALS.each do |sig|
34
+ trap(sig) do
35
+ @signal_queue << sig
36
+ wake_up
37
+ end
38
+ end
39
+ $0 = procname
40
+ # fork to handle unlocking (to prevent polluting the parent with worker objects)
41
+ unlock_pid = fork_with_reconnects do
42
+ unlock_orphaned_jobs
43
+ end
44
+ Process.wait unlock_pid
45
+
46
+ spawn_periodic_auditor
47
+ spawn_abandoned_job_cleanup
48
+ spawn_all_workers
49
+ say "Workers spawned"
50
+ join
51
+ say "Shutting down"
52
+ stop
53
+ reap_all_children
54
+ rescue Exception => e # rubocop:disable Lint/RescueException
55
+ say "Job master died with error: #{e.inspect}\n#{e.backtrace.join("\n")}", :fatal
56
+ raise
38
57
  end
39
- Process.wait unlock_pid
40
-
41
- spawn_periodic_auditor
42
- spawn_abandoned_job_cleanup
43
- spawn_all_workers
44
- say "Workers spawned"
45
- join
46
- say "Shutting down"
47
- stop
48
- reap_all_children
49
- rescue Exception => e
50
- say "Job master died with error: #{e.inspect}\n#{e.backtrace.join("\n")}", :fatal
51
- raise
52
- end
53
58
 
54
- protected
59
+ protected
55
60
 
56
- def procname
57
- "delayed_jobs_pool#{Settings.pool_procname_suffix}"
58
- end
61
+ def procname
62
+ "delayed_jobs_pool#{Settings.pool_procname_suffix}"
63
+ end
59
64
 
60
- def unlock_orphaned_jobs(worker = nil, pid = nil)
61
- return if Settings.disable_automatic_orphan_unlocking
65
+ def unlock_orphaned_jobs(_worker = nil, pid = nil)
66
+ return if Settings.disable_automatic_orphan_unlocking
62
67
 
63
- unlocked_jobs = Delayed::Job.unlock_orphaned_jobs(pid)
64
- say "Unlocked #{unlocked_jobs} orphaned jobs" if unlocked_jobs > 0
65
- ActiveRecord::Base.connection_handler.clear_all_connections! unless Rails.env.test?
66
- end
68
+ unlocked_jobs = Delayed::Job.unlock_orphaned_jobs(pid)
69
+ say "Unlocked #{unlocked_jobs} orphaned jobs" if unlocked_jobs.positive?
70
+ ActiveRecord::Base.connection_handler.clear_all_connections! unless Rails.env.test?
71
+ end
67
72
 
68
- def spawn_all_workers
69
- ActiveRecord::Base.connection_handler.clear_all_connections!
73
+ def spawn_all_workers
74
+ ActiveRecord::Base.connection_handler.clear_all_connections!
70
75
 
71
- if @config[:work_queue] == 'parent_process'
72
- @work_queue = WorkQueue::ParentProcess.new
73
- spawn_work_queue
74
- end
76
+ if @config[:work_queue] == "parent_process"
77
+ @work_queue = WorkQueue::ParentProcess.new
78
+ spawn_work_queue
79
+ end
75
80
 
76
- @config[:workers].each do |worker_config|
77
- (worker_config[:workers] || 1).times { spawn_worker(worker_config) }
81
+ @config[:workers].each do |worker_config|
82
+ (worker_config[:workers] || 1).times { spawn_worker(worker_config) }
83
+ end
78
84
  end
79
- end
80
85
 
81
- def spawn_work_queue
82
- parent_pid = Process.pid
83
- pid = fork_with_reconnects do
84
- $0 = "delayed_jobs_work_queue#{Settings.pool_procname_suffix}"
85
- @work_queue.server(parent_pid: parent_pid).run
86
+ def spawn_work_queue
87
+ parent_pid = Process.pid
88
+ pid = fork_with_reconnects do
89
+ $0 = "delayed_jobs_work_queue#{Settings.pool_procname_suffix}"
90
+ @work_queue.server(parent_pid: parent_pid).run
91
+ end
92
+ workers[pid] = :work_queue
86
93
  end
87
- workers[pid] = :work_queue
88
- end
89
94
 
90
- def spawn_worker(worker_config)
91
- if worker_config[:periodic]
92
- return # backwards compat
93
- else
95
+ def spawn_worker(worker_config)
96
+ return if worker_config[:periodic] # backwards compat
97
+
94
98
  worker_config[:parent_pid] = Process.pid
95
99
  worker_config[:work_queue] = @work_queue.client if @work_queue
96
100
  worker = Delayed::Worker.new(worker_config)
97
- end
98
101
 
99
- pid = fork_with_reconnects do
100
- worker.start
102
+ pid = fork_with_reconnects do
103
+ worker.start
104
+ end
105
+ workers[pid] = worker
101
106
  end
102
- workers[pid] = worker
103
- end
104
107
 
105
- # child processes need to reconnect so they don't accidentally share redis or
106
- # db connections with the parent
107
- def fork_with_reconnects
108
- fork do
109
- Pool.on_fork.()
110
- Delayed::Job.reconnect!
111
- yield
108
+ # child processes need to reconnect so they don't accidentally share redis or
109
+ # db connections with the parent
110
+ def fork_with_reconnects
111
+ fork do
112
+ @self_pipe.each(&:close) # sub-processes don't need to wake us up; keep their FDs clean
113
+ Pool.on_fork.call
114
+ Delayed::Job.reconnect!
115
+ yield
116
+ end
112
117
  end
113
- end
114
118
 
115
- def spawn_abandoned_job_cleanup
116
- return if Settings.disable_abandoned_job_cleanup
117
- cleanup_interval_in_minutes = 60
118
- @abandoned_cleanup_thread = Thread.new do
119
- # every hour (staggered by process)
120
- # check for dead jobs and cull them.
121
- # Will actually be more often based on the
122
- # number of worker nodes in the pool. This will actually
123
- # be a max of N times per hour where N is the number of workers,
124
- # but they won't overrun each other because the health check
125
- # takes an advisory lock internally
126
- sleep(rand(cleanup_interval_in_minutes * 60))
127
- loop do
128
- schedule_abandoned_job_cleanup
129
- sleep(cleanup_interval_in_minutes * 60)
119
+ def spawn_abandoned_job_cleanup
120
+ return if Settings.disable_abandoned_job_cleanup
121
+
122
+ cleanup_interval_in_minutes = 60
123
+ @abandoned_cleanup_thread = Thread.new do
124
+ # every hour (staggered by process)
125
+ # check for dead jobs and cull them.
126
+ # Will actually be more often based on the
127
+ # number of worker nodes in the pool. This will actually
128
+ # be a max of N times per hour where N is the number of workers,
129
+ # but they won't overrun each other because the health check
130
+ # takes an advisory lock internally
131
+ sleep(rand(cleanup_interval_in_minutes * 60))
132
+ loop do
133
+ schedule_abandoned_job_cleanup
134
+ sleep(cleanup_interval_in_minutes * 60)
135
+ end
130
136
  end
131
137
  end
132
- end
133
138
 
134
- def schedule_abandoned_job_cleanup
135
- pid = fork_with_reconnects do
136
- # we want to avoid db connections in the main pool process
137
- $0 = "delayed_abandoned_job_cleanup"
138
- Delayed::Worker::HealthCheck.reschedule_abandoned_jobs
139
+ def schedule_abandoned_job_cleanup
140
+ pid = fork_with_reconnects do
141
+ # we want to avoid db connections in the main pool process
142
+ $0 = "delayed_abandoned_job_cleanup"
143
+ Delayed::Worker::HealthCheck.reschedule_abandoned_jobs
144
+ end
145
+ workers[pid] = :abandoned_job_cleanup
139
146
  end
140
- workers[pid] = :abandoned_job_cleanup
141
- end
142
147
 
143
- def spawn_periodic_auditor
144
- return if Settings.disable_periodic_jobs
148
+ def spawn_periodic_auditor
149
+ return if Settings.disable_periodic_jobs
145
150
 
146
- @periodic_thread = Thread.new do
147
- # schedule the initial audit immediately on startup
148
- schedule_periodic_audit
149
- # initial sleep is randomized, for some staggering in the audit calls
150
- # since job processors are usually all restarted at the same time
151
- sleep(rand(15 * 60))
152
- loop do
151
+ @periodic_thread = Thread.new do
152
+ # schedule the initial audit immediately on startup
153
153
  schedule_periodic_audit
154
- sleep(15 * 60)
154
+ # initial sleep is randomized, for some staggering in the audit calls
155
+ # since job processors are usually all restarted at the same time
156
+ sleep(rand(15 * 60))
157
+ loop do
158
+ schedule_periodic_audit
159
+ sleep(15 * 60)
160
+ end
155
161
  end
156
162
  end
157
- end
158
163
 
159
- def schedule_periodic_audit
160
- pid = fork_with_reconnects do
161
- # we want to avoid db connections in the main pool process
162
- $0 = "delayed_periodic_audit_scheduler"
163
- Delayed::Periodic.audit_queue
164
+ def schedule_periodic_audit
165
+ pid = fork_with_reconnects do
166
+ # we want to avoid db connections in the main pool process
167
+ $0 = "delayed_periodic_audit_scheduler"
168
+ Delayed::Periodic.audit_queue
169
+ end
170
+ workers[pid] = :periodic_audit
164
171
  end
165
- workers[pid] = :periodic_audit
166
- end
167
172
 
168
- def join
169
- loop do
170
- maintain_children
171
- case sig = @signal_queue.shift
172
- when nil
173
- pool_sleep
174
- when :QUIT
175
- break
176
- when :TERM, :INT
177
- stop(graceful: false) if Settings.kill_workers_on_exit
178
- break
179
- else
180
- logger.warn("Unexpected signal received: #{sig}")
173
+ def join
174
+ loop do
175
+ maintain_children
176
+ case sig = @signal_queue.shift
177
+ when nil
178
+ pool_sleep
179
+ when :QUIT
180
+ break
181
+ when :TERM, :INT
182
+ stop(graceful: false) if Settings.kill_workers_on_exit
183
+ break
184
+ else
185
+ logger.warn("Unexpected signal received: #{sig}")
186
+ end
181
187
  end
182
188
  end
183
- end
184
189
 
185
- def pool_sleep
186
- IO.select([@self_pipe[0]], nil, nil, POOL_SLEEP_PERIOD)
187
- @self_pipe[0].read_nonblock(11, exception: false)
188
- end
190
+ def pool_sleep
191
+ @self_pipe[0].wait_readable(POOL_SLEEP_PERIOD)
192
+ @self_pipe[0].read_nonblock(11, exception: false)
193
+ end
189
194
 
190
- def stop(graceful: true, timeout: Settings.slow_exit_timeout)
191
- signal_for_children = graceful ? :QUIT : :TERM
192
- if Settings.kill_workers_on_exit
193
- limit = Time.now + timeout
194
- until @workers.empty? || Time.now >= limit
195
+ def stop(graceful: true, timeout: Settings.slow_exit_timeout)
196
+ signal_for_children = graceful ? :QUIT : :TERM
197
+ if Settings.kill_workers_on_exit
198
+ limit = Time.zone.now + timeout
199
+ until @workers.empty? || Time.zone.now >= limit
200
+ signal_all_children(signal_for_children)
201
+ # Give our children some time to process the signal before checking if
202
+ # they've exited
203
+ sleep(0.5)
204
+ reap_all_children.each { |pid| @workers.delete(pid) }
205
+ end
206
+
207
+ # We really want to give the workers every oportunity to clean up after
208
+ # themselves before murdering them.
209
+ stop(graceful: false, timeout: 2) if graceful
210
+ signal_all_children(:KILL)
211
+ else
195
212
  signal_all_children(signal_for_children)
196
- # Give our children some time to process the signal before checking if
197
- # they've exited
198
- sleep(0.5)
199
- reap_all_children.each { |pid| @workers.delete(pid) }
200
213
  end
214
+ end
201
215
 
202
- # We really want to give the workers every oportunity to clean up after
203
- # themselves before murdering them.
204
- stop(graceful: false, timeout: 2) if graceful
205
- signal_all_children(:KILL)
206
- else
207
- signal_all_children(signal_for_children)
216
+ def signal_all_children(signal)
217
+ workers.each_key { |pid| signal_child(signal, pid) }
208
218
  end
209
- end
210
219
 
211
- def signal_all_children(signal)
212
- workers.keys.each { |pid| signal_child(signal, pid) }
213
- end
220
+ def signal_child(signal, pid)
221
+ Process.kill(signal, pid)
222
+ rescue Errno::ESRCH
223
+ workers.delete(pid)
224
+ end
214
225
 
215
- def signal_child(signal, pid)
216
- Process.kill(signal, pid)
217
- rescue Errno::ESRCH
218
- workers.delete(pid)
219
- end
226
+ # Respawn all children that have exited since we last checked
227
+ def maintain_children
228
+ reap_all_children.each do |pid|
229
+ respawn_child(pid)
230
+ end
231
+ end
232
+
233
+ # Reaps processes that have exited or just returns if none have exited
234
+ #
235
+ # @return Array An array of child pids that have exited
236
+ def reap_all_children
237
+ exited_pids = []
238
+ loop do
239
+ pid = Process.wait(-1, Process::WNOHANG)
240
+ break unless pid
220
241
 
221
- # Respawn all children that have exited since we last checked
222
- def maintain_children
223
- reap_all_children.each do |pid|
224
- respawn_child(pid)
242
+ exited_pids << pid
243
+ rescue Errno::ECHILD
244
+ break
245
+ end
246
+ exited_pids
225
247
  end
226
- end
227
248
 
228
- # Reaps processes that have exited or just returns if none have exited
229
- #
230
- # @return Array An array of child pids that have exited
231
- def reap_all_children
232
- exited_pids = []
233
- begin
234
- pid = Process.wait(-1, Process::WNOHANG)
235
- break unless pid
236
- exited_pids << pid
237
- rescue Errno::ECHILD
238
- break
239
- end while true # I hate doing loops this way but it's the only way to make the rescue work
240
- exited_pids
241
- end
249
+ def respawn_child(child)
250
+ return unless workers.include?(child)
242
251
 
243
- def respawn_child(child)
244
- if workers.include?(child)
245
252
  worker = workers.delete(child)
246
253
  case worker
247
254
  when :periodic_audit
@@ -261,10 +268,9 @@ class Pool
261
268
  spawn_worker(worker.config)
262
269
  end
263
270
  end
264
- end
265
271
 
266
- def wake_up
267
- @self_pipe[1].write_nonblock('.', exception: false)
272
+ def wake_up
273
+ @self_pipe[1].write_nonblock(".", exception: false)
274
+ end
268
275
  end
269
276
  end
270
- end
@@ -8,21 +8,21 @@ module Delayed
8
8
  end
9
9
 
10
10
  def url_path(*path_parts)
11
- [path_prefix, path_parts].join('/').squeeze('/')
11
+ [path_prefix, path_parts].join("/").squeeze("/")
12
12
  end
13
13
 
14
14
  def path_prefix
15
- request.env['SCRIPT_NAME']
15
+ request.env["SCRIPT_NAME"]
16
16
  end
17
17
 
18
18
  def render_javascript_env
19
19
  {
20
20
  Routes: {
21
21
  root: path_prefix,
22
- running: url_path('running'),
23
- tags: url_path('tags'),
24
- jobs: url_path('jobs'),
25
- bulkUpdate: url_path('bulk_update'),
22
+ running: url_path("running"),
23
+ tags: url_path("tags"),
24
+ jobs: url_path("jobs"),
25
+ bulkUpdate: url_path("bulk_update")
26
26
  }
27
27
  }.to_json
28
28
  end