inst-jobs 2.0.0 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
- data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
- data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
- data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
- data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
- data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
- data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
- data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
- data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
- data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
- data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
- data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
- data/exe/inst_jobs +3 -2
- data/lib/delayed/backend/active_record.rb +211 -168
- data/lib/delayed/backend/base.rb +110 -72
- data/lib/delayed/batch.rb +11 -9
- data/lib/delayed/cli.rb +98 -84
- data/lib/delayed/core_ext/kernel.rb +4 -2
- data/lib/delayed/daemon.rb +70 -74
- data/lib/delayed/job_tracking.rb +26 -25
- data/lib/delayed/lifecycle.rb +27 -23
- data/lib/delayed/log_tailer.rb +17 -17
- data/lib/delayed/logging.rb +13 -16
- data/lib/delayed/message_sending.rb +43 -52
- data/lib/delayed/performable_method.rb +6 -8
- data/lib/delayed/periodic.rb +72 -68
- data/lib/delayed/plugin.rb +2 -4
- data/lib/delayed/pool.rb +205 -168
- data/lib/delayed/server/helpers.rb +6 -6
- data/lib/delayed/server.rb +51 -54
- data/lib/delayed/settings.rb +94 -81
- data/lib/delayed/testing.rb +21 -22
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/in_process.rb +21 -17
- data/lib/delayed/work_queue/parent_process/client.rb +55 -53
- data/lib/delayed/work_queue/parent_process/server.rb +245 -207
- data/lib/delayed/work_queue/parent_process.rb +52 -53
- data/lib/delayed/worker/consul_health_check.rb +32 -33
- data/lib/delayed/worker/health_check.rb +34 -26
- data/lib/delayed/worker/null_health_check.rb +3 -1
- data/lib/delayed/worker/process_helper.rb +8 -9
- data/lib/delayed/worker.rb +272 -241
- data/lib/delayed/yaml_extensions.rb +12 -10
- data/lib/delayed_job.rb +37 -37
- data/lib/inst-jobs.rb +1 -1
- data/spec/active_record_job_spec.rb +143 -139
- data/spec/delayed/cli_spec.rb +7 -7
- data/spec/delayed/daemon_spec.rb +10 -9
- data/spec/delayed/message_sending_spec.rb +16 -9
- data/spec/delayed/periodic_spec.rb +14 -21
- data/spec/delayed/server_spec.rb +38 -38
- data/spec/delayed/settings_spec.rb +26 -25
- data/spec/delayed/work_queue/in_process_spec.rb +7 -8
- data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
- data/spec/delayed/work_queue/parent_process/server_spec.rb +117 -41
- data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
- data/spec/delayed/worker/consul_health_check_spec.rb +37 -50
- data/spec/delayed/worker/health_check_spec.rb +60 -52
- data/spec/delayed/worker_spec.rb +44 -21
- data/spec/sample_jobs.rb +45 -15
- data/spec/shared/delayed_batch.rb +74 -67
- data/spec/shared/delayed_method.rb +143 -102
- data/spec/shared/performable_method.rb +39 -38
- data/spec/shared/shared_backend.rb +550 -437
- data/spec/shared/testing.rb +14 -14
- data/spec/shared/worker.rb +156 -148
- data/spec/shared_jobs_specs.rb +13 -13
- data/spec/spec_helper.rb +53 -55
- metadata +148 -82
- data/lib/delayed/backend/redis/bulk_update.lua +0 -50
- data/lib/delayed/backend/redis/destroy_job.lua +0 -2
- data/lib/delayed/backend/redis/enqueue.lua +0 -29
- data/lib/delayed/backend/redis/fail_job.lua +0 -5
- data/lib/delayed/backend/redis/find_available.lua +0 -3
- data/lib/delayed/backend/redis/functions.rb +0 -59
- data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
- data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
- data/lib/delayed/backend/redis/job.rb +0 -535
- data/lib/delayed/backend/redis/set_running.lua +0 -5
- data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
- data/spec/gemfiles/42.gemfile +0 -7
- data/spec/gemfiles/50.gemfile +0 -7
- data/spec/gemfiles/51.gemfile +0 -7
- data/spec/gemfiles/52.gemfile +0 -7
- data/spec/gemfiles/60.gemfile +0 -7
- data/spec/redis_job_spec.rb +0 -148
data/lib/delayed/periodic.rb
CHANGED
@@ -1,88 +1,92 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
2
|
|
3
|
-
require
|
3
|
+
require "fugit"
|
4
4
|
|
5
5
|
module Delayed
|
6
|
-
class Periodic
|
7
|
-
|
6
|
+
class Periodic
|
7
|
+
attr_reader :name, :cron
|
8
8
|
|
9
|
-
|
10
|
-
|
11
|
-
|
9
|
+
def encode_with(coder)
|
10
|
+
coder.scalar("!ruby/Delayed::Periodic", @name)
|
11
|
+
end
|
12
12
|
|
13
|
-
|
14
|
-
|
15
|
-
|
13
|
+
cattr_accessor :scheduled, :overrides
|
14
|
+
self.scheduled = {}
|
15
|
+
self.overrides = {}
|
16
16
|
|
17
|
-
|
18
|
-
|
19
|
-
|
20
|
-
|
17
|
+
def self.add_overrides(overrides)
|
18
|
+
overrides.each do |_name, cron_line|
|
19
|
+
# throws error if the line is malformed
|
20
|
+
Fugit.do_parse_cron(cron_line)
|
21
|
+
end
|
22
|
+
self.overrides.merge!(overrides)
|
21
23
|
end
|
22
|
-
self.overrides.merge!(overrides)
|
23
|
-
end
|
24
24
|
|
25
|
-
|
26
|
-
|
27
|
-
cron_line = overrides[job_name] || cron_line
|
28
|
-
self.scheduled[job_name] = self.new(job_name, cron_line, job_args, block)
|
29
|
-
end
|
25
|
+
def self.cron(job_name, cron_line, job_args = {}, &block)
|
26
|
+
raise ArgumentError, "job #{job_name} already scheduled!" if scheduled[job_name]
|
30
27
|
|
31
|
-
|
32
|
-
|
33
|
-
|
34
|
-
# it's fine to just do the audit in-line here without risk of creating duplicates
|
35
|
-
perform_audit!
|
36
|
-
end
|
28
|
+
cron_line = overrides[job_name] || cron_line
|
29
|
+
scheduled[job_name] = new(job_name, cron_line, job_args, block)
|
30
|
+
end
|
37
31
|
|
38
|
-
|
39
|
-
|
40
|
-
|
41
|
-
|
42
|
-
|
32
|
+
def self.audit_queue
|
33
|
+
# we used to queue up a job in a strand here, and perform the audit inside that job
|
34
|
+
# however, now that we're using singletons for scheduling periodic jobs,
|
35
|
+
# it's fine to just do the audit in-line here without risk of creating duplicates
|
36
|
+
Delayed::Job.transaction do
|
37
|
+
# for db performance reasons, we only need one process doing this at a time
|
38
|
+
# so if we can't get an advisory lock, just abort. we'll try again soon
|
39
|
+
return unless Delayed::Job.attempt_advisory_lock("Delayed::Periodic#audit_queue")
|
43
40
|
|
44
|
-
|
45
|
-
|
46
|
-
|
47
|
-
@job_args = { :priority => Delayed::LOW_PRIORITY }.merge(job_args.symbolize_keys)
|
48
|
-
@block = block
|
49
|
-
end
|
41
|
+
perform_audit!
|
42
|
+
end
|
43
|
+
end
|
50
44
|
|
51
|
-
|
52
|
-
|
53
|
-
|
45
|
+
# make sure all periodic jobs are scheduled for their next run in the job queue
|
46
|
+
# this auditing should run on the strand
|
47
|
+
def self.perform_audit!
|
48
|
+
scheduled.each { |_name, periodic| periodic.enqueue }
|
49
|
+
end
|
54
50
|
|
55
|
-
|
56
|
-
|
57
|
-
|
58
|
-
|
59
|
-
|
60
|
-
|
61
|
-
# which means the consuming code really does not want this job to be
|
62
|
-
# a singleton at all.
|
63
|
-
on_conflict: :patient
|
64
|
-
}
|
65
|
-
@job_args.merge(inferred_args)
|
66
|
-
end
|
51
|
+
def initialize(name, cron_line, job_args, block)
|
52
|
+
@name = name
|
53
|
+
@cron = Fugit.do_parse_cron(cron_line)
|
54
|
+
@job_args = { priority: Delayed::LOW_PRIORITY }.merge(job_args.symbolize_keys)
|
55
|
+
@block = block
|
56
|
+
end
|
67
57
|
|
68
|
-
|
69
|
-
|
70
|
-
ensure
|
71
|
-
begin
|
72
|
-
enqueue
|
73
|
-
rescue
|
74
|
-
# double fail! the auditor will have to catch this.
|
75
|
-
Rails.logger.error "Failure enqueueing periodic job! #{@name} #{$!.inspect}"
|
58
|
+
def enqueue
|
59
|
+
Delayed::Job.enqueue(self, **enqueue_args)
|
76
60
|
end
|
77
|
-
end
|
78
61
|
|
79
|
-
|
80
|
-
|
81
|
-
|
82
|
-
|
62
|
+
def enqueue_args
|
63
|
+
inferred_args = {
|
64
|
+
max_attempts: 1,
|
65
|
+
run_at: @cron.next_time(Delayed::Periodic.now).utc.to_time,
|
66
|
+
singleton: tag,
|
67
|
+
on_conflict: :patient
|
68
|
+
}
|
69
|
+
@job_args.merge(inferred_args)
|
70
|
+
end
|
83
71
|
|
84
|
-
|
85
|
-
|
72
|
+
def perform
|
73
|
+
@block.call
|
74
|
+
ensure
|
75
|
+
begin
|
76
|
+
enqueue
|
77
|
+
rescue
|
78
|
+
# double fail! the auditor will have to catch this.
|
79
|
+
Rails.logger.error "Failure enqueueing periodic job! #{@name} #{$!.inspect}"
|
80
|
+
end
|
81
|
+
end
|
82
|
+
|
83
|
+
def tag
|
84
|
+
"periodic: #{@name}"
|
85
|
+
end
|
86
|
+
alias display_name tag
|
87
|
+
|
88
|
+
def self.now
|
89
|
+
Time.zone.now
|
90
|
+
end
|
86
91
|
end
|
87
92
|
end
|
88
|
-
end
|
data/lib/delayed/plugin.rb
CHANGED
@@ -1,6 +1,6 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
2
|
|
3
|
-
require
|
3
|
+
require "active_support/core_ext/class/attribute"
|
4
4
|
|
5
5
|
module Delayed
|
6
6
|
class Plugin
|
@@ -11,9 +11,7 @@ module Delayed
|
|
11
11
|
end
|
12
12
|
|
13
13
|
def self.inject!
|
14
|
-
unless @injected
|
15
|
-
self.callback_block.call(Delayed::Worker.lifecycle) if self.callback_block
|
16
|
-
end
|
14
|
+
callback_block&.call(Delayed::Worker.lifecycle) unless @injected
|
17
15
|
@injected = true
|
18
16
|
end
|
19
17
|
|
data/lib/delayed/pool.rb
CHANGED
@@ -1,222 +1,260 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
2
|
|
3
3
|
module Delayed
|
4
|
-
class Pool
|
5
|
-
|
4
|
+
class Pool
|
5
|
+
include Delayed::Logging
|
6
6
|
|
7
|
-
|
8
|
-
|
7
|
+
mattr_accessor :on_fork
|
8
|
+
self.on_fork = -> {}
|
9
9
|
|
10
|
-
|
11
|
-
|
10
|
+
SIGNALS = %i[INT TERM QUIT].freeze
|
11
|
+
POOL_SLEEP_PERIOD = 5
|
12
12
|
|
13
|
-
|
13
|
+
attr_reader :workers
|
14
14
|
|
15
|
-
|
16
|
-
|
17
|
-
|
18
|
-
|
19
|
-
|
15
|
+
def initialize(*args)
|
16
|
+
if args.first.is_a?(Hash)
|
17
|
+
@config = args.first
|
18
|
+
else
|
19
|
+
warn "Calling Delayed::Pool.new directly is deprecated. Use `Delayed::CLI.new.run()` instead."
|
20
|
+
end
|
21
|
+
@workers = {}
|
22
|
+
@signal_queue = []
|
23
|
+
@self_pipe = IO.pipe
|
20
24
|
end
|
21
|
-
@workers = {}
|
22
|
-
@signal_queue = []
|
23
|
-
@self_pipe = IO.pipe
|
24
|
-
end
|
25
25
|
|
26
|
-
|
27
|
-
|
28
|
-
|
29
|
-
|
26
|
+
def run
|
27
|
+
warn "Delayed::Pool#run is deprecated and will be removed. Use `Delayed::CLI.new.run()` instead."
|
28
|
+
Delayed::CLI.new.run
|
29
|
+
end
|
30
30
|
|
31
|
-
|
32
|
-
|
33
|
-
|
34
|
-
|
35
|
-
|
36
|
-
|
37
|
-
|
38
|
-
|
39
|
-
|
40
|
-
|
41
|
-
|
42
|
-
|
43
|
-
|
44
|
-
|
45
|
-
|
46
|
-
|
47
|
-
|
48
|
-
|
49
|
-
|
50
|
-
|
51
|
-
|
31
|
+
def start
|
32
|
+
say "Started job master", :info
|
33
|
+
SIGNALS.each do |sig|
|
34
|
+
trap(sig) do
|
35
|
+
@signal_queue << sig
|
36
|
+
wake_up
|
37
|
+
end
|
38
|
+
end
|
39
|
+
$0 = procname
|
40
|
+
# fork to handle unlocking (to prevent polluting the parent with worker objects)
|
41
|
+
unlock_pid = fork_with_reconnects do
|
42
|
+
unlock_orphaned_jobs
|
43
|
+
end
|
44
|
+
Process.wait unlock_pid
|
45
|
+
|
46
|
+
spawn_periodic_auditor
|
47
|
+
spawn_abandoned_job_cleanup
|
48
|
+
spawn_all_workers
|
49
|
+
say "Workers spawned"
|
50
|
+
join
|
51
|
+
say "Shutting down"
|
52
|
+
stop
|
53
|
+
reap_all_children
|
54
|
+
rescue Exception => e # rubocop:disable Lint/RescueException
|
55
|
+
say "Job master died with error: #{e.inspect}\n#{e.backtrace.join("\n")}", :fatal
|
56
|
+
raise
|
57
|
+
end
|
52
58
|
|
53
|
-
|
59
|
+
protected
|
54
60
|
|
55
|
-
|
56
|
-
|
57
|
-
|
61
|
+
def procname
|
62
|
+
"delayed_jobs_pool#{Settings.pool_procname_suffix}"
|
63
|
+
end
|
58
64
|
|
59
|
-
|
60
|
-
|
65
|
+
def unlock_orphaned_jobs(_worker = nil, pid = nil)
|
66
|
+
return if Settings.disable_automatic_orphan_unlocking
|
61
67
|
|
62
|
-
|
63
|
-
|
64
|
-
|
65
|
-
|
68
|
+
unlocked_jobs = Delayed::Job.unlock_orphaned_jobs(pid)
|
69
|
+
say "Unlocked #{unlocked_jobs} orphaned jobs" if unlocked_jobs.positive?
|
70
|
+
ActiveRecord::Base.connection_handler.clear_all_connections! unless Rails.env.test?
|
71
|
+
end
|
66
72
|
|
67
|
-
|
68
|
-
|
73
|
+
def spawn_all_workers
|
74
|
+
ActiveRecord::Base.connection_handler.clear_all_connections!
|
69
75
|
|
70
|
-
|
71
|
-
|
72
|
-
|
73
|
-
|
76
|
+
if @config[:work_queue] == "parent_process"
|
77
|
+
@work_queue = WorkQueue::ParentProcess.new
|
78
|
+
spawn_work_queue
|
79
|
+
end
|
74
80
|
|
75
|
-
|
76
|
-
|
81
|
+
@config[:workers].each do |worker_config|
|
82
|
+
(worker_config[:workers] || 1).times { spawn_worker(worker_config) }
|
83
|
+
end
|
77
84
|
end
|
78
|
-
end
|
79
85
|
|
80
|
-
|
81
|
-
|
82
|
-
|
83
|
-
|
84
|
-
|
86
|
+
def spawn_work_queue
|
87
|
+
parent_pid = Process.pid
|
88
|
+
pid = fork_with_reconnects do
|
89
|
+
$0 = "delayed_jobs_work_queue#{Settings.pool_procname_suffix}"
|
90
|
+
@work_queue.server(parent_pid: parent_pid).run
|
91
|
+
end
|
92
|
+
workers[pid] = :work_queue
|
85
93
|
end
|
86
|
-
workers[pid] = :work_queue
|
87
|
-
end
|
88
94
|
|
89
|
-
|
90
|
-
|
91
|
-
|
92
|
-
else
|
95
|
+
def spawn_worker(worker_config)
|
96
|
+
return if worker_config[:periodic] # backwards compat
|
97
|
+
|
93
98
|
worker_config[:parent_pid] = Process.pid
|
94
99
|
worker_config[:work_queue] = @work_queue.client if @work_queue
|
95
100
|
worker = Delayed::Worker.new(worker_config)
|
101
|
+
|
102
|
+
pid = fork_with_reconnects do
|
103
|
+
worker.start
|
104
|
+
end
|
105
|
+
workers[pid] = worker
|
96
106
|
end
|
97
107
|
|
98
|
-
|
99
|
-
|
108
|
+
# child processes need to reconnect so they don't accidentally share redis or
|
109
|
+
# db connections with the parent
|
110
|
+
def fork_with_reconnects
|
111
|
+
fork do
|
112
|
+
@self_pipe.each(&:close) # sub-processes don't need to wake us up; keep their FDs clean
|
113
|
+
Pool.on_fork.call
|
114
|
+
Delayed::Job.reconnect!
|
115
|
+
yield
|
116
|
+
end
|
100
117
|
end
|
101
|
-
workers[pid] = worker
|
102
|
-
end
|
103
118
|
|
104
|
-
|
105
|
-
|
106
|
-
|
107
|
-
|
108
|
-
|
109
|
-
|
110
|
-
|
119
|
+
def spawn_abandoned_job_cleanup
|
120
|
+
return if Settings.disable_abandoned_job_cleanup
|
121
|
+
|
122
|
+
cleanup_interval_in_minutes = 60
|
123
|
+
@abandoned_cleanup_thread = Thread.new do
|
124
|
+
# every hour (staggered by process)
|
125
|
+
# check for dead jobs and cull them.
|
126
|
+
# Will actually be more often based on the
|
127
|
+
# number of worker nodes in the pool. This will actually
|
128
|
+
# be a max of N times per hour where N is the number of workers,
|
129
|
+
# but they won't overrun each other because the health check
|
130
|
+
# takes an advisory lock internally
|
131
|
+
sleep(rand(cleanup_interval_in_minutes * 60))
|
132
|
+
loop do
|
133
|
+
schedule_abandoned_job_cleanup
|
134
|
+
sleep(cleanup_interval_in_minutes * 60)
|
135
|
+
end
|
136
|
+
end
|
111
137
|
end
|
112
|
-
end
|
113
138
|
|
114
|
-
|
115
|
-
|
139
|
+
def schedule_abandoned_job_cleanup
|
140
|
+
pid = fork_with_reconnects do
|
141
|
+
# we want to avoid db connections in the main pool process
|
142
|
+
$0 = "delayed_abandoned_job_cleanup"
|
143
|
+
Delayed::Worker::HealthCheck.reschedule_abandoned_jobs
|
144
|
+
end
|
145
|
+
workers[pid] = :abandoned_job_cleanup
|
146
|
+
end
|
116
147
|
|
117
|
-
|
118
|
-
|
119
|
-
|
120
|
-
|
121
|
-
|
122
|
-
sleep(rand(15 * 60))
|
123
|
-
loop do
|
148
|
+
def spawn_periodic_auditor
|
149
|
+
return if Settings.disable_periodic_jobs
|
150
|
+
|
151
|
+
@periodic_thread = Thread.new do
|
152
|
+
# schedule the initial audit immediately on startup
|
124
153
|
schedule_periodic_audit
|
125
|
-
sleep
|
154
|
+
# initial sleep is randomized, for some staggering in the audit calls
|
155
|
+
# since job processors are usually all restarted at the same time
|
156
|
+
sleep(rand(15 * 60))
|
157
|
+
loop do
|
158
|
+
schedule_periodic_audit
|
159
|
+
sleep(15 * 60)
|
160
|
+
end
|
126
161
|
end
|
127
162
|
end
|
128
|
-
end
|
129
163
|
|
130
|
-
|
131
|
-
|
132
|
-
|
133
|
-
|
134
|
-
|
164
|
+
def schedule_periodic_audit
|
165
|
+
pid = fork_with_reconnects do
|
166
|
+
# we want to avoid db connections in the main pool process
|
167
|
+
$0 = "delayed_periodic_audit_scheduler"
|
168
|
+
Delayed::Periodic.audit_queue
|
169
|
+
end
|
170
|
+
workers[pid] = :periodic_audit
|
135
171
|
end
|
136
|
-
workers[pid] = :periodic_audit
|
137
|
-
end
|
138
172
|
|
139
|
-
|
140
|
-
|
141
|
-
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
151
|
-
|
173
|
+
def join
|
174
|
+
loop do
|
175
|
+
maintain_children
|
176
|
+
case sig = @signal_queue.shift
|
177
|
+
when nil
|
178
|
+
pool_sleep
|
179
|
+
when :QUIT
|
180
|
+
break
|
181
|
+
when :TERM, :INT
|
182
|
+
stop(graceful: false) if Settings.kill_workers_on_exit
|
183
|
+
break
|
184
|
+
else
|
185
|
+
logger.warn("Unexpected signal received: #{sig}")
|
186
|
+
end
|
152
187
|
end
|
153
188
|
end
|
154
|
-
end
|
155
189
|
|
156
|
-
|
157
|
-
|
158
|
-
|
159
|
-
|
190
|
+
def pool_sleep
|
191
|
+
@self_pipe[0].wait_readable(POOL_SLEEP_PERIOD)
|
192
|
+
@self_pipe[0].read_nonblock(11, exception: false)
|
193
|
+
end
|
194
|
+
|
195
|
+
def stop(graceful: true, timeout: Settings.slow_exit_timeout)
|
196
|
+
signal_for_children = graceful ? :QUIT : :TERM
|
197
|
+
if Settings.kill_workers_on_exit
|
198
|
+
limit = Time.zone.now + timeout
|
199
|
+
until @workers.empty? || Time.zone.now >= limit
|
200
|
+
signal_all_children(signal_for_children)
|
201
|
+
# Give our children some time to process the signal before checking if
|
202
|
+
# they've exited
|
203
|
+
sleep(0.5)
|
204
|
+
reap_all_children.each { |pid| @workers.delete(pid) }
|
205
|
+
end
|
160
206
|
|
161
|
-
|
162
|
-
|
163
|
-
|
164
|
-
|
165
|
-
|
207
|
+
# We really want to give the workers every opportunity to clean up after
|
208
|
+
# themselves before murdering them.
|
209
|
+
stop(graceful: false, timeout: 2) if graceful
|
210
|
+
signal_all_children(:KILL)
|
211
|
+
else
|
166
212
|
signal_all_children(signal_for_children)
|
167
|
-
# Give our children some time to process the signal before checking if
|
168
|
-
# they've exited
|
169
|
-
sleep(0.5)
|
170
|
-
reap_all_children.each { |pid| @workers.delete(pid) }
|
171
213
|
end
|
214
|
+
end
|
172
215
|
|
173
|
-
|
174
|
-
|
175
|
-
stop(graceful: false, timeout: 2) if graceful
|
176
|
-
signal_all_children(:KILL)
|
177
|
-
else
|
178
|
-
signal_all_children(signal_for_children)
|
216
|
+
def signal_all_children(signal)
|
217
|
+
workers.each_key { |pid| signal_child(signal, pid) }
|
179
218
|
end
|
180
|
-
end
|
181
219
|
|
182
|
-
|
183
|
-
|
184
|
-
|
220
|
+
def signal_child(signal, pid)
|
221
|
+
Process.kill(signal, pid)
|
222
|
+
rescue Errno::ESRCH
|
223
|
+
workers.delete(pid)
|
224
|
+
end
|
185
225
|
|
186
|
-
|
187
|
-
|
188
|
-
|
189
|
-
|
190
|
-
|
226
|
+
# Respawn all children that have exited since we last checked
|
227
|
+
def maintain_children
|
228
|
+
reap_all_children.each do |pid|
|
229
|
+
respawn_child(pid)
|
230
|
+
end
|
231
|
+
end
|
232
|
+
|
233
|
+
# Reaps processes that have exited or just returns if none have exited
|
234
|
+
#
|
235
|
+
# @return Array An array of child pids that have exited
|
236
|
+
def reap_all_children
|
237
|
+
exited_pids = []
|
238
|
+
loop do
|
239
|
+
pid = Process.wait(-1, Process::WNOHANG)
|
240
|
+
break unless pid
|
191
241
|
|
192
|
-
|
193
|
-
|
194
|
-
|
195
|
-
|
242
|
+
exited_pids << pid
|
243
|
+
rescue Errno::ECHILD
|
244
|
+
break
|
245
|
+
end
|
246
|
+
exited_pids
|
196
247
|
end
|
197
|
-
end
|
198
248
|
|
199
|
-
|
200
|
-
|
201
|
-
# @return Array An array of child pids that have exited
|
202
|
-
def reap_all_children
|
203
|
-
exited_pids = []
|
204
|
-
begin
|
205
|
-
pid = Process.wait(-1, Process::WNOHANG)
|
206
|
-
break unless pid
|
207
|
-
exited_pids << pid
|
208
|
-
rescue Errno::ECHILD
|
209
|
-
break
|
210
|
-
end while true # I hate doing loops this way but it's the only way to make the rescue work
|
211
|
-
exited_pids
|
212
|
-
end
|
249
|
+
def respawn_child(child)
|
250
|
+
return unless workers.include?(child)
|
213
251
|
|
214
|
-
def respawn_child(child)
|
215
|
-
if workers.include?(child)
|
216
252
|
worker = workers.delete(child)
|
217
253
|
case worker
|
218
254
|
when :periodic_audit
|
219
255
|
say "ran auditor: #{worker}"
|
256
|
+
when :abandoned_job_cleanup
|
257
|
+
say "ran cleanup: #{worker}"
|
220
258
|
when :work_queue
|
221
259
|
say "work queue exited, restarting", :info
|
222
260
|
spawn_work_queue
|
@@ -230,10 +268,9 @@ class Pool
|
|
230
268
|
spawn_worker(worker.config)
|
231
269
|
end
|
232
270
|
end
|
233
|
-
end
|
234
271
|
|
235
|
-
|
236
|
-
|
272
|
+
def wake_up
|
273
|
+
@self_pipe[1].write_nonblock(".", exception: false)
|
274
|
+
end
|
237
275
|
end
|
238
276
|
end
|
239
|
-
end
|
@@ -8,21 +8,21 @@ module Delayed
|
|
8
8
|
end
|
9
9
|
|
10
10
|
def url_path(*path_parts)
|
11
|
-
[path_prefix, path_parts].join(
|
11
|
+
[path_prefix, path_parts].join("/").squeeze("/")
|
12
12
|
end
|
13
13
|
|
14
14
|
def path_prefix
|
15
|
-
request.env[
|
15
|
+
request.env["SCRIPT_NAME"]
|
16
16
|
end
|
17
17
|
|
18
18
|
def render_javascript_env
|
19
19
|
{
|
20
20
|
Routes: {
|
21
21
|
root: path_prefix,
|
22
|
-
running: url_path(
|
23
|
-
tags: url_path(
|
24
|
-
jobs: url_path(
|
25
|
-
bulkUpdate: url_path(
|
22
|
+
running: url_path("running"),
|
23
|
+
tags: url_path("tags"),
|
24
|
+
jobs: url_path("jobs"),
|
25
|
+
bulkUpdate: url_path("bulk_update")
|
26
26
|
}
|
27
27
|
}.to_json
|
28
28
|
end
|