inst-jobs 2.3.2 → 2.4.2
- checksums.yaml +4 -4
- data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
- data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
- data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
- data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
- data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
- data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
- data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
- data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
- data/db/migrate/20210812210128_add_singleton_column.rb +203 -0
- data/exe/inst_jobs +3 -2
- data/lib/delayed/backend/active_record.rb +182 -148
- data/lib/delayed/backend/base.rb +79 -74
- data/lib/delayed/batch.rb +11 -9
- data/lib/delayed/cli.rb +98 -84
- data/lib/delayed/core_ext/kernel.rb +4 -2
- data/lib/delayed/daemon.rb +70 -74
- data/lib/delayed/job_tracking.rb +26 -25
- data/lib/delayed/lifecycle.rb +27 -24
- data/lib/delayed/log_tailer.rb +17 -17
- data/lib/delayed/logging.rb +13 -16
- data/lib/delayed/message_sending.rb +42 -51
- data/lib/delayed/performable_method.rb +5 -7
- data/lib/delayed/periodic.rb +66 -65
- data/lib/delayed/plugin.rb +2 -4
- data/lib/delayed/pool.rb +198 -193
- data/lib/delayed/server/helpers.rb +6 -6
- data/lib/delayed/server.rb +51 -54
- data/lib/delayed/settings.rb +93 -81
- data/lib/delayed/testing.rb +21 -22
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/in_process.rb +21 -18
- data/lib/delayed/work_queue/parent_process/client.rb +54 -55
- data/lib/delayed/work_queue/parent_process/server.rb +215 -209
- data/lib/delayed/work_queue/parent_process.rb +52 -53
- data/lib/delayed/worker/consul_health_check.rb +21 -19
- data/lib/delayed/worker/health_check.rb +21 -12
- data/lib/delayed/worker/null_health_check.rb +3 -1
- data/lib/delayed/worker/process_helper.rb +8 -9
- data/lib/delayed/worker.rb +271 -265
- data/lib/delayed/yaml_extensions.rb +12 -10
- data/lib/delayed_job.rb +37 -38
- data/lib/inst-jobs.rb +1 -1
- data/spec/active_record_job_spec.rb +128 -135
- data/spec/delayed/cli_spec.rb +7 -7
- data/spec/delayed/daemon_spec.rb +8 -8
- data/spec/delayed/message_sending_spec.rb +16 -9
- data/spec/delayed/periodic_spec.rb +13 -12
- data/spec/delayed/server_spec.rb +38 -38
- data/spec/delayed/settings_spec.rb +26 -25
- data/spec/delayed/work_queue/in_process_spec.rb +7 -7
- data/spec/delayed/work_queue/parent_process/client_spec.rb +15 -11
- data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
- data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
- data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
- data/spec/delayed/worker/health_check_spec.rb +51 -49
- data/spec/delayed/worker_spec.rb +28 -25
- data/spec/gemfiles/52.gemfile +5 -3
- data/spec/gemfiles/52.gemfile.lock +240 -0
- data/spec/gemfiles/60.gemfile +5 -3
- data/spec/gemfiles/60.gemfile.lock +1 -1
- data/spec/gemfiles/61.gemfile +5 -3
- data/spec/sample_jobs.rb +45 -15
- data/spec/shared/delayed_batch.rb +74 -67
- data/spec/shared/delayed_method.rb +143 -102
- data/spec/shared/performable_method.rb +39 -38
- data/spec/shared/shared_backend.rb +534 -441
- data/spec/shared/testing.rb +14 -14
- data/spec/shared/worker.rb +155 -147
- data/spec/shared_jobs_specs.rb +13 -13
- data/spec/spec_helper.rb +43 -40
- metadata +75 -56
- data/lib/delayed/backend/redis/bulk_update.lua +0 -50
- data/lib/delayed/backend/redis/destroy_job.lua +0 -2
- data/lib/delayed/backend/redis/enqueue.lua +0 -29
- data/lib/delayed/backend/redis/fail_job.lua +0 -5
- data/lib/delayed/backend/redis/find_available.lua +0 -3
- data/lib/delayed/backend/redis/functions.rb +0 -59
- data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
- data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
- data/lib/delayed/backend/redis/job.rb +0 -528
- data/lib/delayed/backend/redis/set_running.lua +0 -5
- data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
- data/spec/redis_job_spec.rb +0 -148
data/lib/delayed/version.rb CHANGED
data/lib/delayed/work_queue/in_process.rb CHANGED
@@ -1,24 +1,27 @@
# frozen_string_literal: true

module Delayed
  module WorkQueue
    # The simplest possible implementation of a WorkQueue -- just turns around and
    # queries the queue inline.
    class InProcess
      def get_and_lock_next_available(worker_name, worker_config)
        Delayed::Worker.lifecycle.run_callbacks(:work_queue_pop, self, worker_config) do
          Delayed::Job.get_and_lock_next_available(
            worker_name,
            worker_config[:queue],
            worker_config[:min_priority],
            worker_config[:max_priority]
          )
        end
      end

      # intentional nops for compatibility w/ parent process
      def init; end

      def close; end

      def wake_up; end
    end
  end
end
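For orientation, a hypothetical caller-side sketch (not part of the gem) of the interface above: a worker identifies itself by name and passes a config hash with the :queue, :min_priority and :max_priority keys the class reads. The worker name and config values here are invented for illustration.

    queue = Delayed::WorkQueue::InProcess.new
    queue.init # no-op, kept for parity with the parent-process queue
    job = queue.get_and_lock_next_available(
      "myhost:12345",
      { queue: "default", min_priority: 0, max_priority: 1_000_000 }
    )
    puts job.id if job
    queue.close # also a no-op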
data/lib/delayed/work_queue/parent_process/client.rb CHANGED
@@ -1,76 +1,75 @@
# frozen_string_literal: true

module Delayed
  module WorkQueue
    class ParentProcess
      class Client
        attr_reader :addrinfo

        include Delayed::Logging

        def initialize(addrinfo, config: Settings.parent_process)
          @addrinfo = addrinfo
          @connect_timeout = config["client_connect_timeout"] || 2
        end

        def init
          @self_pipe ||= IO.pipe # rubocop:disable Naming/MemoizedInstanceVariableName
        end

        def close
          reset_connection
        end

        def get_and_lock_next_available(worker_name, worker_config)
          Marshal.dump([worker_name, worker_config], socket)

          # We're assuming there won't ever be a partial write here so we only need
          # to wait for anything to be available on the 'wire', this is a valid
          # assumption because we control the server and it's a Unix domain socket,
          # not TCP.
          if socket.eof?
            # Other end closed gracefully, so should we
            logger.debug("server closed connection")
            return reset_connection
          end

          readers, = IO.select([socket, @self_pipe[0]])

          if readers.include?(@self_pipe[0])
            # we're probably exiting so we just want to break out of the blocking read
            logger.debug("Broke out of select due to being awakened, exiting")
          else
            Marshal.load(socket).tap do |response|
              unless response.nil? || (response.is_a?(Delayed::Job) && response.locked_by == worker_name)
                raise(ProtocolError, "response is not a locked job: #{response.inspect}")
              end

              logger.debug("Received job #{response.id}")
            end
          end
        rescue SystemCallError, IOError => e
          logger.error("Work queue connection lost, reestablishing on next poll. (#{e})")
          # The work queue process died. Return nil to signal the worker
          # process should sleep as if no job was found, and then retry.
          reset_connection
        end

        def wake_up
          @self_pipe[1].write_nonblock(".", exception: false)
        end

        private

        def socket
          @socket ||= @addrinfo.connect(timeout: @connect_timeout)
        end

        def reset_connection
          @socket&.close
          @socket = nil
        end
      end
    end
  end
end
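The Client above frames its wire protocol with Marshal alone: it writes one serialized [worker_name, worker_config] array per request and reads one serialized object back, relying on the Unix domain socket to deliver it whole. A small standalone sketch (independent of the gem, with an invented payload) of that framing style:

    require "socket"

    # Marshal.load reads exactly one serialized object off the stream,
    # so no explicit length prefix or delimiter is needed.
    server_end, client_end = UNIXSocket.pair
    Marshal.dump(["worker-1", { queue: "default", min_priority: 0 }], client_end)
    worker_name, worker_config = Marshal.load(server_end)
    puts "#{worker_name} asked for queue #{worker_config[:queue]}"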
data/lib/delayed/work_queue/parent_process/server.rb CHANGED
@@ -1,243 +1,249 @@
# frozen_string_literal: true

module Delayed
  module WorkQueue
    class ParentProcess
      class Server
        attr_reader :clients, :listen_socket

        include Delayed::Logging
        SIGNALS = %i[INT TERM QUIT].freeze

        def initialize(listen_socket, parent_pid: nil, config: Settings.parent_process)
          @listen_socket = listen_socket
          @parent_pid = parent_pid
          @clients = {}
          @waiting_clients = {}
          @prefetched_jobs = {}

          @config = config
          @client_timeout = config["server_socket_timeout"] || 10.0 # left for backwards compat

          @exit = false
          @self_pipe = IO.pipe
        end

        def connected_clients
          @clients.size
        end

        def all_workers_idle?
          @clients.none? { |_, c| c.working }
        end

        # run the server queue worker
        # this method does not return, only exits or raises an exception
        def run
          logger.debug "Starting work queue process"

          SIGNALS.each do |sig|
            # We're not doing any aggressive exiting here since we really want
            # prefetched jobs to be unlocked and we're going to wake up the process
            # from the IO.select we're using to wait on clients.
            trap(sig) do
              @exit = true
              @self_pipe[1].write_nonblock(".", exception: false)
            end
          end

          last_orphaned_prefetched_jobs_purge = Job.db_time_now - rand(15 * 60)
          until exit?
            run_once
            if last_orphaned_prefetched_jobs_purge + 15 * 60 < Job.db_time_now
              Job.unlock_orphaned_prefetched_jobs
              last_orphaned_prefetched_jobs_purge = Job.db_time_now
            end
          end
        rescue => e
          logger.error "WorkQueue Server died: #{e.inspect}"
          raise
        ensure
          unlock_all_prefetched_jobs
        end

        def run_once
          handles = @clients.keys + [@listen_socket, @self_pipe[0]]
          # if we're currently idle, then force a "latency" to job fetching - don't
          # fetch recently queued jobs, allowing busier workers to fetch them first.
          # if they're not keeping up, the jobs will slip back in time, and suddenly we'll become
          # active and quickly pick up all the jobs we can. The latency is calculated to ensure that
          # an active worker is guaranteed to have attempted to fetch new jobs in the meantime
          forced_latency = Settings.sleep_delay + Settings.sleep_delay_stagger * 2 if all_workers_idle?
          timeout = Settings.sleep_delay + (rand * Settings.sleep_delay_stagger)
          readable, = IO.select(handles, nil, nil, timeout)
          readable&.each { |s| handle_read(s) }
          Delayed::Worker.lifecycle.run_callbacks(:check_for_work, self) do
            check_for_work(forced_latency: forced_latency)
          end
          unlock_timed_out_prefetched_jobs
        end

        def handle_read(socket)
          if socket == @listen_socket
            handle_accept
          elsif socket == @self_pipe[0]
            # We really don't care about the contents of the pipe, we just need to
            # wake up.
            @self_pipe[0].read_nonblock(11, exception: false)
          else
            handle_request(socket)
          end
        end

        # Any error on the listen socket other than WaitReadable will bubble up
        # and terminate the work queue process, to be restarted by the parent daemon.
        def handle_accept
          socket, _addr = @listen_socket.accept_nonblock
          @clients[socket] = ClientState.new(false, socket) if socket
        rescue IO::WaitReadable
          logger.error("Server attempted to read listen_socket but failed with IO::WaitReadable")
          # ignore and just try accepting again next time through the loop
        end

        def handle_request(socket)
          # There is an assumption here that the client will never send a partial
          # request and then leave the socket open. Doing so would leave us hanging
          # in Marshal.load forever. This is only a reasonable assumption because we
          # control the client.
          client = @clients[socket]
          if socket.eof?
            logger.debug("Client #{client.name} closed connection")
            return drop_socket(socket)
          end
          worker_name, worker_config = Marshal.load(socket)
          client.name = worker_name
          client.working = false
          (@waiting_clients[worker_config] ||= []) << client
        rescue SystemCallError, IOError => e
          logger.error("Receiving message from client (#{socket}) failed: #{e.inspect}")
          drop_socket(socket)
        end

        def check_for_work(forced_latency: nil)
          @waiting_clients.each do |(worker_config, workers)|
            prefetched_jobs = @prefetched_jobs[worker_config] ||= []
            logger.debug("I have #{prefetched_jobs.length} jobs for #{workers.length} waiting workers")
            while !prefetched_jobs.empty? && !workers.empty?
              job = prefetched_jobs.shift
              client = workers.shift
              # couldn't re-lock it for some reason
              logger.debug("Transferring prefetched job to #{client.name}")
              unless job.transfer_lock!(from: prefetch_owner, to: client.name)
                workers.unshift(client)
                next
              end
              client.working = true
              begin
                logger.debug("Sending prefetched job #{job.id} to #{client.name}")
                client_timeout { Marshal.dump(job, client.socket) }
              rescue SystemCallError, IOError, Timeout::Error => e
                logger.error("Failed to send pre-fetched job to #{client.name}: #{e.inspect}")
                drop_socket(client.socket)
                Delayed::Job.unlock([job])
              end
            end

            next if workers.empty?

            logger.debug("Fetching new work for #{workers.length} workers")
            jobs_to_send = []

            Delayed::Worker.lifecycle.run_callbacks(:work_queue_pop, self, worker_config) do
              recipients = workers.map(&:name)

              response = Delayed::Job.get_and_lock_next_available(
                recipients,
                worker_config[:queue],
                worker_config[:min_priority],
                worker_config[:max_priority],
                prefetch: Settings.fetch_batch_size * (worker_config[:workers] || 1) - recipients.length,
                prefetch_owner: prefetch_owner,
                forced_latency: forced_latency
              )
              logger.debug(
                "Fetched and locked #{response.values.flatten.size} new jobs for workers (#{response.keys.join(', ')})."
              )
              response.each do |(worker_name, locked_jobs)|
                if worker_name == prefetch_owner
                  # it's actually an array of all the extra jobs
                  logger.debug(
                    "Adding prefetched jobs #{locked_jobs.length} to prefetched array (size: #{prefetched_jobs.count})"
                  )
                  prefetched_jobs.concat(locked_jobs)
                  next
                end
                client = workers.find { |worker| worker.name == worker_name }
                client.working = true
                jobs_to_send << [client, locked_jobs]
              end
            end

            jobs_to_send.each do |(recipient, job_to_send)|
              @waiting_clients[worker_config].delete(client)
              begin
                logger.debug("Sending job #{job_to_send.id} to #{recipient.name}")
                client_timeout { Marshal.dump(job_to_send, recipient.socket) }
              rescue SystemCallError, IOError, Timeout::Error => e
                logger.error("Failed to send job to #{recipient.name}: #{e.inspect}")
                drop_socket(recipient.socket)
                Delayed::Job.unlock([job_to_send])
              end
            end
          end
        end

        def unlock_timed_out_prefetched_jobs
          @prefetched_jobs.each do |(worker_config, jobs)|
            next if jobs.empty?

            if jobs.first.locked_at < Time.now.utc - Settings.parent_process[:prefetched_jobs_timeout]
              Delayed::Job.unlock(jobs)
              @prefetched_jobs[worker_config] = []
            end
          end
        end

        def unlock_all_prefetched_jobs
          @prefetched_jobs.each do |(_worker_config, jobs)|
            next if jobs.empty?

            Delayed::Job.unlock(jobs)
          end
          @prefetched_jobs = {}
        end

        def drop_socket(socket)
          # this socket went away
          begin
            socket.close
          rescue IOError
            nil
          end
          client = @clients[socket]
          @clients.delete(socket)
          @waiting_clients.each do |(_config, workers)|
            workers.delete(client)
          end
        end

        def exit?
          !!@exit || parent_exited?
        end

        def prefetch_owner
          "prefetch:#{Socket.gethostname rescue 'X'}"
        end

        def parent_exited?
          @parent_pid && @parent_pid != Process.ppid
        end

        def client_timeout(&block)
          Timeout.timeout(@client_timeout, &block)
        end

        ClientState = Struct.new(:working, :socket, :name)
      end
    end
  end
end