inst-jobs 0.12.3 → 0.13.0
- checksums.yaml +4 -4
- data/lib/delayed/backend/active_record.rb +40 -3
- data/lib/delayed/backend/base.rb +7 -0
- data/lib/delayed/backend/redis/job.rb +13 -3
- data/lib/delayed/logging.rb +32 -0
- data/lib/delayed/settings.rb +28 -2
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/parent_process.rb +24 -180
- data/lib/delayed/work_queue/parent_process/client.rb +54 -0
- data/lib/delayed/work_queue/parent_process/server.rb +200 -0
- data/lib/delayed_job.rb +1 -0
- data/spec/active_record_job_spec.rb +28 -0
- data/spec/delayed/settings_spec.rb +7 -0
- data/spec/delayed/work_queue/parent_process/client_spec.rb +102 -0
- data/spec/delayed/work_queue/parent_process/server_spec.rb +162 -0
- data/spec/delayed/work_queue/parent_process_spec.rb +29 -164
- data/spec/gemfiles/42.gemfile.lock +44 -46
- data/spec/gemfiles/50.gemfile.lock +48 -48
- data/spec/shared/shared_backend.rb +17 -0
- metadata +9 -2
data/lib/delayed/work_queue/parent_process/client.rb
ADDED
@@ -0,0 +1,54 @@
+module Delayed
+  module WorkQueue
+    class ParentProcess
+      class Client
+        attr_reader :addrinfo
+
+        include Delayed::Logging
+
+        def initialize(addrinfo, config: Settings.parent_process)
+          @addrinfo = addrinfo
+          @connect_timeout = config['client_connect_timeout'] || 2
+          @receive_timeout = config['client_receive_timeout'] || 10
+        end
+
+        def get_and_lock_next_available(worker_name, worker_config)
+          Marshal.dump([worker_name, worker_config], socket)
+
+          # We're assuming there won't ever be a partial write here so we only need
+          # to wait for anything to be available on the 'wire', this is a valid
+          # assumption because we control the server and it's a Unix domain socket,
+          # not TCP.
+          if socket.wait_readable(@receive_timeout)
+            return reset_connection if socket.eof? # Other end closed gracefully, so should we
+            Marshal.load(socket).tap do |response|
+              unless response.nil? || (response.is_a?(Delayed::Job) && response.locked_by == worker_name)
+                raise(ProtocolError, "response is not a locked job: #{response.inspect}")
+              end
+            end
+          else
+            reset_connection
+          end
+        rescue SystemCallError, IOError => ex
+          logger.error("Work queue connection lost, reestablishing on next poll. (#{ex})")
+          # The work queue process died. Return nil to signal the worker
+          # process should sleep as if no job was found, and then retry.
+          reset_connection
+        end
+
+        private
+
+        def socket
+          @socket ||= @addrinfo.connect(timeout: @connect_timeout)
+        end
+
+        def reset_connection
+          if @socket
+            @socket.close
+            @socket = nil
+          end
+        end
+      end
+    end
+  end
+end
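Note: a minimal sketch of how a worker-side caller might use this Client. The socket path, worker name, and worker_config keys below are illustrative assumptions, not values from this diff; in the gem the surrounding ParentProcess builds the Addrinfo and a Delayed::Worker consumes the returned job.

    require 'socket'

    # Assumed socket path for illustration; the real address comes from
    # Settings.parent_process['server_address'].
    addrinfo = Addrinfo.unix('/tmp/inst-jobs.sock')
    client   = Delayed::WorkQueue::ParentProcess::Client.new(addrinfo)

    worker_config = { queue: 'default', min_priority: nil, max_priority: nil }
    job = client.get_and_lock_next_available("worker:#{Process.pid}", worker_config)

    if job
      # a Delayed::Worker would run the locked job here
    else
      sleep(Delayed::Settings.sleep_delay) # nil means no job (or the connection was reset)
    end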
data/lib/delayed/work_queue/parent_process/server.rb
ADDED
@@ -0,0 +1,200 @@
+module Delayed
+  module WorkQueue
+    class ParentProcess
+      class Server
+        attr_reader :clients, :listen_socket
+
+        include Delayed::Logging
+
+        def initialize(listen_socket, parent_pid: nil, config: Settings.parent_process)
+          @listen_socket = listen_socket
+          @parent_pid = parent_pid
+          @clients = {}
+          @waiting_clients = {}
+          @pending_work = {}
+
+          @config = config
+          @client_timeout = config['server_socket_timeout'] || 10.0 # left for backwards compat
+          @receive_timeout = config['server_receive_timeout'] || 10.0
+        end
+
+        def connected_clients
+          @clients.size
+        end
+
+        def all_workers_idle?
+          !@clients.any? { |_, c| c.working }
+        end
+
+        # run the server queue worker
+        # this method does not return, only exits or raises an exception
+        def run
+          logger.debug "Starting work queue process"
+
+          last_orphaned_pending_jobs_purge = Job.db_time_now - rand(15 * 60)
+          while !exit?
+            run_once
+            if last_orphaned_pending_jobs_purge + 15 * 60 < Job.db_time_now
+              Job.unlock_orphaned_pending_jobs
+              last_orphaned_pending_jobs_purge = Job.db_time_now
+            end
+          end
+
+        rescue => e
+          logger.debug "WorkQueue Server died: #{e.inspect}", :error
+          raise
+        ensure
+          purge_all_pending_work
+        end
+
+        def run_once
+          handles = @clients.keys + [@listen_socket]
+          timeout = Settings.sleep_delay + (rand * Settings.sleep_delay_stagger)
+          readable, _, _ = IO.select(handles, nil, nil, timeout)
+          if readable
+            readable.each { |s| handle_read(s) }
+          end
+          check_for_work
+          purge_extra_pending_work
+        end
+
+        def handle_read(socket)
+          if socket == @listen_socket
+            handle_accept
+          else
+            handle_request(socket)
+          end
+        end
+
+        # Any error on the listen socket other than WaitReadable will bubble up
+        # and terminate the work queue process, to be restarted by the parent daemon.
+        def handle_accept
+          socket, _addr = @listen_socket.accept_nonblock
+          if socket
+            @clients[socket] = ClientState.new(false, socket)
+          end
+        rescue IO::WaitReadable
+          logger.error("Server attempted to read listen_socket but failed with IO::WaitReadable")
+          # ignore and just try accepting again next time through the loop
+        end
+
+        def handle_request(socket)
+          # There is an assumption here that the client will never send a partial
+          # request and then leave the socket open. Doing so would leave us hanging
+          # in Marshal.load forever. This is only a reasonable assumption because we
+          # control the client. Also, in theory, we shouldn't need to call
+          # #wait_readable to ensure there is data available since we just used
+          # select(2) to get this handle but it's better to be safe than sorry.
+          if socket.wait_readable(@receive_timeout)
+            worker_name, worker_config = Marshal.load(socket)
+            client = @clients[socket]
+            client.name = worker_name
+            client.working = false
+            (@waiting_clients[worker_config] ||= []) << client
+          else
+            drop_socket(socket)
+          end
+
+        rescue SystemCallError, IOError => ex
+          logger.error("Receiving message from client (#{socket}) failed: #{ex.inspect}")
+          drop_socket(socket)
+        end
+
+        def check_for_work
+          @waiting_clients.each do |(worker_config, workers)|
+            pending_work = @pending_work[worker_config] ||= []
+            logger.debug("I have #{pending_work.length} jobs for #{workers.length} waiting workers")
+            while !pending_work.empty? && !workers.empty?
+              job = pending_work.shift
+              client = workers.shift
+              # couldn't re-lock it for some reason
+              unless job.transfer_lock!(from: pending_jobs_owner, to: client.name)
+                workers.unshift(client)
+                next
+              end
+              begin
+                client_timeout { Marshal.dump(job, client.socket) }
+              rescue SystemCallError, IOError, Timeout::Error
+                drop_socket(client.socket)
+              end
+            end
+
+            next if workers.empty?
+
+            Delayed::Worker.lifecycle.run_callbacks(:work_queue_pop, self, worker_config) do
+              recipients = workers.map(&:name)
+
+              response = Delayed::Job.get_and_lock_next_available(
+                recipients,
+                worker_config[:queue],
+                worker_config[:min_priority],
+                worker_config[:max_priority],
+                extra_jobs: Settings.fetch_batch_size * (worker_config[:workers] || 1) - recipients.length,
+                extra_jobs_owner: pending_jobs_owner)
+              response.each do |(worker_name, job)|
+                if worker_name == pending_jobs_owner
+                  # it's actually an array of all the extra jobs
+                  pending_work.concat(job)
+                  next
+                end
+                client = workers.find { |worker| worker.name == worker_name }
+                client.working = true
+                @waiting_clients[worker_config].delete(client)
+                begin
+                  client_timeout { Marshal.dump(job, client.socket) }
+                rescue SystemCallError, IOError, Timeout::Error
+                  drop_socket(client.socket)
+                end
+              end
+            end
+          end
+        end
+
+        def purge_extra_pending_work
+          @pending_work.each do |(worker_config, jobs)|
+            next if jobs.empty?
+            if jobs.first.locked_at < Time.now.utc - Settings.parent_process[:pending_jobs_idle_timeout]
+              Delayed::Job.unlock(jobs)
+              @pending_work[worker_config] = []
+            end
+          end
+        end
+
+        def purge_all_pending_work
+          @pending_work.each do |(_worker_config, jobs)|
+            next if jobs.empty?
+            Delayed::Job.unlock(jobs)
+          end
+          @pending_work = {}
+        end
+
+        def drop_socket(socket)
+          # this socket went away
+          begin
+            socket.close
+          rescue IOError
+          end
+          @clients.delete(socket)
+        end
+
+        def exit?
+          parent_exited?
+        end
+
+        def pending_jobs_owner
+          "work_queue:#{Socket.gethostname rescue 'X'}"
+        end
+
+        def parent_exited?
+          @parent_pid && @parent_pid != Process.ppid
+        end
+
+        def client_timeout
+          Timeout.timeout(@client_timeout) { yield }
+        end
+
+        ClientState = Struct.new(:working, :socket, :name)
+      end
+    end
+  end
+end
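Note: a rough sketch of standing this Server up by hand. In the gem the enclosing ParentProcess creates the listen socket and forks the work queue process, so the socket path and wiring below are assumptions for illustration only.

    require 'socket'

    listen_socket = Socket.unix_server_socket('/tmp/inst-jobs.sock') # assumed path
    server = Delayed::WorkQueue::ParentProcess::Server.new(
      listen_socket,
      parent_pid: Process.ppid # pid of the supervising process; exit? fires when it changes
    )

    server.run # blocks: selects on client sockets, locks jobs in batches, hands them out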
data/lib/delayed_job.rb
CHANGED
data/spec/active_record_job_spec.rb
CHANGED
@@ -244,6 +244,22 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
     end
   end
 
+  it "unlocks orphaned jobs from work queue" do
+    job1 = Delayed::Job.new(:tag => 'tag')
+    job2 = Delayed::Job.new(:tag => 'tag')
+
+    job1.create_and_lock!("work_queue:a")
+    job1.locked_at = Delayed::Job.db_time_now - 15 * 60
+    job1.save!
+    job2.create_and_lock!("work_queue:a")
+
+    expect(Delayed::Job.unlock_orphaned_pending_jobs).to eq 1
+    expect(Delayed::Job.unlock_orphaned_pending_jobs).to eq 0
+
+    expect(Delayed::Job.find(job1.id).locked_by).to be_nil
+    expect(Delayed::Job.find(job2.id).locked_by).to eq 'work_queue:a'
+  end
+
   it "allows fetching multiple jobs at once" do
     jobs = 3.times.map { Delayed::Job.create :payload_object => SimpleJob.new }
     locked_jobs = Delayed::Job.get_and_lock_next_available(['worker1', 'worker2'])
@@ -252,4 +268,16 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
     locked_jobs.values.should == jobs[0..1]
     jobs.map(&:reload).map(&:locked_by).should == ['worker1', 'worker2', nil]
   end
+
+  it "allows fetching extra jobs" do
+    jobs = 5.times.map { Delayed::Job.create :payload_object => SimpleJob.new }
+    locked_jobs = Delayed::Job.get_and_lock_next_available(['worker1'],
+                                                           extra_jobs: 2,
+                                                           extra_jobs_owner: 'work_queue')
+    expect(locked_jobs.length).to eq 2
+    expect(locked_jobs.keys).to eq ['worker1', 'work_queue']
+    expect(locked_jobs['worker1']).to eq jobs[0]
+    expect(locked_jobs['work_queue']).to eq jobs[1..2]
+    jobs.map(&:reload).map(&:locked_by).should == ['worker1', 'work_queue', 'work_queue', nil, nil]
+  end
 end
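Note: the return shape exercised by the spec above, sketched with placeholder values: when extra_jobs is passed, get_and_lock_next_available returns a hash keyed by worker name, plus one entry keyed by the extra_jobs_owner whose value is an array of the prefetched jobs.

    locked_jobs = Delayed::Job.get_and_lock_next_available(['worker1'],
                                                           extra_jobs: 2,
                                                           extra_jobs_owner: 'work_queue')
    # => { 'worker1'    => <job locked_by 'worker1'>,
    #      'work_queue' => [<job locked_by 'work_queue'>, <job locked_by 'work_queue'>] }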
data/spec/delayed/settings_spec.rb
CHANGED
@@ -29,4 +29,11 @@ default:
       described_class.apply_worker_config!('last_ditch_logfile' => true)
     end
   end
+
+  describe '.parent_process_client_timeout=' do
+    it 'must update the value in the parent_process settings hash' do
+      Delayed::Settings.parent_process_client_timeout = 42
+      expect(Delayed::Settings.parent_process['server_socket_timeout']).to eq 42
+    end
+  end
 end
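Note: the new timeouts all live in the Delayed::Settings.parent_process hash. The keys below are the ones read by the Client and Server in this diff; the numeric values and socket path are only examples.

    Delayed::Settings.parent_process = {
      'server_address'         => '/tmp/inst-jobs.sock', # example path
      'server_socket_timeout'  => 10.0, # Server write timeout; also set via Settings.parent_process_client_timeout=
      'server_receive_timeout' => 10.0, # Server read timeout per client request
      'client_connect_timeout' => 2,    # Client connect timeout
      'client_receive_timeout' => 10    # Client wait for a job response
    }
    # Settings.parent_process[:pending_jobs_idle_timeout] is also read (note the symbol
    # key) when deciding how long prefetched jobs may sit before being unlocked.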
data/spec/delayed/work_queue/parent_process/client_spec.rb
ADDED
@@ -0,0 +1,102 @@
+require 'spec_helper'
+
+RSpec.describe Delayed::WorkQueue::ParentProcess::Client do
+  let(:subject) { described_class.new(addrinfo) }
+  let(:addrinfo) { double('Addrinfo') }
+  let(:connection) { double('Socket') }
+  let(:job) { Delayed::Job.new(locked_by: "worker_name") }
+  let(:worker_config) { { queue: "queue_name", min_priority: 1, max_priority: 2 } }
+  let(:args) { ["worker_name", worker_config] }
+  let(:job_args) { [["worker_name"], "queue_name", 1, 2] }
+
+  before :all do
+    FileUtils.mkdir_p(Delayed::Settings.expand_rails_path('tmp'))
+    Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
+  end
+
+  after :all do
+    Delayed.send(:remove_const, :Job)
+  end
+
+  it 'marshals the given arguments to the server and returns the response' do
+    expect(addrinfo).to receive(:connect).once.and_return(connection)
+    expect(connection).to receive(:wait_readable).with(10.0).and_return(connection)
+    expect(connection).to receive(:eof?).and_return(false)
+    expect(Marshal).to receive(:dump).with(args, connection).ordered
+    expect(Marshal).to receive(:load).with(connection).and_return(job).ordered
+    response = subject.get_and_lock_next_available(*args)
+    expect(response).to eq(job)
+  end
+
+  it 'returns nil and then reconnects on receive timeout' do
+    expect(addrinfo).to receive(:connect).once.and_return(connection)
+    expect(connection).to receive(:wait_readable).with(10.0).and_return(nil)
+    expect(Marshal).to receive(:dump).with(args, connection).ordered
+    expect(connection).to receive(:close)
+    response = subject.get_and_lock_next_available(*args)
+    expect(response).to be_nil
+
+    expect(addrinfo).to receive(:connect).once.and_return(connection)
+    expect(Marshal).to receive(:dump).with(args, connection)
+    expect(connection).to receive(:wait_readable).with(10.0).and_return(connection)
+    expect(connection).to receive(:eof?).and_return(false)
+    expect(Marshal).to receive(:load).with(connection).and_return(job)
+    response = subject.get_and_lock_next_available(*args)
+    expect(response).to eq(job)
+  end
+
+  it 'returns nil and then reconnects on socket write error' do
+    expect(addrinfo).to receive(:connect).once.and_return(connection)
+    expect(Marshal).to receive(:dump).and_raise(SystemCallError.new("failure"))
+    expect(connection).to receive(:close)
+    response = subject.get_and_lock_next_available(*args)
+    expect(response).to be_nil
+
+    expect(addrinfo).to receive(:connect).once.and_return(connection)
+    expect(Marshal).to receive(:dump).with(args, connection)
+    expect(connection).to receive(:wait_readable).with(10.0).and_return(connection)
+    expect(connection).to receive(:eof?).and_return(false)
+    expect(Marshal).to receive(:load).with(connection).and_return(job)
+    response = subject.get_and_lock_next_available(*args)
+    expect(response).to eq(job)
+  end
+
+  it 'returns nil and then reconnects when the socket indicates eof' do
+    expect(addrinfo).to receive(:connect).once.and_return(connection)
+    expect(connection).to receive(:wait_readable).with(10.0).and_return(true)
+    expect(connection).to receive(:eof?).and_return(true)
+    expect(Marshal).to receive(:dump).with(args, connection).ordered
+    expect(connection).to receive(:close)
+    response = subject.get_and_lock_next_available(*args)
+    expect(response).to be_nil
+
+    expect(addrinfo).to receive(:connect).once.and_return(connection)
+    expect(Marshal).to receive(:dump).with(args, connection)
+    expect(connection).to receive(:wait_readable).with(10.0).and_return(connection)
+    expect(connection).to receive(:eof?).and_return(false)
+    expect(Marshal).to receive(:load).with(connection).and_return(job)
+    response = subject.get_and_lock_next_available(*args)
+    expect(response).to eq(job)
+  end
+
+  it 'errors if the response is not a locked job' do
+    expect(addrinfo).to receive(:connect).once.and_return(connection)
+    expect(Marshal).to receive(:dump).with(args, connection)
+    expect(Marshal).to receive(:load).with(connection).and_return(:not_a_job)
+    expect(connection).to receive(:wait_readable).with(10.0).and_return(connection)
+    expect(connection).to receive(:eof?).and_return(false)
+
+    expect { subject.get_and_lock_next_available(*args) }.to raise_error(Delayed::WorkQueue::ParentProcess::ProtocolError)
+  end
+
+  it 'errors if the response is a job not locked by this worker' do
+    expect(addrinfo).to receive(:connect).once.and_return(connection)
+    expect(Marshal).to receive(:dump).with(args, connection)
+    job.locked_by = "somebody_else"
+    expect(Marshal).to receive(:load).with(connection).and_return(job)
+    expect(connection).to receive(:wait_readable).with(10.0).and_return(connection)
+    expect(connection).to receive(:eof?).and_return(false)
+
+    expect { subject.get_and_lock_next_available(*args) }.to raise_error(Delayed::WorkQueue::ParentProcess::ProtocolError)
+  end
+end
data/spec/delayed/work_queue/parent_process/server_spec.rb
ADDED
@@ -0,0 +1,162 @@
+require 'spec_helper'
+
+RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
+  let(:parent) { Delayed::WorkQueue::ParentProcess.new }
+  let(:subject) { described_class.new(listen_socket) }
+  let(:listen_socket) { Socket.unix_server_socket(parent.server_address) }
+  let(:job) { :a_job }
+  let(:worker_config) { { queue: "queue_name", min_priority: 1, max_priority: 2 } }
+  let(:args) { ["worker_name", worker_config] }
+  let(:job_args) { [["worker_name"], "queue_name", 1, 2, hash_including(extra_jobs: 4)] }
+
+  before :all do
+    Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
+    Delayed::Settings.parent_process = {
+      'server_address' => '/tmp/inst-jobs-test.sock'
+    }
+  end
+
+  after :all do
+    Delayed.send(:remove_const, :Job)
+    Delayed::Settings.parent_process = {}
+  end
+
+  after :each do
+    File.unlink('/tmp/inst-jobs-test.sock') if File.exist?('/tmp/inst-jobs-test.sock')
+  end
+
+  it 'accepts new clients' do
+    client = Socket.unix(subject.listen_socket.local_address.unix_path)
+    expect { subject.run_once }.to change(subject, :connected_clients).by(1)
+  end
+
+  it 'queries the queue on client request' do
+    client = Socket.unix(subject.listen_socket.local_address.unix_path)
+    subject.run_once
+
+    expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*job_args).and_return('worker_name' => job)
+    Marshal.dump(args, client)
+    subject.run_once
+    expect(client).to be_ready
+    expect(Marshal.load(client)).to eq(job)
+  end
+
+  it 'can pop multiple jobs at once' do
+    client1 = Socket.unix(subject.listen_socket.local_address.unix_path)
+    subject.run_once
+    client2 = Socket.unix(subject.listen_socket.local_address.unix_path)
+    subject.run_once
+
+    job_args = [["worker_name1", "worker_name2"], "queue_name", 1, 2, hash_including(extra_jobs: 3)]
+    jobs = { 'worker_name1' => :job1, 'worker_name2' => :job2 }
+
+    expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*job_args).and_return(jobs)
+    Marshal.dump(["worker_name1", worker_config], client1)
+    Marshal.dump(["worker_name2", worker_config], client2)
+    subject.run_once
+    expect(Marshal.load(client1)).to eq(:job1)
+    expect(Marshal.load(client2)).to eq(:job2)
+  end
+
+  it 'will fetch and use extra jobs' do
+    client = Socket.unix(subject.listen_socket.local_address.unix_path)
+    subject.run_once
+
+    allow(subject).to receive(:pending_jobs_owner).and_return('work_queue:X')
+    job_args = [["worker_name1"], "queue_name", 1, 2, extra_jobs: 4, extra_jobs_owner: 'work_queue:X']
+    job2 = Delayed::Job.new(:tag => 'tag')
+    job2.create_and_lock!('work_queue:X')
+    job3 = Delayed::Job.new(:tag => 'tag')
+    job3.create_and_lock!('work_queue:X')
+    jobs = { 'worker_name1' => :job1, 'work_queue:X' => [job2, job3] }
+
+    expect(Delayed::Job).to receive(:get_and_lock_next_available).once.with(*job_args).and_return(jobs)
+    Marshal.dump(["worker_name1", worker_config], client)
+    subject.run_once
+    expect(Marshal.load(client)).to eq(:job1)
+    Marshal.dump(["worker_name1", worker_config], client)
+    subject.run_once
+    expect(Marshal.load(client)).to eq(job2)
+  end
+
+  it "doesn't respond immediately if there are no jobs available" do
+    client = Socket.unix(subject.listen_socket.local_address.unix_path)
+    subject.run_once
+
+    expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*job_args).and_return({}).ordered
+    Marshal.dump(args, client)
+    subject.run_once
+    expect(client).not_to be_ready
+
+    # next time around, return the result
+    expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*job_args).and_return('worker_name' => job).ordered
+    allow(Delayed::Settings).to receive(:sleep_delay).and_return(0)
+    allow(Delayed::Settings).to receive(:sleep_delay_stagger).and_return(0)
+    subject.run_once
+    expect(client).to be_ready
+    expect(Marshal.load(client)).to eq(job)
+  end
+
+  it 'drops the client on i/o error' do
+    client = Socket.unix(subject.listen_socket.local_address.unix_path)
+    subject.run_once
+
+    Marshal.dump(args, client)
+
+    expect(Marshal).to receive(:load).and_raise(IOError.new("socket went away"))
+    expect { subject.run_once }.to change(subject, :connected_clients).by(-1)
+  end
+
+  it 'drops the client on timeout' do
+    client = Socket.unix(subject.listen_socket.local_address.unix_path)
+    subject.run_once
+
+    Marshal.dump(args, client)
+
+    server_client_socket = subject.clients.keys.first
+
+    expect(server_client_socket).to receive(:wait_readable).and_return(false)
+    expect { subject.run_once }.to change(subject, :connected_clients).by(-1)
+  end
+
+  it 'tracks when clients are idle' do
+    expect(subject.all_workers_idle?).to be(true)
+
+    client = Socket.unix(subject.listen_socket.local_address.unix_path)
+    subject.run_once
+    expect(subject.all_workers_idle?).to be(true)
+
+    expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*job_args).and_return('worker_name' => job)
+    Marshal.dump(args, client)
+    subject.run_once
+    expect(subject.all_workers_idle?).to be(false)
+
+    expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*job_args).and_return({})
+    Marshal.dump(args, client)
+    subject.run_once
+    expect(subject.all_workers_idle?).to be(true)
+  end
+
+  it 'triggers the lifecycle event around the pop' do
+    called = false
+    client = Socket.unix(subject.listen_socket.local_address.unix_path)
+    subject.run_once
+
+    Delayed::Worker.lifecycle.around(:work_queue_pop) do |queue, &cb|
+      expect(subject.all_workers_idle?).to be(true)
+      expect(queue).to eq(subject)
+      expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*job_args).and_return('worker_name' => job)
+      called = true
+      res = cb.call(queue)
+      expect(subject.all_workers_idle?).to be(false)
+      res
+    end
+
+    Marshal.dump(args, client)
+    subject.run_once
+
+    expect(Marshal.load(client)).to eq(job)
+    expect(called).to eq(true)
+  end
+end