inst-jobs 0.13.2 → 0.13.3

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: 9b0d5ed365c051a94b8b26f7fc43b66ddf472221
-   data.tar.gz: 53d13170b1a9f85ea4e9064932e674b6059063ba
+   metadata.gz: 87a4b51b71b02dd1ab9e4817dffbc6b4780c4224
+   data.tar.gz: e6a9a26a33f424950c37c83238085233be136e72
  SHA512:
-   metadata.gz: d911c43ce7aadb8d1373c96d82829ea2ecfc4ffecb1b8ea1243b713cbcb30aa57b6721fa77eea5446b72bb741db128dbf4ba7da9c4dcf57b6e34fa87261483d7
-   data.tar.gz: 02ba0072c4d9c1335fcf8d70e91368a47e34b9ae2d2748a2749def1f0f0e4d4ce76114ad0baf7da5bdf23e09abab0079466f1430076dec959adb096fd72698c8
+   metadata.gz: 21bb8f405b710a970a8e70570934d6344ebdb25d91b811542f6d8cc4fc1008d8550570960497a1f15d8a27c110a61e2dba1a819cf1c7982980f5269cb49bedfe
+   data.tar.gz: 2eff9e07b7b083bee75d4536d50434f92b12f860c6390160ab90ff1737164fd83917b4d59fe64495278be2f75affe3cf63684d64b3a3930dfeb15a7d9a96feee
@@ -202,8 +202,8 @@ module Delayed
  queue = Delayed::Settings.queue,
  min_priority = nil,
  max_priority = nil,
- extra_jobs: 0,
- extra_jobs_owner: nil)
+ prefetch: 0,
+ prefetch_owner: nil)

  check_queue(queue)
  check_priorities(min_priority, max_priority)
@@ -218,7 +218,7 @@ module Delayed
  effective_worker_names = Array(worker_names)

  target_jobs = all_available(queue, min_priority, max_priority).
-   limit(effective_worker_names.length + extra_jobs).
+   limit(effective_worker_names.length + prefetch).
    lock
  jobs_with_row_number = all.from(target_jobs).
    select("id, ROW_NUMBER() OVER () AS row_number")
@@ -226,8 +226,8 @@ module Delayed
  effective_worker_names.each_with_index do |worker, i|
    updates << "WHEN #{i + 1} THEN #{connection.quote(worker)} "
  end
- if extra_jobs_owner
-   updates << "ELSE #{connection.quote(extra_jobs_owner)} "
+ if prefetch_owner
+   updates << "ELSE #{connection.quote(prefetch_owner)} "
  end
  updates << "END, locked_at = #{connection.quote(db_time_now)}"
  # joins and returning in an update! just bypass AR
@@ -238,8 +238,8 @@ module Delayed
  # all of the jobs we tried to lock had already been locked by someone else
  if worker_names.is_a?(Array)
    result = jobs.index_by(&:locked_by)
-   # all of the extras can come back as an array
-   result[extra_jobs_owner] = jobs.select { |j| j.locked_by == extra_jobs_owner } if extra_jobs_owner
+   # all of the prefetched jobs can come back as an array
+   result[prefetch_owner] = jobs.select { |j| j.locked_by == prefetch_owner } if prefetch_owner
    return result
  else
    return jobs.first
@@ -326,6 +326,11 @@ module Delayed
    end
  end

+ def self.unlock_orphaned_prefetched_jobs
+   horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
+   where("locked_by LIKE 'prefetch:%' AND locked_at<?", horizon).update_all(locked_at: nil, locked_by: nil)
+ end
+
  def self.unlock(jobs)
    unlocked = where(id: jobs).update_all(locked_at: nil, locked_by: nil)
    jobs.each(&:unlock)
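
The hunks above rename the backend's extra_jobs/extra_jobs_owner keyword arguments to prefetch/prefetch_owner and add an orphan-cleanup helper. A minimal sketch of the new calling convention follows; the worker names and owner string are illustrative, not taken from the gem's code or tests:

    # Lock one job for each named worker, plus up to 3 prefetched jobs held
    # under the prefetch owner; the prefetched jobs come back as an array
    # keyed by that owner string.
    locked = Delayed::Job.get_and_lock_next_available(
      ['worker1', 'worker2'],
      Delayed::Settings.queue,
      nil,                                 # min_priority
      nil,                                 # max_priority
      prefetch: 3,
      prefetch_owner: 'prefetch:example-host'
    )
    locked['worker1']                # => a locked Delayed::Job, when one was available
    locked['prefetch:example-host']  # => Array of prefetched jobs, possibly empty
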
@@ -118,9 +118,9 @@ module Delayed
    Time.now.utc
  end

- def unlock_orphaned_pending_jobs
-   horizon = db_time_now - Settings.parent_process[:pending_jobs_idle_timeout] * 4
-   orphaned_jobs = running_jobs.select { |job| job.locked_by.start_with?('work_queue:') && job.locked_at < horizon }
+ def unlock_orphaned_prefetched_jobs
+   horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
+   orphaned_jobs = running_jobs.select { |job| job.locked_by.start_with?('prefetch:') && job.locked_at < horizon }
    return 0 if orphaned_jobs.empty?
    unlock(orphaned_jobs)
  end
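
In both backends the orphan horizon is four times the prefetched_jobs_timeout setting, so with the renamed 30.0-second default (see the settings hunk further down) a job still locked by a 'prefetch:' owner after 30.0 * 4 = 120 seconds is treated as orphaned and unlocked.
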
@@ -222,8 +222,8 @@ class Job
  queue = Delayed::Settings.queue,
  min_priority = Delayed::MIN_PRIORITY,
  max_priority = Delayed::MAX_PRIORITY,
- extra_jobs: nil,
- extra_jobs_owner: nil)
+ prefetch: nil,
+ prefetch_owner: nil)

  check_queue(queue)
  check_priorities(min_priority, max_priority)
@@ -34,7 +34,7 @@ module Delayed

  PARENT_PROCESS_DEFAULTS = {
    server_socket_timeout: 10.0,
-   pending_jobs_idle_timeout: 30.0,
+   prefetched_jobs_timeout: 30.0,

    client_connect_timeout: 2.0,

@@ -1,3 +1,3 @@
  module Delayed
-   VERSION = "0.13.2"
+   VERSION = "0.13.3"
  end
@@ -18,11 +18,17 @@ class ParentProcess
  # to wait for anything to be available on the 'wire', this is a valid
  # assumption because we control the server and it's a Unix domain socket,
  # not TCP.
- return reset_connection if socket.eof? # Other end closed gracefully, so should we
+ if socket.eof?
+   # Other end closed gracefully, so should we
+   logger.debug("server closed connection")
+   return reset_connection
+ end
+
  Marshal.load(socket).tap do |response|
    unless response.nil? || (response.is_a?(Delayed::Job) && response.locked_by == worker_name)
      raise(ProtocolError, "response is not a locked job: #{response.inspect}")
    end
+   logger.debug("Received job #{response.id}")
  end
rescue SystemCallError, IOError => ex
  logger.error("Work queue connection lost, reestablishing on next poll. (#{ex})")
@@ -11,7 +11,7 @@ class ParentProcess
  @parent_pid = parent_pid
  @clients = {}
  @waiting_clients = {}
- @pending_work = {}
+ @prefetched_jobs = {}

  @config = config
  @client_timeout = config['server_socket_timeout'] || 10.0 # left for backwards compat
@@ -30,12 +30,12 @@ class ParentProcess
  def run
    logger.debug "Starting work queue process"

-   last_orphaned_pending_jobs_purge = Job.db_time_now - rand(15 * 60)
+   last_orphaned_prefetched_jobs_purge = Job.db_time_now - rand(15 * 60)
    while !exit?
      run_once
-     if last_orphaned_pending_jobs_purge + 15 * 60 < Job.db_time_now
-       Job.unlock_orphaned_pending_jobs
-       last_orphaned_pending_jobs_purge = Job.db_time_now
+     if last_orphaned_prefetched_jobs_purge + 15 * 60 < Job.db_time_now
+       Job.unlock_orphaned_prefetched_jobs
+       last_orphaned_prefetched_jobs_purge = Job.db_time_now
      end
    end

@@ -43,7 +43,7 @@ class ParentProcess
    logger.debug "WorkQueue Server died: #{e.inspect}", :error
    raise
  ensure
-   purge_all_pending_work
+   unlock_all_prefetched_jobs
  end

  def run_once
@@ -54,7 +54,7 @@ class ParentProcess
      readable.each { |s| handle_read(s) }
    end
    check_for_work
-   purge_extra_pending_work
+   unlock_timed_out_prefetched_jobs
  end

  def handle_read(socket)
@@ -82,9 +82,12 @@ class ParentProcess
  # request and then leave the socket open. Doing so would leave us hanging
  # in Marshal.load forever. This is only a reasonable assumption because we
  # control the client.
- return drop_socket(socket) if socket.eof?
- worker_name, worker_config = Marshal.load(socket)
  client = @clients[socket]
+ if socket.eof?
+   logger.debug("Client #{client.name} closed connection")
+   return drop_socket(socket)
+ end
+ worker_name, worker_config = Marshal.load(socket)
  client.name = worker_name
  client.working = false
  (@waiting_clients[worker_config] ||= []) << client
@@ -95,17 +98,18 @@ class ParentProcess

  def check_for_work
    @waiting_clients.each do |(worker_config, workers)|
-     pending_work = @pending_work[worker_config] ||= []
-     logger.debug("I have #{pending_work.length} jobs for #{workers.length} waiting workers")
-     while !pending_work.empty? && !workers.empty?
-       job = pending_work.shift
+     prefetched_jobs = @prefetched_jobs[worker_config] ||= []
+     logger.debug("I have #{prefetched_jobs.length} jobs for #{workers.length} waiting workers")
+     while !prefetched_jobs.empty? && !workers.empty?
+       job = prefetched_jobs.shift
        client = workers.shift
        # couldn't re-lock it for some reason
-       unless job.transfer_lock!(from: pending_jobs_owner, to: client.name)
+       unless job.transfer_lock!(from: prefetch_owner, to: client.name)
          workers.unshift(client)
          next
        end
        begin
+         logger.debug("Sending prefetched job #{job.id} to #{client.name}")
          client_timeout { Marshal.dump(job, client.socket) }
        rescue SystemCallError, IOError, Timeout::Error => ex
          logger.error("Failed to send pre-fetched job to #{client.name}: #{ex.inspect}")
@@ -124,18 +128,19 @@ class ParentProcess
      worker_config[:queue],
      worker_config[:min_priority],
      worker_config[:max_priority],
-     extra_jobs: Settings.fetch_batch_size * (worker_config[:workers] || 1) - recipients.length,
-     extra_jobs_owner: pending_jobs_owner)
+     prefetch: Settings.fetch_batch_size * (worker_config[:workers] || 1) - recipients.length,
+     prefetch_owner: prefetch_owner)
    response.each do |(worker_name, job)|
-     if worker_name == pending_jobs_owner
+     if worker_name == prefetch_owner
        # it's actually an array of all the extra jobs
-       pending_work.concat(job)
+       prefetched_jobs.concat(job)
        next
      end
      client = workers.find { |worker| worker.name == worker_name }
      client.working = true
      @waiting_clients[worker_config].delete(client)
      begin
+       logger.debug("Sending job #{job.id} to #{client.name}")
        client_timeout { Marshal.dump(job, client.socket) }
      rescue SystemCallError, IOError, Timeout::Error => ex
        logger.error("Failed to send job to #{client.name}: #{ex.inspect}")
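
The prefetch count requested here is Settings.fetch_batch_size * (worker_config[:workers] || 1) - recipients.length, and the backend locks at most worker-count-plus-prefetch rows, so each query grabs up to fetch_batch_size jobs per configured worker. As an illustrative calculation (the batch size of 4 and worker count of 2 are hypothetical, not the gem's defaults): with one worker currently waiting, the server asks for 4 * 2 - 1 = 7 prefetched jobs on top of the one job handed straight to that worker, 8 locked rows in total.
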
@@ -147,22 +152,22 @@ class ParentProcess
    end
  end

- def purge_extra_pending_work
-   @pending_work.each do |(worker_config, jobs)|
+ def unlock_timed_out_prefetched_jobs
+   @prefetched_jobs.each do |(worker_config, jobs)|
      next if jobs.empty?
-     if jobs.first.locked_at < Time.now.utc - Settings.parent_process[:pending_jobs_idle_timeout]
+     if jobs.first.locked_at < Time.now.utc - Settings.parent_process[:prefetched_jobs_timeout]
        Delayed::Job.unlock(jobs)
-       @pending_work[worker_config] = []
+       @prefetched_jobs[worker_config] = []
      end
    end
  end

- def purge_all_pending_work
-   @pending_work.each do |(_worker_config, jobs)|
+ def unlock_all_prefetched_jobs
+   @prefetched_jobs.each do |(_worker_config, jobs)|
      next if jobs.empty?
      Delayed::Job.unlock(jobs)
    end
-   @pending_work = {}
+   @prefetched_jobs = {}
  end

  def drop_socket(socket)
@@ -171,15 +176,19 @@ class ParentProcess
      socket.close
    rescue IOError
    end
+   client = @clients[socket]
    @clients.delete(socket)
+   @waiting_clients.each do |(_config, workers)|
+     workers.delete(client)
+   end
  end

  def exit?
    parent_exited?
  end

- def pending_jobs_owner
-   "work_queue:#{Socket.gethostname rescue 'X'}"
+ def prefetch_owner
+   "prefetch:#{Socket.gethostname rescue 'X'}"
  end

  def parent_exited?
@@ -244,20 +244,20 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
    end
  end

- it "unlocks orphaned jobs from work queue" do
+ it "unlocks orphaned prefetched_jobs" do
    job1 = Delayed::Job.new(:tag => 'tag')
    job2 = Delayed::Job.new(:tag => 'tag')

-   job1.create_and_lock!("work_queue:a")
+   job1.create_and_lock!("prefetch:a")
    job1.locked_at = Delayed::Job.db_time_now - 15 * 60
    job1.save!
-   job2.create_and_lock!("work_queue:a")
+   job2.create_and_lock!("prefetch:a")

-   expect(Delayed::Job.unlock_orphaned_pending_jobs).to eq 1
-   expect(Delayed::Job.unlock_orphaned_pending_jobs).to eq 0
+   expect(Delayed::Job.unlock_orphaned_prefetched_jobs).to eq 1
+   expect(Delayed::Job.unlock_orphaned_prefetched_jobs).to eq 0

    expect(Delayed::Job.find(job1.id).locked_by).to be_nil
-   expect(Delayed::Job.find(job2.id).locked_by).to eq 'work_queue:a'
+   expect(Delayed::Job.find(job2.id).locked_by).to eq 'prefetch:a'
  end

  it "allows fetching multiple jobs at once" do
@@ -272,8 +272,8 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
  it "allows fetching extra jobs" do
    jobs = 5.times.map { Delayed::Job.create :payload_object => SimpleJob.new }
    locked_jobs = Delayed::Job.get_and_lock_next_available(['worker1'],
-     extra_jobs: 2,
-     extra_jobs_owner: 'work_queue')
+     prefetch: 2,
+     prefetch_owner: 'work_queue')
    expect(locked_jobs.length).to eq 2
    expect(locked_jobs.keys).to eq ['worker1', 'work_queue']
    expect(locked_jobs['worker1']).to eq jobs[0]
@@ -1,13 +1,25 @@
  require 'spec_helper'

  RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
+   class JobClass
+     attr_reader :id
+
+     def initialize
+       @id = rand
+     end
+
+     def ==(other)
+       self.id == other.id
+     end
+   end
+
    let(:parent) { Delayed::WorkQueue::ParentProcess.new }
    let(:subject) { described_class.new(listen_socket) }
    let(:listen_socket) { Socket.unix_server_socket(parent.server_address) }
-   let(:job) { :a_job }
+   let(:job) { JobClass.new }
    let(:worker_config) { { queue: "queue_name", min_priority: 1, max_priority: 2 } }
    let(:args) { ["worker_name", worker_config] }
-   let(:job_args) { [["worker_name"], "queue_name", 1, 2, hash_including(extra_jobs: 4)] }
+   let(:job_args) { [["worker_name"], "queue_name", 1, 2, hash_including(prefetch: 4)] }

    before :all do
      Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
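
A plausible reading of why the spec swaps the bare :a_job symbol for JobClass (this is inferred from the hunks, not stated anywhere in the release): the server now logs job.id when handing out work, so a stand-in job must respond to #id, and it needs #== so the object Marshal-ed back from the socket compares equal to the original. The JobClass above satisfies both:

    job  = JobClass.new
    copy = Marshal.load(Marshal.dump(job))
    copy == job   # => true, because #== compares the random ids
    job.id        # => the id the server's debug logging can print
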
@@ -47,33 +59,35 @@ RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
    client2 = Socket.unix(subject.listen_socket.local_address.unix_path)
    subject.run_once

-   job_args = [["worker_name1", "worker_name2"], "queue_name", 1, 2, hash_including(extra_jobs: 3)]
-   jobs = { 'worker_name1' => :job1, 'worker_name2' => :job2 }
+   job1 = JobClass.new
+   job2 = JobClass.new
+   job_args = [["worker_name1", "worker_name2"], "queue_name", 1, 2, hash_including(prefetch: 3)]
+   jobs = { 'worker_name1' => job1, 'worker_name2' => job2 }

    expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*job_args).and_return(jobs)
    Marshal.dump(["worker_name1", worker_config], client1)
    Marshal.dump(["worker_name2", worker_config], client2)
    subject.run_once
-   expect(Marshal.load(client1)).to eq(:job1)
-   expect(Marshal.load(client2)).to eq(:job2)
+   expect(Marshal.load(client1)).to eq(job1)
+   expect(Marshal.load(client2)).to eq(job2)
  end

- it 'will fetch and use extra jobs' do
+ it 'will prefetch and use jobs' do
    client = Socket.unix(subject.listen_socket.local_address.unix_path)
    subject.run_once

-   allow(subject).to receive(:pending_jobs_owner).and_return('work_queue:X')
-   job_args = [["worker_name1"], "queue_name", 1, 2, extra_jobs: 4, extra_jobs_owner: 'work_queue:X']
+   allow(subject).to receive(:prefetch_owner).and_return('work_queue:X')
+   job_args = [["worker_name1"], "queue_name", 1, 2, prefetch: 4, prefetch_owner: 'work_queue:X']
    job2 = Delayed::Job.new(:tag => 'tag')
    job2.create_and_lock!('work_queue:X')
    job3 = Delayed::Job.new(:tag => 'tag')
    job3.create_and_lock!('work_queue:X')
-   jobs = { 'worker_name1' => :job1, 'work_queue:X' => [job2, job3]}
+   jobs = { 'worker_name1' => job, 'work_queue:X' => [job2, job3]}

    expect(Delayed::Job).to receive(:get_and_lock_next_available).once.with(*job_args).and_return(jobs)
    Marshal.dump(["worker_name1", worker_config], client)
    subject.run_once
-   expect(Marshal.load(client)).to eq(:job1)
+   expect(Marshal.load(client)).to eq(job)
    Marshal.dump(["worker_name1", worker_config], client)
    subject.run_once
    expect(Marshal.load(client)).to eq(job2)
@@ -112,11 +126,35 @@ RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
    subject.run_once

    Marshal.dump(args, client)
+   # make sure the server knows the client is waiting for a job
+   subject.run_once
+
+   client.close
+   expect { subject.run_once }.to change(subject, :connected_clients).by(-1)
+   expect(subject.instance_variable_get(:@waiting_clients).first.last).to eq []
+ end
+
+ it 'drops the client when a write fails' do
+   client = Socket.unix(subject.listen_socket.local_address.unix_path)
+   subject.run_once
+
+   Marshal.dump(args, client)
+   subject.run_once
+
+   client.close

    server_client_socket = subject.clients.keys.first
+   # don't let the server see the close and process it there; we want to check a failure later
+   expect(subject).to receive(:handle_request).with(server_client_socket)

-   expect(server_client_socket).to receive(:eof?).and_return(true)
-   expect { subject.run_once }.to change(subject, :connected_clients).by(-1)
+   expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*job_args).and_return('worker_name' => job)
+   # the job gets unlocked
+   expect(Delayed::Job).to receive(:unlock).with([job])
+   subject.run_once
+
+   # and the server removes the client from both of its internal state arrays
+   expect(subject.connected_clients).to eq 0
+   expect(subject.instance_variable_get(:@waiting_clients).first.last).to eq []
  end

  it 'tracks when clients are idle' do
@@ -41,7 +41,7 @@ Delayed::Backend::Redis::Job.redis.select ENV['TEST_ENV_NUMBER']

  connection_config = {
    adapter: :postgresql,
-   host: ENV['TEST_DB_HOST'],
+   host: ENV['TEST_DB_HOST'].presence,
    encoding: 'utf8',
    username: ENV['TEST_DB_USERNAME'],
    database: ENV['TEST_DB_DATABASE'],
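
String#presence comes from ActiveSupport and converts blank values to nil, so a TEST_DB_HOST that is exported but empty no longer forces an empty host string into the connection config:

    ENV['TEST_DB_HOST']            # => "" when the variable is set but empty
    ENV['TEST_DB_HOST'].presence   # => nil, so the adapter falls back to its default host
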
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: inst-jobs
  version: !ruby/object:Gem::Version
-   version: 0.13.2
+   version: 0.13.3
  platform: ruby
  authors:
  - Tobias Luetke
@@ -45,14 +45,14 @@ dependencies:
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 3.3.2
+         version: '3.4'
    type: :runtime
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 3.3.2
+         version: '3.4'
  - !ruby/object:Gem::Dependency
    name: redis
    requirement: !ruby/object:Gem::Requirement