inst-jobs 2.4.9 → 3.0.2
- checksums.yaml +4 -4
- data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
- data/db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb +137 -0
- data/lib/delayed/backend/active_record.rb +5 -10
- data/lib/delayed/backend/base.rb +3 -1
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/parent_process/server.rb +40 -13
- data/lib/delayed/worker/process_helper.rb +3 -3
- data/spec/active_record_job_spec.rb +3 -3
- data/spec/delayed/work_queue/parent_process/server_spec.rb +72 -0
- data/spec/shared/shared_backend.rb +17 -0
- data/spec/spec_helper.rb +6 -5
- metadata +44 -11
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 55850c386085b90bbb9df7ff954d40ed7f2035268860ec8b47dfc11f11ec7e51
+  data.tar.gz: 53065a1f09e8cd30271bc9b57ab58d3bcfff8cf1b9590568379427093de9cbe9
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8f7d579cddb80890c6158a1384f6828ae91272a864308528ad84e6dc47b3e4769062f91de7ebf9641f4b766c21c571a419385ee57bec9b6e5531f39be30a50a8
+  data.tar.gz: 887648362dec636b3e1f6938e66dd25b97f42a3e606c5d8466768d5b2ed5137ea50b67c68ce9ec817df86f117ef0660f6ddd3456c86e4871965f89704cc161f3

data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb
ADDED

@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+class UpdateConflictingSingletonFunctionToUseIndex < ActiveRecord::Migration[5.2]
+  def up
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION delayed_jobs_before_unlock_delete_conflicting_singletons_row_fn () RETURNS trigger AS $$
+      BEGIN
+        DELETE FROM delayed_jobs WHERE id<>OLD.id AND singleton=OLD.singleton AND locked_by IS NULL;
+        RETURN NEW;
+      END;
+      $$ LANGUAGE plpgsql;
+    SQL
+  end
+
+  def down
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION delayed_jobs_before_unlock_delete_conflicting_singletons_row_fn () RETURNS trigger AS $$
+      BEGIN
+        IF EXISTS (SELECT 1 FROM delayed_jobs j2 WHERE j2.singleton=OLD.singleton) THEN
+          DELETE FROM delayed_jobs WHERE id<>OLD.id AND singleton=OLD.singleton;
+        END IF;
+        RETURN NEW;
+      END;
+      $$ LANGUAGE plpgsql;
+    SQL
+  end
+end
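
The first new migration tightens the before-unlock trigger: when a singleton job is unlocked, only other *unlocked* rows for the same singleton are deleted, so an already-locked duplicate is left alone. If you apply the gem's migrations outside of a Rails boot, a sketch along these lines works (the connection settings and migration path are assumptions about the host application, not part of the gem):

  # Minimal sketch: run the gem's migrations from a plain Ruby script.
  require "active_record"

  ActiveRecord::Base.establish_connection(
    adapter: "postgresql",
    database: ENV.fetch("DATABASE_NAME", "delayed_jobs_test")
  )
  # Same call the gem's own spec_helper uses (see the spec/spec_helper.rb hunk below).
  ActiveRecord::MigrationContext.new("db/migrate", ActiveRecord::SchemaMigration).migrate
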
data/db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb
ADDED

@@ -0,0 +1,137 @@
+# frozen_string_literal: true
+
+class UpdateAfterDeleteTriggerForSingletonIndex < ActiveRecord::Migration[6.0]
+  def up
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION delayed_jobs_after_delete_row_tr_fn () RETURNS trigger AS $$
+      DECLARE
+        running_count integer;
+        should_lock boolean;
+        should_be_precise boolean;
+        update_query varchar;
+        skip_locked varchar;
+      BEGIN
+        IF OLD.strand IS NOT NULL THEN
+          should_lock := true;
+          should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
+
+          IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) subquery_for_count);
+            should_lock := running_count < OLD.max_concurrent;
+          END IF;
+
+          IF should_lock THEN
+            PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
+          END IF;
+
+          -- note that we don't really care if the row we're deleting has a singleton, or if it even
+          -- matches the row(s) we're going to update. we just need to make sure that whatever
+          -- singleton we grab isn't already running (which is a simple existence check, since
+          -- the unique indexes ensure there is at most one singleton running, and one queued)
+          update_query := 'UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.strand=$1.strand AND
+                (j2.singleton IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.singleton=j2.singleton AND j3.id<>j2.id AND (j3.locked_by IS NULL OR j3.locked_by IS NOT NULL)))
+              ORDER BY j2.strand_order_override ASC, j2.id ASC
+              LIMIT ';
+
+          IF should_be_precise THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) s);
+            IF running_count < OLD.max_concurrent THEN
+              update_query := update_query || '($1.max_concurrent - $2)';
+            ELSE
+              -- we have too many running already; just bail
+              RETURN OLD;
+            END IF;
+          ELSE
+            update_query := update_query || '1';
+
+            -- n-strands don't require precise ordering; we can make this query more performant
+            IF OLD.max_concurrent > 1 THEN
+              skip_locked := ' SKIP LOCKED';
+            END IF;
+          END IF;
+
+          update_query := update_query || ' FOR UPDATE' || COALESCE(skip_locked, '') || ')';
+          EXECUTE update_query USING OLD, running_count;
+        ELSIF OLD.singleton IS NOT NULL THEN
+          UPDATE delayed_jobs SET next_in_strand = 't' WHERE singleton=OLD.singleton AND next_in_strand=false AND locked_by IS NULL;
+        END IF;
+        RETURN OLD;
+      END;
+      $$ LANGUAGE plpgsql;
+    SQL
+  end
+
+  def down
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION delayed_jobs_after_delete_row_tr_fn () RETURNS trigger AS $$
+      DECLARE
+        running_count integer;
+        should_lock boolean;
+        should_be_precise boolean;
+        update_query varchar;
+        skip_locked varchar;
+      BEGIN
+        IF OLD.strand IS NOT NULL THEN
+          should_lock := true;
+          should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
+
+          IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) subquery_for_count);
+            should_lock := running_count < OLD.max_concurrent;
+          END IF;
+
+          IF should_lock THEN
+            PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
+          END IF;
+
+          -- note that we don't really care if the row we're deleting has a singleton, or if it even
+          -- matches the row(s) we're going to update. we just need to make sure that whatever
+          -- singleton we grab isn't already running (which is a simple existence check, since
+          -- the unique indexes ensure there is at most one singleton running, and one queued)
+          update_query := 'UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.strand=$1.strand AND
+                (j2.singleton IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.singleton=j2.singleton AND j3.id<>j2.id))
+              ORDER BY j2.strand_order_override ASC, j2.id ASC
+              LIMIT ';
+
+          IF should_be_precise THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) s);
+            IF running_count < OLD.max_concurrent THEN
+              update_query := update_query || '($1.max_concurrent - $2)';
+            ELSE
+              -- we have too many running already; just bail
+              RETURN OLD;
+            END IF;
+          ELSE
+            update_query := update_query || '1';
+
+            -- n-strands don't require precise ordering; we can make this query more performant
+            IF OLD.max_concurrent > 1 THEN
+              skip_locked := ' SKIP LOCKED';
+            END IF;
+          END IF;
+
+          update_query := update_query || ' FOR UPDATE' || COALESCE(skip_locked, '') || ')';
+          EXECUTE update_query USING OLD, running_count;
+        ELSIF OLD.singleton IS NOT NULL THEN
+          UPDATE delayed_jobs SET next_in_strand = 't' WHERE singleton=OLD.singleton AND next_in_strand=false;
+        END IF;
+        RETURN OLD;
+      END;
+      $$ LANGUAGE plpgsql;
+    SQL
+  end
+end
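
The second new migration rewrites the after-delete trigger that keeps strands and singletons moving. Besides the strand bookkeeping, the singleton branch now only flips next_in_strand on a queued row that is not locked, matching the comment's point that the unique indexes allow at most one running and one queued row per singleton. A quick, purely illustrative way to confirm the function landed after migrating (pg_proc is a standard PostgreSQL catalog; this snippet is not gem code):

  function_count = ActiveRecord::Base.connection.select_value(<<~SQL)
    SELECT COUNT(*) FROM pg_proc WHERE proname = 'delayed_jobs_after_delete_row_tr_fn'
  SQL
  raise "trigger function missing" if function_count.to_i.zero?
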
data/lib/delayed/backend/active_record.rb
CHANGED

@@ -62,14 +62,9 @@ module Delayed
           _write_attribute(column, current_time) unless attribute_present?(column)
         end

-
-
-
-          values = attributes_with_values(attribute_names)
-        else
-          attribute_names = partial_writes? ? keys_for_partial_write : self.attribute_names
-          values = attributes_with_values_for_create(attribute_names)
-        end
+        attribute_names = attribute_names_for_partial_writes
+        attribute_names = attributes_for_create(attribute_names)
+        values = attributes_with_values(attribute_names)

         im = self.class.arel_table.compile_insert(self.class.send(:_substitute_values, values))

@@ -314,7 +309,7 @@ module Delayed
         if Settings.silence_periodic_log
           ::ActiveRecord::Base.logger.silence(&block)
         else
-
+          yield
         end
       end

@@ -553,7 +548,7 @@ module Delayed

       def fail!
         attrs = attributes
-        attrs["original_job_id"] = attrs.delete("id")
+        attrs["original_job_id"] = attrs.delete("id") if Failed.columns_hash.key?("original_job_id")
         attrs["failed_at"] ||= self.class.db_time_now
         attrs.delete("next_in_strand")
         attrs.delete("max_concurrent")

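
The fail! change makes copying the job id into original_job_id conditional on the failed-jobs model actually having that column; columns_hash is ActiveRecord's standard way to introspect the live schema. A small illustration of the same check, assuming the Failed model nested under the ActiveRecord backend that the hunk above references:

  # columns_hash maps column name => column metadata for the model's table.
  failed_model = Delayed::Backend::ActiveRecord::Job::Failed
  if failed_model.columns_hash.key?("original_job_id")
    # safe to carry the original id over when moving a job to the failed table
  end
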
data/lib/delayed/backend/base.rb
CHANGED

@@ -176,7 +176,9 @@ module Delayed
         ignores = []
         loop do
           batch_scope = ignores.empty? ? jobs : jobs.where.not(id: ignores)
-
+          # if we don't reload this it's possible to keep getting the
+          # same array each loop even after the jobs have been deleted.
+          batch = batch_scope.reload.to_a
           break if batch.empty?

           batch.each do |job|

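
The comment added to base.rb points at a classic ActiveRecord pitfall: a relation memoizes its loaded records, so re-evaluating the same relation object in a loop keeps returning the stale array. A tiny illustration:

  scope = Delayed::Job.where(locked_by: "worker-1")
  scope.to_a        # runs the query and memoizes the records
  scope.to_a        # returns the memoized array, even if rows were deleted meanwhile
  scope.reload.to_a # clears the cache and re-runs the query
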
data/lib/delayed/version.rb
CHANGED

@@ -1,5 +1,5 @@
 # frozen_string_literal: true

 module Delayed
-  VERSION = "2.4.9"
+  VERSION = "3.0.2"
 end

data/lib/delayed/work_queue/parent_process/server.rb
CHANGED

@@ -1,5 +1,7 @@
 # frozen_string_literal: true

+require "activerecord-pg-extensions"
+
 module Delayed
   module WorkQueue
     class ParentProcess
@@ -179,7 +181,7 @@ module Delayed
        end

        jobs_to_send.each do |(recipient, job_to_send)|
-          @waiting_clients[worker_config].delete(
+          @waiting_clients[worker_config].delete(recipient)
          begin
            logger.debug("Sending job #{job_to_send.id} to #{recipient.name}")
            client_timeout { Marshal.dump(job_to_send, recipient.socket) }

@@ -192,29 +194,54 @@ module Delayed
          end
        end

-      def
+      def unlock_prefetched_jobs
        @prefetched_jobs.each do |(worker_config, jobs)|
          next if jobs.empty?
-          next
+          next if block_given? && !yield(jobs)

-          Delayed::Job.
+          connection = Delayed::Job.connection
+          connection.transaction do
+            # make absolutely sure we don't get hung up and leave things
+            # locked in the database
+            if connection.postgresql_version >= 9_06_00 # rubocop:disable Style/NumericLiterals
+              connection.idle_in_transaction_session_timeout = 5
+            end
+            # relatively short timeout for acquiring the lock
+            connection.statement_timeout = Settings.sleep_delay
            Delayed::Job.advisory_lock(Delayed::Job.prefetch_jobs_lock_name)
+
+            # this query might take longer, and we really want to get it
+            # done if we got the lock, but still don't want an inadvertent
+            # hang
+            connection.statement_timeout = 30
            Delayed::Job.unlock(jobs)
+            @prefetched_jobs[worker_config] = []
          end
-
+        rescue ActiveRecord::QueryCanceled
+          # ignore; we'll retry anyway
+          logger.warn("unable to unlock prefetched jobs; skipping for now")
+        rescue ActiveRecord::StatementInvalid
+          # see if we dropped the connection
+          raise if connection.active?
+
+          # otherwise just reconnect and let it retry
+          logger.warn("failed to unlock prefetched jobs - connection terminated; skipping for now")
+          Delayed::Job.clear_all_connections!
        end
      end

-      def
-
-
+      def unlock_timed_out_prefetched_jobs
+        unlock_prefetched_jobs do |jobs|
+          jobs.first.locked_at < Time.now.utc - Settings.parent_process[:prefetched_jobs_timeout]
+        end
+      end

-
-
-
-
+      def unlock_all_prefetched_jobs
+        # we try really hard; it may not have done any work if it timed out
+        10.times do
+          unlock_prefetched_jobs
+          break if @prefetched_jobs.each_value.all?(&:empty?)
        end
-        @prefetched_jobs = {}
      end

      def drop_socket(socket)

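
unlock_prefetched_jobs now wraps the advisory lock and the unlock in a transaction bounded by statement timeouts (plus an idle-in-transaction timeout on PostgreSQL 9.6+), and treats a cancelled query or a dropped connection as "skip and retry on the next pass". A condensed sketch of the same pattern, assuming the activerecord-pg-extensions timeout setters used in the hunk above; this is not the gem's exact code:

  def unlock_with_timeouts(jobs)
    connection = Delayed::Job.connection
    connection.transaction do
      connection.statement_timeout = 5   # short wait to acquire the advisory lock
      Delayed::Job.advisory_lock(Delayed::Job.prefetch_jobs_lock_name)
      connection.statement_timeout = 30  # the unlock itself may legitimately take longer
      Delayed::Job.unlock(jobs)
    end
  rescue ActiveRecord::QueryCanceled
    # a timeout fired; leave the jobs locked and let the next pass retry
  end
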
data/lib/delayed/worker/process_helper.rb
CHANGED

@@ -5,10 +5,10 @@ module Delayed
   module ProcessHelper
     STAT_LINUX = "stat --format=%%Y /proc/$WORKER_PID"
     STAT_MAC = "ps -o lstart -p $WORKER_PID"
-    STAT = RUBY_PLATFORM
+    STAT = RUBY_PLATFORM.include?("darwin") ? STAT_MAC : STAT_LINUX
     ALIVE_CHECK_LINUX = '[ -d "/proc/$WORKER_PID" ]'
     ALIVE_CHECK_MAC = "ps -p $WORKER_PID > /dev/null"
-    ALIVE_CHECK = RUBY_PLATFORM
+    ALIVE_CHECK = RUBY_PLATFORM.include?("darwin") ? ALIVE_CHECK_MAC : ALIVE_CHECK_LINUX
     SCRIPT_TEMPLATE = <<-BASH
       WORKER_PID="%<pid>d" # an example, filled from ruby when the check is created
       ORIGINAL_MTIME="%<mtime>s" # an example, filled from ruby when the check is created

@@ -29,7 +29,7 @@ module Delayed
     BASH

     def self.mtime(pid)
-      if RUBY_PLATFORM
+      if RUBY_PLATFORM.include?("darwin")
        `ps -o lstart -p #{pid}`.sub(/\n$/, "").presence
       else
         File::Stat.new("/proc/#{pid}").mtime.to_i.to_s rescue nil

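
The process-helper constants now pick the macOS or Linux shell command with a plain substring test; RUBY_PLATFORM is just a String describing the build target, for example:

  RUBY_PLATFORM                    # => "x86_64-linux", "arm64-darwin22", ...
  RUBY_PLATFORM.include?("darwin") # => true only on macOS workers
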
data/spec/active_record_job_spec.rb
CHANGED

@@ -219,14 +219,14 @@ describe "Delayed::Backed::ActiveRecord::Job" do
   end

   it "gets process ids from locked_by" do
-    3
+    Array.new(3) { Delayed::Job.create payload_object: SimpleJob.new }
     Delayed::Job.get_and_lock_next_available(["job42:2", "job42:9001"])
     expect(Delayed::Job.processes_locked_locally(name: "job42").sort).to eq [2, 9001]
     expect(Delayed::Job.processes_locked_locally(name: "jobnotme")).to be_empty
   end

   it "allows fetching multiple jobs at once" do
-    jobs = 3
+    jobs = Array.new(3) { Delayed::Job.create payload_object: SimpleJob.new }
     locked_jobs = Delayed::Job.get_and_lock_next_available(%w[worker1 worker2])
     expect(locked_jobs.length).to eq(2)
     expect(locked_jobs.keys).to eq(%w[worker1 worker2])

@@ -235,7 +235,7 @@ describe "Delayed::Backed::ActiveRecord::Job" do
   end

   it "allows fetching extra jobs" do
-    jobs = 5
+    jobs = Array.new(5) { Delayed::Job.create payload_object: SimpleJob.new }
     locked_jobs = Delayed::Job.get_and_lock_next_available(["worker1"],
                                                            prefetch: 2,
                                                            prefetch_owner: "work_queue")

data/spec/delayed/work_queue/parent_process/server_spec.rb
CHANGED

@@ -24,6 +24,10 @@ RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
   let(:args) { ["worker_name", worker_config] }
   let(:job_args) { [["worker_name"], "queue_name", 1, 2, hash_including(prefetch: 4)] }

+  before do
+    Delayed::Worker.lifecycle.reset!
+  end
+
   before :all do
     Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
     Delayed::Settings.parent_process = {

@@ -37,6 +41,7 @@ RSpec.describe Delayed::WorkQueue::ParentProcess::Server do

   after do
     File.unlink("/tmp/inst-jobs-test.sock") if File.exist?("/tmp/inst-jobs-test.sock")
+    Delayed::Worker.lifecycle.reset!
   end

   it "accepts new clients" do

@@ -99,6 +104,53 @@ RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
     expect(Marshal.load(client)).to eq(job2)
   end

+  context "prefetched job unlocking" do
+    let(:job_args) do
+      [["worker_name1"], "queue_name", 1, 2,
+       { prefetch: 4, prefetch_owner: "prefetch:work_queue:X", forced_latency: 6.0 }]
+    end
+    let(:job2) { Delayed::Job.new(tag: "tag").tap { |j| j.create_and_lock!("prefetch:work_queue:X") } }
+    let(:job3) { Delayed::Job.new(tag: "tag").tap { |j| j.create_and_lock!("prefetch:work_queue:X") } }
+
+    before do
+      client = Socket.unix(subject.listen_socket.local_address.unix_path)
+      subject.run_once
+
+      jobs = { "worker_name1" => job, "prefetch:work_queue:X" => [job2, job3] }
+      allow(subject).to receive(:prefetch_owner).and_return("prefetch:work_queue:X")
+      allow(Delayed::Job).to receive(:get_and_lock_next_available).once.with(*job_args).and_return(jobs)
+      Marshal.dump(["worker_name1", worker_config], client)
+      subject.run_once
+    end
+
+    it "doesn't unlock anything if nothing is timed out" do
+      expect(Delayed::Job).not_to receive(:advisory_lock)
+      expect(Delayed::Job).not_to receive(:unlock)
+      subject.unlock_timed_out_prefetched_jobs
+    end
+
+    it "unlocks timed out prefetched jobs" do
+      allow(Delayed::Settings).to receive(:parent_process).and_return(prefetched_jobs_timeout: -1)
+      expect(Delayed::Job).to receive(:unlock).with([job2, job3])
+      subject.unlock_timed_out_prefetched_jobs
+      expect(subject.instance_variable_get(:@prefetched_jobs).values.sum(&:length)).to eq 0
+    end
+
+    it "fails gracefully if the lock times out" do
+      allow(Delayed::Settings).to receive(:parent_process).and_return(prefetched_jobs_timeout: -1)
+      expect(Delayed::Job).not_to receive(:unlock)
+      expect(Delayed::Job).to receive(:advisory_lock).and_raise(ActiveRecord::QueryCanceled)
+      subject.unlock_timed_out_prefetched_jobs
+      expect(subject.instance_variable_get(:@prefetched_jobs).values.sum(&:length)).to eq 2
+    end
+
+    it "unlocks all jobs" do
+      expect(Delayed::Job).to receive(:unlock).with([job2, job3])
+      subject.unlock_all_prefetched_jobs
+      expect(subject.instance_variable_get(:@prefetched_jobs).values.sum(&:length)).to eq 0
+    end
+  end
+
   it "doesn't respond immediately if there are no jobs available" do
     client = Socket.unix(subject.listen_socket.local_address.unix_path)
     subject.run_once

@@ -205,4 +257,24 @@ RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
     expect(Marshal.load(client)).to eq(job)
     expect(called).to eq(true)
   end
+
+  it "deletes the correct worker when transferring jobs" do
+    client1 = Socket.unix(subject.listen_socket.local_address.unix_path)
+    client2 = Socket.unix(subject.listen_socket.local_address.unix_path)
+    subject.run_once
+    subject.run_once
+
+    Marshal.dump(args, client1)
+    Marshal.dump(["worker_name2", worker_config], client2)
+    subject.run_once
+    subject.run_once
+
+    waiting_clients = subject.instance_variable_get(:@waiting_clients)
+    expect(waiting_clients.first.last.length).to eq 2
+
+    expect(Delayed::Job).to receive(:get_and_lock_next_available).and_return("worker_name" => job,
+                                                                             "worker_name2" => job)
+    subject.run_once
+    expect(waiting_clients.first.last).to be_empty
+  end
 end

data/spec/shared/shared_backend.rb
CHANGED

@@ -1,5 +1,7 @@
 # frozen_string_literal: true

+require "timeout"
+
 module InDelayedJobTest
   def self.check_in_job
     Delayed::Job.in_delayed_job?.should == true

@@ -960,6 +962,21 @@ shared_examples_for "a backend" do
     end
   end

+  it "removes an un-reschedulable job" do
+    change_setting(Delayed::Settings, :max_attempts, -1) do
+      job = Delayed::Job.new(tag: "tag")
+      `echo ''`
+      child_pid = $?.pid
+      job.create_and_lock!("Jobworker:#{child_pid}")
+      Timeout.timeout(1) do
+        # if this takes longer than a second it's hung
+        # in an infinite loop, which would be bad.
+        expect(Delayed::Job.unlock_orphaned_jobs(nil, "Jobworker")).to eq(1)
+      end
+      expect { Delayed::Job.find(job.id) }.to raise_error(ActiveRecord::RecordNotFound)
+    end
+  end
+
   it "unlocks orphaned jobs given a pid" do
     change_setting(Delayed::Settings, :max_attempts, 2) do
       job1 = Delayed::Job.new(tag: "tag")

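
The new "removes an un-reschedulable job" spec needs a PID that no longer belongs to a live process: backticks set $? to the Process::Status of the child that just ran, so $?.pid is a real but already-reaped PID, which makes the worker holding the lock look orphaned. In isolation:

  `true`            # run and reap a throwaway child process
  dead_pid = $?.pid # a PID that no longer refers to a running process
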
data/spec/spec_helper.rb
CHANGED

@@ -58,11 +58,7 @@ connection_config = {
 }

 def migrate(file)
-
-    ActiveRecord::MigrationContext.new(file, ActiveRecord::SchemaMigration).migrate
-  else
-    ActiveRecord::MigrationContext.new(file).migrate
-  end
+  ActiveRecord::MigrationContext.new(file, ActiveRecord::SchemaMigration).migrate
 end

 # create the test db if it does not exist, to help out wwtd

@@ -73,6 +69,11 @@ rescue ActiveRecord::StatementInvalid
   nil
 end
 ActiveRecord::Base.establish_connection(connection_config)
+
+# we need to ensure this callback is called for activerecord-pg-extensions,
+# which isn't running because we're not using Rails to setup the database
+ActiveRecord::PGExtensions::Railtie.run_initializers
+
 # TODO: reset db and migrate again, to test migrations

 migrate("db/migrate")

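
Because the specs set up ActiveRecord directly instead of booting Rails, the activerecord-pg-extensions railtie initializers (which wire in the adapter helpers used elsewhere in this diff, such as statement_timeout=) never run on their own, hence the explicit call. A trimmed-down sketch of the same bootstrap, with the connection details being assumptions:

  require "active_record"
  require "activerecord-pg-extensions"

  ActiveRecord::Base.establish_connection(adapter: "postgresql", database: "inst_jobs_test")
  # Outside of a Rails boot, run the gem's initializers by hand, as the hunk above does.
  ActiveRecord::PGExtensions::Railtie.run_initializers
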
metadata
CHANGED

@@ -1,15 +1,16 @@
 --- !ruby/object:Gem::Specification
 name: inst-jobs
 version: !ruby/object:Gem::Version
-  version:
+  version: 3.0.2
 platform: ruby
 authors:
--
--
+- Cody Cutrer
+- Ethan Vizitei
+- Jacob Burroughs
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2021-
+date: 2021-11-08 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activerecord

@@ -17,28 +18,42 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
+- !ruby/object:Gem::Dependency
+  name: activerecord-pg-extensions
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.4'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.4'
 - !ruby/object:Gem::Dependency
   name: activesupport
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
 - !ruby/object:Gem::Dependency
   name: after_transaction_commit
   requirement: !ruby/object:Gem::Requirement

@@ -93,14 +108,14 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
 - !ruby/object:Gem::Dependency
   name: appraisal
   requirement: !ruby/object:Gem::Requirement

@@ -283,6 +298,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '1.19'
+- !ruby/object:Gem::Dependency
+  name: rubocop-performance
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 1.12.0
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 1.12.0
 - !ruby/object:Gem::Dependency
   name: rubocop-rails
   requirement: !ruby/object:Gem::Requirement

@@ -397,7 +426,9 @@ dependencies:
         version: 1.4.0
 description:
 email:
--
+- cody@instructure.com
+- evizitei@instructure.com
+- jburroughs@instructure.com
 executables:
 - inst_jobs
 extensions: []

@@ -433,6 +464,8 @@ files:
 - db/migrate/20210812210128_add_singleton_column.rb
 - db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb
 - db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb
+- db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb
+- db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb
 - exe/inst_jobs
 - lib/delayed/backend/active_record.rb
 - lib/delayed/backend/base.rb