inst-jobs 2.4.10 → 3.0.3
- checksums.yaml +4 -4
- data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
- data/db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb +137 -0
- data/lib/delayed/backend/active_record.rb +5 -10
- data/lib/delayed/backend/base.rb +3 -1
- data/lib/delayed/rails_reloader_plugin.rb +30 -0
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/parent_process/server.rb +39 -12
- data/lib/delayed/worker/process_helper.rb +3 -3
- data/lib/delayed/worker.rb +3 -21
- data/spec/active_record_job_spec.rb +3 -3
- data/spec/delayed/work_queue/parent_process/server_spec.rb +47 -0
- data/spec/delayed/worker_spec.rb +6 -1
- data/spec/shared/shared_backend.rb +17 -0
- data/spec/spec_helper.rb +6 -5
- metadata +66 -32
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: fb38d77d225501a3f9ca85561266adf7f2b873d187d0ee9e820e30f773fc5846
+  data.tar.gz: 0bdbb7e6609d0c228de6906b68ff41b7a5218b89832dd9f8958ed261d7a39fd2
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 76c59051987d523e8465c98a0aa03edfa7ee6df1fc090ebeb8238f827d3f5487dccd61f92c20acf87df9b53153de03bb866d577f44eb3d0db18bd3cd9fa44f66
+  data.tar.gz: 0262e34514a1919ff9715b1707d7c99dab2ddf4f3ef0c98e06441b61709ddb2bb36c763b73cc5a5512a03e2b2a253b507818da313213a621c40879d086fa9333
data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb
ADDED
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+class UpdateConflictingSingletonFunctionToUseIndex < ActiveRecord::Migration[5.2]
+  def up
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION delayed_jobs_before_unlock_delete_conflicting_singletons_row_fn () RETURNS trigger AS $$
+      BEGIN
+        DELETE FROM delayed_jobs WHERE id<>OLD.id AND singleton=OLD.singleton AND locked_by IS NULL;
+        RETURN NEW;
+      END;
+      $$ LANGUAGE plpgsql;
+    SQL
+  end
+
+  def down
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION delayed_jobs_before_unlock_delete_conflicting_singletons_row_fn () RETURNS trigger AS $$
+      BEGIN
+        IF EXISTS (SELECT 1 FROM delayed_jobs j2 WHERE j2.singleton=OLD.singleton) THEN
+          DELETE FROM delayed_jobs WHERE id<>OLD.id AND singleton=OLD.singleton;
+        END IF;
+        RETURN NEW;
+      END;
+      $$ LANGUAGE plpgsql;
+    SQL
+  end
+end
data/db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb
ADDED
@@ -0,0 +1,137 @@
+# frozen_string_literal: true
+
+class UpdateAfterDeleteTriggerForSingletonIndex < ActiveRecord::Migration[6.0]
+  def up
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION delayed_jobs_after_delete_row_tr_fn () RETURNS trigger AS $$
+      DECLARE
+        running_count integer;
+        should_lock boolean;
+        should_be_precise boolean;
+        update_query varchar;
+        skip_locked varchar;
+      BEGIN
+        IF OLD.strand IS NOT NULL THEN
+          should_lock := true;
+          should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
+
+          IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) subquery_for_count);
+            should_lock := running_count < OLD.max_concurrent;
+          END IF;
+
+          IF should_lock THEN
+            PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
+          END IF;
+
+          -- note that we don't really care if the row we're deleting has a singleton, or if it even
+          -- matches the row(s) we're going to update. we just need to make sure that whatever
+          -- singleton we grab isn't already running (which is a simple existence check, since
+          -- the unique indexes ensure there is at most one singleton running, and one queued)
+          update_query := 'UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.strand=$1.strand AND
+                (j2.singleton IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.singleton=j2.singleton AND j3.id<>j2.id AND (j3.locked_by IS NULL OR j3.locked_by IS NOT NULL)))
+              ORDER BY j2.strand_order_override ASC, j2.id ASC
+              LIMIT ';
+
+          IF should_be_precise THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) s);
+            IF running_count < OLD.max_concurrent THEN
+              update_query := update_query || '($1.max_concurrent - $2)';
+            ELSE
+              -- we have too many running already; just bail
+              RETURN OLD;
+            END IF;
+          ELSE
+            update_query := update_query || '1';
+
+            -- n-strands don't require precise ordering; we can make this query more performant
+            IF OLD.max_concurrent > 1 THEN
+              skip_locked := ' SKIP LOCKED';
+            END IF;
+          END IF;
+
+          update_query := update_query || ' FOR UPDATE' || COALESCE(skip_locked, '') || ')';
+          EXECUTE update_query USING OLD, running_count;
+        ELSIF OLD.singleton IS NOT NULL THEN
+          UPDATE delayed_jobs SET next_in_strand = 't' WHERE singleton=OLD.singleton AND next_in_strand=false AND locked_by IS NULL;
+        END IF;
+        RETURN OLD;
+      END;
+      $$ LANGUAGE plpgsql;
+    SQL
+  end
+
+  def down
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION delayed_jobs_after_delete_row_tr_fn () RETURNS trigger AS $$
+      DECLARE
+        running_count integer;
+        should_lock boolean;
+        should_be_precise boolean;
+        update_query varchar;
+        skip_locked varchar;
+      BEGIN
+        IF OLD.strand IS NOT NULL THEN
+          should_lock := true;
+          should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
+
+          IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) subquery_for_count);
+            should_lock := running_count < OLD.max_concurrent;
+          END IF;
+
+          IF should_lock THEN
+            PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
+          END IF;
+
+          -- note that we don't really care if the row we're deleting has a singleton, or if it even
+          -- matches the row(s) we're going to update. we just need to make sure that whatever
+          -- singleton we grab isn't already running (which is a simple existence check, since
+          -- the unique indexes ensure there is at most one singleton running, and one queued)
+          update_query := 'UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.strand=$1.strand AND
+                (j2.singleton IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.singleton=j2.singleton AND j3.id<>j2.id))
+              ORDER BY j2.strand_order_override ASC, j2.id ASC
+              LIMIT ';
+
+          IF should_be_precise THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) s);
+            IF running_count < OLD.max_concurrent THEN
+              update_query := update_query || '($1.max_concurrent - $2)';
+            ELSE
+              -- we have too many running already; just bail
+              RETURN OLD;
+            END IF;
+          ELSE
+            update_query := update_query || '1';
+
+            -- n-strands don't require precise ordering; we can make this query more performant
+            IF OLD.max_concurrent > 1 THEN
+              skip_locked := ' SKIP LOCKED';
+            END IF;
+          END IF;
+
+          update_query := update_query || ' FOR UPDATE' || COALESCE(skip_locked, '') || ')';
+          EXECUTE update_query USING OLD, running_count;
+        ELSIF OLD.singleton IS NOT NULL THEN
+          UPDATE delayed_jobs SET next_in_strand = 't' WHERE singleton=OLD.singleton AND next_in_strand=false;
+        END IF;
+        RETURN OLD;
+      END;
+      $$ LANGUAGE plpgsql;
+    SQL
+  end
+end
data/lib/delayed/backend/active_record.rb
CHANGED
@@ -62,14 +62,9 @@ module Delayed
        _write_attribute(column, current_time) unless attribute_present?(column)
      end
 
-
-
-
-        values = attributes_with_values(attribute_names)
-      else
-        attribute_names = partial_writes? ? keys_for_partial_write : self.attribute_names
-        values = attributes_with_values_for_create(attribute_names)
-      end
+      attribute_names = attribute_names_for_partial_writes
+      attribute_names = attributes_for_create(attribute_names)
+      values = attributes_with_values(attribute_names)
 
      im = self.class.arel_table.compile_insert(self.class.send(:_substitute_values, values))
 
@@ -314,7 +309,7 @@ module Delayed
      if Settings.silence_periodic_log
        ::ActiveRecord::Base.logger.silence(&block)
      else
-
+        yield
      end
    end
 
@@ -553,7 +548,7 @@ module Delayed
 
    def fail!
      attrs = attributes
-      attrs["original_job_id"] = attrs.delete("id")
+      attrs["original_job_id"] = attrs.delete("id") if Failed.columns_hash.key?("original_job_id")
      attrs["failed_at"] ||= self.class.db_time_now
      attrs.delete("next_in_strand")
      attrs.delete("max_concurrent")
data/lib/delayed/backend/base.rb
CHANGED
@@ -176,7 +176,9 @@ module Delayed
      ignores = []
      loop do
        batch_scope = ignores.empty? ? jobs : jobs.where.not(id: ignores)
-
+        # if we don't reload this it's possible to keep getting the
+        # same array each loop even after the jobs have been deleted.
+        batch = batch_scope.reload.to_a
        break if batch.empty?
 
        batch.each do |job|
data/lib/delayed/rails_reloader_plugin.rb
ADDED
@@ -0,0 +1,30 @@
+# frozen_string_literal: true
+
+require "delayed/plugin"
+
+module Delayed
+  class RailsReloaderPlugin < Plugin
+    callbacks do |lifecycle|
+      app = Rails.application
+      if app && !app.config.cache_classes
+        lifecycle.around(:perform) do |worker, job, &block|
+          reload = !app.config.reload_classes_only_on_change || app.reloaders.any?(&:updated?)
+
+          if reload
+            if defined?(ActiveSupport::Reloader)
+              Rails.application.reloader.reload!
+            else
+              ActionDispatch::Reloader.prepare!
+            end
+          end
+
+          begin
+            block.call(worker, job)
+          ensure
+            ActionDispatch::Reloader.cleanup! if reload && !defined?(ActiveSupport::Reloader)
+          end
+        end
+      end
+    end
+  end
+end
data/lib/delayed/version.rb
CHANGED
data/lib/delayed/work_queue/parent_process/server.rb
CHANGED
@@ -1,5 +1,7 @@
 # frozen_string_literal: true
 
+require "activerecord-pg-extensions"
+
 module Delayed
   module WorkQueue
     class ParentProcess
@@ -192,29 +194,54 @@ module Delayed
        end
      end
 
-      def
+      def unlock_prefetched_jobs
        @prefetched_jobs.each do |(worker_config, jobs)|
          next if jobs.empty?
-          next
+          next if block_given? && !yield(jobs)
 
-          Delayed::Job.
+          connection = Delayed::Job.connection
+          connection.transaction do
+            # make absolutely sure we don't get hung up and leave things
+            # locked in the database
+            if connection.postgresql_version >= 9_06_00 # rubocop:disable Style/NumericLiterals
+              connection.idle_in_transaction_session_timeout = 5
+            end
+            # relatively short timeout for acquiring the lock
+            connection.statement_timeout = Settings.sleep_delay
            Delayed::Job.advisory_lock(Delayed::Job.prefetch_jobs_lock_name)
+
+            # this query might take longer, and we really want to get it
+            # done if we got the lock, but still don't want an inadvertent
+            # hang
+            connection.statement_timeout = 30
            Delayed::Job.unlock(jobs)
+            @prefetched_jobs[worker_config] = []
          end
-
+        rescue ActiveRecord::QueryCanceled
+          # ignore; we'll retry anyway
+          logger.warn("unable to unlock prefetched jobs; skipping for now")
+        rescue ActiveRecord::StatementInvalid
+          # see if we dropped the connection
+          raise if connection.active?
+
+          # otherwise just reconnect and let it retry
+          logger.warn("failed to unlock prefetched jobs - connection terminated; skipping for now")
+          Delayed::Job.clear_all_connections!
        end
      end
 
-      def
-
-
+      def unlock_timed_out_prefetched_jobs
+        unlock_prefetched_jobs do |jobs|
+          jobs.first.locked_at < Time.now.utc - Settings.parent_process[:prefetched_jobs_timeout]
+        end
+      end
 
-
-
-
-
+      def unlock_all_prefetched_jobs
+        # we try really hard; it may not have done any work if it timed out
+        10.times do
+          unlock_prefetched_jobs
+          break if @prefetched_jobs.each_value.all?(&:empty?)
        end
-        @prefetched_jobs = {}
      end
 
      def drop_socket(socket)
data/lib/delayed/worker/process_helper.rb
CHANGED
@@ -5,10 +5,10 @@ module Delayed
  module ProcessHelper
    STAT_LINUX = "stat --format=%%Y /proc/$WORKER_PID"
    STAT_MAC = "ps -o lstart -p $WORKER_PID"
-    STAT = RUBY_PLATFORM
+    STAT = RUBY_PLATFORM.include?("darwin") ? STAT_MAC : STAT_LINUX
    ALIVE_CHECK_LINUX = '[ -d "/proc/$WORKER_PID" ]'
    ALIVE_CHECK_MAC = "ps -p $WORKER_PID > /dev/null"
-    ALIVE_CHECK = RUBY_PLATFORM
+    ALIVE_CHECK = RUBY_PLATFORM.include?("darwin") ? ALIVE_CHECK_MAC : ALIVE_CHECK_LINUX
    SCRIPT_TEMPLATE = <<-BASH
      WORKER_PID="%<pid>d" # an example, filled from ruby when the check is created
      ORIGINAL_MTIME="%<mtime>s" # an example, filled from ruby when the check is created
@@ -29,7 +29,7 @@ module Delayed
    BASH
 
    def self.mtime(pid)
-      if RUBY_PLATFORM
+      if RUBY_PLATFORM.include?("darwin")
        `ps -o lstart -p #{pid}`.sub(/\n$/, "").presence
      else
        File::Stat.new("/proc/#{pid}").mtime.to_i.to_s rescue nil
data/lib/delayed/worker.rb
CHANGED
@@ -1,5 +1,7 @@
 # frozen_string_literal: true
 
+require "delayed/rails_reloader_plugin"
+
 module Delayed
   class TimeoutError < RuntimeError; end
 
@@ -70,27 +72,7 @@ module Delayed
 
      @signal_queue = []
 
-
-      if app && !app.config.cache_classes
-        Delayed::Worker.lifecycle.around(:perform) do |worker, job, &block|
-          reload = app.config.reload_classes_only_on_change != true || app.reloaders.map(&:updated?).any?
-
-          if reload
-            if defined?(ActiveSupport::Reloader)
-              Rails.application.reloader.reload!
-            else
-              ActionDispatch::Reloader.prepare!
-            end
-          end
-
-          begin
-            block.call(worker, job)
-          ensure
-            ActionDispatch::Reloader.cleanup! if reload && !defined?(ActiveSupport::Reloader)
-          end
-        end
-      end
-
+      plugins << Delayed::RailsReloaderPlugin
      plugins.each(&:inject!)
    end
 
|
@@ -219,14 +219,14 @@ describe "Delayed::Backed::ActiveRecord::Job" do
|
|
219
219
|
end
|
220
220
|
|
221
221
|
it "gets process ids from locked_by" do
|
222
|
-
3
|
222
|
+
Array.new(3) { Delayed::Job.create payload_object: SimpleJob.new }
|
223
223
|
Delayed::Job.get_and_lock_next_available(["job42:2", "job42:9001"])
|
224
224
|
expect(Delayed::Job.processes_locked_locally(name: "job42").sort).to eq [2, 9001]
|
225
225
|
expect(Delayed::Job.processes_locked_locally(name: "jobnotme")).to be_empty
|
226
226
|
end
|
227
227
|
|
228
228
|
it "allows fetching multiple jobs at once" do
|
229
|
-
jobs = 3
|
229
|
+
jobs = Array.new(3) { Delayed::Job.create payload_object: SimpleJob.new }
|
230
230
|
locked_jobs = Delayed::Job.get_and_lock_next_available(%w[worker1 worker2])
|
231
231
|
expect(locked_jobs.length).to eq(2)
|
232
232
|
expect(locked_jobs.keys).to eq(%w[worker1 worker2])
|
@@ -235,7 +235,7 @@ describe "Delayed::Backed::ActiveRecord::Job" do
|
|
235
235
|
end
|
236
236
|
|
237
237
|
it "allows fetching extra jobs" do
|
238
|
-
jobs = 5
|
238
|
+
jobs = Array.new(5) { Delayed::Job.create payload_object: SimpleJob.new }
|
239
239
|
locked_jobs = Delayed::Job.get_and_lock_next_available(["worker1"],
|
240
240
|
prefetch: 2,
|
241
241
|
prefetch_owner: "work_queue")
|
data/spec/delayed/work_queue/parent_process/server_spec.rb
CHANGED
@@ -104,6 +104,53 @@ RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
    expect(Marshal.load(client)).to eq(job2)
  end
 
+  context "prefetched job unlocking" do
+    let(:job_args) do
+      [["worker_name1"], "queue_name", 1, 2,
+       { prefetch: 4, prefetch_owner: "prefetch:work_queue:X", forced_latency: 6.0 }]
+    end
+    let(:job2) { Delayed::Job.new(tag: "tag").tap { |j| j.create_and_lock!("prefetch:work_queue:X") } }
+    let(:job3) { Delayed::Job.new(tag: "tag").tap { |j| j.create_and_lock!("prefetch:work_queue:X") } }
+
+    before do
+      client = Socket.unix(subject.listen_socket.local_address.unix_path)
+      subject.run_once
+
+      jobs = { "worker_name1" => job, "prefetch:work_queue:X" => [job2, job3] }
+      allow(subject).to receive(:prefetch_owner).and_return("prefetch:work_queue:X")
+      allow(Delayed::Job).to receive(:get_and_lock_next_available).once.with(*job_args).and_return(jobs)
+      Marshal.dump(["worker_name1", worker_config], client)
+      subject.run_once
+    end
+
+    it "doesn't unlock anything if nothing is timed out" do
+      expect(Delayed::Job).not_to receive(:advisory_lock)
+      expect(Delayed::Job).not_to receive(:unlock)
+      subject.unlock_timed_out_prefetched_jobs
+    end
+
+    it "unlocks timed out prefetched jobs" do
+      allow(Delayed::Settings).to receive(:parent_process).and_return(prefetched_jobs_timeout: -1)
+      expect(Delayed::Job).to receive(:unlock).with([job2, job3])
+      subject.unlock_timed_out_prefetched_jobs
+      expect(subject.instance_variable_get(:@prefetched_jobs).values.sum(&:length)).to eq 0
+    end
+
+    it "fails gracefully if the lock times out" do
+      allow(Delayed::Settings).to receive(:parent_process).and_return(prefetched_jobs_timeout: -1)
+      expect(Delayed::Job).not_to receive(:unlock)
+      expect(Delayed::Job).to receive(:advisory_lock).and_raise(ActiveRecord::QueryCanceled)
+      subject.unlock_timed_out_prefetched_jobs
+      expect(subject.instance_variable_get(:@prefetched_jobs).values.sum(&:length)).to eq 2
+    end
+
+    it "unlocks all jobs" do
+      expect(Delayed::Job).to receive(:unlock).with([job2, job3])
+      subject.unlock_all_prefetched_jobs
+      expect(subject.instance_variable_get(:@prefetched_jobs).values.sum(&:length)).to eq 0
+    end
+  end
+
  it "doesn't respond immediately if there are no jobs available" do
    client = Socket.unix(subject.listen_socket.local_address.unix_path)
    subject.run_once
data/spec/delayed/worker_spec.rb
CHANGED
@@ -44,7 +44,7 @@ describe Delayed::Worker do
    expect(output_count).to eq(1)
  end
 
-  it "reloads" do
+  it "reloads Rails classes (never more than once)" do
    fake_application = double("Rails.application",
                              config: double("Rails.application.config",
                                             cache_classes: false,
@@ -59,6 +59,11 @@ describe Delayed::Worker do
      expect(ActionDispatch::Reloader).to receive(:cleanup!).once
    end
    job = double(job_attrs)
+
+    # Create extra workers to make sure we don't reload multiple times
+    described_class.new(worker_config.dup)
+    described_class.new(worker_config.dup)
+
    subject.perform(job)
  end
 end
data/spec/shared/shared_backend.rb
CHANGED
@@ -1,5 +1,7 @@
 # frozen_string_literal: true
 
+require "timeout"
+
 module InDelayedJobTest
   def self.check_in_job
     Delayed::Job.in_delayed_job?.should == true
@@ -960,6 +962,21 @@ shared_examples_for "a backend" do
    end
  end
 
+  it "removes an un-reschedulable job" do
+    change_setting(Delayed::Settings, :max_attempts, -1) do
+      job = Delayed::Job.new(tag: "tag")
+      `echo ''`
+      child_pid = $?.pid
+      job.create_and_lock!("Jobworker:#{child_pid}")
+      Timeout.timeout(1) do
+        # if this takes longer than a second it's hung
+        # in an infinite loop, which would be bad.
+        expect(Delayed::Job.unlock_orphaned_jobs(nil, "Jobworker")).to eq(1)
+      end
+      expect { Delayed::Job.find(job.id) }.to raise_error(ActiveRecord::RecordNotFound)
+    end
+  end
+
  it "unlocks orphaned jobs given a pid" do
    change_setting(Delayed::Settings, :max_attempts, 2) do
      job1 = Delayed::Job.new(tag: "tag")
data/spec/spec_helper.rb
CHANGED
@@ -58,11 +58,7 @@ connection_config = {
 }
 
 def migrate(file)
-
-    ActiveRecord::MigrationContext.new(file, ActiveRecord::SchemaMigration).migrate
-  else
-    ActiveRecord::MigrationContext.new(file).migrate
-  end
+  ActiveRecord::MigrationContext.new(file, ActiveRecord::SchemaMigration).migrate
 end
 
 # create the test db if it does not exist, to help out wwtd
@@ -73,6 +69,11 @@ rescue ActiveRecord::StatementInvalid
   nil
 end
 ActiveRecord::Base.establish_connection(connection_config)
+
+# we need to ensure this callback is called for activerecord-pg-extensions,
+# which isn't running because we're not using Rails to setup the database
+ActiveRecord::PGExtensions::Railtie.run_initializers
+
 # TODO: reset db and migrate again, to test migrations
 
 migrate("db/migrate")
metadata
CHANGED
@@ -1,15 +1,16 @@
 --- !ruby/object:Gem::Specification
 name: inst-jobs
 version: !ruby/object:Gem::Version
-  version:
+  version: 3.0.3
 platform: ruby
 authors:
--
--
-
+- Cody Cutrer
+- Ethan Vizitei
+- Jacob Burroughs
+autorequire:
 bindir: exe
 cert_chain: []
-date: 2021-
+date: 2021-11-30 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activerecord
@@ -17,28 +18,42 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
+- !ruby/object:Gem::Dependency
+  name: activerecord-pg-extensions
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.4'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.4'
 - !ruby/object:Gem::Dependency
   name: activesupport
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
 - !ruby/object:Gem::Dependency
   name: after_transaction_commit
   requirement: !ruby/object:Gem::Requirement
@@ -93,14 +108,14 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.0'
 - !ruby/object:Gem::Dependency
   name: appraisal
   requirement: !ruby/object:Gem::Requirement
@@ -283,6 +298,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '1.19'
+- !ruby/object:Gem::Dependency
+  name: rubocop-performance
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 1.12.0
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 1.12.0
 - !ruby/object:Gem::Dependency
   name: rubocop-rails
   requirement: !ruby/object:Gem::Requirement
@@ -395,9 +424,11 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: 1.4.0
-description:
+description:
 email:
--
+- cody@instructure.com
+- evizitei@instructure.com
+- jburroughs@instructure.com
 executables:
 - inst_jobs
 extensions: []
@@ -433,6 +464,8 @@ files:
 - db/migrate/20210812210128_add_singleton_column.rb
 - db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb
 - db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb
+- db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb
+- db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb
 - exe/inst_jobs
 - lib/delayed/backend/active_record.rb
 - lib/delayed/backend/base.rb
@@ -450,6 +483,7 @@ files:
 - lib/delayed/periodic.rb
 - lib/delayed/plugin.rb
 - lib/delayed/pool.rb
+- lib/delayed/rails_reloader_plugin.rb
 - lib/delayed/server.rb
 - lib/delayed/server/helpers.rb
 - lib/delayed/server/public/css/app.css
@@ -498,7 +532,7 @@ files:
 homepage: https://github.com/instructure/inst-jobs
 licenses: []
 metadata: {}
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -513,32 +547,32 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.2.
-signing_key:
+rubygems_version: 3.2.15
+signing_key:
 specification_version: 4
 summary: Instructure-maintained fork of delayed_job
 test_files:
-- spec/
-- spec/spec_helper.rb
-- spec/shared_jobs_specs.rb
-- spec/shared/performable_method.rb
-- spec/shared/testing.rb
-- spec/shared/delayed_batch.rb
-- spec/shared/worker.rb
-- spec/shared/delayed_method.rb
-- spec/shared/shared_backend.rb
-- spec/migrate/20140924140513_add_story_table.rb
-- spec/delayed/server_spec.rb
+- spec/active_record_job_spec.rb
 - spec/delayed/cli_spec.rb
 - spec/delayed/daemon_spec.rb
-- spec/delayed/worker_spec.rb
-- spec/delayed/periodic_spec.rb
 - spec/delayed/message_sending_spec.rb
+- spec/delayed/periodic_spec.rb
+- spec/delayed/server_spec.rb
 - spec/delayed/settings_spec.rb
 - spec/delayed/work_queue/in_process_spec.rb
-- spec/delayed/work_queue/parent_process_spec.rb
 - spec/delayed/work_queue/parent_process/client_spec.rb
 - spec/delayed/work_queue/parent_process/server_spec.rb
-- spec/delayed/
+- spec/delayed/work_queue/parent_process_spec.rb
 - spec/delayed/worker/consul_health_check_spec.rb
-- spec/
+- spec/delayed/worker/health_check_spec.rb
+- spec/delayed/worker_spec.rb
+- spec/migrate/20140924140513_add_story_table.rb
+- spec/sample_jobs.rb
+- spec/shared/delayed_batch.rb
+- spec/shared/delayed_method.rb
+- spec/shared/performable_method.rb
+- spec/shared/shared_backend.rb
+- spec/shared/testing.rb
+- spec/shared/worker.rb
+- spec/shared_jobs_specs.rb
+- spec/spec_helper.rb