inst-jobs 3.0.7 → 3.0.10
- checksums.yaml +4 -4
- data/db/migrate/20220127091200_fix_singleton_unique_constraint.rb +31 -0
- data/db/migrate/20220128084800_update_insert_trigger_for_singleton_unique_constraint_change.rb +60 -0
- data/db/migrate/20220128084900_update_delete_trigger_for_singleton_unique_constraint_change.rb +209 -0
- data/db/migrate/20220203063200_remove_old_singleton_index.rb +31 -0
- data/lib/delayed/backend/active_record.rb +15 -4
- data/lib/delayed/backend/base.rb +1 -0
- data/lib/delayed/periodic.rb +1 -1
- data/lib/delayed/settings.rb +1 -1
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/worker/health_check.rb +1 -1
- data/spec/active_record_job_spec.rb +13 -13
- data/spec/delayed/work_queue/in_process_spec.rb +1 -1
- data/spec/delayed/work_queue/parent_process/server_spec.rb +1 -1
- data/spec/delayed/worker_spec.rb +1 -1
- data/spec/shared/delayed_batch.rb +3 -3
- data/spec/shared/delayed_method.rb +1 -1
- data/spec/shared/shared_backend.rb +32 -32
- data/spec/shared/worker.rb +3 -3
- data/spec/spec_helper.rb +4 -2
- metadata +30 -39
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 80d0bc1d05ae57467d66361e81fa51a5b45e3fa391250005c59229554757ec08
+  data.tar.gz: 32d3df9f6085199fa1e9586330c6ba255f898602eb546ea94f49de52b628e7df
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3befc529f42b4ebcba81aacea9bb4c4401a9daa06708c545572b6d6485db85b3d650b9d12baec0dadad9c7387b30ec5e051493b2992a0f6cd41ced9bcfca2485
+  data.tar.gz: 3a3629c08198e0e17455468fd670f4acdd05b28d76b4fbe88516f50692852b6edaf4ed29b6fc26babdfbcca669c9502e88c37616a24cbed5e82611664a3262de
data/db/migrate/20220127091200_fix_singleton_unique_constraint.rb
ADDED
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+class FixSingletonUniqueConstraint < ActiveRecord::Migration[5.2]
+  disable_ddl_transaction!
+
+  def up
+    rename_index :delayed_jobs, "index_delayed_jobs_on_singleton_not_running", "index_delayed_jobs_on_singleton_not_running_old"
+    rename_index :delayed_jobs, "index_delayed_jobs_on_singleton_running", "index_delayed_jobs_on_singleton_running_old"
+
+    # only one job can be queued in a singleton
+    add_index :delayed_jobs,
+              :singleton,
+              where: "singleton IS NOT NULL AND (locked_by IS NULL OR locked_by = '#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}')",
+              unique: true,
+              name: "index_delayed_jobs_on_singleton_not_running",
+              algorithm: :concurrently
+
+    # only one job can be running for a singleton
+    add_index :delayed_jobs,
+              :singleton,
+              where: "singleton IS NOT NULL AND locked_by IS NOT NULL AND locked_by <> '#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}'",
+              unique: true,
+              name: "index_delayed_jobs_on_singleton_running",
+              algorithm: :concurrently
+  end
+
+  def down
+    remove_index :delayed_jobs, name: "index_delayed_jobs_on_singleton_not_running_old"
+    remove_index :delayed_jobs, name: "index_delayed_jobs_on_singleton_running_old"
+  end
+end
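Note (not part of the diff): the two partial unique indexes split each singleton name into a "queued" slot and a "running" slot, so at most one row of each can exist per singleton. A minimal sketch of the observable behaviour, using the enqueue options exercised by the specs later in this diff (SimpleJob is a spec helper class, not gem API):

    # Both enqueues target the same singleton; the insert trigger marks the second row
    # next_in_strand=false, and the "not running" index guarantees only one queued row exists.
    job1 = Delayed::Job.enqueue(SimpleJob.new, singleton: "myjobs")
    Delayed::Job.get_and_lock_next_available("w1") # job1 is now the "running" copy (locked_by set)
    job2 = Delayed::Job.enqueue(SimpleJob.new, singleton: "myjobs")
    job1.reload.next_in_strand # => true
    job2.reload.next_in_strand # => false
    job1.destroy               # the delete trigger promotes the queued copy
    job2.reload.next_in_strand # => true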
data/db/migrate/20220128084800_update_insert_trigger_for_singleton_unique_constraint_change.rb
ADDED
@@ -0,0 +1,60 @@
+# frozen_string_literal: true
+
+class UpdateInsertTriggerForSingletonUniqueConstraintChange < ActiveRecord::Migration[5.2]
+  def change
+    reversible do |direction|
+      direction.up do
+        execute(<<~SQL)
+          CREATE OR REPLACE FUNCTION delayed_jobs_before_insert_row_tr_fn () RETURNS trigger AS $$
+          BEGIN
+            IF NEW.strand IS NOT NULL THEN
+              PERFORM pg_advisory_xact_lock(half_md5_as_bigint(NEW.strand));
+              IF (SELECT COUNT(*) FROM (
+                  SELECT 1 FROM delayed_jobs WHERE strand = NEW.strand AND next_in_strand=true LIMIT NEW.max_concurrent
+                ) s) = NEW.max_concurrent THEN
+                NEW.next_in_strand := false;
+              END IF;
+            END IF;
+            IF NEW.singleton IS NOT NULL THEN
+              PERFORM pg_advisory_xact_lock(half_md5_as_bigint(CONCAT('singleton:', NEW.singleton)));
+              -- this condition seems silly, but it forces postgres to use the two partial indexes on singleton,
+              -- rather than doing a seq scan
+              PERFORM 1 FROM delayed_jobs WHERE singleton = NEW.singleton AND (locked_by IS NULL OR locked_by = '#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}' OR locked_by <> '#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}');
+              IF FOUND THEN
+                NEW.next_in_strand := false;
+              END IF;
+            END IF;
+            RETURN NEW;
+          END;
+          $$ LANGUAGE plpgsql;
+        SQL
+      end
+      direction.down do
+        execute(<<~SQL)
+          CREATE OR REPLACE FUNCTION delayed_jobs_before_insert_row_tr_fn () RETURNS trigger AS $$
+          BEGIN
+            IF NEW.strand IS NOT NULL THEN
+              PERFORM pg_advisory_xact_lock(half_md5_as_bigint(NEW.strand));
+              IF (SELECT COUNT(*) FROM (
+                  SELECT 1 FROM delayed_jobs WHERE strand = NEW.strand AND next_in_strand=true LIMIT NEW.max_concurrent
+                ) s) = NEW.max_concurrent THEN
+                NEW.next_in_strand := false;
+              END IF;
+            END IF;
+            IF NEW.singleton IS NOT NULL THEN
+              PERFORM pg_advisory_xact_lock(half_md5_as_bigint(CONCAT('singleton:', NEW.singleton)));
+              -- this condition seems silly, but it forces postgres to use the two partial indexes on singleton,
+              -- rather than doing a seq scan
+              PERFORM 1 FROM delayed_jobs WHERE singleton = NEW.singleton AND (locked_by IS NULL OR locked_by IS NOT NULL);
+              IF FOUND THEN
+                NEW.next_in_strand := false;
+              END IF;
+            END IF;
+            RETURN NEW;
+          END;
+          $$ LANGUAGE plpgsql;
+        SQL
+      end
+    end
+  end
+end
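Note (not part of the diff): the insert trigger serializes concurrent enqueues of the same singleton by taking a transaction-scoped advisory lock keyed on the string "singleton:<name>", then doing a plain existence check that the partial indexes can satisfy. A rough console sketch of that lock convention (half_md5_as_bigint is the helper function installed by earlier inst-jobs migrations; the key below is illustrative):

    conn = Delayed::Job.connection
    Delayed::Job.transaction do
      # Same key shape the trigger uses, so this serializes against concurrent singleton inserts.
      conn.execute("SELECT pg_advisory_xact_lock(half_md5_as_bigint(#{conn.quote('singleton:my_singleton')}))")
      # ... any work here runs one-at-a-time per singleton until the transaction ends ...
    end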
data/db/migrate/20220128084900_update_delete_trigger_for_singleton_unique_constraint_change.rb
ADDED
@@ -0,0 +1,209 @@
+# frozen_string_literal: true
+
+class UpdateDeleteTriggerForSingletonUniqueConstraintChange < ActiveRecord::Migration[6.0]
+  def up
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION delayed_jobs_after_delete_row_tr_fn () RETURNS trigger AS $$
+      DECLARE
+        next_strand varchar;
+        running_count integer;
+        should_lock boolean;
+        should_be_precise boolean;
+        update_query varchar;
+        skip_locked varchar;
+        transition boolean;
+      BEGIN
+        IF OLD.strand IS NOT NULL THEN
+          should_lock := true;
+          should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
+
+          IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) subquery_for_count);
+            should_lock := running_count < OLD.max_concurrent;
+          END IF;
+
+          IF should_lock THEN
+            PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
+          END IF;
+
+          -- note that we don't really care if the row we're deleting has a singleton, or if it even
+          -- matches the row(s) we're going to update. we just need to make sure that whatever
+          -- singleton we grab isn't already running (which is a simple existence check, since
+          -- the unique indexes ensure there is at most one singleton running, and one queued)
+          update_query := 'UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.strand=$1.strand AND
+                (j2.singleton IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.singleton=j2.singleton AND j3.id<>j2.id AND (j3.locked_by IS NULL OR j3.locked_by = ''#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}'' OR j3.locked_by <> ''#{::Delayed::Backend::Base::ON_HOLD_LOCKED_BY}'')))
+              ORDER BY j2.strand_order_override ASC, j2.id ASC
+              LIMIT ';
+
+          IF should_be_precise THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) s);
+            IF running_count < OLD.max_concurrent THEN
+              update_query := update_query || '($1.max_concurrent - $2)';
+            ELSE
+              -- we have too many running already; just bail
+              RETURN OLD;
+            END IF;
+          ELSE
+            update_query := update_query || '1';
+
+            -- n-strands don't require precise ordering; we can make this query more performant
+            IF OLD.max_concurrent > 1 THEN
+              skip_locked := ' SKIP LOCKED';
+            END IF;
+          END IF;
+
+          update_query := update_query || ' FOR UPDATE' || COALESCE(skip_locked, '') || ')';
+          EXECUTE update_query USING OLD, running_count;
+        END IF;
+
+        IF OLD.singleton IS NOT NULL THEN
+          PERFORM pg_advisory_xact_lock(half_md5_as_bigint(CONCAT('singleton:', OLD.singleton)));
+
+          transition := EXISTS (SELECT 1 FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL);
+
+          IF transition THEN
+            next_strand := (SELECT j1.strand FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL AND j1.strand IS NOT NULL LIMIT 1);
+
+            IF next_strand IS NOT NULL THEN
+              -- if the singleton has a new strand defined, we need to lock it to ensure we obey n_strand constraints --
+              IF NOT pg_try_advisory_xact_lock(half_md5_as_bigint(next_strand)) THEN
+                -- a failure to acquire the lock means that another process already has it and will thus handle this singleton --
+                RETURN OLD;
+              END IF;
+            END IF;
+          ELSIF OLD.strand IS NOT NULL THEN
+            -- if there is no transition and there is a strand then we have already handled this singleton in the case above --
+            RETURN OLD;
+          END IF;
+
+          -- handles transitioning a singleton from stranded to not stranded --
+          -- handles transitioning a singleton from unstranded to stranded --
+          -- handles transitioning a singleton from strand A to strand B --
+          -- these transitions are a relatively rare case, so we take a shortcut and --
+          -- only start the next singleton if its strand does not currently have any running jobs --
+          -- if it does, the next stranded job that finishes will start this singleton if it can --
+          UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.singleton=OLD.singleton AND
+                j2.locked_by IS NULL AND
+                (j2.strand IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.strand=j2.strand AND j3.id<>j2.id))
+              FOR UPDATE
+          );
+        END IF;
+        RETURN OLD;
+      END;
+      $$ LANGUAGE plpgsql;
+    SQL
+  end
+
+  def down
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION delayed_jobs_after_delete_row_tr_fn () RETURNS trigger AS $$
+      DECLARE
+        next_strand varchar;
+        running_count integer;
+        should_lock boolean;
+        should_be_precise boolean;
+        update_query varchar;
+        skip_locked varchar;
+        transition boolean;
+      BEGIN
+        IF OLD.strand IS NOT NULL THEN
+          should_lock := true;
+          should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
+
+          IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) subquery_for_count);
+            should_lock := running_count < OLD.max_concurrent;
+          END IF;
+
+          IF should_lock THEN
+            PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
+          END IF;
+
+          -- note that we don't really care if the row we're deleting has a singleton, or if it even
+          -- matches the row(s) we're going to update. we just need to make sure that whatever
+          -- singleton we grab isn't already running (which is a simple existence check, since
+          -- the unique indexes ensure there is at most one singleton running, and one queued)
+          update_query := 'UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.strand=$1.strand AND
+                (j2.singleton IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.singleton=j2.singleton AND j3.id<>j2.id AND (j3.locked_by IS NULL OR j3.locked_by IS NOT NULL)))
+              ORDER BY j2.strand_order_override ASC, j2.id ASC
+              LIMIT ';
+
+          IF should_be_precise THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) s);
+            IF running_count < OLD.max_concurrent THEN
+              update_query := update_query || '($1.max_concurrent - $2)';
+            ELSE
+              -- we have too many running already; just bail
+              RETURN OLD;
+            END IF;
+          ELSE
+            update_query := update_query || '1';
+
+            -- n-strands don't require precise ordering; we can make this query more performant
+            IF OLD.max_concurrent > 1 THEN
+              skip_locked := ' SKIP LOCKED';
+            END IF;
+          END IF;
+
+          update_query := update_query || ' FOR UPDATE' || COALESCE(skip_locked, '') || ')';
+          EXECUTE update_query USING OLD, running_count;
+        END IF;
+
+        IF OLD.singleton IS NOT NULL THEN
+          PERFORM pg_advisory_xact_lock(half_md5_as_bigint(CONCAT('singleton:', OLD.singleton)));
+
+          transition := EXISTS (SELECT 1 FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL);
+
+          IF transition THEN
+            next_strand := (SELECT j1.strand FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL AND j1.strand IS NOT NULL LIMIT 1);
+
+            IF next_strand IS NOT NULL THEN
+              -- if the singleton has a new strand defined, we need to lock it to ensure we obey n_strand constraints --
+              IF NOT pg_try_advisory_xact_lock(half_md5_as_bigint(next_strand)) THEN
+                -- a failure to acquire the lock means that another process already has it and will thus handle this singleton --
+                RETURN OLD;
+              END IF;
+            END IF;
+          ELSIF OLD.strand IS NOT NULL THEN
+            -- if there is no transition and there is a strand then we have already handled this singleton in the case above --
+            RETURN OLD;
+          END IF;
+
+          -- handles transitioning a singleton from stranded to not stranded --
+          -- handles transitioning a singleton from unstranded to stranded --
+          -- handles transitioning a singleton from strand A to strand B --
+          -- these transitions are a relatively rare case, so we take a shortcut and --
+          -- only start the next singleton if its strand does not currently have any running jobs --
+          -- if it does, the next stranded job that finishes will start this singleton if it can --
+          UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.singleton=OLD.singleton AND
+                j2.locked_by IS NULL AND
+                (j2.strand IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.strand=j2.strand AND j3.id<>j2.id))
+              FOR UPDATE
+          );
+        END IF;
+        RETURN OLD;
+      END;
+      $$ LANGUAGE plpgsql;
+    SQL
+  end
+end
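Note (not part of the diff): most of the new delete-trigger code handles singletons whose next queued copy sits on a different strand, or on no strand at all. The spec changes further down exercise exactly that; a condensed sketch of one such transition, assuming the same spec helpers (SimpleJob is a spec fixture, not gem API):

    # The running copy is unstranded; the queued copy of the same singleton targets strand "myjobs2".
    job1 = Delayed::Job.enqueue(SimpleJob.new, singleton: "myjobs")
    Delayed::Job.get_and_lock_next_available("w1")
    job2 = Delayed::Job.enqueue(SimpleJob.new, singleton: "myjobs", strand: "myjobs2")

    job2.reload.next_in_strand # => false, blocked behind the running singleton
    job1.destroy               # trigger locks strand "myjobs2", then promotes job2 only if that strand is idle
    job2.reload.next_in_strand # => true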
data/db/migrate/20220203063200_remove_old_singleton_index.rb
ADDED
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+class RemoveOldSingletonIndex < ActiveRecord::Migration[5.2]
+  disable_ddl_transaction!
+
+  def up
+    remove_index :delayed_jobs, name: "index_delayed_jobs_on_singleton_not_running_old"
+    remove_index :delayed_jobs, name: "index_delayed_jobs_on_singleton_running_old"
+  end
+
+  def down
+    rename_index :delayed_jobs, "index_delayed_jobs_on_singleton_not_running", "index_delayed_jobs_on_singleton_not_running_old"
+    rename_index :delayed_jobs, "index_delayed_jobs_on_singleton_running", "index_delayed_jobs_on_singleton_running_old"
+
+    # only one job can be queued in a singleton
+    add_index :delayed_jobs,
+              :singleton,
+              where: "singleton IS NOT NULL AND locked_by IS NULL",
+              unique: true,
+              name: "index_delayed_jobs_on_singleton_not_running",
+              algorithm: :concurrently
+
+    # only one job can be running for a singleton
+    add_index :delayed_jobs,
+              :singleton,
+              where: "singleton IS NOT NULL AND locked_by IS NOT NULL",
+              unique: true,
+              name: "index_delayed_jobs_on_singleton_running",
+              algorithm: :concurrently
+  end
+end
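Note (not part of the diff): the index migrations in this release build their indexes with algorithm: :concurrently, which PostgreSQL refuses to run inside a transaction, hence the disable_ddl_transaction! at the top of each class. A generic sketch of that pattern (the index name here is illustrative only):

    class AddExampleIndexConcurrently < ActiveRecord::Migration[5.2]
      # Required: CREATE INDEX CONCURRENTLY cannot run inside the usual migration transaction.
      disable_ddl_transaction!

      def change
        add_index :delayed_jobs, :singleton,
                  unique: true,
                  where: "singleton IS NOT NULL",
                  name: "example_partial_index",
                  algorithm: :concurrently
      end
    end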
data/lib/delayed/backend/active_record.rb
CHANGED
@@ -62,11 +62,21 @@ module Delayed
         _write_attribute(column, current_time) unless attribute_present?(column)
       end
 
-      attribute_names =
+      attribute_names = if Rails.version < "7.0"
+                          attribute_names_for_partial_writes
+                        else
+                          attribute_names_for_partial_inserts
+                        end
       attribute_names = attributes_for_create(attribute_names)
       values = attributes_with_values(attribute_names)
 
-      im =
+      im = if Rails.version < "7.0"
+             self.class.arel_table.compile_insert(self.class.send(:_substitute_values, values))
+           else
+             im = Arel::InsertManager.new(self.class.arel_table)
+             im.insert(values.transform_keys { |name| self.class.arel_table[name] })
+             im
+           end
 
       lock_and_insert = values["strand"] && instance_of?(Job)
       # can't use prepared statements if we're combining multiple statemenets
@@ -101,7 +111,8 @@ module Delayed
       # but we don't need to lock when inserting into Delayed::Failed
       if values["strand"] && instance_of?(Job)
         fn_name = connection.quote_table_name("half_md5_as_bigint")
-
+        quoted_strand = connection.quote(Rails.version < "7.0" ? values["strand"] : values["strand"].value)
+        sql = "SELECT pg_advisory_xact_lock(#{fn_name}(#{quoted_strand})); #{sql}"
       end
       result = connection.execute(sql, "#{self.class} Create")
       self.id = result.values.first&.first
@@ -471,7 +482,7 @@ module Delayed
       transaction do
         # for db performance reasons, we only need one process doing this at a time
         # so if we can't get an advisory lock, just abort. we'll try again soon
-
+        next unless attempt_advisory_lock(prefetch_jobs_lock_name)
 
         horizon = db_time_now - (Settings.parent_process[:prefetched_jobs_timeout] * 4)
         where("locked_by LIKE 'prefetch:%' AND locked_at<?", horizon).update_all(locked_at: nil, locked_by: nil)
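Note (not part of the diff): the Rails-version branches above exist because the values returned by attributes_with_values behave differently on 7+, which the .value call in the diff suggests are bound-attribute objects rather than raw strings. An illustrative, self-contained sketch of just the quoting step (the strand value is a stand-in, not gem API):

    # Mirrors the new quoting logic in isolation: unwrap a bound attribute if present, then quote
    # the raw string into the advisory-lock SELECT that gets prepended to the INSERT.
    strand_value = "my_strand"
    raw_strand = strand_value.respond_to?(:value) ? strand_value.value : strand_value
    quoted = ActiveRecord::Base.connection.quote(raw_strand)
    lock_sql = "SELECT pg_advisory_xact_lock(half_md5_as_bigint(#{quoted}))"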
data/lib/delayed/backend/base.rb
CHANGED
data/lib/delayed/periodic.rb
CHANGED
@@ -36,7 +36,7 @@ module Delayed
     Delayed::Job.transaction do
       # for db performance reasons, we only need one process doing this at a time
       # so if we can't get an advisory lock, just abort. we'll try again soon
-
+      next unless Delayed::Job.attempt_advisory_lock("Delayed::Periodic#audit_queue")
 
       perform_audit!
     end
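Note (not part of the diff): attempt_advisory_lock is a non-blocking, transaction-scoped lock attempt, presumably built on pg_try_advisory_xact_lock, so concurrent workers skip the periodic audit instead of queueing behind each other. A hypothetical application-level use of the same helper to make a task cluster-unique (MyApp.nightly_cleanup is an assumed method, not part of the gem):

    Delayed::Job.transaction do
      # Returns immediately; only the process that wins the lock does the work this cycle.
      next unless Delayed::Job.attempt_advisory_lock("MyApp#nightly_cleanup")
      MyApp.nightly_cleanup
    end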
data/lib/delayed/settings.rb
CHANGED
@@ -135,7 +135,7 @@ module Delayed
     self.num_strands = ->(_strand_name) {}
     self.default_job_options = -> { {} }
     self.job_detailed_log_format = lambda { |job|
-      job.to_json(include_root: false, only: %w[tag strand priority attempts created_at max_attempts source])
+      job.to_json(include_root: false, only: %w[tag strand singleton priority attempts created_at max_attempts source])
     }
 
     # Send workers KILL after QUIT if they haven't exited within the
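Note (not part of the diff): the default detailed log line now includes the singleton column, so deployments that override Delayed::Settings.job_detailed_log_format may want to add it to their own lambda as well. A sketch of such an override (the field list is up to the application):

    Delayed::Settings.job_detailed_log_format = lambda { |job|
      job.to_json(include_root: false, only: %w[tag strand singleton priority source])
    }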
data/lib/delayed/version.rb
CHANGED
data/lib/delayed/worker/health_check.rb
CHANGED
@@ -34,7 +34,7 @@ module Delayed
       # no other worker is trying to do this right now (and if we abandon the
       # operation, the transaction will end, releasing the advisory lock).
       result = Delayed::Job.attempt_advisory_lock("Delayed::Worker::HealthCheck#reschedule_abandoned_jobs")
-
+      next unless result
 
       horizon = 5.minutes.ago
 
data/spec/active_record_job_spec.rb
CHANGED
@@ -28,13 +28,13 @@ describe "Delayed::Backed::ActiveRecord::Job" do
 
   it "does not allow a second worker to get exclusive access if already successfully processed by worker1" do
     @job.destroy
-    expect(@job_copy_for_worker2.send(:lock_exclusively!, "worker2")).to
+    expect(@job_copy_for_worker2.send(:lock_exclusively!, "worker2")).to be(false)
   end
 
   it "doesn't allow a second worker to get exclusive access if failed to be " \
     "processed by worker1 and run_at time is now in future (due to backing off behaviour)" do
     @job.update(attempts: 1, run_at: 1.day.from_now)
-    expect(@job_copy_for_worker2.send(:lock_exclusively!, "worker2")).to
+    expect(@job_copy_for_worker2.send(:lock_exclusively!, "worker2")).to be(false)
   end
 
   it "selects the next job at random if enabled" do
@@ -55,13 +55,13 @@ describe "Delayed::Backed::ActiveRecord::Job" do
 
   it "unlocks a successfully locked job and persist the job's unlocked state" do
     job = Delayed::Job.create payload_object: SimpleJob.new
-    expect(job.send(:lock_exclusively!, "worker1")).to
+    expect(job.send(:lock_exclusively!, "worker1")).to be(true)
     job.reload
     job.unlock
     job.save!
     job.reload
-    expect(job.locked_by).to
-    expect(job.locked_at).to
+    expect(job.locked_by).to be_nil
+    expect(job.locked_at).to be_nil
   end
 
   describe "bulk_update failed jobs" do
@@ -85,7 +85,7 @@ describe "Delayed::Backed::ActiveRecord::Job" do
     before do
       2.times do
         j = Delayed::Job.create(payload_object: SimpleJob.new)
-        expect(j.send(:lock_exclusively!, "worker1")).to
+        expect(j.send(:lock_exclusively!, "worker1")).to be(true)
         j.fail!
       end
     end
@@ -170,13 +170,13 @@ describe "Delayed::Backed::ActiveRecord::Job" do
   it "sets one job as next_in_strand at a time with max_concurrent of 1" do
     job1 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
     job1.reload
-    expect(job1.next_in_strand).to
+    expect(job1.next_in_strand).to be(true)
     job2 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
     job2.reload
-    expect(job2.next_in_strand).to
+    expect(job2.next_in_strand).to be(false)
     run_job(job1)
     job2.reload
-    expect(job2.next_in_strand).to
+    expect(job2.next_in_strand).to be(true)
   end
 
   it "sets multiple jobs as next_in_strand at a time based on max_concurrent" do
@@ -187,16 +187,16 @@ describe "Delayed::Backed::ActiveRecord::Job" do
                        }) do
       job1 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
      job1.reload
-      expect(job1.next_in_strand).to
+      expect(job1.next_in_strand).to be(true)
       job2 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
       job2.reload
-      expect(job2.next_in_strand).to
+      expect(job2.next_in_strand).to be(true)
       job3 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
       job3.reload
-      expect(job3.next_in_strand).to
+      expect(job3.next_in_strand).to be(false)
       run_job(job1)
       job3.reload
-      expect(job3.next_in_strand).to
+      expect(job3.next_in_strand).to be(true)
     end
   end
 end
data/spec/delayed/work_queue/parent_process/server_spec.rb
CHANGED
@@ -255,7 +255,7 @@ RSpec.describe Delayed::WorkQueue::ParentProcess::Server do
     subject.run_once
 
     expect(Marshal.load(client)).to eq(job)
-    expect(called).to
+    expect(called).to be(true)
   end
 
   it "deletes the correct worker when transferring jobs" do
data/spec/delayed/worker_spec.rb
CHANGED
@@ -81,7 +81,7 @@ describe Delayed::Worker do
     short_log_format = subject.log_job(job, :short)
     expect(short_log_format).to eq("RSpec::Mocks::Double")
     long_format = subject.log_job(job, :long)
-    expect(long_format).to eq("RSpec::Mocks::Double {\"priority\":25,\"attempts\":0,\"created_at\":null,\"tag\":\"RSpec::Mocks::Double#perform\",\"max_attempts\":null,\"strand\":\"test_jobs\",\"source\":null}") # rubocop:disable Layout/LineLength
+    expect(long_format).to eq("RSpec::Mocks::Double {\"priority\":25,\"attempts\":0,\"created_at\":null,\"tag\":\"RSpec::Mocks::Double#perform\",\"max_attempts\":null,\"strand\":\"test_jobs\",\"source\":null,\"singleton\":null}") # rubocop:disable Layout/LineLength
   end
 
   it "logging format can be changed with settings" do
data/spec/shared/delayed_batch.rb
CHANGED
@@ -13,10 +13,10 @@ shared_examples_for "Delayed::Batch" do
     batch_jobs = Delayed::Job.find_available(5)
     regular_jobs = Delayed::Job.list_jobs(:future, 5)
     expect(regular_jobs.size).to eq(1)
-    expect(regular_jobs.first.batch?).to
+    expect(regular_jobs.first.batch?).to be(false)
     expect(batch_jobs.size).to eq(1)
     batch_job = batch_jobs.first
-    expect(batch_job.batch?).to
+    expect(batch_job.batch?).to be(true)
     expect(batch_job.payload_object.mode).to eq(:serial)
     expect(batch_job.payload_object.jobs.map do |j|
       [j.payload_object.object, j.payload_object.method, j.payload_object.args]
@@ -50,7 +50,7 @@ shared_examples_for "Delayed::Batch" do
     expect(Delayed::Job.jobs_count(:current)).to eq(1)
 
     batch_job = Delayed::Job.find_available(1).first
-    expect(batch_job.batch?).to
+    expect(batch_job.batch?).to be(true)
     jobs = batch_job.payload_object.jobs
     expect(jobs.size).to eq(2)
     expect(jobs[0]).to be_new_record
data/spec/shared/delayed_method.rb
CHANGED
@@ -140,7 +140,7 @@ shared_examples_for "random ruby objects" do
     obj.test_method(7, synchronous: true)
     expect(obj.ran).to eq([7])
     obj.ran = nil
-    expect(obj.ran).to
+    expect(obj.ran).to be_nil
     obj.test_method(8, 9, synchronous: true)
     expect(obj.ran).to eq([8, 9])
   end
data/spec/shared/shared_backend.rb
CHANGED
@@ -233,7 +233,7 @@ shared_examples_for "a backend" do
   describe "#transfer_lock" do
     it "works" do
       job = create_job(locked_by: "worker", locked_at: Delayed::Job.db_time_now)
-      expect(job.transfer_lock!(from: "worker", to: "worker2")).to
+      expect(job.transfer_lock!(from: "worker", to: "worker2")).to be true
       expect(Delayed::Job.find(job.id).locked_by).to eq "worker2"
     end
   end
@@ -243,13 +243,13 @@ shared_examples_for "a backend" do
     job1 = create_job(strand: "myjobs")
     job2 = create_job(strand: "myjobs")
     expect(Delayed::Job.get_and_lock_next_available("w1")).to eq(job1)
-    expect(Delayed::Job.get_and_lock_next_available("w2")).to
+    expect(Delayed::Job.get_and_lock_next_available("w2")).to be_nil
     job1.destroy
     # update time since the failed lock pushed it forward
     job2.run_at = 1.minute.ago
     job2.save!
     expect(Delayed::Job.get_and_lock_next_available("w3")).to eq(job2)
-    expect(Delayed::Job.get_and_lock_next_available("w4")).to
+    expect(Delayed::Job.get_and_lock_next_available("w4")).to be_nil
   end
 
   it "fails to lock if an earlier job gets locked" do
@@ -301,7 +301,7 @@ shared_examples_for "a backend" do
     locked = [Delayed::Job.get_and_lock_next_available("w1"),
               Delayed::Job.get_and_lock_next_available("w2")]
     expect(jobs).to eq locked
-    expect(Delayed::Job.get_and_lock_next_available("w3")).to
+    expect(Delayed::Job.get_and_lock_next_available("w3")).to be_nil
   end
 
   it "does not interfere with jobs in other strands" do
@@ -309,7 +309,7 @@ shared_examples_for "a backend" do
     locked = [Delayed::Job.get_and_lock_next_available("w1"),
               Delayed::Job.get_and_lock_next_available("w2")]
     expect(jobs).to eq locked
-    expect(Delayed::Job.get_and_lock_next_available("w3")).to
+    expect(Delayed::Job.get_and_lock_next_available("w3")).to be_nil
   end
 
   it "does not find next jobs when given no priority" do
@@ -317,7 +317,7 @@ shared_examples_for "a backend" do
     first = Delayed::Job.get_and_lock_next_available("w1", Delayed::Settings.queue, nil, nil)
     second = Delayed::Job.get_and_lock_next_available("w2", Delayed::Settings.queue, nil, nil)
     expect(first).to eq jobs.first
-    expect(second).to
+    expect(second).to be_nil
   end
 
   it "complains if you pass more than one strand-based option" do
@@ -538,11 +538,11 @@ shared_examples_for "a backend" do
     Delayed::Job.get_and_lock_next_available("w1")
     @job2 = create_job(singleton: "myjobs")
 
-    expect(@job1.reload.next_in_strand).to
-    expect(@job2.reload.next_in_strand).to
+    expect(@job1.reload.next_in_strand).to be true
+    expect(@job2.reload.next_in_strand).to be false
 
     @job1.destroy
-    expect(@job2.reload.next_in_strand).to
+    expect(@job2.reload.next_in_strand).to be true
   end
 
   it "handles transitions correctly when going from not stranded to stranded" do
@@ -552,13 +552,13 @@ shared_examples_for "a backend" do
     Delayed::Job.get_and_lock_next_available("w1")
     @job3 = create_job(singleton: "myjobs", strand: "myjobs2")
 
-    expect(@job1.reload.next_in_strand).to
-    expect(@job2.reload.next_in_strand).to
-    expect(@job3.reload.next_in_strand).to
+    expect(@job1.reload.next_in_strand).to be true
+    expect(@job2.reload.next_in_strand).to be true
+    expect(@job3.reload.next_in_strand).to be false
 
     @job2.destroy
-    expect(@job1.reload.next_in_strand).to
-    expect(@job3.reload.next_in_strand).to
+    expect(@job1.reload.next_in_strand).to be true
+    expect(@job3.reload.next_in_strand).to be true
   end
 
   it "does not violate n_strand=1 constraints when going from not stranded to stranded" do
@@ -568,13 +568,13 @@ shared_examples_for "a backend" do
     Delayed::Job.get_and_lock_next_available("w1")
     @job3 = create_job(singleton: "myjobs", strand: "myjobs")
 
-    expect(@job1.reload.next_in_strand).to
-    expect(@job2.reload.next_in_strand).to
-    expect(@job3.reload.next_in_strand).to
+    expect(@job1.reload.next_in_strand).to be true
+    expect(@job2.reload.next_in_strand).to be true
+    expect(@job3.reload.next_in_strand).to be false
 
     @job2.destroy
-    expect(@job1.reload.next_in_strand).to
-    expect(@job3.reload.next_in_strand).to
+    expect(@job1.reload.next_in_strand).to be true
+    expect(@job3.reload.next_in_strand).to be false
  end
 
   it "handles transitions correctly when going from stranded to another strand" do
@@ -582,11 +582,11 @@ shared_examples_for "a backend" do
     Delayed::Job.get_and_lock_next_available("w1")
     @job2 = create_job(singleton: "myjobs", strand: "myjobs2")
 
-    expect(@job1.reload.next_in_strand).to
-    expect(@job2.reload.next_in_strand).to
+    expect(@job1.reload.next_in_strand).to be true
+    expect(@job2.reload.next_in_strand).to be false
 
     @job1.destroy
-    expect(@job2.reload.next_in_strand).to
+    expect(@job2.reload.next_in_strand).to be true
   end
 
   it "does not violate n_strand=1 constraints when going from stranded to another strand" do
@@ -596,24 +596,24 @@ shared_examples_for "a backend" do
     Delayed::Job.get_and_lock_next_available("w1")
     @job3 = create_job(singleton: "myjobs", strand: "myjobs2")
 
-    expect(@job1.reload.next_in_strand).to
-    expect(@job2.reload.next_in_strand).to
-    expect(@job3.reload.next_in_strand).to
+    expect(@job1.reload.next_in_strand).to be true
+    expect(@job2.reload.next_in_strand).to be true
+    expect(@job3.reload.next_in_strand).to be false
 
     @job2.destroy
-    expect(@job1.reload.next_in_strand).to
-    expect(@job3.reload.next_in_strand).to
+    expect(@job1.reload.next_in_strand).to be true
+    expect(@job3.reload.next_in_strand).to be false
   end
 
   it "creates first as true, and second as false, then transitions to second when deleted" do
     @job1 = create_job(singleton: "myjobs")
     Delayed::Job.get_and_lock_next_available("w1")
     @job2 = create_job(singleton: "myjobs")
-    expect(@job1.reload.next_in_strand).to
-    expect(@job2.reload.next_in_strand).to
+    expect(@job1.reload.next_in_strand).to be true
+    expect(@job2.reload.next_in_strand).to be false
 
     @job1.destroy
-    expect(@job2.reload.next_in_strand).to
+    expect(@job2.reload.next_in_strand).to be true
   end
 
   it "when combined with a strand" do
@@ -834,9 +834,9 @@ shared_examples_for "a backend" do
 
   it "sets in_delayed_job?" do
     job = InDelayedJobTest.delay(ignore_transaction: true).check_in_job
-    expect(Delayed::Job.in_delayed_job?).to
+    expect(Delayed::Job.in_delayed_job?).to be(false)
     job.invoke_job
-    expect(Delayed::Job.in_delayed_job?).to
+    expect(Delayed::Job.in_delayed_job?).to be(false)
   end
 
   it "fails on job creation if an unsaved AR object is used" do
data/spec/shared/worker.rb
CHANGED
@@ -244,7 +244,7 @@ shared_examples_for "Delayed::Worker" do
   end
 
   it "is failed if it failed more than Settings.max_attempts times" do
-    expect(@job.failed_at).to
+    expect(@job.failed_at).to be_nil
     Delayed::Settings.max_attempts.times { @job.reschedule }
     expect(Delayed::Job.list_jobs(:failed, 100).size).to eq(1)
   end
@@ -252,7 +252,7 @@ shared_examples_for "Delayed::Worker" do
   it "is not failed if it failed fewer than Settings.max_attempts times" do
     (Delayed::Settings.max_attempts - 1).times { @job.reschedule }
     @job = Delayed::Job.find(@job.id)
-    expect(@job.failed_at).to
+    expect(@job.failed_at).to be_nil
   end
 
   it "is failed if it has expired" do
@@ -396,7 +396,7 @@ shared_examples_for "Delayed::Worker" do
     expect(@worker).to receive(:exit?).and_return(true)
     Delayed::Worker.lifecycle.before(:execute) { |w| w == @worker && fired = true }
     @worker.start
-    expect(fired).to
+    expect(fired).to be(true)
   end
 end
 
data/spec/spec_helper.rb
CHANGED
@@ -55,14 +55,16 @@ connection_config = {
   encoding: "utf8",
   username: ENV["TEST_DB_USERNAME"],
   database: ENV["TEST_DB_DATABASE"],
-  min_messages: "notice"
+  min_messages: "notice",
+  # Ensure the pool is big enough the deadlock tests don't get starved for connections by rails instead
+  pool: 20
 }
 
 def migrate(file)
   ActiveRecord::MigrationContext.new(file, ActiveRecord::SchemaMigration).migrate
 end
 
-# create the test db if it does not exist
+# create the test db if it does not exist
 ActiveRecord::Base.establish_connection(connection_config.merge(database: "postgres"))
 begin
   ActiveRecord::Base.connection.create_database(connection_config[:database])
metadata
CHANGED
@@ -1,16 +1,16 @@
 --- !ruby/object:Gem::Specification
 name: inst-jobs
 version: !ruby/object:Gem::Version
-  version: 3.0.
+  version: 3.0.10
 platform: ruby
 authors:
 - Cody Cutrer
 - Ethan Vizitei
 - Jacob Burroughs
-autorequire:
+autorequire:
 bindir: exe
 cert_chain: []
-date: 2022-
+date: 2022-03-21 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activerecord
@@ -32,14 +32,14 @@ dependencies:
   requirements:
   - - "~>"
     - !ruby/object:Gem::Version
-      version:
+      version: 0.4.4
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version:
+        version: 0.4.4
 - !ruby/object:Gem::Dependency
   name: activesupport
   requirement: !ruby/object:Gem::Requirement
@@ -410,21 +410,7 @@ dependencies:
   - - ">="
     - !ruby/object:Gem::Version
       version: '0'
-
-  name: wwtd
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-    - !ruby/object:Gem::Version
-      version: 1.4.0
-  type: :development
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-    - !ruby/object:Gem::Version
-      version: 1.4.0
-description:
+description:
 email:
 - cody@instructure.com
 - evizitei@instructure.com
@@ -469,6 +455,10 @@ files:
 - db/migrate/20211207094200_update_after_delete_trigger_for_singleton_transition_cases.rb
 - db/migrate/20211220112800_fix_singleton_race_condition_insert.rb
 - db/migrate/20211220113000_fix_singleton_race_condition_delete.rb
+- db/migrate/20220127091200_fix_singleton_unique_constraint.rb
+- db/migrate/20220128084800_update_insert_trigger_for_singleton_unique_constraint_change.rb
+- db/migrate/20220128084900_update_delete_trigger_for_singleton_unique_constraint_change.rb
+- db/migrate/20220203063200_remove_old_singleton_index.rb
 - exe/inst_jobs
 - lib/delayed/backend/active_record.rb
 - lib/delayed/backend/base.rb
@@ -534,8 +524,9 @@ files:
 - spec/spec_helper.rb
 homepage: https://github.com/instructure/inst-jobs
 licenses: []
-metadata:
-
+metadata:
+  rubygems_mfa_required: 'true'
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -551,31 +542,31 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubygems_version: 3.1.4
-signing_key:
+signing_key:
 specification_version: 4
 summary: Instructure-maintained fork of delayed_job
 test_files:
-- spec/
+- spec/sample_jobs.rb
+- spec/spec_helper.rb
+- spec/shared_jobs_specs.rb
+- spec/shared/performable_method.rb
+- spec/shared/testing.rb
+- spec/shared/delayed_batch.rb
+- spec/shared/worker.rb
+- spec/shared/delayed_method.rb
+- spec/shared/shared_backend.rb
+- spec/migrate/20140924140513_add_story_table.rb
+- spec/delayed/server_spec.rb
 - spec/delayed/cli_spec.rb
 - spec/delayed/daemon_spec.rb
-- spec/delayed/
+- spec/delayed/worker_spec.rb
 - spec/delayed/periodic_spec.rb
-- spec/delayed/
+- spec/delayed/message_sending_spec.rb
 - spec/delayed/settings_spec.rb
 - spec/delayed/work_queue/in_process_spec.rb
+- spec/delayed/work_queue/parent_process_spec.rb
 - spec/delayed/work_queue/parent_process/client_spec.rb
 - spec/delayed/work_queue/parent_process/server_spec.rb
-- spec/delayed/work_queue/parent_process_spec.rb
-- spec/delayed/worker/consul_health_check_spec.rb
 - spec/delayed/worker/health_check_spec.rb
-- spec/delayed/
-- spec/
-- spec/sample_jobs.rb
-- spec/shared/delayed_batch.rb
-- spec/shared/delayed_method.rb
-- spec/shared/performable_method.rb
-- spec/shared/shared_backend.rb
-- spec/shared/testing.rb
-- spec/shared/worker.rb
-- spec/shared_jobs_specs.rb
-- spec/spec_helper.rb
+- spec/delayed/worker/consul_health_check_spec.rb
+- spec/active_record_job_spec.rb