switchman-inst-jobs 3.2.6 → 4.0.3
This diff shows the content of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in that registry.
- checksums.yaml +4 -4
- data/db/migrate/20101216224513_create_delayed_jobs.rb +1 -1
- data/db/migrate/20110208031356_add_delayed_jobs_tag.rb +1 -1
- data/db/migrate/20110426161613_add_delayed_jobs_max_attempts.rb +1 -1
- data/db/migrate/20110516225834_add_delayed_jobs_strand.rb +1 -1
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +1 -1
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +1 -1
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +1 -1
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +1 -1
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -1
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +1 -1
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +2 -2
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
- data/db/migrate/20140505215131_add_failed_jobs_original_job_id.rb +1 -1
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -2
- data/db/migrate/20140505223637_drop_failed_jobs_original_id.rb +1 -1
- data/db/migrate/20140512213941_add_source_to_jobs.rb +1 -1
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +1 -1
- data/db/migrate/20151123210429_add_expires_at_to_jobs.rb +1 -1
- data/db/migrate/20151210162949_improve_max_concurrent.rb +1 -1
- data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +1 -1
- data/db/migrate/20170308045400_add_shard_id_to_delayed_jobs.rb +1 -11
- data/db/migrate/20170308045401_add_delayed_jobs_shard_id_to_switchman_shards.rb +5 -0
- data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +1 -1
- data/db/migrate/20190726154743_make_critical_columns_not_null.rb +1 -1
- data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +2 -2
- data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +1 -1
- data/db/migrate/20200825011002_add_strand_order_override.rb +2 -2
- data/db/migrate/20211220112800_fix_singleton_race_condition_insert.rb +59 -0
- data/db/migrate/20211220113000_fix_singleton_race_condition_delete.rb +207 -0
- data/lib/switchman_inst_jobs/active_record/connection_adapters/connection_pool.rb +15 -0
- data/lib/switchman_inst_jobs/delayed/backend/base.rb +9 -8
- data/lib/switchman_inst_jobs/delayed/pool.rb +1 -1
- data/lib/switchman_inst_jobs/delayed/worker/health_check.rb +10 -12
- data/lib/switchman_inst_jobs/delayed/worker.rb +2 -2
- data/lib/switchman_inst_jobs/engine.rb +6 -4
- data/lib/switchman_inst_jobs/jobs_migrator.rb +25 -21
- data/lib/switchman_inst_jobs/switchman/shard.rb +8 -21
- data/lib/switchman_inst_jobs/version.rb +1 -1
- data/lib/switchman_inst_jobs.rb +4 -0
- metadata +60 -22
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f07c44f5a58d897c8dbcc46a40906cde8c6a99b39723286a405669cf159019e4
+  data.tar.gz: cf730bfc3d3ad6d2c7da7712c3c097322421820b60f2d6af025ef610f0f3aa09
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9189978ce61d257fb25bbf57c23a36998437b3c4aa6adb93798af07ea878caf244a1514c5711c5190b39836169607e869f65b89ed71cc08d44fabfd9d6fe0e8e
+  data.tar.gz: d4a76e521ee4ba38bb2172a1ffabcc4b139a204d24690a2662ca7e8587d367ba88d98db5912c716421c96940f309296f6b35e6b292025326d61e32e03217d001
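Note: the digests above cover the metadata.gz and data.tar.gz entries inside the .gem archive (a .gem is a tar file), not the .gem file itself. A minimal verification sketch in Ruby; the local filename is an assumption:

    require 'digest'
    require 'rubygems/package'

    # Read the .gem as a tar archive and hash the two members that
    # checksums.yaml covers, then compare against the SHA256 values above.
    File.open('switchman-inst-jobs-4.0.3.gem', 'rb') do |io|
      Gem::Package::TarReader.new(io) do |tar|
        tar.each do |entry|
          next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)

          puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
        end
      end
    end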
data/db/migrate/20120607181141_index_jobs_on_locked_by.rb
CHANGED
@@ -1,8 +1,8 @@
 class IndexJobsOnLockedBy < ActiveRecord::Migration[4.2]
-  disable_ddl_transaction!
+  disable_ddl_transaction!
 
   def connection
-    Delayed::Backend::ActiveRecord::
+    Delayed::Backend::ActiveRecord::AbstractJob.connection
   end
 
   def up
data/db/migrate/20120608191051_add_jobs_run_at_index.rb
CHANGED
@@ -1,8 +1,8 @@
 class AddJobsRunAtIndex < ActiveRecord::Migration[4.2]
-  disable_ddl_transaction!
+  disable_ddl_transaction!
 
   def connection
-    Delayed::Backend::ActiveRecord::
+    Delayed::Backend::ActiveRecord::AbstractJob.connection
   end
 
   def up
data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb
CHANGED
@@ -1,11 +1,11 @@
 class CopyFailedJobsOriginalId < ActiveRecord::Migration[4.2]
   def connection
-    Delayed::Backend::ActiveRecord::
+    Delayed::Backend::ActiveRecord::AbstractJob.connection
   end
 
   def up
     # this is a smaller, less frequently accessed table, so we just update all at once
-    Delayed::Backend::ActiveRecord::Job::Failed.where(
+    Delayed::Backend::ActiveRecord::Job::Failed.where(original_job_id: nil).update_all('original_job_id = original_id')
   end
 
   def down; end
data/db/migrate/20170308045400_add_shard_id_to_delayed_jobs.rb
CHANGED
@@ -2,7 +2,7 @@ class AddShardIdToDelayedJobs < ActiveRecord::Migration[4.2]
   disable_ddl_transaction!
 
   def connection
-    Delayed::Backend::ActiveRecord::
+    Delayed::Backend::ActiveRecord::AbstractJob.connection
   end
 
   def up
@@ -11,19 +11,9 @@ class AddShardIdToDelayedJobs < ActiveRecord::Migration[4.2]
 
     add_column :failed_jobs, :shard_id, :integer, limit: 8
     add_index :failed_jobs, :shard_id, algorithm: :concurrently
-
-    add_column :switchman_shards, :delayed_jobs_shard_id, :integer, limit: 8
-    add_foreign_key(
-      :switchman_shards,
-      :switchman_shards,
-      column: :delayed_jobs_shard_id
-    )
   end
 
   def down
-    remove_foreign_key :switchman_shards, column: :delayed_jobs_shard_id
-    remove_column :switchman_shards, :delayed_jobs_shard_id
-
     remove_index :failed_jobs, :shard_id
     remove_column :failed_jobs, :shard_id
 
data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb
CHANGED
@@ -1,8 +1,8 @@
 class AddIdToGetDelayedJobsIndex < ActiveRecord::Migration[4.2]
-  disable_ddl_transaction!
+  disable_ddl_transaction!
 
   def connection
-    Delayed::
+    Delayed::Backend::ActiveRecord::AbstractJob.connection
   end
 
   def up
data/db/migrate/20200825011002_add_strand_order_override.rb
CHANGED
@@ -1,8 +1,8 @@
 class AddStrandOrderOverride < ActiveRecord::Migration[4.2]
-  disable_ddl_transaction!
+  disable_ddl_transaction!
 
   def connection
-    Delayed::
+    Delayed::Backend::ActiveRecord::AbstractJob.connection
   end
 
   def up
data/db/migrate/20211220112800_fix_singleton_race_condition_insert.rb
ADDED
@@ -0,0 +1,59 @@
+# frozen_string_literal: true
+
+class FixSingletonRaceConditionInsert < ActiveRecord::Migration[5.2]
+  def change
+    reversible do |direction|
+      direction.up do
+        execute(<<~SQL)
+          CREATE OR REPLACE FUNCTION #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')} () RETURNS trigger AS $$
+          BEGIN
+            IF NEW.strand IS NOT NULL THEN
+              PERFORM pg_advisory_xact_lock(half_md5_as_bigint(NEW.strand));
+              IF (SELECT COUNT(*) FROM (
+                  SELECT 1 FROM delayed_jobs WHERE strand = NEW.strand AND next_in_strand=true LIMIT NEW.max_concurrent
+                ) s) = NEW.max_concurrent THEN
+                NEW.next_in_strand := false;
+              END IF;
+            END IF;
+            IF NEW.singleton IS NOT NULL THEN
+              PERFORM pg_advisory_xact_lock(half_md5_as_bigint(CONCAT('singleton:', NEW.singleton)));
+              -- this condition seems silly, but it forces postgres to use the two partial indexes on singleton,
+              -- rather than doing a seq scan
+              PERFORM 1 FROM delayed_jobs WHERE singleton = NEW.singleton AND (locked_by IS NULL OR locked_by IS NOT NULL);
+              IF FOUND THEN
+                NEW.next_in_strand := false;
+              END IF;
+            END IF;
+            RETURN NEW;
+          END;
+          $$ LANGUAGE plpgsql SET search_path TO #{::Switchman::Shard.current.name};
+        SQL
+      end
+      direction.down do
+        execute(<<~SQL)
+          CREATE OR REPLACE FUNCTION #{connection.quote_table_name('delayed_jobs_before_insert_row_tr_fn')} () RETURNS trigger AS $$
+          BEGIN
+            IF NEW.strand IS NOT NULL THEN
+              PERFORM pg_advisory_xact_lock(half_md5_as_bigint(NEW.strand));
+              IF (SELECT COUNT(*) FROM (
+                  SELECT 1 FROM delayed_jobs WHERE strand = NEW.strand AND next_in_strand=true LIMIT NEW.max_concurrent
+                ) s) = NEW.max_concurrent THEN
+                NEW.next_in_strand := false;
+              END IF;
+            END IF;
+            IF NEW.singleton IS NOT NULL THEN
+              -- this condition seems silly, but it forces postgres to use the two partial indexes on singleton,
+              -- rather than doing a seq scan
+              PERFORM 1 FROM delayed_jobs WHERE singleton = NEW.singleton AND (locked_by IS NULL OR locked_by IS NOT NULL);
+              IF FOUND THEN
+                NEW.next_in_strand := false;
+              END IF;
+            END IF;
+            RETURN NEW;
+          END;
+          $$ LANGUAGE plpgsql SET search_path TO #{::Switchman::Shard.current.name};
+        SQL
+      end
+    end
+  end
+end
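Note: the fix in this migration serializes concurrent singleton inserts with a transaction-scoped advisory lock keyed on the singleton name, so two simultaneous enqueues can no longer both see "no queued row" and both become runnable. A sketch of the locking primitive in isolation (connection details are hypothetical; half_md5_as_bigint is the helper function inst-jobs defines in the jobs schema):

    require 'pg' # assumes the pg gem

    conn = PG.connect(dbname: 'jobs_shard') # hypothetical database
    conn.transaction do |c|
      # Take the same transaction-scoped lock the trigger takes for a singleton.
      c.exec_params(
        "SELECT pg_advisory_xact_lock(half_md5_as_bigint('singleton:' || $1::text))",
        ['MyApp::NightlyReport'] # hypothetical singleton name
      )
      # While this transaction is open, a concurrent INSERT of the same singleton
      # blocks inside delayed_jobs_before_insert_row_tr_fn, so at most one queued
      # row can end up with next_in_strand = true.
    end # the advisory lock is released automatically at COMMIT/ROLLBACK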
data/db/migrate/20211220113000_fix_singleton_race_condition_delete.rb
ADDED
@@ -0,0 +1,207 @@
+# frozen_string_literal: true
+
+class FixSingletonRaceConditionDelete < ActiveRecord::Migration[5.2]
+  def up
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn')} () RETURNS trigger AS $$
+      DECLARE
+        next_strand varchar;
+        running_count integer;
+        should_lock boolean;
+        should_be_precise boolean;
+        update_query varchar;
+        skip_locked varchar;
+        transition boolean;
+      BEGIN
+        IF OLD.strand IS NOT NULL THEN
+          should_lock := true;
+          should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
+
+          IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) subquery_for_count);
+            should_lock := running_count < OLD.max_concurrent;
+          END IF;
+
+          IF should_lock THEN
+            PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
+          END IF;
+
+          -- note that we don't really care if the row we're deleting has a singleton, or if it even
+          -- matches the row(s) we're going to update. we just need to make sure that whatever
+          -- singleton we grab isn't already running (which is a simple existence check, since
+          -- the unique indexes ensure there is at most one singleton running, and one queued)
+          update_query := 'UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.strand=$1.strand AND
+                (j2.singleton IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.singleton=j2.singleton AND j3.id<>j2.id AND (j3.locked_by IS NULL OR j3.locked_by IS NOT NULL)))
+              ORDER BY j2.strand_order_override ASC, j2.id ASC
+              LIMIT ';
+
+          IF should_be_precise THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) s);
+            IF running_count < OLD.max_concurrent THEN
+              update_query := update_query || '($1.max_concurrent - $2)';
+            ELSE
+              -- we have too many running already; just bail
+              RETURN OLD;
+            END IF;
+          ELSE
+            update_query := update_query || '1';
+
+            -- n-strands don't require precise ordering; we can make this query more performant
+            IF OLD.max_concurrent > 1 THEN
+              skip_locked := ' SKIP LOCKED';
+            END IF;
+          END IF;
+
+          update_query := update_query || ' FOR UPDATE' || COALESCE(skip_locked, '') || ')';
+          EXECUTE update_query USING OLD, running_count;
+        END IF;
+
+        IF OLD.singleton IS NOT NULL THEN
+          PERFORM pg_advisory_xact_lock(half_md5_as_bigint(CONCAT('singleton:', OLD.singleton)));
+
+          transition := EXISTS (SELECT 1 FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL);
+
+          IF transition THEN
+            next_strand := (SELECT j1.strand FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL AND j1.strand IS NOT NULL LIMIT 1);
+
+            IF next_strand IS NOT NULL THEN
+              -- if the singleton has a new strand defined, we need to lock it to ensure we obey n_strand constraints --
+              IF NOT pg_try_advisory_xact_lock(half_md5_as_bigint(next_strand)) THEN
+                -- a failure to acquire the lock means that another process already has it and will thus handle this singleton --
+                RETURN OLD;
+              END IF;
+            END IF;
+          ELSIF OLD.strand IS NOT NULL THEN
+            -- if there is no transition and there is a strand then we have already handled this singleton in the case above --
+            RETURN OLD;
+          END IF;
+
+          -- handles transitioning a singleton from stranded to not stranded --
+          -- handles transitioning a singleton from unstranded to stranded --
+          -- handles transitioning a singleton from strand A to strand B --
+          -- these transitions are a relatively rare case, so we take a shortcut and --
+          -- only start the next singleton if its strand does not currently have any running jobs --
+          -- if it does, the next stranded job that finishes will start this singleton if it can --
+          UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.singleton=OLD.singleton AND
+                j2.locked_by IS NULL AND
+                (j2.strand IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.strand=j2.strand AND j3.id<>j2.id))
+            FOR UPDATE
+          );
+        END IF;
+        RETURN OLD;
+      END;
+      $$ LANGUAGE plpgsql SET search_path TO #{::Switchman::Shard.current.name};
+    SQL
+  end
+
+  def down
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION #{connection.quote_table_name('delayed_jobs_after_delete_row_tr_fn')} () RETURNS trigger AS $$
+      DECLARE
+        next_strand varchar;
+        running_count integer;
+        should_lock boolean;
+        should_be_precise boolean;
+        update_query varchar;
+        skip_locked varchar;
+        transition boolean;
+      BEGIN
+        IF OLD.strand IS NOT NULL THEN
+          should_lock := true;
+          should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
+
+          IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) subquery_for_count);
+            should_lock := running_count < OLD.max_concurrent;
+          END IF;
+
+          IF should_lock THEN
+            PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
+          END IF;
+
+          -- note that we don't really care if the row we're deleting has a singleton, or if it even
+          -- matches the row(s) we're going to update. we just need to make sure that whatever
+          -- singleton we grab isn't already running (which is a simple existence check, since
+          -- the unique indexes ensure there is at most one singleton running, and one queued)
+          update_query := 'UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.strand=$1.strand AND
+                (j2.singleton IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.singleton=j2.singleton AND j3.id<>j2.id AND (j3.locked_by IS NULL OR j3.locked_by IS NOT NULL)))
+              ORDER BY j2.strand_order_override ASC, j2.id ASC
+              LIMIT ';
+
+          IF should_be_precise THEN
+            running_count := (SELECT COUNT(*) FROM (
+              SELECT 1 FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+            ) s);
+            IF running_count < OLD.max_concurrent THEN
+              update_query := update_query || '($1.max_concurrent - $2)';
+            ELSE
+              -- we have too many running already; just bail
+              RETURN OLD;
+            END IF;
+          ELSE
+            update_query := update_query || '1';
+
+            -- n-strands don't require precise ordering; we can make this query more performant
+            IF OLD.max_concurrent > 1 THEN
+              skip_locked := ' SKIP LOCKED';
+            END IF;
+          END IF;
+
+          update_query := update_query || ' FOR UPDATE' || COALESCE(skip_locked, '') || ')';
+          EXECUTE update_query USING OLD, running_count;
+        END IF;
+
+        IF OLD.singleton IS NOT NULL THEN
+          transition := EXISTS (SELECT 1 FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL);
+
+          IF transition THEN
+            next_strand := (SELECT j1.strand FROM delayed_jobs AS j1 WHERE j1.singleton = OLD.singleton AND j1.strand IS DISTINCT FROM OLD.strand AND locked_by IS NULL AND j1.strand IS NOT NULL LIMIT 1);
+
+            IF next_strand IS NOT NULL THEN
+              -- if the singleton has a new strand defined, we need to lock it to ensure we obey n_strand constraints --
+              IF NOT pg_try_advisory_xact_lock(half_md5_as_bigint(next_strand)) THEN
+                -- a failure to acquire the lock means that another process already has it and will thus handle this singleton --
+                RETURN OLD;
+              END IF;
+            END IF;
+          ELSIF OLD.strand IS NOT NULL THEN
+            -- if there is no transition and there is a strand then we have already handled this singleton in the case above --
+            RETURN OLD;
+          END IF;
+
+          -- handles transitioning a singleton from stranded to not stranded --
+          -- handles transitioning a singleton from unstranded to stranded --
+          -- handles transitioning a singleton from strand A to strand B --
+          -- these transitions are a relatively rare case, so we take a shortcut and --
+          -- only start the next singleton if its strand does not currently have any running jobs --
+          -- if it does, the next stranded job that finishes will start this singleton if it can --
+          UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+            SELECT id FROM delayed_jobs j2
+              WHERE next_in_strand=false AND
+                j2.singleton=OLD.singleton AND
+                j2.locked_by IS NULL AND
+                (j2.strand IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.strand=j2.strand AND j3.id<>j2.id))
+            FOR UPDATE
+          );
+        END IF;
+        RETURN OLD;
+      END;
+      $$ LANGUAGE plpgsql SET search_path TO #{::Switchman::Shard.current.name};
+    SQL
+  end
+end
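Note: in both versions of the delete trigger above, the expensive bookkeeping is sampled rather than constant: should_be_precise is true only when the deleted row's id is a multiple of max_concurrent * 4, and only that path runs the exact bounded count and batch unlock; every other delete unlocks a single next job. A worked example of the sampling rate (numbers illustrative):

    # For an n-strand with max_concurrent = 20, only deletes whose job id is a
    # multiple of 20 * 4 = 80 take the precise path.
    max_concurrent = 20
    ids = 1..1_000
    precise = ids.count { |id| (id % (max_concurrent * 4)).zero? }
    puts precise             # => 12 precise deletes out of 1000
    puts ids.count - precise # => 988 fast-path deletes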
data/lib/switchman_inst_jobs/active_record/connection_adapters/connection_pool.rb
ADDED
@@ -0,0 +1,15 @@
+module SwitchmanInstJobs
+  module ActiveRecord
+    module ConnectionAdapters
+      module ConnectionPool
+        def shard
+          if connection_klass == ::Delayed::Backend::ActiveRecord::AbstractJob
+            return shard_stack.last || ::Switchman::Shard.current(::ActiveRecord::Base).delayed_jobs_shard
+          end
+
+          super
+        end
+      end
+    end
+  end
+end
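Note: this override is what routes the new ::Delayed::Backend::ActiveRecord::AbstractJob connection class to the paired jobs shard by default; the rest of the diff then activates that class wherever job tables are touched. A usage sketch under that assumption (shard variables are hypothetical):

    # Inside an ordinary shard activation, AbstractJob's pool resolves to the
    # shard's delayed_jobs_shard, so job queries land on the jobs database:
    some_shard.activate do
      ::Delayed::Job.count # queries some_shard.delayed_jobs_shard
    end

    # Activating AbstractJob explicitly pins job queries to a chosen jobs shard,
    # the pattern used throughout this diff:
    jobs_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
      ::Delayed::Job.count # queries jobs_shard
    end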
data/lib/switchman_inst_jobs/delayed/backend/base.rb
CHANGED
@@ -24,19 +24,18 @@ module SwitchmanInstJobs
       # In general this will only happen in unusual circumstances like tests
       # also if migrations are running, always use the current shard's job shard
       if ::ActiveRecord::Migration.open_migrations.zero? &&
-         current_shard.delayed_jobs_shard !=
+         current_shard.delayed_jobs_shard !=
+         ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
         enqueue_job.call
       else
-        ::Switchman::Shard.
-        current_shard = ::Switchman::Shard.lookup(current_shard.id)
-      end
+        current_shard = ::Switchman::Shard.lookup(current_shard.id)
         current_job_shard = current_shard.delayed_jobs_shard
 
         if (options[:singleton] || options[:strand]) && current_shard.block_stranded
           enqueue_options[:next_in_strand] = false
         end
 
-        current_job_shard.activate(
+        current_job_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
           enqueue_job.call
         end
       end
@@ -50,7 +49,7 @@ module SwitchmanInstJobs
       shard_ids = configured_shard_ids
       if shard_ids.any?
         shards = shard_ids.map { |shard_id| ::Delayed::Worker.shard(shard_id) }
-        ::Switchman::Shard.with_each_shard(shards, [
+        ::Switchman::Shard.with_each_shard(shards, [::Delayed::Backend::ActiveRecord::AbstractJob]) do
           super
         end
       else
@@ -61,7 +60,9 @@ module SwitchmanInstJobs
 
     def self.prepended(base)
       base.singleton_class.prepend(ClassMethods)
-
+      return unless base.name == 'Delayed::Backend::ActiveRecord::Job'
+
+      ::Delayed::Backend::ActiveRecord::AbstractJob.sharded_model
     end
 
     def current_shard
@@ -90,7 +91,7 @@ module SwitchmanInstJobs
       raise ShardNotFoundError, shard_id unless current_shard
 
       current_shard.activate { super }
-    rescue
+    rescue PG::ConnectionBad, PG::UndefinedTable
       # likely a missing shard with a stale cache
       current_shard.send(:clear_cache)
       ::Switchman::Shard.clear_cache
data/lib/switchman_inst_jobs/delayed/pool.rb
CHANGED
@@ -20,7 +20,7 @@ module SwitchmanInstJobs
         shard_ids = @config[:workers].pluck(:shard).uniq
         shards = shard_ids.map { |shard_id| ::Delayed::Worker.shard(shard_id) }
       end
-      ::Switchman::Shard.with_each_shard(shards, [
+      ::Switchman::Shard.with_each_shard(shards, [::Delayed::Backend::ActiveRecord::AbstractJob]) do
         super
       end
     end
data/lib/switchman_inst_jobs/delayed/worker/health_check.rb
CHANGED
@@ -21,18 +21,16 @@ module SwitchmanInstJobs
       def reschedule_abandoned_jobs
         shard_ids = ::SwitchmanInstJobs::Delayed::Settings.configured_shard_ids
         shards = shard_ids.map { |shard_id| ::Delayed::Worker.shard(shard_id) }
-        ::Switchman::Shard.with_each_shard(shards,
-
-
-
-
-
-
-
-
-          super()
-        end
+        ::Switchman::Shard.with_each_shard(shards,
+                                           [::ActiveRecord::Base, ::Delayed::Backend::ActiveRecord::AbstractJob]) do
+          munge_service_name(::Switchman::Shard.current) do
+            # because this rescheduling process is running on every host, we need
+            # to make sure that it's functioning for each shard the current
+            # host is programmed to interact with, but ONLY for those shards.
+            # reading the config lets us iterate over any shards this host should
+            # work with and lets us pick the correct service name to identify which
+            # hosts are currently alive and valid via the health checks
+            super()
           end
         end
       end
data/lib/switchman_inst_jobs/delayed/worker.rb
CHANGED
@@ -16,14 +16,14 @@ module SwitchmanInstJobs
     end
 
     def start
-      shard.activate(
+      shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) { super }
     end
 
     # Worker#run is usually only called from Worker#start, but if the worker
     # is called directly from the console, we want to make sure it still gets
     # the right shard activated.
     def run
-      shard.activate(
+      shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) { super }
     end
 
     def shard
data/lib/switchman_inst_jobs/engine.rb
CHANGED
@@ -11,7 +11,9 @@ module SwitchmanInstJobs
 
       ::Delayed::Worker.lifecycle.around(:work_queue_pop) do |worker, config, &block|
         if config[:shard]
-          ::Switchman::Shard.lookup(config[:shard]).activate(
+          ::Switchman::Shard.lookup(config[:shard]).activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
+            block.call(worker, config)
+          end
         else
           block.call(worker, config)
         end
@@ -23,8 +25,8 @@ module SwitchmanInstJobs
         ::Switchman::Shard.clear_cache
         ::Switchman::Shard.default.activate do
           current_job_shard = ::Switchman::Shard.lookup(job.shard_id).delayed_jobs_shard
-          if current_job_shard != ::Switchman::Shard.current(
-            current_job_shard.activate(
+          if current_job_shard != ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
+            current_job_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
               j = ::Delayed::Job.where(strand: job.strand).next_in_strand_order.first
               j.update_column(:next_in_strand, true) if j && !j.next_in_strand
             end
@@ -43,7 +45,7 @@ module SwitchmanInstJobs
       end
 
       config.after_initialize do
-        ::Switchman::Shard.default.delayed_jobs_shard.activate!(
+        ::Switchman::Shard.default.delayed_jobs_shard.activate!(::Delayed::Backend::ActiveRecord::AbstractJob)
       end
     end
   end
data/lib/switchman_inst_jobs/jobs_migrator.rb
CHANGED
@@ -13,10 +13,10 @@ module SwitchmanInstJobs
       return yield if shards.empty?
 
       shard = shards.pop
-      current_shard = ::Switchman::Shard.current(
-      shard.activate(
+      current_shard = ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
+      shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
         ::Delayed::Job.transaction do
-          current_shard.activate(
+          current_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
             transaction_on(shards, &block)
           end
         end
@@ -45,11 +45,11 @@ module SwitchmanInstJobs
       # rubocop:disable Style/CombinableLoops
       # We first migrate strands so that we can stop blocking strands before we migrate unstranded jobs
       source_shards.each do |s|
-        ::Switchman::Shard.lookup(s).activate(
+        ::Switchman::Shard.lookup(s).activate(::Delayed::Backend::ActiveRecord::AbstractJob) { migrate_strands }
       end
 
       source_shards.each do |s|
-        ::Switchman::Shard.lookup(s).activate(
+        ::Switchman::Shard.lookup(s).activate(::Delayed::Backend::ActiveRecord::AbstractJob) { migrate_everything }
       end
       ensure_unblock_stranded_for(shard_map.map(&:first))
       # rubocop:enable Style/CombinableLoops
@@ -72,7 +72,7 @@ module SwitchmanInstJobs
 
     def clear_shard_cache(debug_message = nil)
       ::Switchman.cache.clear
-      Rails.logger.debug
+      Rails.logger.debug { "Waiting for caches to clear #{debug_message}" }
       # Wait a little over the 60 second in-process shard cache clearing
       # threshold to ensure that all new stranded jobs are now being
       # enqueued with next_in_strand: false
@@ -99,7 +99,7 @@ module SwitchmanInstJobs
       # 4) no running job, jobs moved: set next_in_strand=true on the first of
       #    those (= do nothing since it should already be true)
 
-      source_shard = ::Switchman::Shard.current(
+      source_shard = ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
       strand_scope = ::Delayed::Job.shard(source_shard).where.not(strand: nil)
       shard_map = build_shard_map(strand_scope, source_shard)
       shard_map.each do |(target_shard, source_shard_ids)|
@@ -108,7 +108,7 @@ module SwitchmanInstJobs
       # 1) is taken care of because it should not show up here in strands
       strands = shard_scope.distinct.order(:strand).pluck(:strand)
 
-      target_shard.activate(
+      target_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
         strands.each do |strand|
           transaction_on([source_shard, target_shard]) do
             this_strand_scope = shard_scope.where(strand: strand)
@@ -167,7 +167,7 @@ module SwitchmanInstJobs
     end
 
     def unblock_strands(target_shard)
-      target_shard.activate(
+      target_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
         loop do
           # We only want to unlock stranded jobs where they don't belong to a blocked shard (if they *do* belong)
           # to a blocked shard, they must be part of a concurrent jobs migration from a different source shard to
@@ -186,8 +186,8 @@ module SwitchmanInstJobs
     end
 
     def migrate_everything
-      source_shard = ::Switchman::Shard.current(
-      scope = ::Delayed::Job.shard(source_shard).where(
+      source_shard = ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
+      scope = ::Delayed::Job.shard(source_shard).where(strand: nil)
 
       shard_map = build_shard_map(scope, source_shard)
       shard_map.each do |(target_shard, source_shard_ids)|
@@ -220,16 +220,20 @@ module SwitchmanInstJobs
       # Adapted from get_and_lock_next_available in delayed/backend/active_record.rb
       target_jobs = scope.limit(1000).lock('FOR UPDATE SKIP LOCKED')
 
-      query = source_shard.activate(
-
-
-
-
-
-
+      query = source_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
+        <<~SQL
+          WITH limited_jobs AS (#{target_jobs.to_sql})
+          UPDATE #{::Delayed::Job.quoted_table_name}
+          SET locked_by = #{::Delayed::Job.connection.quote(::Delayed::Backend::Base::ON_HOLD_LOCKED_BY)},
+              locked_at = #{::Delayed::Job.connection.quote(::Delayed::Job.db_time_now)}
+          FROM limited_jobs WHERE limited_jobs.id=#{::Delayed::Job.quoted_table_name}.id
+          RETURNING #{::Delayed::Job.quoted_table_name}.*
+        SQL
       end
 
-      jobs = source_shard.activate(
+      jobs = source_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
+        ::Delayed::Job.find_by_sql(query)
+      end
       new_jobs = jobs.map do |job|
         new_job = job.dup
         new_job.shard = target_shard
@@ -247,10 +251,10 @@ module SwitchmanInstJobs
         new_job
       end
       transaction_on([source_shard, target_shard]) do
-        target_shard.activate(
+        target_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
           bulk_insert_jobs(new_jobs)
         end
-        source_shard.activate(
+        source_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
           ::Delayed::Job.delete(jobs)
         end
       end
data/lib/switchman_inst_jobs/switchman/shard.rb
CHANGED
@@ -22,12 +22,12 @@ module SwitchmanInstJobs
     def hold_jobs!(wait: false)
       self.jobs_held = true
       save! if changed?
-      delayed_jobs_shard.activate(
+      delayed_jobs_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
        lock_jobs_for_hold
      end
      return unless wait
 
-      delayed_jobs_shard.activate(
+      delayed_jobs_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
        while ::Delayed::Job.where(shard_id: id).
              where.not(locked_at: nil).
              where.not(locked_by: ::Delayed::Backend::Base::ON_HOLD_LOCKED_BY).exists?
@@ -47,7 +47,7 @@ module SwitchmanInstJobs
        Rails.logger.debug('Waiting for caches to clear')
        sleep(65)
      end
-      delayed_jobs_shard.activate(
+      delayed_jobs_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
        ::Delayed::Job.where(locked_by: ::Delayed::Backend::Base::ON_HOLD_LOCKED_BY, shard_id: id).
          in_batches(of: 10_000).
          update_all(
@@ -75,22 +75,14 @@ module SwitchmanInstJobs
       remove_instance_variable(:@delayed_jobs_shards) if instance_variable_defined?(:@delayed_jobs_shards)
     end
 
-    def current(category = :primary)
-      if category == :delayed_jobs
-        active_shards[category] || super(:primary).delayed_jobs_shard
-      else
-        super
-      end
-    end
-
     def activate!(categories)
       if !@skip_delayed_job_auto_activation &&
-         !categories[
-         categories[
-         categories[
+         !categories[::Delayed::Backend::ActiveRecord::AbstractJob] &&
+         categories[::ActiveRecord::Base] &&
+         categories[::ActiveRecord::Base] != ::Switchman::Shard.current(::ActiveRecord::Base)
        skip_delayed_job_auto_activation do
-          categories[
-          categories[
+          categories[::Delayed::Backend::ActiveRecord::AbstractJob] =
+            categories[::ActiveRecord::Base].delayed_jobs_shard
        end
      end
      super
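Note: with the :delayed_jobs category override removed, activate! keeps the two connection classes in step instead: activating a shard for ::ActiveRecord::Base without an explicit jobs shard auto-activates its delayed_jobs_shard for AbstractJob. A sketch of the intended behavior (shard variables are hypothetical):

    shard2.activate do
      # activate! added AbstractJob => shard2.delayed_jobs_shard alongside
      # ActiveRecord::Base => shard2, so the two "current" shards can differ:
      ::Switchman::Shard.current(::ActiveRecord::Base)
      # => shard2
      ::Switchman::Shard.current(::Delayed::Backend::ActiveRecord::AbstractJob)
      # => shard2.delayed_jobs_shard
    end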
@@ -104,11 +96,6 @@ module SwitchmanInstJobs
       @skip_delayed_job_auto_activation = was
     end
 
-    def create
-      db = ::Switchman::DatabaseServer.server_for_new_shard
-      db.create_new_shard
-    end
-
     def periodic_clear_shard_cache
       # TODO: make this configurable
       @timed_cache ||= TimedCache.new(-> { 60.to_i.seconds.ago }) do
data/lib/switchman_inst_jobs.rb
CHANGED
@@ -5,6 +5,9 @@ module SwitchmanInstJobs
   cattr_accessor :delayed_jobs_shard_fallback
 
   def self.initialize_active_record
+    ::ActiveRecord::ConnectionAdapters::ConnectionPool.prepend(
+      ActiveRecord::ConnectionAdapters::ConnectionPool
+    )
     ::ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.prepend(
       ActiveRecord::ConnectionAdapters::PostgreSQLAdapter
     )
@@ -32,6 +35,7 @@ module SwitchmanInstJobs
     end
   end
 
+require 'switchman_inst_jobs/active_record/connection_adapters/connection_pool'
 require 'switchman_inst_jobs/active_record/connection_adapters/postgresql_adapter'
 require 'switchman_inst_jobs/active_record/migration'
 require 'switchman_inst_jobs/delayed/settings'
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: switchman-inst-jobs
 version: !ruby/object:Gem::Version
-  version: 3.2.6
+  version: 4.0.3
 platform: ruby
 authors:
 - Bryan Petty
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2021-12-
+date: 2021-12-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: inst-jobs
@@ -50,34 +50,40 @@ dependencies:
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.1'
     - - "<"
       - !ruby/object:Gem::Version
-        version: '6.
+        version: '6.2'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
       - !ruby/object:Gem::Version
-        version: '
+        version: '6.1'
     - - "<"
       - !ruby/object:Gem::Version
-        version: '6.
+        version: '6.2'
 - !ruby/object:Gem::Dependency
   name: switchman
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '
+        version: '3.0'
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 3.0.1
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '
+        version: '3.0'
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 3.0.1
 - !ruby/object:Gem::Dependency
   name: bundler
   requirement: !ruby/object:Gem::Requirement
@@ -110,16 +116,16 @@ dependencies:
   name: diplomat
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
      - !ruby/object:Gem::Version
-        version:
+        version: 2.5.1
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
      - !ruby/object:Gem::Version
-        version:
+        version: 2.5.1
 - !ruby/object:Gem::Dependency
   name: newrelic_rpm
   requirement: !ruby/object:Gem::Requirement
@@ -182,70 +188,98 @@ dependencies:
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: '3.
+        version: '3.10'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-        version: '3.
+        version: '3.10'
 - !ruby/object:Gem::Dependency
   name: rspec-rails
   requirement: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-        version: '
+        version: '5.0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-        version: '
+        version: '5.0'
 - !ruby/object:Gem::Dependency
   name: rubocop
   requirement: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-        version: 1.
+        version: '1.15'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-        version: 1.
+        version: '1.15'
 - !ruby/object:Gem::Dependency
   name: rubocop-rails
   requirement: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-        version: 2.
+        version: '2.10'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '2.10'
+- !ruby/object:Gem::Dependency
+  name: rubocop-rake
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.6'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0.6'
+- !ruby/object:Gem::Dependency
+  name: rubocop-rspec
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '2.4'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-        version: 2.
+        version: '2.4'
 - !ruby/object:Gem::Dependency
   name: simplecov
   requirement: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-        version: '0.
+        version: '0.21'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
      - !ruby/object:Gem::Version
-        version: '0.
+        version: '0.21'
 - !ruby/object:Gem::Dependency
   name: wwtd
   requirement: !ruby/object:Gem::Requirement
@@ -289,6 +323,7 @@ files:
 - db/migrate/20151210162949_improve_max_concurrent.rb
 - db/migrate/20161206323555_add_back_default_string_limits_jobs.rb
 - db/migrate/20170308045400_add_shard_id_to_delayed_jobs.rb
+- db/migrate/20170308045401_add_delayed_jobs_shard_id_to_switchman_shards.rb
 - db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb
 - db/migrate/20190726154743_make_critical_columns_not_null.rb
 - db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb
@@ -303,8 +338,11 @@ files:
|
|
303
338
|
- db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb
|
304
339
|
- db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb
|
305
340
|
- db/migrate/20211207094200_update_after_delete_trigger_for_singleton_transition_cases.rb
|
341
|
+
- db/migrate/20211220112800_fix_singleton_race_condition_insert.rb
|
342
|
+
- db/migrate/20211220113000_fix_singleton_race_condition_delete.rb
|
306
343
|
- lib/switchman-inst-jobs.rb
|
307
344
|
- lib/switchman_inst_jobs.rb
|
345
|
+
- lib/switchman_inst_jobs/active_record/connection_adapters/connection_pool.rb
|
308
346
|
- lib/switchman_inst_jobs/active_record/connection_adapters/postgresql_adapter.rb
|
309
347
|
- lib/switchman_inst_jobs/active_record/migration.rb
|
310
348
|
- lib/switchman_inst_jobs/delayed/backend/base.rb
|