switchman-inst-jobs 3.0.5 → 3.2.10
- checksums.yaml +4 -4
- data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
- data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
- data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
- data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
- data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
- data/db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb +137 -0
- data/db/migrate/20211207094200_update_after_delete_trigger_for_singleton_transition_cases.rb +171 -0
- data/db/migrate/20211220112800_fix_singleton_race_condition_insert.rb +59 -0
- data/db/migrate/20211220113000_fix_singleton_race_condition_delete.rb +207 -0
- data/db/migrate/20220127091200_fix_singleton_unique_constraint.rb +31 -0
- data/db/migrate/20220128084800_update_insert_trigger_for_singleton_unique_constraint_change.rb +60 -0
- data/db/migrate/20220128084900_update_delete_trigger_for_singleton_unique_constraint_change.rb +209 -0
- data/db/migrate/20220203063200_remove_old_singleton_index.rb +31 -0
- data/lib/switchman_inst_jobs/delayed/backend/base.rb +5 -4
- data/lib/switchman_inst_jobs/delayed/settings.rb +9 -0
- data/lib/switchman_inst_jobs/delayed/worker/health_check.rb +15 -14
- data/lib/switchman_inst_jobs/engine.rb +6 -2
- data/lib/switchman_inst_jobs/jobs_migrator.rb +166 -93
- data/lib/switchman_inst_jobs/switchman/shard.rb +8 -1
- data/lib/switchman_inst_jobs/version.rb +1 -1
- data/lib/switchman_inst_jobs.rb +1 -4
- metadata +33 -17
data/lib/switchman_inst_jobs/delayed/worker/health_check.rb CHANGED
@@ -18,22 +18,23 @@ module SwitchmanInstJobs
           ::Delayed::Settings.worker_health_check_config['service_name'] = original_service_name
         end

-        def reschedule_abandoned_jobs
-
-
-
-
-
+        def reschedule_abandoned_jobs
+          shard_ids = ::SwitchmanInstJobs::Delayed::Settings.configured_shard_ids
+          shards = shard_ids.map { |shard_id| ::Delayed::Worker.shard(shard_id) }
+          ::Switchman::Shard.with_each_shard(shards, [:delayed_jobs]) do
+            dj_shard = ::Switchman::Shard.current(:delayed_jobs)
+            dj_shard.activate do
+              munge_service_name(dj_shard) do
+                # because this rescheduling process is running on every host, we need
+                # to make sure that it's functioning for each shard the current
+                # host is programmed to interact with, but ONLY for those shards.
+                # reading the config lets us iterate over any shards this host should
+                # work with and lets us pick the correct service name to identify which
+                # hosts are currently alive and valid via the health checks
+                super()
+              end
             end
           end
-
-          ::Switchman::Shard.with_each_shard(shards, [:delayed_jobs], exception: :ignore) do
-            shard = ::Switchman::Shard.current(:delayed_jobs)
-            singleton = <<~SINGLETON
-              periodic: Delayed::Worker::HealthCheck.reschedule_abandoned_jobs:#{shard.id}
-            SINGLETON
-            delay(singleton: singleton).reschedule_abandoned_jobs(call_super: shard)
-          end
         end
       end
     end
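The rescheduling pass above now walks only the jobs shards this host is configured to work, via the new SwitchmanInstJobs::Delayed::Settings.configured_shard_ids helper (the 9-line delayed/settings.rb file added in this release). Its implementation is not shown in this diff; the following is only a hypothetical sketch of the idea, assuming worker config entries carry a 'shard' key and using a config path that is purely illustrative:

  # Hypothetical sketch, not the gem's actual code: derive the jobs-shard ids this
  # host should service from its delayed-jobs worker configuration file.
  require 'yaml'

  def configured_shard_ids(path = 'config/delayed_jobs.yml') # path is an assumption
    config = YAML.safe_load(File.read(path)) || {}
    Array(config['workers']).map { |worker| worker['shard'] }.compact.uniq
  end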
data/lib/switchman_inst_jobs/engine.rb CHANGED
@@ -19,13 +19,17 @@ module SwitchmanInstJobs

       # Ensure jobs get unblocked on the new shard if they exist
       ::Delayed::Worker.lifecycle.after(:perform) do |_worker, job|
-        if job.strand
+        if job.strand || job.singleton
+          column = job.strand ? :strand : :singleton
+
           ::Switchman::Shard.clear_cache
           ::Switchman::Shard.default.activate do
             current_job_shard = ::Switchman::Shard.lookup(job.shard_id).delayed_jobs_shard
             if current_job_shard != ::Switchman::Shard.current(:delayed_jobs)
               current_job_shard.activate(:delayed_jobs) do
-
+                ::Delayed::Job.where(source: 'JobsMigrator::StrandBlocker', **{ column => job.try(column) }).delete_all
+
+                j = ::Delayed::Job.where(**{ column => job.try(column) }).next_in_strand_order.first
                 j.update_column(:next_in_strand, true) if j && !j.next_in_strand
               end
             end
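A note on the **{ column => job.try(column) } construction above, which recurs throughout this release: it builds a one-key hash whose key is picked at runtime (:strand or :singleton) and splats it into the query, so a single code path serves both columns. A tiny plain-Ruby illustration, independent of the gem:

  # Build a filter hash whose key (:strand or :singleton) is chosen at runtime.
  def blocker_filter(column, value)
    { source: 'JobsMigrator::StrandBlocker', **{ column => value } }
  end

  blocker_filter(:strand, 'foo')     # => {:source=>"JobsMigrator::StrandBlocker", :strand=>"foo"}
  blocker_filter(:singleton, 'bar')  # => {:source=>"JobsMigrator::StrandBlocker", :singleton=>"bar"}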
data/lib/switchman_inst_jobs/jobs_migrator.rb CHANGED
@@ -1,7 +1,3 @@
-# Just disabling all the rubocop metrics for this file for now,
-# as it is a direct port-in of existing code
-
-# rubocop:disable Metrics/BlockLength, Metrics/MethodLength, Metrics/AbcSize, Metrics/ClassLength
 require 'set'
 require 'parallel'

@@ -39,16 +35,12 @@ module SwitchmanInstJobs

         # Do the updates in batches and then just clear redis instead of clearing them one at a time
         target_shards.each do |target_shard, shards|
-
+          updates = { delayed_jobs_shard_id: target_shard, block_stranded: true }
+          updates[:updated_at] = Time.zone.now if ::Switchman::Shard.column_names.include?('updated_at')
+          ::Switchman::Shard.where(id: shards).update_all(updates)
         end
         clear_shard_cache

-        # Wait a little over the 60 second in-process shard cache clearing
-        # threshold to ensure that all new stranded jobs are now being
-        # enqueued with next_in_strand: false
-        Rails.logger.debug('Waiting for caches to clear')
-        sleep(65) unless @skip_cache_wait
-
         ::Switchman::Shard.clear_cache
         # rubocop:disable Style/CombinableLoops
         # We first migrate strands so that we can stop blocking strands before we migrate unstranded jobs
@@ -59,11 +51,33 @@ module SwitchmanInstJobs
         source_shards.each do |s|
           ::Switchman::Shard.lookup(s).activate(:delayed_jobs) { migrate_everything }
         end
+        ensure_unblock_stranded_for(shard_map.map(&:first))
         # rubocop:enable Style/CombinableLoops
       end

-
+      # if :migrate_strands ran on any shards that fell into scenario 1, then
+      # block_stranded never got flipped, so do that now.
+      def ensure_unblock_stranded_for(shards)
+        shards = ::Switchman::Shard.where(id: shards, block_stranded: true).to_a
+        return unless shards.any?
+
+        ::Switchman::Shard.where(id: shards).update_all(block_stranded: false)
+        clear_shard_cache
+
+        # shards is an array of shard objects that is now stale cause block_stranded has been updated.
+        shards.map(&:delayed_jobs_shard).uniq.each do |dj_shard|
+          unblock_strands(dj_shard)
+        end
+      end
+
+      def clear_shard_cache(debug_message = nil)
         ::Switchman.cache.clear
+        Rails.logger.debug("Waiting for caches to clear #{debug_message}")
+        # Wait a little over the 60 second in-process shard cache clearing
+        # threshold to ensure that all new stranded jobs are now being
+        # enqueued with next_in_strand: false
+        # @skip_cache_wait is for spec usage only
+        sleep(65) unless @skip_cache_wait
       end

       # This method expects that all relevant shards already have block_stranded: true
@@ -75,7 +89,9 @@ module SwitchmanInstJobs
         migrate_everything
       end

-      def migrate_strands
+      def migrate_strands(batch_size: 1_000)
+        source_shard = ::Switchman::Shard.current(:delayed_jobs)
+
         # there are 4 scenarios to deal with here
         # 1) no running job, no jobs moved: do nothing
         # 2) running job, no jobs moved; create blocker with next_in_strand=false
@@ -84,97 +100,137 @@ module SwitchmanInstJobs
         # those (= do nothing since it should already be false)
         # 4) no running job, jobs moved: set next_in_strand=true on the first of
         # those (= do nothing since it should already be true)
+        handler = lambda { |scope, column, blocker_job_kwargs = {}, advisory_lock_cb = nil|
+          shard_map = build_shard_map(scope, source_shard)
+          shard_map.each do |(target_shard, source_shard_ids)|
+            shard_scope = scope.where(shard_id: source_shard_ids)

-
-
-        shard_map = build_shard_map(strand_scope, source_shard)
-        shard_map.each do |(target_shard, source_shard_ids)|
-          shard_scope = strand_scope.where(shard_id: source_shard_ids)
-
-          # 1) is taken care of because it should not show up here in strands
-          strands = shard_scope.distinct.order(:strand).pluck(:strand)
-
-          target_shard.activate(:delayed_jobs) do
-            strands.each do |strand|
-              transaction_on([source_shard, target_shard]) do
-                this_strand_scope = shard_scope.where(strand: strand)
-                # we want to copy all the jobs except the one that is still running.
-                jobs_scope = this_strand_scope.where(locked_by: nil)
-
-                # 2) and part of 3) are taken care of here by creating a blocker
-                # job with next_in_strand = false. as soon as the current
-                # running job is finished it should set next_in_strand
-                # We lock it to ensure that the jobs worker can't delete it until we are done moving the strand
-                # Since we only unlock it on the new jobs queue *after* deleting from the original
-                # the lock ensures the blocker always gets unlocked
-                first = this_strand_scope.where.not(locked_by: nil).next_in_strand_order.lock.first
-                if first
-                  first_job = ::Delayed::Job.create!(strand: strand, next_in_strand: false)
-                  first_job.payload_object = ::Delayed::PerformableMethod.new(Kernel, :sleep, args: [0])
-                  first_job.queue = first.queue
-                  first_job.tag = 'Kernel.sleep'
-                  first_job.source = 'JobsMigrator::StrandBlocker'
-                  first_job.max_attempts = 1
-                  # If we ever have jobs left over from 9999 jobs moves of a single shard,
-                  # something has gone terribly wrong
-                  first_job.strand_order_override = -9999
-                  first_job.save!
-                  # the rest of 3) is taken care of here
-                  # make sure that all the jobs moved over are NOT next in strand
-                  ::Delayed::Job.where(next_in_strand: true, strand: strand, locked_by: nil).
-                    update_all(next_in_strand: false)
-                end
+            # 1) is taken care of because it should not show up here in strands
+            values = shard_scope.distinct.order(column).pluck(column)

-
-
-
-
-
-
-
-
-
+            target_shard.activate(:delayed_jobs) do
+              values.each do |value|
+                transaction_on([source_shard, target_shard]) do
+                  source_shard.activate(::Delayed::Backend::ActiveRecord::AbstractJob) do
+                    advisory_lock_cb&.call(value)
+                  end
+
+                  value_scope = shard_scope.where(**{ column => value })
+                  # we want to copy all the jobs except the one that is still running.
+                  jobs_scope = value_scope.where(locked_by: nil)
+
+                  # 2) and part of 3) are taken care of here by creating a blocker
+                  # job with next_in_strand = false. as soon as the current
+                  # running job is finished it should set next_in_strand
+                  # We lock it to ensure that the jobs worker can't delete it until we are done moving the strand
+                  # Since we only unlock it on the new jobs queue *after* deleting from the original
+                  # the lock ensures the blocker always gets unlocked
+                  first = value_scope.where.not(locked_by: nil).next_in_strand_order.lock.first
+                  if first
+                    create_blocker_job(
+                      queue: first.queue,
+                      shard_id: first.shard_id,
+                      **{ column => value },
+                      **blocker_job_kwargs
+                    )
+
+                    # the rest of 3) is taken care of here
+                    # make sure that all the jobs moved over are NOT next in strand
+                    ::Delayed::Job.where(next_in_strand: true, locked_by: nil, **{ column => value }).
+                      update_all(next_in_strand: false)
+                  end
+
+                  # 4) is taken care of here, by leaving next_in_strand alone and
+                  # it should execute on the new shard
+                  batch_move_jobs(
+                    target_shard: target_shard,
+                    source_shard: source_shard,
+                    scope: jobs_scope,
+                    batch_size: batch_size
+                  ) do |job, new_job|
+                    # This ensures jobs enqueued on the old jobs shard run before jobs on the new jobs queue
+                    new_job.strand_order_override = job.strand_order_override - 1
+                  end
                 end
               end
             end
+          end
+        }
+
+        strand_scope = ::Delayed::Job.shard(source_shard).where.not(strand: nil)
+        singleton_scope = ::Delayed::Job.shard(source_shard).where('strand IS NULL AND singleton IS NOT NULL')
+        all_scope = ::Delayed::Job.shard(source_shard).where('strand IS NOT NULL OR singleton IS NOT NULL')
+
+        singleton_blocker_additional_kwargs = {
+          locked_at: DateTime.now,
+          locked_by: ::Delayed::Backend::Base::ON_HOLD_BLOCKER
+        }

+        quoted_function_name = ::Delayed::Job.connection.quote_table_name('half_md5_as_bigint')
+        strand_advisory_lock_fn = lambda do |value|
+          ::Delayed::Job.connection.execute("SELECT pg_advisory_xact_lock(#{quoted_function_name}('#{value}'))")
+        end
+
+        singleton_advisory_lock_fn = lambda do |value|
+          ::Delayed::Job.connection.execute(
+            "SELECT pg_advisory_xact_lock(#{quoted_function_name}('singleton:#{value}'))"
+          )
+        end
+
+        handler.call(strand_scope, :strand, {}, strand_advisory_lock_fn)
+        handler.call(singleton_scope, :singleton, singleton_blocker_additional_kwargs, singleton_advisory_lock_fn)
+
+        shard_map = build_shard_map(all_scope, source_shard)
+        shard_map.each do |(target_shard, source_shard_ids)|
+          target_shard.activate(:delayed_jobs) do
            updated = ::Switchman::Shard.where(id: source_shard_ids, block_stranded: true).
              update_all(block_stranded: false)
            # If this is being manually re-run for some reason to clean something up, don't wait for nothing to happen
-          unless updated.zero?
-
-            # Wait a little over the 60 second in-process shard cache clearing
-            # threshold to ensure that all new stranded jobs are now being
-            # enqueued with next_in_strand: false
-            Rails.logger.debug("Waiting for caches to clear (#{source_shard.id} -> #{target_shard.id})")
-            # for spec usage only
-            sleep(65) unless @skip_cache_wait
-          end
+            clear_shard_cache("(#{source_shard.id} -> #{target_shard.id})") unless updated.zero?
+
            ::Switchman::Shard.clear_cache
            # At this time, let's unblock all the strands on the target shard that aren't being held by a blocker
            # but actually could have run and we just didn't know it because we didn't know if they had jobs
            # on the source shard
-          target_shard
-          loop do
-            # We only want to unlock stranded jobs where they don't belong to a blocked shard (if they *do* belong)
-            # to a blocked shard, they must be part of a concurrent jobs migration from a different source shard to
-            # this target shard, so we shouldn't unlock them yet. We only ever unlock one job here to keep the
-            # logic cleaner; if the job is n-stranded, after the first one runs, the trigger will unlock larger
-            # batches
-            break if ::Delayed::Job.where(id: ::Delayed::Job.select('DISTINCT ON (strand) id').
-              where.not(strand: nil).
-              where.not(shard_id: ::Switchman::Shard.where(block_stranded: true).pluck(:id)).where(
-                ::Delayed::Job.select(1).from("#{::Delayed::Job.quoted_table_name} dj2").
-                  where("dj2.next_in_strand = true OR dj2.source = 'JobsMigrator::StrandBlocker'").
-                  where('dj2.strand = delayed_jobs.strand').arel.exists.not
-              ).order(:strand, :strand_order_override, :id)).limit(500).update_all(next_in_strand: true).zero?
-          end
-        end
+            unblock_strands(target_shard)
           end
         end
       end

-      def
+      def unblock_strands(target_shard, batch_size: 10_000)
+        block_stranded_ids = ::Switchman::Shard.where(block_stranded: true).pluck(:id)
+        query = lambda { |column, scope|
+          ::Delayed::Job.
+            where(id: ::Delayed::Job.select("DISTINCT ON (#{column}) id").
+              where(scope).
+              where.not(shard_id: block_stranded_ids).
+              where(
+                ::Delayed::Job.select(1).from("#{::Delayed::Job.quoted_table_name} dj2").
+                  where("dj2.next_in_strand = true OR dj2.source = 'JobsMigrator::StrandBlocker'").
+                  where("dj2.#{column} = delayed_jobs.#{column}").arel.exists.not
+              ).
+              order(column, :strand_order_override, :id)).limit(batch_size)
+        }
+
+        target_shard.activate(:delayed_jobs) do
+          # We only want to unlock stranded jobs where they don't belong to a blocked shard (if they *do* belong)
+          # to a blocked shard, they must be part of a concurrent jobs migration from a different source shard to
+          # this target shard, so we shouldn't unlock them yet. We only ever unlock one job here to keep the
+          # logic cleaner; if the job is n-stranded, after the first one runs, the trigger will unlock larger
+          # batches
+
+          loop do
+            break if query.call(:strand, 'strand IS NOT NULL').update_all(next_in_strand: true).zero?
+          end
+
+          loop do
+            break if query.call(:singleton,
+                                'strand IS NULL AND singleton IS NOT NULL').update_all(next_in_strand: true).zero?
+          end
+        end
+      end
+
+      def migrate_everything(batch_size: 1_000)
         source_shard = ::Switchman::Shard.current(:delayed_jobs)
         scope = ::Delayed::Job.shard(source_shard).where('strand IS NULL')

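For context on the advisory-lock callbacks introduced above: before touching a strand (or singleton), the migrator takes a transaction-scoped Postgres advisory lock keyed on the strand name (singletons get a 'singleton:' prefix), so a concurrently running jobs worker cannot insert or unlock jobs in that strand while it is being copied. half_md5_as_bigint is the SQL helper inst-jobs installs to hash a strand name down to a bigint lock key. A minimal standalone sketch of the same pattern (assuming a PostgreSQL ActiveRecord connection with that function present; the strand name is a placeholder):

  # Sketch of the advisory-lock pattern used by the migrator above.
  ActiveRecord::Base.transaction do
    # pg_advisory_xact_lock blocks until the lock is free and is released
    # automatically at COMMIT/ROLLBACK, so there is no explicit unlock step.
    ActiveRecord::Base.connection.execute(
      "SELECT pg_advisory_xact_lock(half_md5_as_bigint('some strand'))"
    )
    # ... copy or update the strand's jobs inside this same transaction ...
  end

The new unblock_strands(target_shard, batch_size: 10_000) helper defined above factors the old inline unlock loop out of migrate_strands so that ensure_unblock_stranded_for can reuse it per jobs shard.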
@@ -183,13 +239,26 @@ module SwitchmanInstJobs
           batch_move_jobs(
             target_shard: target_shard,
             source_shard: source_shard,
-            scope: scope.where(shard_id: source_shard_ids).where(locked_by: nil)
+            scope: scope.where(shard_id: source_shard_ids).where(locked_by: nil),
+            batch_size: batch_size
           )
         end
       end

       private

+      def create_blocker_job(**kwargs)
+        first_job = ::Delayed::Job.create!(**kwargs, next_in_strand: false)
+        first_job.payload_object = ::Delayed::PerformableMethod.new(Kernel, :sleep, args: [0])
+        first_job.tag = 'Kernel.sleep'
+        first_job.source = 'JobsMigrator::StrandBlocker'
+        first_job.max_attempts = 1
+        # If we ever have jobs left over from 9999 jobs moves of a single shard,
+        # something has gone terribly wrong
+        first_job.strand_order_override = -9999
+        first_job.save!
+      end
+
       def build_shard_map(scope, source_shard)
         shard_ids = scope.distinct.pluck(:shard_id)

@@ -204,10 +273,10 @@ module SwitchmanInstJobs
         shard_map
       end

-      def batch_move_jobs(target_shard:, source_shard:, scope:)
+      def batch_move_jobs(target_shard:, source_shard:, scope:, batch_size:)
         while scope.exists?
           # Adapted from get_and_lock_next_available in delayed/backend/active_record.rb
-          target_jobs = scope.limit(
+          target_jobs = scope.limit(batch_size).lock('FOR UPDATE SKIP LOCKED')

           query = source_shard.activate(:delayed_jobs) do
             "WITH limited_jobs AS (#{target_jobs.to_sql}) " \
@@ -266,7 +335,10 @@ module SwitchmanInstJobs
         connection = ::Delayed::Job.connection
         quoted_keys = keys.map { |k| connection.quote_column_name(k) }.join(', ')

-        connection.execute
+        connection.execute 'DROP TABLE IF EXISTS delayed_jobs_bulk_copy'
+        connection.execute "CREATE TEMPORARY TABLE delayed_jobs_bulk_copy
+                            (LIKE #{::Delayed::Job.quoted_table_name} INCLUDING DEFAULTS)"
+        connection.execute "COPY delayed_jobs_bulk_copy (#{quoted_keys}) FROM STDIN"
         records.map do |record|
           connection.raw_connection.put_copy_data("#{keys.map { |k| quote_text(record[k]) }.join("\t")}\n")
         end
@@ -278,6 +350,9 @@ module SwitchmanInstJobs
         rescue StandardError => e
           raise connection.send(:translate_exception, e, 'COPY FROM STDIN')
         end
+        connection.execute "INSERT INTO #{::Delayed::Job.quoted_table_name} (#{quoted_keys})
+                            SELECT #{quoted_keys} FROM delayed_jobs_bulk_copy
+                            ON CONFLICT (singleton) WHERE singleton IS NOT NULL AND locked_by IS NULL DO NOTHING"
         result.cmd_tuples
       end

@@ -295,5 +370,3 @@ module SwitchmanInstJobs
       end
     end
   end
-
-# rubocop:enable Metrics/BlockLength, Metrics/MethodLength, Metrics/AbcSize, Metrics/ClassLength
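The temporary delayed_jobs_bulk_copy table above exists because COPY ... FROM STDIN has no conflict handling of its own: rows are streamed into the staging table first, and the final INSERT ... SELECT applies ON CONFLICT ... DO NOTHING against the partial unique index on singleton (WHERE singleton IS NOT NULL AND locked_by IS NULL), so a singleton that already exists on the target shard is skipped instead of aborting the whole batch. A throwaway demo of that final step, using placeholder tables rather than the gem's schema (assumes an established ActiveRecord PostgreSQL connection):

  # Demo of the staging-table + partial-unique-index upsert; demo_* names are placeholders.
  conn = ActiveRecord::Base.connection
  conn.execute "CREATE TEMPORARY TABLE demo_jobs (singleton text, locked_by text)"
  conn.execute "CREATE UNIQUE INDEX ON demo_jobs (singleton) WHERE singleton IS NOT NULL AND locked_by IS NULL"
  conn.execute "CREATE TEMPORARY TABLE demo_staging (LIKE demo_jobs INCLUDING DEFAULTS)"
  conn.execute "INSERT INTO demo_jobs VALUES ('periodic: MyJob', NULL)"                                  # already on the target
  conn.execute "INSERT INTO demo_staging VALUES ('periodic: MyJob', NULL), ('periodic: OtherJob', NULL)" # incoming batch
  conn.execute <<~SQL
    INSERT INTO demo_jobs (singleton, locked_by)
    SELECT singleton, locked_by FROM demo_staging
    ON CONFLICT (singleton) WHERE singleton IS NOT NULL AND locked_by IS NULL DO NOTHING
  SQL
  # demo_jobs now holds 'periodic: MyJob' once plus 'periodic: OtherJob'; the duplicate was skipped.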
data/lib/switchman_inst_jobs/switchman/shard.rb CHANGED
@@ -39,7 +39,14 @@ module SwitchmanInstJobs

       def unhold_jobs!
         self.jobs_held = false
-
+        if changed?
+          save!
+          # Wait a little over the 60 second in-process shard cache clearing
+          # threshold to ensure that all new jobs are now being enqueued
+          # unlocked
+          Rails.logger.debug('Waiting for caches to clear')
+          sleep(65)
+        end
         delayed_jobs_shard.activate(:delayed_jobs) do
           ::Delayed::Job.where(locked_by: ::Delayed::Backend::Base::ON_HOLD_LOCKED_BY, shard_id: id).
             in_batches(of: 10_000).
data/lib/switchman_inst_jobs.rb CHANGED
@@ -14,10 +14,6 @@ module SwitchmanInstJobs
     ::Delayed::Backend::ActiveRecord::Job.prepend(
       Delayed::Backend::Base
     )
-    ::Delayed::Backend::Redis::Job.prepend(
-      Delayed::Backend::Base
-    )
-    ::Delayed::Backend::Redis::Job.column :shard_id, :integer
     ::Delayed::Pool.prepend Delayed::Pool
     ::Delayed::Worker.prepend Delayed::Worker
     ::Delayed::Worker::HealthCheck.prepend Delayed::Worker::HealthCheck
@@ -38,6 +34,7 @@ end

 require 'switchman_inst_jobs/active_record/connection_adapters/postgresql_adapter'
 require 'switchman_inst_jobs/active_record/migration'
+require 'switchman_inst_jobs/delayed/settings'
 require 'switchman_inst_jobs/delayed/backend/base'
 require 'switchman_inst_jobs/delayed/message_sending'
 require 'switchman_inst_jobs/delayed/pool'
metadata CHANGED
@@ -1,35 +1,35 @@
 --- !ruby/object:Gem::Specification
 name: switchman-inst-jobs
 version: !ruby/object:Gem::Version
-  version: 3.0.5
+  version: 3.2.10
 platform: ruby
 authors:
 - Bryan Petty
-autorequire:
+autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2022-02-23 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: inst-jobs
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: '1.0'
     - - ">="
       - !ruby/object:Gem::Version
-        version:
+        version: 2.4.9
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '4.0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: '1.0'
    - - ">="
      - !ruby/object:Gem::Version
-        version:
+        version: 2.4.9
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '4.0'
 - !ruby/object:Gem::Dependency
   name: parallel
   requirement: !ruby/object:Gem::Requirement
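The headline dependency change: inst-jobs is now required at '>= 2.4.9', '< 4.0' (previously '~> 1.0'), matching the singleton-aware job backend this release builds on. A hedged Gemfile sketch for an application taking this upgrade (the exact pins are illustrative, and the inst-jobs line is normally resolved transitively):

  # Gemfile (sketch) -- constraints mirror the gemspec diff above
  source 'https://rubygems.org'

  gem 'switchman-inst-jobs', '3.2.10'
  gem 'inst-jobs', '>= 2.4.9', '< 4.0' # only needed if you want to pin it explicitly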
@@ -107,7 +107,7 @@ dependencies:
   - !ruby/object:Gem::Version
     version: '0'
 - !ruby/object:Gem::Dependency
-  name:
+  name: diplomat
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - ">="
@@ -260,7 +260,7 @@ dependencies:
   - - "~>"
     - !ruby/object:Gem::Version
       version: '1.4'
-description:
+description:
 email:
 - bpetty@instructure.com
 executables: []
@@ -296,6 +296,19 @@ files:
 - db/migrate/20200822014259_add_block_stranded_to_switchman_shards.rb
 - db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb
 - db/migrate/20200825011002_add_strand_order_override.rb
+- db/migrate/20210809145804_add_n_strand_index.rb
+- db/migrate/20210812210128_add_singleton_column.rb
+- db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb
+- db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb
+- db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb
+- db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb
+- db/migrate/20211207094200_update_after_delete_trigger_for_singleton_transition_cases.rb
+- db/migrate/20211220112800_fix_singleton_race_condition_insert.rb
+- db/migrate/20211220113000_fix_singleton_race_condition_delete.rb
+- db/migrate/20220127091200_fix_singleton_unique_constraint.rb
+- db/migrate/20220128084800_update_insert_trigger_for_singleton_unique_constraint_change.rb
+- db/migrate/20220128084900_update_delete_trigger_for_singleton_unique_constraint_change.rb
+- db/migrate/20220203063200_remove_old_singleton_index.rb
 - lib/switchman-inst-jobs.rb
 - lib/switchman_inst_jobs.rb
 - lib/switchman_inst_jobs/active_record/connection_adapters/postgresql_adapter.rb
@@ -303,6 +316,7 @@ files:
 - lib/switchman_inst_jobs/delayed/backend/base.rb
 - lib/switchman_inst_jobs/delayed/message_sending.rb
 - lib/switchman_inst_jobs/delayed/pool.rb
+- lib/switchman_inst_jobs/delayed/settings.rb
 - lib/switchman_inst_jobs/delayed/worker.rb
 - lib/switchman_inst_jobs/delayed/worker/health_check.rb
 - lib/switchman_inst_jobs/engine.rb
@@ -318,8 +332,10 @@ files:
 homepage: https://github.com/instructure/switchman-inst-jobs
 licenses:
 - MIT
-metadata:
-
+metadata:
+  allowed_push_host: https://rubygems.org
+  rubygems_mfa_required: 'true'
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -327,7 +343,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: '2.
+      version: '2.6'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
@@ -335,7 +351,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: '0'
 requirements: []
 rubygems_version: 3.1.4
-signing_key:
+signing_key:
 specification_version: 4
 summary: Switchman and Instructure Jobs compatibility gem.
 test_files: []