canvas_sync 0.22.0.beta6 → 0.22.0.beta8
- checksums.yaml +4 -4
- data/lib/canvas_sync/job_uniqueness/compat/sidekiq.rb +1 -1
- data/lib/canvas_sync/job_uniqueness/lock_context.rb +6 -0
- data/lib/canvas_sync/job_uniqueness/locksmith.rb +4 -2
- data/lib/canvas_sync/job_uniqueness/on_conflict/reschedule.rb +2 -2
- data/lib/canvas_sync/job_uniqueness/strategy/base.rb +10 -9
- data/lib/canvas_sync/version.rb +1 -1
- data/spec/job_uniqueness/on_conflict/reschedule_spec.rb +1 -1
- data/spec/job_uniqueness/spec_helper.rb +3 -0
- data/spec/job_uniqueness/strategy/base_spec.rb +2 -2
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d547f65f6811b0e9016de79ad87cff6789a82bc1d02b100c0280a6591a7b44c5
+  data.tar.gz: 12bc5403297ceb4b13b1c29003f7ca8349b2ca4c7f54236fcc44c306d8cd56fc
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f6e4f2cb3de6d071837e29fbdbafa2f628018d82f302658e0a24a181bb2d5c00557372972c77ef924a6243f8e2fd861ef6a9488c25e2018092265398b589d303
+  data.tar.gz: 7b21c43a71b7c44674e89afcd0b55996ccac7dae346fc917304e95be9d01870580a46b117ff241e81fa1fde10594e6fc1c12d8f0b16be7cf6295fa1bebb04461
data/lib/canvas_sync/job_uniqueness/lock_context.rb
CHANGED
@@ -6,18 +6,23 @@ module CanvasSync::JobUniqueness
       context_class.new(data, **kwargs)
     end
 
+    attr_reader :lock_id
+
     # { job_clazz, jid, queue, args?, kwargs?, base_key? }
     def initialize(data, job_instance: nil, config: nil)
       @base_key = data[:base_key]
       @context_data = data
       @job_instance = job_instance
       @config = config || @context_data[:config]
+
+      @lock_id ||= data[:lid] || Thread.current[:unique_jobs_previous_context]&.lock_id || job_id
     end
 
     # This is primarily for rehydrating in a Batch Callback, so it is unlikely that args and kwargs are needed.
     # Honestly, base_key and job_clazz are probably the only needed values
     def serialize
       {
+        lid: lock_id,
         clazz: self.class.to_s,
         job_clazz: @context_data[:job_clazz].to_s,
         jid: @context_data[:jid],
@@ -29,6 +34,7 @@ module CanvasSync::JobUniqueness
     # Properties to cache on the serialized Job object to prevent issues arising from code changes between enqueue and perform
     def cache_data
       {
+        lid: lock_id,
         base_key: base_key,
         job_score: job_score,
         # TODO Should config also be cached on the Job at time of enqueue?
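The lock_id added above is what keeps a logical lock stable across re-enqueues: it resolves to an explicit lid, falling back to a previous context stashed in a thread-local, and finally to the job's own JID, and it is persisted in both serialize and cache_data so a rehydrated context lands on the same id. A minimal, self-contained sketch of that resolution order; DemoContext is illustrative, not the gem's LockContext:

# Illustrative stand-in for LockContext, showing how a lock id can stay stable
# across re-enqueues. Not the gem's actual class.
class DemoContext
  attr_reader :lock_id, :job_id

  def initialize(data)
    @job_id = data[:jid]
    # Resolution order mirrors the diff: explicit lid, then the previous
    # context stashed in a thread-local, then this job's own JID.
    @lock_id = data[:lid] ||
               Thread.current[:unique_jobs_previous_context]&.lock_id ||
               @job_id
  end

  # Persisting lid means a rehydrated context keeps the original lock id.
  def serialize
    { lid: lock_id, jid: job_id }
  end
end

first = DemoContext.new({ jid: "jid-1" })
# Re-enqueued under a new JID, but rehydrated from the serialized data:
second = DemoContext.new(first.serialize.merge(jid: "jid-2"))
puts second.lock_id == first.lock_id # => true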
data/lib/canvas_sync/job_uniqueness/locksmith.rb
CHANGED
@@ -14,7 +14,9 @@ module CanvasSync::JobUniqueness
 
     def initialize(key, lock_context, redis_pool = nil)
       @lock_context = lock_context
-
+      # We use the "lock_id" (which is just the first JID that a job appeared as) so that Unlock batches may properly unlock the batch
+      # TODO For locks that do use a wrapping Batch, it may be nice to use the BID here instead
+      @job_id = lock_context.lock_id
       @item = lock_context
       @key = SidekiqUniqueJobs::Key.new(key)
 
@@ -28,7 +30,7 @@ module CanvasSync::JobUniqueness
         :"limit" => lcfg[:limit],
       })
 
-      @redis_pool
+      @redis_pool = redis_pool
     end
 
     def locked_jids
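Feeding the locksmith the stable lock_id (rather than the JID of the current enqueue) matters because the underlying SidekiqUniqueJobs locks are owned by an id: an unlock fired later, e.g. from a batch callback or after a reschedule, only succeeds if it presents the same owner id. The second hunk also fixes a dropped assignment, where the constructor previously read @redis_pool instead of assigning the redis_pool argument. A toy, in-memory sketch of the ownership rule; ToyLocksmith is illustrative, not the gem's or SidekiqUniqueJobs' locksmith:

# Toy lock table keyed by an owner id; a lock is only released when the same
# owner id is presented. Illustrative only.
class ToyLocksmith
  @locks = {}

  class << self
    attr_reader :locks

    def lock(key, owner_id)
      locks[key] ||= owner_id
      locks[key] == owner_id
    end

    def unlock(key, owner_id)
      return false unless locks[key] == owner_id
      locks.delete(key)
      true
    end
  end
end

# Keyed by the per-enqueue JID, an unlock after a reschedule misses:
ToyLocksmith.lock("lock:sync_job", "jid-1")
ToyLocksmith.unlock("lock:sync_job", "jid-2") # => false, lock is stranded

# Keyed by the stable lock_id (the first JID the job appeared as), it matches:
ToyLocksmith.unlock("lock:sync_job", "jid-1") # => true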
data/lib/canvas_sync/job_uniqueness/on_conflict/reschedule.rb
CHANGED
@@ -4,12 +4,12 @@ module CanvasSync::JobUniqueness
     valid_for :perform
 
     def call
-      Thread.current[:
+      Thread.current[:unique_jobs_previous_context] = lock_context
       rescheduled = lock_context.reenqueue(
         schedule_in: schedule_in,
       )
     ensure
-      Thread.current[:
+      Thread.current[:unique_jobs_previous_context] = nil
     end
 
     def schedule_in
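The reschedule handler now publishes the current context through a thread-local for the duration of the re-enqueue and clears it in ensure, so the new job picks up the old lock_id without the value leaking past the call even if reenqueue raises. A generic sketch of that stash-and-clear pattern; with_previous_context is a helper invented here for illustration, the gem inlines it in Reschedule#call:

# Expose a value to downstream code via a thread-local only for the duration
# of a block, and always clear it afterwards, even if the block raises.
def with_previous_context(context)
  Thread.current[:unique_jobs_previous_context] = context
  yield
ensure
  Thread.current[:unique_jobs_previous_context] = nil
end

# Anything called inside the block (such as code that builds a new lock
# context for the re-enqueued job) can read the thread-local:
with_previous_context(:old_context) do
  Thread.current[:unique_jobs_previous_context] # => :old_context
end
Thread.current[:unique_jobs_previous_context]   # => nil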
data/lib/canvas_sync/job_uniqueness/strategy/base.rb
CHANGED
@@ -43,7 +43,7 @@ module CanvasSync::JobUniqueness
     end
 
     def wrap_in_batch(&blk)
-      if Thread.current[:
+      if Thread.current[:unique_jobs_previous_context] # Ensure we don't re-wrap in a batch when rescheduling
        return blk.call
      end
 
@@ -74,7 +74,7 @@ module CanvasSync::JobUniqueness
      CanvasSync::JobUniqueness.logger.debug("Context data: #{opts[:lock_context]}")
      strategy_class = opts[:lock_strategy].constantize
      lock_context = LockContext.from_serialized(opts[:lock_context])
-      CanvasSync::JobUniqueness.logger.debug("Rehydrated LockContext: #{lock_context.
+      CanvasSync::JobUniqueness.logger.debug("Rehydrated LockContext: #{lock_context.lock_id} #{lock_context.debug_data}")
      strategy = strategy_class.new(lock_context)
      # TODO Should this route through LockContext#handle_lifecycle!?
      strategy.batch_callback(opts[:event].to_sym, batch_status)
@@ -83,13 +83,14 @@ module CanvasSync::JobUniqueness
     def lock!(purpose, wait: nil)
       locked = nil
       if purpose == :enqueue
-
-
-
-
-
+        # We don't need to swap_locks anymore because we maintain a consistent lock_id across re-enqueues
+        # if Thread.current[:unique_jobs_previous_context].present?
+        #   locked = locksmith.swap_locks(Thread.current[:unique_jobs_previous_context].job_id)
+        # else
+        locked = locksmith.lock()
+        # end
       elsif purpose == :perform
-        locked = locksmith.execute { lock_context.
+        locked = locksmith.execute { lock_context.lock_id }
       end
 
       CanvasSync::JobUniqueness.logger.debug { "Requested lock of #{key} for #{purpose} - (#{locked || 'Not Obtained!'})" }
@@ -98,7 +99,7 @@ module CanvasSync::JobUniqueness
     end
 
     def unlock()
-      CanvasSync::JobUniqueness.logger.debug { "Trying to unlock #{key}
+      CanvasSync::JobUniqueness.logger.debug { "Trying to unlock #{key} for LID #{lock_context.lock_id}" }
       result = locksmith.unlock
       CanvasSync::JobUniqueness.logger.debug { "Unlocked #{key} - (#{result || 'Not Unlocked!'})" }
     end
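With a stable lock_id, re-acquiring the lock on a re-enqueue is idempotent, which is why the swap_locks branch above could be retired in favour of a plain locksmith.lock(). A self-contained sketch of that idempotence, not the gem's code:

# Minimal lock table: taking a lock succeeds if it is free or already held by
# the same owner id.
held = {}
take = ->(key, owner) { (held[key] ||= owner) == owner }

take.call("lock:sync_job", "lid-1")     # => true, first enqueue takes the lock
take.call("lock:sync_job", "lid-1")     # => true, re-enqueue with the same lock_id
                                        #    simply re-acquires; nothing to swap
take.call("lock:sync_job", "lid-other") # => false, other jobs are still blocked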
data/lib/canvas_sync/version.rb
CHANGED
data/spec/job_uniqueness/on_conflict/reschedule_spec.rb
CHANGED
@@ -6,7 +6,7 @@ RSpec.describe CanvasSync::JobUniqueness::OnConflict::Reschedule do
 
   it "calls reenqueue" do
     expect(lock_context).to receive(:reenqueue) do |*args, **kwargs|
-      expect(Thread.current[:
+      expect(Thread.current[:unique_jobs_previous_context]).to be_present
     end
     on_conflict.call
   end
data/spec/job_uniqueness/spec_helper.rb
CHANGED
@@ -1,6 +1,9 @@
 require_relative "../spec_helper"
 require "canvas_sync/job_uniqueness/job_uniqueness"
 
+require 'redis'
+Redis.silence_deprecations = true
+
 Dir[File.join(File.dirname(__FILE__), "./support/**/*.rb")].sort.each { |f| require f }
 
 Sidekiq::Testing.server_middleware do |chain|
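Redis.silence_deprecations= is a redis-rb (4.8.x) switch that mutes the deprecation warnings emitted ahead of the redis 5 API changes, keeping spec output readable. A minimal standalone use, assuming redis-rb 4.8 is on the load path:

require "redis"

# Mute redis-rb deprecation warnings globally, e.g. from a spec helper.
Redis.silence_deprecations = true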
data/spec/job_uniqueness/strategy/base_spec.rb
CHANGED
@@ -9,11 +9,11 @@ RSpec.describe CanvasSync::JobUniqueness::Strategy::Base do
 
   describe '#wrap_in_batch' do
     it 'does not wrap if re-enqueuing' do
-      Thread.current[:
+      Thread.current[:unique_jobs_previous_context] = context
       expect(CanvasSync::JobBatches::Batch).not_to receive(:new)
       strategy.send(:wrap_in_batch, &->{ })
     ensure
-      Thread.current[:
+      Thread.current[:unique_jobs_previous_context] = nil
     end
 
     context 'when the job fails' do