canvas_sync 0.21.1 → 0.22.0.beta1
This diff covers the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- checksums.yaml +4 -4
- data/lib/canvas_sync/concerns/auto_relations.rb +11 -0
- data/lib/canvas_sync/config.rb +3 -5
- data/lib/canvas_sync/generators/templates/models/rubric.rb +2 -1
- data/lib/canvas_sync/job_batches/batch.rb +432 -402
- data/lib/canvas_sync/job_batches/callback.rb +100 -114
- data/lib/canvas_sync/job_batches/chain_builder.rb +194 -196
- data/lib/canvas_sync/job_batches/{active_job.rb → compat/active_job.rb} +2 -2
- data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/helpers.rb +1 -1
- data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web.rb +3 -3
- data/lib/canvas_sync/job_batches/{sidekiq.rb → compat/sidekiq.rb} +35 -22
- data/lib/canvas_sync/job_batches/compat.rb +20 -0
- data/lib/canvas_sync/job_batches/context_hash.rb +124 -126
- data/lib/canvas_sync/job_batches/jobs/base_job.rb +2 -4
- data/lib/canvas_sync/job_batches/jobs/concurrent_batch_job.rb +14 -16
- data/lib/canvas_sync/job_batches/jobs/managed_batch_job.rb +125 -127
- data/lib/canvas_sync/job_batches/jobs/serial_batch_job.rb +14 -16
- data/lib/canvas_sync/job_batches/pool.rb +193 -195
- data/lib/canvas_sync/job_batches/redis_model.rb +50 -52
- data/lib/canvas_sync/job_batches/redis_script.rb +129 -131
- data/lib/canvas_sync/job_batches/status.rb +85 -87
- data/lib/canvas_sync/job_uniqueness/compat/active_job.rb +75 -0
- data/lib/canvas_sync/job_uniqueness/compat/sidekiq.rb +135 -0
- data/lib/canvas_sync/job_uniqueness/compat.rb +20 -0
- data/lib/canvas_sync/job_uniqueness/configuration.rb +25 -0
- data/lib/canvas_sync/job_uniqueness/job_uniqueness.rb +47 -0
- data/lib/canvas_sync/job_uniqueness/lock_context.rb +171 -0
- data/lib/canvas_sync/job_uniqueness/locksmith.rb +92 -0
- data/lib/canvas_sync/job_uniqueness/on_conflict/base.rb +32 -0
- data/lib/canvas_sync/job_uniqueness/on_conflict/log.rb +13 -0
- data/lib/canvas_sync/job_uniqueness/on_conflict/null_strategy.rb +9 -0
- data/lib/canvas_sync/job_uniqueness/on_conflict/raise.rb +11 -0
- data/lib/canvas_sync/job_uniqueness/on_conflict/reject.rb +21 -0
- data/lib/canvas_sync/job_uniqueness/on_conflict/reschedule.rb +20 -0
- data/lib/canvas_sync/job_uniqueness/on_conflict.rb +41 -0
- data/lib/canvas_sync/job_uniqueness/strategy/base.rb +104 -0
- data/lib/canvas_sync/job_uniqueness/strategy/until_and_while_executing.rb +35 -0
- data/lib/canvas_sync/job_uniqueness/strategy/until_executed.rb +20 -0
- data/lib/canvas_sync/job_uniqueness/strategy/until_executing.rb +20 -0
- data/lib/canvas_sync/job_uniqueness/strategy/until_expired.rb +16 -0
- data/lib/canvas_sync/job_uniqueness/strategy/while_executing.rb +26 -0
- data/lib/canvas_sync/job_uniqueness/strategy.rb +27 -0
- data/lib/canvas_sync/job_uniqueness/unique_job_common.rb +79 -0
- data/lib/canvas_sync/misc_helper.rb +1 -1
- data/lib/canvas_sync/version.rb +1 -1
- data/lib/canvas_sync.rb +4 -3
- data/spec/dummy/app/models/rubric.rb +2 -1
- data/spec/dummy/config/environments/test.rb +1 -1
- data/spec/job_batching/batch_spec.rb +49 -7
- data/spec/job_batching/{active_job_spec.rb → compat/active_job_spec.rb} +2 -2
- data/spec/job_batching/{sidekiq_spec.rb → compat/sidekiq_spec.rb} +14 -12
- data/spec/job_batching/flow_spec.rb +1 -1
- data/spec/job_batching/integration_helper.rb +1 -1
- data/spec/job_batching/status_spec.rb +2 -2
- data/spec/job_uniqueness/compat/active_job_spec.rb +49 -0
- data/spec/job_uniqueness/compat/sidekiq_spec.rb +68 -0
- data/spec/job_uniqueness/lock_context_spec.rb +95 -0
- data/spec/job_uniqueness/on_conflict/log_spec.rb +11 -0
- data/spec/job_uniqueness/on_conflict/raise_spec.rb +10 -0
- data/spec/job_uniqueness/on_conflict/reschedule_spec.rb +24 -0
- data/spec/job_uniqueness/on_conflict_spec.rb +16 -0
- data/spec/job_uniqueness/spec_helper.rb +14 -0
- data/spec/job_uniqueness/strategy/base_spec.rb +100 -0
- data/spec/job_uniqueness/strategy/until_and_while_executing_spec.rb +48 -0
- data/spec/job_uniqueness/strategy/until_executed_spec.rb +23 -0
- data/spec/job_uniqueness/strategy/until_executing_spec.rb +23 -0
- data/spec/job_uniqueness/strategy/until_expired_spec.rb +23 -0
- data/spec/job_uniqueness/strategy/while_executing_spec.rb +33 -0
- data/spec/job_uniqueness/support/lock_strategy.rb +28 -0
- data/spec/job_uniqueness/support/on_conflict.rb +24 -0
- data/spec/job_uniqueness/support/test_worker.rb +19 -0
- data/spec/job_uniqueness/unique_job_common_spec.rb +45 -0
- data/spec/spec_helper.rb +1 -1
- metadata +278 -204
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/batches_assets/css/styles.less +0 -0
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/batches_assets/js/batch_tree.js +0 -0
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/batches_assets/js/util.js +0 -0
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/_batch_tree.erb +0 -0
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/_batches_table.erb +0 -0
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/_common.erb +0 -0
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/_jobs_table.erb +0 -0
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/_pagination.erb +0 -0
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/batch.erb +0 -0
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/batches.erb +0 -0
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/pool.erb +0 -0
- /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/pools.erb +0 -0
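The file list above shows the headline addition in 0.22.0.beta1: a job_uniqueness subsystem with locking strategies (until_executed, until_executing, until_and_while_executing, until_expired, while_executing), on-conflict handlers (log, raise, reject, reschedule, null_strategy), a shared unique_job_common.rb mixin, and ActiveJob/Sidekiq compat shims, alongside the relocation of the existing job_batches adapters under compat/. The declaration API itself is not visible in this diff; the sketch below is only a hypothetical illustration of how a strategy/on-conflict pairing of this shape is commonly exposed. The class names, the unique_job method, and its options are invented for the example and are not confirmed by the source.

  # Hypothetical usage sketch -- the real canvas_sync API may differ.
  # Assumes UniqueJobCommon (from unique_job_common.rb) is a mixin that
  # exposes a class-level declaration; names and options are illustrative only.
  class HardRefreshJob < ApplicationJob
    include CanvasSync::JobUniqueness::UniqueJobCommon

    # Hold a lock from enqueue until the job finishes; if a duplicate is
    # enqueued in the meantime, reschedule it instead of raising.
    unique_job strategy: :until_executed, on_conflict: :reschedule

    def perform(account_id)
      # ...
    end
  end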
@@ -1,245 +1,243 @@ data/lib/canvas_sync/job_batches/pool.rb (new contents)

module CanvasSync::JobBatches
  class Pool
    include RedisModel

    POOL_REFILL = RedisScript.new(Pathname.new(__FILE__) + "../pool_refill.lua")

    attr_reader :pid
    redis_attr :description
    redis_attr :created_at
    redis_attr :concurrency, :int
    redis_attr :complete_count, :int
    redis_attr :order
    redis_attr :on_failed_job, :symbol
    redis_attr :clean_when_empty, :bool

    def initialize(pooolid = nil, **kwargs)
      if pooolid
        @existing = true
        @pid = pooolid
      else
        @pid = SecureRandom.urlsafe_base64(10)
        initialize_new(**kwargs)
      end
    end

    def self.from_pid(pid)
      raise "PID must be given" unless pid.present?
      new(pid)
    end

    def <<(job_desc)
      add_job(job_desc)
    end

    def add_job(job_desc)
      add_jobs([job_desc])
    end

    def add_jobs(job_descs, skip_refill: false)
      job_descs.each do |job_desc|
        wrapper = Batch.new
        wrapper.description = "Pool Job Wrapper (PID: #{pid})"
        checkin_event = (on_failed_job == :wait) ? :success : :complete
        wrapper.on(checkin_event, "#{self.class.to_s}.job_checked_in", pool_id: pid)
        wrapper.jobs {}

        job_desc = job_desc.symbolize_keys
        job_desc = job_desc.merge!(
          job: job_desc[:job].to_s,
          pool_wrapper_batch: wrapper.bid,
        )

        push_job_to_pool(job_desc)
      end
      refill_allotment unless skip_refill
    end

    def keep_open!
      if block_given?
        begin
          keep_open!
          yield
        ensure
          let_close!
        end
      else
        redis.hset(redis_key, 'keep_open', 'true')
      end
    end

    def let_close!
      redis.hset(redis_key, 'keep_open', 'false')
      cleanup_if_empty
    end

    def cleanup_redis
      Batch.logger.debug {"Cleaning redis of pool #{pid}"}
      redis do |r|
        r.zrem("pools", pid)
        r.unlink(
          "#{redis_key}",
          "#{redis_key}-jobs",
        )
      end
    end

    def cleanup_if_empty
      self.order

      activec, pactivec, pendingc, clean_when_empty, keep_open = redis.multi do |r|
        r.hlen("#{redis_key}-active")
        r.hget(redis_key, "_active_count")
        pending_count(r)
        r.hget(redis_key, 'clean_when_empty')
        r.hget(redis_key, 'keep_open')
      end

      return if keep_open == 'true' || clean_when_empty == 'false'

      if activec <= 0 && (pactivec.try(:to_i) || 0) <= 0 && pendingc <= 0
        cleanup_redis
      end
    end

    def active_count(r = redis)
      r.hlen("#{redis_key}-active") + r.hincrby(redis_key, "_active_count", 0)
    end

    def active_jobs(r = redis)
      r.hvals("#{redis_key}-active").map {|desc| JSON.parse(desc)[0] }
    end

    def pending_count(r = redis)
      jobs_key = "#{redis_key}-jobs"
      order = self.order || 'fifo'
      case order.to_sym
      when :fifo, :lifo
        r.llen(jobs_key)
      when :random
        r.scard(jobs_key)
      when :priority
        r.zcard(jobs_key)
      end
    end

    def job_checked_in(status, options)
      active_count = refill_allotment(status.bid)
      cleanup_if_empty unless active_count > 0
    end

    def self.job_checked_in(status, options)
      pid = options['pool_id']
      from_pid(pid).job_checked_in(status, options)
    end

    # Administrative/console method to cleanup expired pools from the WebUI
    def self.cleanup_redis_index!
      suffixes = ["", "-active", "-jobs"]
      r.zrangebyscore("pools", "0", Batch::BID_EXPIRE_TTL.seconds.ago.to_i).each do |pid|
        r.zrem("pools", pid) if Batch.cleanup_redis_index_for("POOLID-#{pid}", suffixes)
      end
    end

    protected

    def redis_key
      "POOLID-#{pid}"
    end

    def refill_allotment(checkin_bid = nil)
      active_count, job_descs = POOL_REFILL.call(redis, [redis_key, "#{redis_key}-jobs", "#{redis_key}-active"], [checkin_bid || ""])
      return active_count if active_count < 0

      pending_job_descs = job_descs.dup

      added_jobs = {}
      failed_to_add_jobs = []
      add_exception = nil

      while pending_job_descs.count > 0
        begin
          job_json = pending_job_descs.shift
          job_desc = ::ActiveJob::Arguments.deserialize(JSON.parse(job_json))[0]&.symbolize_keys

          wbid = job_desc[:pool_wrapper_batch]

          Batch.new(wbid).jobs do
            ChainBuilder.enqueue_job(job_desc)
          end

          added_jobs[wbid] = job_json
        rescue => ex
          failed_to_add_jobs << job_json
          add_exception = ex
        end
      end

      redis.multi do |r|
        r.mapped_hmset("#{redis_key}-active", added_jobs) if added_jobs.count > 0
        # Release reserved slots now that we've added the jobs to `-active`
        r.hincrby(redis_key, "_active_count", -job_descs.count)

        r.expire(redis_key, Batch::BID_EXPIRE_TTL)
        r.expire("#{redis_key}-active", Batch::BID_EXPIRE_TTL)
        r.expire("#{redis_key}-jobs", Batch::BID_EXPIRE_TTL)
      end

      # If this happens, we end up in a bad state (as we don't try to re-add items to the pool or refill_allotment again), but
      # this should be a _really_ rare case that should only occur if we've lost connection to Redis or something, so we're
      # operating on the assumption that if we get here, any recovery logic will fail too
      if add_exception.present?
        Batch.logger.error {"Error popping jobs from Pool #{pid}: #{add_exception}"}
        raise add_exception
      end

      active_count + added_jobs.count
    end

    def push_job_to_pool(job_desc)
      jobs_key = "#{redis_key}-jobs"
      # This allows duplicate jobs when a Redis Set is used
      job_desc[:_pool_random_key_] = SecureRandom.urlsafe_base64(10)
      job_json = JSON.unparse(::ActiveJob::Arguments.serialize([job_desc]))
      order = self.order

      redis.multi do |r|
        case order.to_sym
        when :fifo, :lifo
          r.rpush(jobs_key, job_json)
        when :random
          r.sadd(jobs_key, job_json)
        when :priority
          r.zadd(jobs_key, job_desc[:priority] || 0, job_json)
        end
        r.expire(redis_key, Batch::BID_EXPIRE_TTL)
        r.expire(jobs_key, Batch::BID_EXPIRE_TTL)
      end
    end

    def self.redis(&blk)
      Batch.redis &blk
    end
    delegate :redis, to: :class

    def flush_pending_attrs
      super
      redis.expire(redis_key, Batch::BID_EXPIRE_TTL)
      redis.zadd("pools", created_at, pid)
    end

    private

    def initialize_new(concurrency: nil, order: :fifo, clean_when_empty: true, on_failed_job: :wait, description: nil)
      self.created_at = Time.now.utc.to_f
      self.description = description
      self.order = order
      self.concurrency = concurrency
      self.clean_when_empty = clean_when_empty
      self.on_failed_job = on_failed_job
      flush_pending_attrs
    end
  end
end
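The Pool above throttles work by wrapping each queued job description in a small Batch and only releasing up to `concurrency` wrappers at a time, refilling via the pool_refill.lua script as wrappers check back in. A minimal usage sketch follows; the SyncTermJob class is invented for illustration, and the :args key on the descriptor is an assumption about what ChainBuilder.enqueue_job accepts, while :job and :priority come directly from pool.rb.

  # Sketch only: job class and :args key are assumptions, not confirmed API.
  pool = CanvasSync::JobBatches::Pool.new(
    concurrency: 4,          # at most 4 wrapper batches in flight
    order: :priority,        # descriptors stored in a sorted set, popped by score
    on_failed_job: :wait,    # a slot is only checked back in when the wrapper succeeds
    description: "Nightly term syncs"
  )

  pool << { job: SyncTermJob, args: [42], priority: 10 }

  # Later (e.g. from a console) an existing pool can be reopened by its id:
  CanvasSync::JobBatches::Pool.from_pid(pool.pid).active_count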
@@ -1,69 +1,67 @@ data/lib/canvas_sync/job_batches/redis_model.rb (new contents)

module CanvasSync::JobBatches
  module RedisModel
    extend ActiveSupport::Concern

    class_methods do
      def redis_attr(key, type = :string, read_only: true)
        class_eval <<-RUBY, __FILE__, __LINE__ + 1
          def #{key}=(value)
            raise "#{key} is read-only once the batch has been started" if #{read_only.to_s} && (@initialized || @existing)
            @#{key} = value
            if :#{type} == :json
              value = JSON.unparse(value)
            end
            persist_bid_attr('#{key}', value)
          end

          def #{key}
            return @#{key} if defined?(@#{key})
            if (@initialized || @existing)
              value = read_bid_attr('#{key}')
              if :#{type} == :bool
                value = value == 'true'
              elsif :#{type} == :int
                value = value.to_i
              elsif :#{type} == :float
                value = value.to_f
              elsif :#{type} == :json
                value = JSON.parse(value)
              elsif :#{type} == :symbol
                value = value&.to_sym
              end
              @#{key} = value
            end
          end
        RUBY
      end
    end

    def persist_bid_attr(attribute, value)
      if @initialized || @existing
        redis do |r|
          r.multi do |r|
            r.hset(redis_key, attribute, value.to_s)
            r.expire(redis_key, Batch::BID_EXPIRE_TTL)
          end
        end
      else
        @pending_attrs ||= {}
        @pending_attrs[attribute] = value.to_s
      end
    end

    def read_bid_attr(attribute)
      redis do |r|
        r.hget(redis_key, attribute)
      end
    end

    def flush_pending_attrs
      redis do |r|
        r.mapped_hmset(redis_key, @pending_attrs)
      end
      @initialized = true
      @pending_attrs = {}
    end
  end
end
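RedisModel backs both Batch and Pool: redis_attr metaprograms typed getters and setters that persist into a Redis hash keyed by redis_key, buffering writes in @pending_attrs until flush_pending_attrs runs after initialization. The concern is internal to the gem; the Widget class below is a hypothetical consumer written only to show the contract it expects (a redis_key method and a redis helper that yields a connection, here assumed to come from Batch.redis).

  # Sketch only: Widget, its attributes, and its key layout are invented.
  class Widget
    include CanvasSync::JobBatches::RedisModel

    redis_attr :label                        # string, read-only after init
    redis_attr :attempts, :int, read_only: false
    redis_attr :settings, :json

    def initialize(id, label: nil)
      @id = id
      self.label = label    # buffered in @pending_attrs until flushed
      flush_pending_attrs   # writes the hash and marks the model initialized
    end

    def redis_key
      "WIDGET-#{@id}"
    end

    def redis(&blk)
      CanvasSync::JobBatches::Batch.redis(&blk)
    end
  end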