canvas_sync 0.21.1 → 0.22.0.beta1

This diff represents the content of publicly available package versions as released to their respective public registries, and is provided for informational purposes only.
Files changed (86)
  1. checksums.yaml +4 -4
  2. data/lib/canvas_sync/concerns/auto_relations.rb +11 -0
  3. data/lib/canvas_sync/config.rb +3 -5
  4. data/lib/canvas_sync/generators/templates/models/rubric.rb +2 -1
  5. data/lib/canvas_sync/job_batches/batch.rb +432 -402
  6. data/lib/canvas_sync/job_batches/callback.rb +100 -114
  7. data/lib/canvas_sync/job_batches/chain_builder.rb +194 -196
  8. data/lib/canvas_sync/job_batches/{active_job.rb → compat/active_job.rb} +2 -2
  9. data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/helpers.rb +1 -1
  10. data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web.rb +3 -3
  11. data/lib/canvas_sync/job_batches/{sidekiq.rb → compat/sidekiq.rb} +35 -22
  12. data/lib/canvas_sync/job_batches/compat.rb +20 -0
  13. data/lib/canvas_sync/job_batches/context_hash.rb +124 -126
  14. data/lib/canvas_sync/job_batches/jobs/base_job.rb +2 -4
  15. data/lib/canvas_sync/job_batches/jobs/concurrent_batch_job.rb +14 -16
  16. data/lib/canvas_sync/job_batches/jobs/managed_batch_job.rb +125 -127
  17. data/lib/canvas_sync/job_batches/jobs/serial_batch_job.rb +14 -16
  18. data/lib/canvas_sync/job_batches/pool.rb +193 -195
  19. data/lib/canvas_sync/job_batches/redis_model.rb +50 -52
  20. data/lib/canvas_sync/job_batches/redis_script.rb +129 -131
  21. data/lib/canvas_sync/job_batches/status.rb +85 -87
  22. data/lib/canvas_sync/job_uniqueness/compat/active_job.rb +75 -0
  23. data/lib/canvas_sync/job_uniqueness/compat/sidekiq.rb +135 -0
  24. data/lib/canvas_sync/job_uniqueness/compat.rb +20 -0
  25. data/lib/canvas_sync/job_uniqueness/configuration.rb +25 -0
  26. data/lib/canvas_sync/job_uniqueness/job_uniqueness.rb +47 -0
  27. data/lib/canvas_sync/job_uniqueness/lock_context.rb +171 -0
  28. data/lib/canvas_sync/job_uniqueness/locksmith.rb +92 -0
  29. data/lib/canvas_sync/job_uniqueness/on_conflict/base.rb +32 -0
  30. data/lib/canvas_sync/job_uniqueness/on_conflict/log.rb +13 -0
  31. data/lib/canvas_sync/job_uniqueness/on_conflict/null_strategy.rb +9 -0
  32. data/lib/canvas_sync/job_uniqueness/on_conflict/raise.rb +11 -0
  33. data/lib/canvas_sync/job_uniqueness/on_conflict/reject.rb +21 -0
  34. data/lib/canvas_sync/job_uniqueness/on_conflict/reschedule.rb +20 -0
  35. data/lib/canvas_sync/job_uniqueness/on_conflict.rb +41 -0
  36. data/lib/canvas_sync/job_uniqueness/strategy/base.rb +104 -0
  37. data/lib/canvas_sync/job_uniqueness/strategy/until_and_while_executing.rb +35 -0
  38. data/lib/canvas_sync/job_uniqueness/strategy/until_executed.rb +20 -0
  39. data/lib/canvas_sync/job_uniqueness/strategy/until_executing.rb +20 -0
  40. data/lib/canvas_sync/job_uniqueness/strategy/until_expired.rb +16 -0
  41. data/lib/canvas_sync/job_uniqueness/strategy/while_executing.rb +26 -0
  42. data/lib/canvas_sync/job_uniqueness/strategy.rb +27 -0
  43. data/lib/canvas_sync/job_uniqueness/unique_job_common.rb +79 -0
  44. data/lib/canvas_sync/misc_helper.rb +1 -1
  45. data/lib/canvas_sync/version.rb +1 -1
  46. data/lib/canvas_sync.rb +4 -3
  47. data/spec/dummy/app/models/rubric.rb +2 -1
  48. data/spec/dummy/config/environments/test.rb +1 -1
  49. data/spec/job_batching/batch_spec.rb +49 -7
  50. data/spec/job_batching/{active_job_spec.rb → compat/active_job_spec.rb} +2 -2
  51. data/spec/job_batching/{sidekiq_spec.rb → compat/sidekiq_spec.rb} +14 -12
  52. data/spec/job_batching/flow_spec.rb +1 -1
  53. data/spec/job_batching/integration_helper.rb +1 -1
  54. data/spec/job_batching/status_spec.rb +2 -2
  55. data/spec/job_uniqueness/compat/active_job_spec.rb +49 -0
  56. data/spec/job_uniqueness/compat/sidekiq_spec.rb +68 -0
  57. data/spec/job_uniqueness/lock_context_spec.rb +95 -0
  58. data/spec/job_uniqueness/on_conflict/log_spec.rb +11 -0
  59. data/spec/job_uniqueness/on_conflict/raise_spec.rb +10 -0
  60. data/spec/job_uniqueness/on_conflict/reschedule_spec.rb +24 -0
  61. data/spec/job_uniqueness/on_conflict_spec.rb +16 -0
  62. data/spec/job_uniqueness/spec_helper.rb +14 -0
  63. data/spec/job_uniqueness/strategy/base_spec.rb +100 -0
  64. data/spec/job_uniqueness/strategy/until_and_while_executing_spec.rb +48 -0
  65. data/spec/job_uniqueness/strategy/until_executed_spec.rb +23 -0
  66. data/spec/job_uniqueness/strategy/until_executing_spec.rb +23 -0
  67. data/spec/job_uniqueness/strategy/until_expired_spec.rb +23 -0
  68. data/spec/job_uniqueness/strategy/while_executing_spec.rb +33 -0
  69. data/spec/job_uniqueness/support/lock_strategy.rb +28 -0
  70. data/spec/job_uniqueness/support/on_conflict.rb +24 -0
  71. data/spec/job_uniqueness/support/test_worker.rb +19 -0
  72. data/spec/job_uniqueness/unique_job_common_spec.rb +45 -0
  73. data/spec/spec_helper.rb +1 -1
  74. metadata +278 -204
  75. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/batches_assets/css/styles.less +0 -0
  76. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/batches_assets/js/batch_tree.js +0 -0
  77. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/batches_assets/js/util.js +0 -0
  78. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/_batch_tree.erb +0 -0
  79. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/_batches_table.erb +0 -0
  80. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/_common.erb +0 -0
  81. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/_jobs_table.erb +0 -0
  82. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/_pagination.erb +0 -0
  83. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/batch.erb +0 -0
  84. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/batches.erb +0 -0
  85. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/pool.erb +0 -0
  86. /data/lib/canvas_sync/job_batches/{sidekiq → compat/sidekiq}/web/views/pools.erb +0 -0
--- data/lib/canvas_sync/job_batches/pool.rb
+++ data/lib/canvas_sync/job_batches/pool.rb
@@ -1,245 +1,243 @@
-module CanvasSync
-  module JobBatches
-    class Pool
-      include RedisModel
-
-      POOL_REFILL = RedisScript.new(Pathname.new(__FILE__) + "../pool_refill.lua")
-
-      attr_reader :pid
-      redis_attr :description
-      redis_attr :created_at
-      redis_attr :concurrency, :int
-      redis_attr :complete_count, :int
-      redis_attr :order
-      redis_attr :on_failed_job, :symbol
-      redis_attr :clean_when_empty, :bool
-
-      def initialize(pooolid = nil, **kwargs)
-        if pooolid
-          @existing = true
-          @pid = pooolid
-        else
-          @pid = SecureRandom.urlsafe_base64(10)
-          initialize_new(**kwargs)
-        end
-      end
-
-      def self.from_pid(pid)
-        raise "PID must be given" unless pid.present?
-        new(pid)
-      end
-
-      def <<(job_desc)
-        add_job(job_desc)
-      end
-
-      def add_job(job_desc)
-        add_jobs([job_desc])
-      end
-
-      def add_jobs(job_descs, skip_refill: false)
-        job_descs.each do |job_desc|
-          wrapper = Batch.new
-          wrapper.description = "Pool Job Wrapper (PID: #{pid})"
-          checkin_event = (on_failed_job == :wait) ? :success : :complete
-          wrapper.on(checkin_event, "#{self.class.to_s}.job_checked_in", pool_id: pid)
-          wrapper.jobs {}
-
-          job_desc = job_desc.symbolize_keys
-          job_desc = job_desc.merge!(
-            job: job_desc[:job].to_s,
-            pool_wrapper_batch: wrapper.bid,
-          )
-
-          push_job_to_pool(job_desc)
-        end
-        refill_allotment unless skip_refill
-      end
-
-      def keep_open!
-        if block_given?
-          begin
-            keep_open!
-            yield
-          ensure
-            let_close!
-          end
-        else
-          redis.hset(redis_key, 'keep_open', 'true')
-        end
-      end
-
-      def let_close!
-        redis.hset(redis_key, 'keep_open', 'false')
-        cleanup_if_empty
-      end
-
-      def cleanup_redis
-        Batch.logger.debug {"Cleaning redis of pool #{pid}"}
-        redis do |r|
-          r.zrem("pools", pid)
-          r.unlink(
-            "#{redis_key}",
-            "#{redis_key}-jobs",
-          )
-        end
-      end
-
-      def cleanup_if_empty
-        self.order
-
-        activec, pactivec, pendingc, clean_when_empty, keep_open = redis.multi do |r|
-          r.hlen("#{redis_key}-active")
-          r.hget(redis_key, "_active_count")
-          pending_count(r)
-          r.hget(redis_key, 'clean_when_empty')
-          r.hget(redis_key, 'keep_open')
-        end
-
-        return if keep_open == 'true' || clean_when_empty == 'false'
-
-        if activec <= 0 && (pactivec.try(:to_i) || 0) <= 0 && pendingc <= 0
-          cleanup_redis
-        end
-      end
-
-      def active_count(r = redis)
-        r.hlen("#{redis_key}-active") + r.hincrby(redis_key, "_active_count", 0)
-      end
-
-      def active_jobs(r = redis)
-        r.hvals("#{redis_key}-active").map {|desc| JSON.parse(desc)[0] }
-      end
-
-      def pending_count(r = redis)
-        jobs_key = "#{redis_key}-jobs"
-        order = self.order || 'fifo'
-        case order.to_sym
-        when :fifo, :lifo
-          r.llen(jobs_key)
-        when :random
-          r.scard(jobs_key)
-        when :priority
-          r.zcard(jobs_key)
-        end
-      end
-
-      def job_checked_in(status, options)
-        active_count = refill_allotment(status.bid)
-        cleanup_if_empty unless active_count > 0
-      end
-
-      def self.job_checked_in(status, options)
-        pid = options['pool_id']
-        from_pid(pid).job_checked_in(status, options)
-      end
-
-      # Administrative/console method to cleanup expired pools from the WebUI
-      def self.cleanup_redis_index!
-        suffixes = ["", "-active", "-jobs"]
-        r.zrangebyscore("pools", "0", Batch::BID_EXPIRE_TTL.seconds.ago.to_i).each do |pid|
-          r.zrem("pools", pid) if Batch.cleanup_redis_index_for("POOLID-#{pid}", suffixes)
-        end
-      end
-
-      protected
-
-      def redis_key
-        "POOLID-#{pid}"
-      end
-
-      def refill_allotment(checkin_bid = nil)
-        active_count, job_descs = POOL_REFILL.call(redis, [redis_key, "#{redis_key}-jobs", "#{redis_key}-active"], [checkin_bid || ""])
-        return active_count if active_count < 0
-
-        pending_job_descs = job_descs.dup
-
-        added_jobs = {}
-        failed_to_add_jobs = []
-        add_exception = nil
-
-        while pending_job_descs.count > 0
-          begin
-            job_json = pending_job_descs.shift
-            job_desc = ::ActiveJob::Arguments.deserialize(JSON.parse(job_json))[0]&.symbolize_keys
-
-            wbid = job_desc[:pool_wrapper_batch]
-
-            Batch.new(wbid).jobs do
-              ChainBuilder.enqueue_job(job_desc)
-            end
-
-            added_jobs[wbid] = job_json
-          rescue => ex
-            failed_to_add_jobs << job_json
-            add_exception = ex
-          end
-        end
-
-        redis.multi do |r|
-          r.mapped_hmset("#{redis_key}-active", added_jobs) if added_jobs.count > 0
-          # Release reserved slots now that we've added the jobs to `-active`
-          r.hincrby(redis_key, "_active_count", -job_descs.count)
-
-          r.expire(redis_key, Batch::BID_EXPIRE_TTL)
-          r.expire("#{redis_key}-active", Batch::BID_EXPIRE_TTL)
-          r.expire("#{redis_key}-jobs", Batch::BID_EXPIRE_TTL)
-        end
-
-        # If this happens, we end up in a bad state (as we don't try to re-add items to the pool or refill_allotment again), but
-        # this should be a _really_ rare case that should only occur if we've lost connection to Redis or something, so we're
-        # operating on the assumption that if we get here, any recovery logic will fail too
-        if add_exception.present?
-          Batch.logger.error {"Error popping jobs from Pool #{pid}: #{add_exception}"}
-          raise add_exception
-        end
-
-        active_count + added_jobs.count
-      end
-
-      def push_job_to_pool(job_desc)
-        jobs_key = "#{redis_key}-jobs"
-        # This allows duplicate jobs when a Redis Set is used
-        job_desc[:_pool_random_key_] = SecureRandom.urlsafe_base64(10)
-        job_json = JSON.unparse(::ActiveJob::Arguments.serialize([job_desc]))
-        order = self.order
-
-        redis.multi do |r|
-          case order.to_sym
-          when :fifo, :lifo
-            r.rpush(jobs_key, job_json)
-          when :random
-            r.sadd(jobs_key, job_json)
-          when :priority
-            r.zadd(jobs_key, job_desc[:priority] || 0, job_json)
-          end
-          r.expire(redis_key, Batch::BID_EXPIRE_TTL)
-          r.expire(jobs_key, Batch::BID_EXPIRE_TTL)
-        end
-      end
-
-      def self.redis(&blk)
-        Batch.redis &blk
-      end
-      delegate :redis, to: :class
-
-      def flush_pending_attrs
-        super
-        redis.expire(redis_key, Batch::BID_EXPIRE_TTL)
-        redis.zadd("pools", created_at, pid)
-      end
-
-      private
-
-      def initialize_new(concurrency: nil, order: :fifo, clean_when_empty: true, on_failed_job: :wait, description: nil)
-        self.created_at = Time.now.utc.to_f
-        self.description = description
-        self.order = order
-        self.concurrency = concurrency
-        self.clean_when_empty = clean_when_empty
-        self.on_failed_job = on_failed_job
-        flush_pending_attrs
-      end
-    end
-  end
-end
+module CanvasSync::JobBatches
+  class Pool
+    include RedisModel
+
+    POOL_REFILL = RedisScript.new(Pathname.new(__FILE__) + "../pool_refill.lua")
+
+    attr_reader :pid
+    redis_attr :description
+    redis_attr :created_at
+    redis_attr :concurrency, :int
+    redis_attr :complete_count, :int
+    redis_attr :order
+    redis_attr :on_failed_job, :symbol
+    redis_attr :clean_when_empty, :bool
+
+    def initialize(pooolid = nil, **kwargs)
+      if pooolid
+        @existing = true
+        @pid = pooolid
+      else
+        @pid = SecureRandom.urlsafe_base64(10)
+        initialize_new(**kwargs)
+      end
+    end
+
+    def self.from_pid(pid)
+      raise "PID must be given" unless pid.present?
+      new(pid)
+    end
+
+    def <<(job_desc)
+      add_job(job_desc)
+    end
+
+    def add_job(job_desc)
+      add_jobs([job_desc])
+    end
+
+    def add_jobs(job_descs, skip_refill: false)
+      job_descs.each do |job_desc|
+        wrapper = Batch.new
+        wrapper.description = "Pool Job Wrapper (PID: #{pid})"
+        checkin_event = (on_failed_job == :wait) ? :success : :complete
+        wrapper.on(checkin_event, "#{self.class.to_s}.job_checked_in", pool_id: pid)
+        wrapper.jobs {}
+
+        job_desc = job_desc.symbolize_keys
+        job_desc = job_desc.merge!(
+          job: job_desc[:job].to_s,
+          pool_wrapper_batch: wrapper.bid,
+        )
+
+        push_job_to_pool(job_desc)
+      end
+      refill_allotment unless skip_refill
+    end
+
+    def keep_open!
+      if block_given?
+        begin
+          keep_open!
+          yield
+        ensure
+          let_close!
+        end
+      else
+        redis.hset(redis_key, 'keep_open', 'true')
+      end
+    end
+
+    def let_close!
+      redis.hset(redis_key, 'keep_open', 'false')
+      cleanup_if_empty
+    end
+
+    def cleanup_redis
+      Batch.logger.debug {"Cleaning redis of pool #{pid}"}
+      redis do |r|
+        r.zrem("pools", pid)
+        r.unlink(
+          "#{redis_key}",
+          "#{redis_key}-jobs",
+        )
+      end
+    end
+
+    def cleanup_if_empty
+      self.order
+
+      activec, pactivec, pendingc, clean_when_empty, keep_open = redis.multi do |r|
+        r.hlen("#{redis_key}-active")
+        r.hget(redis_key, "_active_count")
+        pending_count(r)
+        r.hget(redis_key, 'clean_when_empty')
+        r.hget(redis_key, 'keep_open')
+      end
+
+      return if keep_open == 'true' || clean_when_empty == 'false'
+
+      if activec <= 0 && (pactivec.try(:to_i) || 0) <= 0 && pendingc <= 0
+        cleanup_redis
+      end
+    end
+
+    def active_count(r = redis)
+      r.hlen("#{redis_key}-active") + r.hincrby(redis_key, "_active_count", 0)
+    end
+
+    def active_jobs(r = redis)
+      r.hvals("#{redis_key}-active").map {|desc| JSON.parse(desc)[0] }
+    end
+
+    def pending_count(r = redis)
+      jobs_key = "#{redis_key}-jobs"
+      order = self.order || 'fifo'
+      case order.to_sym
+      when :fifo, :lifo
+        r.llen(jobs_key)
+      when :random
+        r.scard(jobs_key)
+      when :priority
+        r.zcard(jobs_key)
+      end
+    end
+
+    def job_checked_in(status, options)
+      active_count = refill_allotment(status.bid)
+      cleanup_if_empty unless active_count > 0
+    end
+
+    def self.job_checked_in(status, options)
+      pid = options['pool_id']
+      from_pid(pid).job_checked_in(status, options)
+    end
+
+    # Administrative/console method to cleanup expired pools from the WebUI
+    def self.cleanup_redis_index!
+      suffixes = ["", "-active", "-jobs"]
+      r.zrangebyscore("pools", "0", Batch::BID_EXPIRE_TTL.seconds.ago.to_i).each do |pid|
+        r.zrem("pools", pid) if Batch.cleanup_redis_index_for("POOLID-#{pid}", suffixes)
+      end
+    end
+
+    protected
+
+    def redis_key
+      "POOLID-#{pid}"
+    end
+
+    def refill_allotment(checkin_bid = nil)
+      active_count, job_descs = POOL_REFILL.call(redis, [redis_key, "#{redis_key}-jobs", "#{redis_key}-active"], [checkin_bid || ""])
+      return active_count if active_count < 0
+
+      pending_job_descs = job_descs.dup
+
+      added_jobs = {}
+      failed_to_add_jobs = []
+      add_exception = nil
+
+      while pending_job_descs.count > 0
+        begin
+          job_json = pending_job_descs.shift
+          job_desc = ::ActiveJob::Arguments.deserialize(JSON.parse(job_json))[0]&.symbolize_keys
+
+          wbid = job_desc[:pool_wrapper_batch]
+
+          Batch.new(wbid).jobs do
+            ChainBuilder.enqueue_job(job_desc)
+          end
+
+          added_jobs[wbid] = job_json
+        rescue => ex
+          failed_to_add_jobs << job_json
+          add_exception = ex
+        end
+      end
+
+      redis.multi do |r|
+        r.mapped_hmset("#{redis_key}-active", added_jobs) if added_jobs.count > 0
+        # Release reserved slots now that we've added the jobs to `-active`
+        r.hincrby(redis_key, "_active_count", -job_descs.count)

+        r.expire(redis_key, Batch::BID_EXPIRE_TTL)
+        r.expire("#{redis_key}-active", Batch::BID_EXPIRE_TTL)
+        r.expire("#{redis_key}-jobs", Batch::BID_EXPIRE_TTL)
+      end
+
+      # If this happens, we end up in a bad state (as we don't try to re-add items to the pool or refill_allotment again), but
+      # this should be a _really_ rare case that should only occur if we've lost connection to Redis or something, so we're
+      # operating on the assumption that if we get here, any recovery logic will fail too
+      if add_exception.present?
+        Batch.logger.error {"Error popping jobs from Pool #{pid}: #{add_exception}"}
+        raise add_exception
+      end
+
+      active_count + added_jobs.count
+    end
+
+    def push_job_to_pool(job_desc)
+      jobs_key = "#{redis_key}-jobs"
+      # This allows duplicate jobs when a Redis Set is used
+      job_desc[:_pool_random_key_] = SecureRandom.urlsafe_base64(10)
+      job_json = JSON.unparse(::ActiveJob::Arguments.serialize([job_desc]))
+      order = self.order
+
+      redis.multi do |r|
+        case order.to_sym
+        when :fifo, :lifo
+          r.rpush(jobs_key, job_json)
+        when :random
+          r.sadd(jobs_key, job_json)
+        when :priority
+          r.zadd(jobs_key, job_desc[:priority] || 0, job_json)
+        end
+        r.expire(redis_key, Batch::BID_EXPIRE_TTL)
+        r.expire(jobs_key, Batch::BID_EXPIRE_TTL)
+      end
+    end
+
+    def self.redis(&blk)
+      Batch.redis &blk
+    end
+    delegate :redis, to: :class
+
+    def flush_pending_attrs
+      super
+      redis.expire(redis_key, Batch::BID_EXPIRE_TTL)
+      redis.zadd("pools", created_at, pid)
+    end
+
+    private
+
+    def initialize_new(concurrency: nil, order: :fifo, clean_when_empty: true, on_failed_job: :wait, description: nil)
+      self.created_at = Time.now.utc.to_f
+      self.description = description
+      self.order = order
+      self.concurrency = concurrency
+      self.clean_when_empty = clean_when_empty
+      self.on_failed_job = on_failed_job
+      flush_pending_attrs
+    end
+  end
+end
--- data/lib/canvas_sync/job_batches/redis_model.rb
+++ data/lib/canvas_sync/job_batches/redis_model.rb
@@ -1,69 +1,67 @@
-module CanvasSync
-  module JobBatches
-    module RedisModel
-      extend ActiveSupport::Concern
-
-      class_methods do
-        def redis_attr(key, type = :string, read_only: true)
-          class_eval <<-RUBY, __FILE__, __LINE__ + 1
-            def #{key}=(value)
-              raise "#{key} is read-only once the batch has been started" if #{read_only.to_s} && (@initialized || @existing)
-              @#{key} = value
-              if :#{type} == :json
-                value = JSON.unparse(value)
-              end
-              persist_bid_attr('#{key}', value)
-            end
-
-            def #{key}
-              return @#{key} if defined?(@#{key})
-              if (@initialized || @existing)
-                value = read_bid_attr('#{key}')
-                if :#{type} == :bool
-                  value = value == 'true'
-                elsif :#{type} == :int
-                  value = value.to_i
-                elsif :#{type} == :float
-                  value = value.to_f
-                elsif :#{type} == :json
-                  value = JSON.parse(value)
-                elsif :#{type} == :symbol
-                  value = value&.to_sym
-                end
-                @#{key} = value
-              end
-            end
-          RUBY
-        end
-      end
-
-      def persist_bid_attr(attribute, value)
-        if @initialized || @existing
-          redis do |r|
-            r.multi do |r|
-              r.hset(redis_key, attribute, value.to_s)
-              r.expire(redis_key, Batch::BID_EXPIRE_TTL)
-            end
-          end
-        else
-          @pending_attrs ||= {}
-          @pending_attrs[attribute] = value.to_s
-        end
-      end
-
-      def read_bid_attr(attribute)
-        redis do |r|
-          r.hget(redis_key, attribute)
-        end
-      end
-
-      def flush_pending_attrs
-        redis do |r|
-          r.mapped_hmset(redis_key, @pending_attrs)
-        end
-        @initialized = true
-        @pending_attrs = {}
-      end
-    end
-  end
-end
+module CanvasSync::JobBatches
+  module RedisModel
+    extend ActiveSupport::Concern
+
+    class_methods do
+      def redis_attr(key, type = :string, read_only: true)
+        class_eval <<-RUBY, __FILE__, __LINE__ + 1
+          def #{key}=(value)
+            raise "#{key} is read-only once the batch has been started" if #{read_only.to_s} && (@initialized || @existing)
+            @#{key} = value
+            if :#{type} == :json
+              value = JSON.unparse(value)
+            end
+            persist_bid_attr('#{key}', value)
+          end
+
+          def #{key}
+            return @#{key} if defined?(@#{key})
+            if (@initialized || @existing)
+              value = read_bid_attr('#{key}')
+              if :#{type} == :bool
+                value = value == 'true'
+              elsif :#{type} == :int
+                value = value.to_i
+              elsif :#{type} == :float
+                value = value.to_f
+              elsif :#{type} == :json
+                value = JSON.parse(value)
+              elsif :#{type} == :symbol
+                value = value&.to_sym
+              end
+              @#{key} = value
+            end
+          end
+        RUBY
+      end
+    end
+
+    def persist_bid_attr(attribute, value)
+      if @initialized || @existing
+        redis do |r|
+          r.multi do |r|
+            r.hset(redis_key, attribute, value.to_s)
+            r.expire(redis_key, Batch::BID_EXPIRE_TTL)
+          end
+        end
+      else
+        @pending_attrs ||= {}
+        @pending_attrs[attribute] = value.to_s
+      end
+    end
+
+    def read_bid_attr(attribute)
+      redis do |r|
+        r.hget(redis_key, attribute)
+      end
+    end
+
+    def flush_pending_attrs
+      redis do |r|
+        r.mapped_hmset(redis_key, @pending_attrs)
+      end
+      @initialized = true
+      @pending_attrs = {}
+    end
+  end
+end
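
For orientation, here is a minimal sketch of how the RedisModel concern in the hunk above is consumed, mirroring what the Pool class does in the first hunk. The ExampleModel class, its attributes, and the "EXAMPLE-MODEL" key are hypothetical; only the behavior of redis_attr, persist_bid_attr, and read_bid_attr comes from the diff itself.

# Hypothetical example (not part of the gem): a class backed by RedisModel.
# redis_attr generates a writer that routes through persist_bid_attr and a
# reader that coerces the raw Redis hash value (:bool, :int, :float, :json,
# :symbol). With the default read_only: true, the writer raises once the
# object is initialized or loaded as existing.
class ExampleModel
  include CanvasSync::JobBatches::RedisModel

  redis_attr :description                # stored and returned as a string
  redis_attr :concurrency, :int          # reader applies to_i
  redis_attr :clean_when_empty, :bool    # reader compares the raw value to 'true'

  # RedisModel expects the including class to supply `redis` and `redis_key`,
  # exactly as Pool does above.
  def redis(&blk)
    CanvasSync::JobBatches::Batch.redis(&blk)
  end

  def redis_key
    "EXAMPLE-MODEL"
  end
end

Writes made before flush_pending_attrs accumulate in @pending_attrs and are only persisted in bulk, which is what Pool#initialize_new relies on.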
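
And a hedged usage sketch for the Pool class from the first hunk, inferred only from the methods visible there (initialize_new, add_jobs, <<, from_pid, let_close!, push_job_to_pool). The job class names and option values are placeholders; apart from :job and :priority, the job-description keys accepted downstream by ChainBuilder.enqueue_job are not shown in this diff.

# Hypothetical usage of CanvasSync::JobBatches::Pool, assuming the batching
# Redis connection is already configured. Keyword defaults mirror
# initialize_new in the hunk above.
pool = CanvasSync::JobBatches::Pool.new(
  concurrency: 4,            # maximum concurrently active jobs (enforced in pool_refill.lua, not shown)
  order: :priority,          # :fifo, :lifo, :random, or :priority (see push_job_to_pool)
  on_failed_job: :wait,      # :wait refills a slot only on the wrapper batch's :success callback
  description: "Example sync pool"
)

# Each description needs at least a :job; :priority is only consulted when
# order is :priority. Remaining keys pass through to ChainBuilder.enqueue_job.
pool.add_jobs([
  { job: "ExampleSyncJob", priority: 10 },
  { job: "OtherExampleJob", priority: 1 },
])

# Single jobs can be appended with <<, which delegates to add_job/add_jobs.
pool << { job: "ExampleSyncJob" }

# A pool can later be reopened by its id and allowed to clean itself up.
same_pool = CanvasSync::JobBatches::Pool.from_pid(pool.pid)
same_pool.let_close!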