canvas_sync 0.18.12 → 0.19.0.beta2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +12 -9
- data/lib/canvas_sync/concerns/ability_helper.rb +19 -11
- data/lib/canvas_sync/concerns/account/base.rb +15 -0
- data/lib/canvas_sync/job_batches/batch.rb +46 -2
- data/lib/canvas_sync/job_batches/callback.rb +7 -4
- data/lib/canvas_sync/job_batches/chain_builder.rb +38 -74
- data/lib/canvas_sync/job_batches/jobs/concurrent_batch_job.rb +5 -5
- data/lib/canvas_sync/job_batches/jobs/managed_batch_job.rb +54 -22
- data/lib/canvas_sync/job_batches/jobs/serial_batch_job.rb +5 -5
- data/lib/canvas_sync/job_batches/pool.rb +83 -54
- data/lib/canvas_sync/job_batches/pool_refill.lua +40 -0
- data/lib/canvas_sync/job_batches/sidekiq/web/views/_jobs_table.erb +2 -0
- data/lib/canvas_sync/job_batches/sidekiq/web/views/pool.erb +10 -1
- data/lib/canvas_sync/job_batches/sidekiq/web.rb +3 -1
- data/lib/canvas_sync/jobs/canvas_process_waiter.rb +3 -35
- data/lib/canvas_sync/misc_helper.rb +48 -0
- data/lib/canvas_sync/version.rb +1 -1
- data/lib/canvas_sync.rb +2 -9
- data/spec/canvas_sync/canvas_sync_spec.rb +201 -115
- data/spec/canvas_sync/jobs/canvas_process_waiter_spec.rb +0 -48
- data/spec/canvas_sync/misc_helper_spec.rb +58 -0
- data/spec/dummy/log/test.log +69092 -0
- data/spec/job_batching/pool_spec.rb +161 -0
- data/spec/job_batching/support/base_job.rb +1 -1
- metadata +10 -5
- data/lib/canvas_sync/job_batches/hincr_max.lua +0 -5
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: bc839ab64ec8076616ca77afa2d66f727ed48e0a1ee6ffd10b8a843f32444c03
+  data.tar.gz: 2f2706695832f1c0ca6a11708695cba06019947109d3a4cb4e2af4dc4748b488
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ba8d9abc64bd0df6fa2eb59c018ce265f85625fbbe5b83a82b53def9c0e731a6863a425e52bfac0c36dd435d91d1ec067a4a25ed199dd2302261f2a185fb1e14
+  data.tar.gz: 16935770f2023e2e6892a5f3fe1fd05473855a9fb45d459d87b382f618500200fdaf9578457904388c0e074a7ab26acd977f0c7f6ea8a518f74b6d78d810aa37
data/README.md
CHANGED

@@ -128,13 +128,12 @@ chain = CanvasSync.default_provisioning_report_chain(
 )

 # Add a custom job to the end of the chain.
-chain << { job: CanvasSyncCompleteWorker,
-chain << { job: CanvasSyncCompleteWorker, options: { job_id: job.id } } # If an options key is provided, it will be automatically appended to the end of the :parameters array
+chain << { job: CanvasSyncCompleteWorker, args: [job.id], kwargs: { job_id: job.id } }

 chain.process!

 # The chain object provides a fairly extensive API:
-chain.insert({ job: SomeOtherJob }) # Adds the job to the end of the chain
+chain.insert({ job: SomeOtherJob, args: [], kwargs: {} }) # Adds the job to the end of the chain
 chain.insert_at(0, { job: SomeOtherJob }) # Adds the job to the beginning of the chain
 chain.insert({ job: SomeOtherJob }, after: 'CanvasSync::Jobs::SyncTermsJob') # Adds the job right after the SyncTermsJob
 chain.insert({ job: SomeOtherJob }, before: 'CanvasSync::Jobs::SyncTermsJob') # Adds the job right before the SyncTermsJob

@@ -163,7 +162,7 @@ end

 You must implement a job that will enqueue a report starter for your report. (TODO: would be nice to make some sort of builder for this, so you just define the report and its params and then the gem runs it in a pre-defined job.)

-Let's say we have a custom Canvas report called "my_really_cool_report_csv". First, we would need to create a job class that will enqueue a report starter.
+Let's say we have a custom Canvas report called "my_really_cool_report_csv". First, we would need to create a job class that will enqueue a report starter.

 ```ruby
 class MyReallyCoolReportJob < CanvasSync::Jobs::ReportStarter

@@ -309,7 +308,8 @@ pool_id = pool.pid
 # Add a job to the pool
 pool << {
   job: SomeJob, # The Class of a ActiveJob Job or Sidekiq Worker
-
+  args: [1, 2, 3], # Array of params to pass the Job
+  kwargs: {},
   priority: 100, # Only effective if order=:priority, higher is higher
 }

@@ -317,7 +317,8 @@ pool << {
 pool.add_jobs([
   {
     job: SomeJob, # The Class of a ActiveJob Job or Sidekiq Worker
-
+    args: [1, 2, 3], # Array of params to pass the Job
+    kwargs: {},
     priority: 100, # Only effective if order=:priority, higher is higher
   },
   # ...
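The two pool hunks above show the new job-descriptor shape: positional arguments go in `:args` and keyword arguments in `:kwargs`, replacing the old single parameter value. A minimal sketch under that assumption — `SomeJob` is a placeholder worker, and the `Pool.new` options are assumed to match the surrounding README section:

```ruby
# Sketch only: assumes CanvasSync::JobBatches::Pool.new accepts these options
# (as described elsewhere in this README) and that SomeJob is a real worker class.
pool = CanvasSync::JobBatches::Pool.new(concurrency: 2, order: :priority)

pool.add_jobs([
  { job: SomeJob, args: [1, 2, 3], kwargs: { dry_run: true }, priority: 100 },
  { job: SomeJob, args: [4, 5, 6], kwargs: {},                priority: 10  },
])
```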
@@ -367,7 +368,7 @@ If you want your own jobs to also log to the table all you have to do is have yo

 If you want to be able to utilize the `CanvasSync::JobLog` without `ActiveJob` (so you can get access to `Sidekiq` features that `ActiveJob` doesn't support), then add the following to an initializer in your Rails app:

-```
+```ruby
 Sidekiq.configure_server do |config|
   config.server_middleware do |chain|
     chain.add CanvasSync::Sidekiq::Middleware

@@ -397,7 +398,7 @@ chain.process!

 You can configure CanvasSync settings by doing the following:

-```
+```ruby
 CanvasSync.configure do |config|
   config.classes_to_only_log_errors_on << "ClassToOnlyLogErrorsOn"
 end

@@ -412,7 +413,7 @@ You can pass in global_options to a job chain. Global options are added to the
 various internal processes.

 Pass global options into a job chain, using the options param nested in a :global key.
-options: { global: {...} }
+`options: { global: {...} }`

 report_timeout (integer): Number of days until a Canvas report should timeout. Default is 1.
 report_compilation_timeout (integer): Number of days until a Canvas report should timeout. Default is 1 hour.

@@ -421,12 +422,14 @@ report_max_tries (integer): The number of times to attempt a report be
 if it has an 'error' status in Canvas or is deleted.

 This is an example job chain with global options:
+```ruby
 job_chain = CanvasSync.default_provisioning_report_chain(
   MODELS_TO_SYNC,
   term_scope: :active,
   full_sync_every: 'sunday',
   options: { global: { report_timeout: 2 } }
 )
+```

 ## Handling Job errors

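Putting the README changes together: custom jobs appended to a chain now carry explicit `:args`/`:kwargs` keys instead of `:options`, while global options still nest under `options: { global: ... }`. A short sketch composed from the README hunks above (`CanvasSyncCompleteWorker`, `MODELS_TO_SYNC`, and the local `job` record are placeholders taken from those examples):

```ruby
# Sketch based on the README hunks above; not a verbatim excerpt.
chain = CanvasSync.default_provisioning_report_chain(
  MODELS_TO_SYNC,
  term_scope: :active,
  options: { global: { report_timeout: 2 } }
)

# Positional arguments go in :args, keyword arguments in :kwargs
chain << { job: CanvasSyncCompleteWorker, args: [job.id], kwargs: { job_id: job.id } }
chain.process!
```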
data/lib/canvas_sync/concerns/ability_helper.rb
CHANGED

@@ -35,6 +35,23 @@ module CanvasSync::Concerns
     labels.is_a?(String) ? labels.split(',') : []
   end

+  def canvas_account_role_labels(account = 'self')
+    account = 'self' if account.to_s == "root"
+    account = account.canvas_id if account.respond_to?(:canvas_id)
+
+    if "::Admin".safe_constantize && ::Admin < ::ActiveRecord::Base
+      account = current_organization.canvas_account_id if account == 'self'
+      adm_query = ::Admin.where(canvas_account_id: account, workflow_state: "active")
+      adm_query.pluck(:role_name)
+    else
+      Rails.cache.fetch([self.class.name, "AccountAdminLinks", account, canvas_user_id], expires_in: 1.hour) do
+        admin_entries = canvas_sync_client.account_admins(account, user_id: [canvas_user_id])
+        admin_entries = admin_entries.select{|ent| ent[:workflow_state] == 'active' }
+        admin_entries.map{|ent| ent[:role] }
+      end
+    end
+  end
+
   def lti_roles
     @lti_roles ||= RoleStore.new(launch_params["https://purl.imsglobal.org/spec/lti/claim/roles"] || launch_params['ext_roles'] || '')
   end

@@ -148,16 +165,7 @@ module CanvasSync::Concerns
   end

   def canvas_root_account_roles
-    role_labels =
-      adm_query = ::Admin.where(canvas_account_id: current_organization.canvas_account_id, workflow_state: "active")
-      adm_query.pluck(:role_name)
-    else
-      Rails.cache.fetch([self.class.name, "RootAccountAdminLinks", canvas_user_id], expires_in: 1.hour) do
-        admin_entries = canvas_sync_client.account_admins('self', user_id: [canvas_user_id])
-        admin_entries = admin_entries.select{|ent| ent[:workflow_state] == 'active' }
-        admin_entries.map{|ent| ent[:role] }
-      end
-    end
+    role_labels = canvas_account_role_labels('self')
     ::Role.for_labels(role_labels, ::Account.find_by(canvas_parent_account_id: nil))
   end

@@ -171,7 +179,7 @@ module CanvasSync::Concerns

   def canvas_super_user?
     cache_on_session(:canvas_super_user?) do
-      canvas_site_admin? ||
+      canvas_site_admin? || canvas_account_role_labels(:root).include?("Account Admin")
     end
   end

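The new `canvas_account_role_labels` helper above accepts `'self'`, `:root`, a raw Canvas account id, or any object responding to `canvas_id`, and returns the active admin role labels either from a local `Admin` table or from the Canvas API (cached for an hour). A hedged usage sketch from code that includes the concern (`sub_account` is a placeholder record):

```ruby
# Sketch only; assumes this runs in a controller/ability class that includes the concern.
root_labels = canvas_account_role_labels(:root)  # admin role labels on the root account
root_labels.include?("Account Admin")            # the check canvas_super_user? performs above

canvas_account_role_labels(sub_account)          # any object responding to #canvas_id
canvas_account_role_labels(12345)                # or a raw Canvas account id
```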
data/lib/canvas_sync/concerns/account/base.rb
ADDED

@@ -0,0 +1,15 @@
+module CanvasSync::Concerns
+  module Account
+    module Base
+      extend ActiveSupport::Concern
+
+      CanvasSync::Record.define_feature self, default: true
+
+      class_methods do
+        def root_account
+          where(canvas_parent_account_id: nil).last
+        end
+      end
+    end
+  end
+end
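The new concern adds a `root_account` class method (the account row with no `canvas_parent_account_id`). A sketch of how a host app's model might pick it up, assuming an `Account` model backed by canvas_sync's accounts table (how the concern is wired in by `CanvasSync::Record.define_feature` is not shown here):

```ruby
# Assumption: the host app defines an Account model and includes the concern itself.
class Account < ApplicationRecord
  include CanvasSync::Concerns::Account::Base
end

Account.root_account # => the Account whose canvas_parent_account_id is nil
```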
data/lib/canvas_sync/job_batches/batch.rb
CHANGED

@@ -28,6 +28,7 @@ module CanvasSync
       delegate :redis, to: :class

       BID_EXPIRE_TTL = 90.days.to_i
+      INDEX_ALL_BATCHES = false
       SCHEDULE_CALLBACK = RedisScript.new(Pathname.new(__FILE__) + "../schedule_callback.lua")
       BID_HIERARCHY = RedisScript.new(Pathname.new(__FILE__) + "../hier_batch_ids.lua")

@@ -202,7 +203,7 @@ module CanvasSync

       def flush_pending_attrs
         super
-        redis.zadd("batches", created_at, bid)
+        redis.zadd("batches", created_at, bid) if INDEX_ALL_BATCHES
       end

       private

@@ -323,8 +324,13 @@ module CanvasSync
         end

         queue ||= "default"
-        parent_bid = !parent_bid || parent_bid.empty? ? nil : parent_bid
+        parent_bid = !parent_bid || parent_bid.empty? ? nil : parent_bid # Basically parent_bid.blank?
+
+        # Internal callback params. If this is present, we're trying to enqueue callbacks for a callback, which is a special case that
+        # indicates that the callback completed and we need to close the triggering batch (which is in a done-but-not-cleaned state)
         callback_params = JSON.parse(callback_params) if callback_params.present?
+
+        # User-configured parameters/arguments to pass to the callback
         callback_args = callbacks.reduce([]) do |memo, jcb|
           cb = JSON.load(jcb)
           memo << [cb['callback'], event.to_s, cb['opts'], bid, parent_bid]

@@ -341,6 +347,8 @@ module CanvasSync
         if should_schedule_batch
           logger.debug {"Enqueue callback bid: #{bid} event: #{event} args: #{callback_args.inspect}"}

+          # Create a new Batch to handle the callbacks and add it to the _parent_ batch
+          # (this ensures that the parent's lifecycle status can't change until the child's callbacks are done)
           with_batch(parent_bid) do
             cb_batch = self.new
             cb_batch.callback_params = {

@@ -357,9 +365,12 @@ module CanvasSync
         end

         if callback_params.present?
+          # This is a callback for a callback. Passing `origin` to the Finalizer allows it to also cleanup the original/callback-triggering batch
           opts['origin'] = callback_params
         end

+        # The Finalizer marks this batch as complete, bumps any necessary counters, cleans up this Batch _if_ no callbacks were scheduled,
+        # and enqueues parent-Batch callbacks if needed.
         logger.debug {"Run batch finalizer bid: #{bid} event: #{event} args: #{callback_args.inspect}"}
         finalizer = Batch::Callback::Finalize.new
         status = Status.new bid

@@ -398,6 +409,39 @@ module CanvasSync
           cleanup_redis(bid)
         end

+        # Internal method to cleanup a Redis Hash and related keys
+        def cleanup_redis_index_for(key, suffixes = [""])
+          if r.hget(k, "created_at").present?
+            r.multi do |r|
+              suffixes.each do |suffix|
+                r.expire(key + suffix, BID_EXPIRE_TTL)
+              end
+            end
+            false
+          else
+            r.multi do |r|
+              suffixes.each do |suffix|
+                r.unlink(key + suffix)
+              end
+            end
+            true
+          end
+        end
+
+        # Administrative/console method to cleanup expired batches from the WebUI
+        def cleanup_redis_index!
+          suffixes = ["", "-callbacks-complete", "-callbacks-success", "-failed", "-dead", "-batches-success", "-batches-complete", "-batches-failed", "-bids", "-jids", "-pending_callbacks"]
+
+          cleanup_index = ->(index) {
+            r.zrangebyscore(index, "0", BID_EXPIRE_TTL.seconds.ago.to_i).each do |bid|
+              r.zrem(index, bid) if cleanup_redis_index_for("BID-#{bid}", suffixes)
+            end
+          }
+
+          cleanup_index.call("BID-ROOT-bids")
+          cleanup_index.call("batches")
+        end
+
         def redis(&blk)
           return RedisProxy.new unless block_given?

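Two behavioural changes sit in this file: the global `"batches"` index is now only written when `INDEX_ALL_BATCHES` is enabled, and a `cleanup_redis_index!` helper can prune index entries whose backing `BID-*` hashes have expired. A hedged console sketch, assuming these land as class-level helpers alongside `cleanup_redis`:

```ruby
# Administrative sketch only (e.g. from a Rails console or a scheduled task).
CanvasSync::JobBatches::Batch.cleanup_redis_index!
```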
data/lib/canvas_sync/job_batches/callback.rb
CHANGED

@@ -112,10 +112,13 @@ module CanvasSync
         end

         if parent_bid && !(pending.to_i.zero? && children == success)
-          #
-          # if the parent is complete we trigger
-          #
-          #
+          # If batch was not successfull check and see if its parent is complete
+          # if the parent is complete we trigger its complete callback.
+          #
+          # Otherwise, we don't want to to trigger the parent's :complete here (and
+          #   instead opt to have success tigger parent :complete) - this
+          #   allows the success callback to add additional jobs to the parent batch
+          #   before triggering :complete.

           Batch.logger.debug {"Finalize parent complete bid: #{parent_bid}"}
           _, _, complete, pending, children, failure = Batch.redis do |r|
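The rewritten comment spells out the ordering guarantee: a batch's `:success` callback fires before its parent's `:complete`, so a success handler can still append work to the parent. A sketch of the callback convention used elsewhere in this diff (`MyCallbacks` and the option values are placeholders; handlers are referenced as `"Class.method"` strings and, following the `cleanup_redis(status, options)` signature below, receive `(status, options)`):

```ruby
# Sketch only; mirrors the batch.on(:success, "Class.method", opts) pattern
# seen in managed_batch_job.rb later in this diff.
class MyCallbacks
  def self.batch_succeeded(status, options)
    # Runs before the parent batch's :complete, so extra jobs can still be
    # attached to the parent batch here.
  end
end

batch = CanvasSync::JobBatches::Batch.new
batch.on(:success, "MyCallbacks.batch_succeeded", tenant: "example")
```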
data/lib/canvas_sync/job_batches/chain_builder.rb
CHANGED

@@ -8,12 +8,13 @@ module CanvasSync
      def initialize(base_type = SerialBatchJob)
        if base_type.is_a?(Hash)
          @base_job = base_type
+          @base_job[:args] ||= @base_job[:parameters] || []
+          @base_job[:kwargs] ||= {}
        else
-          @base_job =
-            job: base_type,
-            parameters: [],
-          }
+          @base_job = build_job_hash(base_type)
        end
+
+        self.class.get_chain_parameter(base_job)
      end

      def process!

@@ -25,23 +26,25 @@ module CanvasSync
        if key.is_a?(Class)
          get_sub_chain(key)
        else
+          # Legacy Support
+          key = :args if key == :parameters
+
          @base_job[key]
        end
      end

-      def
-
-      end
+      def args; return self[:args]; end
+      def kwargs; return self[:kwargs]; end

      def <<(new_job)
        insert_at(-1, new_job)
      end

-      def insert_at(position, new_jobs, *args, &blk)
+      def insert_at(position, new_jobs, *args, **kwargs, &blk)
        chain = self.class.get_chain_parameter(base_job)
        if new_jobs.is_a?(Class) || new_jobs.is_a?(String)
-          new_jobs = build_job_hash(new_jobs,
-        elsif args.
+          new_jobs = build_job_hash(new_jobs, args: args, kwargs: kwargs, &blk)
+        elsif args.count > 0 || kwargs.count > 0
          raise "Unexpected number of arguments"
        end
        new_jobs = [new_jobs] unless new_jobs.is_a?(Array)

@@ -51,8 +54,7 @@ module CanvasSync
      def insert(new_jobs, *args, **kwargs, &blk)
        if new_jobs.is_a?(Class) || new_jobs.is_a?(String)
          job_kwargs = kwargs.except(*VALID_PLACEMENT_PARAMETERS)
-          args
-          new_jobs = build_job_hash(new_jobs, *args, &blk)
+          new_jobs = build_job_hash(new_jobs, args: args, kwargs: job_kwargs, &blk)
          kwargs = kwargs.slice(*VALID_PLACEMENT_PARAMETERS)
        else
          invalid_params = kwargs.keys - VALID_PLACEMENT_PARAMETERS

@@ -124,14 +126,6 @@ module CanvasSync
        end
      end

-      # Legacy Support
-      def merge_options(job, options)
-        find_matching_jobs(job).each do |j, parent, index|
-          j[:options] ||= {}
-          j[:options].deep_merge!(options)
-        end
-      end
-
      def apply_block(&blk)
        return unless blk.present?
        instance_exec(&blk)

@@ -139,10 +133,11 @@ module CanvasSync

      private

-      def build_job_hash(job,
+      def build_job_hash(job, args: [], kwargs: {}, &blk)
        hsh = {
          job: job,
-
+          args: args,
+          kwargs: kwargs,
        }
        self.class.new(hsh).apply_block(&blk) if blk.present?
        hsh

@@ -180,9 +175,17 @@ module CanvasSync
      end

      class << self
-
+        # Support builder syntaxt/DSL
+        # Chain.build(ConcurrentBatchJob) do
+        #   insert(SomeJob, arg1, kwarg: 1)
+        #   insert(SerialBatchJob) do
+        #     insert(SomeJob, arg1, kwarg: 1)
+        #   end
+        # end
+        def build(job, *args, **kwargs, &blk)
          new(job).tap do |ch|
-            ch[:
+            ch.base_job[:args] = args
+            ch.base_job[:kwargs] = kwargs
            ch.apply_block(&blk)
          end
        end

@@ -205,8 +208,11 @@ module CanvasSync
        end

          key = _job_type_definitions[job_def[:job].to_s][:chain_parameter]
-
-
+          if key.is_a?(Numeric)
+            job_def[:args][key] ||= []
+          else
+            job_def[:kwargs][key] ||= []
+          end
        end

        # TODO: Add a Chain progress web View

@@ -219,18 +225,19 @@ module CanvasSync
        # > [DONE] Use a Lua script to find child batch IDs. Support max_depth, items_per_depth, top_depth_slice parameters
        def enqueue_job(job_def)
          job_class = job_def[:job].constantize
-
+          job_args = job_def[:args] || job_def[:parameters] || []
+          job_kwargs = job_def[:kwargs] || {}

          # Legacy Support
          if job_def[:options]
-
-
+            job_args << {} unless job_args[-1].is_a?(Hash)
+            job_args[-1].merge!(job_def[:options])
          end

          if job_class.respond_to? :perform_async
-            job_class.perform_async(*
+            job_class.perform_async(*job_args, **job_kwargs)
          else
-            job_class.perform_later(*
+            job_class.perform_later(*job_args, **job_kwargs)
          end
        end
      end

@@ -238,48 +245,5 @@ module CanvasSync

    ChainBuilder.register_chain_job(ConcurrentBatchJob, 0)
    ChainBuilder.register_chain_job(SerialBatchJob, 0)
-
-    class ParamsMapper
-      def initialize(backend)
-        @backend = backend
-      end
-
-      def [](key)
-        get_parameter(key)
-      end
-
-      def []=(key, value)
-        set_parameter(key, value)
-      end
-
-      def to_a
-        @backend
-      end
-
-      private
-
-      def get_parameter(key)
-        if key.is_a?(Numeric)
-          @backend[key]
-        else
-          kwargs = @backend.last
-          return nil unless kwargs.is_a?(Hash)
-          kwargs[key]
-        end
-      end
-
-      def set_parameter(key, value)
-        if key.is_a?(Numeric)
-          @backend[key] = value
-        else
-          kwargs = @backend.last
-          unless kwargs.is_a?(Hash)
-            kwargs = {}
-            @backend.push(kwargs)
-          end
-          kwargs[key] = value
-        end
-      end
-    end
  end
end
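The new `build` class method (documented in the comment above as `Chain.build`) gives ChainBuilder a small DSL: nested `insert` calls now take positional and keyword arguments directly. Expanding that comment into a sketch — `SomeJob`, `OtherJob`, and the argument values are placeholders:

```ruby
# Sketch expanded from the DSL comment in the diff above.
chain = CanvasSync::JobBatches::ChainBuilder.build(CanvasSync::JobBatches::ConcurrentBatchJob) do
  insert(SomeJob, 42, dry_run: true)
  insert(CanvasSync::JobBatches::SerialBatchJob) do
    insert(OtherJob, "term_1", dry_run: true)
  end
end
chain.process!
```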
data/lib/canvas_sync/job_batches/jobs/concurrent_batch_job.rb
CHANGED

@@ -3,19 +3,19 @@ require_relative './base_job'
 module CanvasSync
   module JobBatches
     class ConcurrentBatchJob < BaseJob
-      def self.make_batch(sub_jobs,
+      def self.make_batch(sub_jobs, **kwargs, &blk)
         ManagedBatchJob.make_batch(
           sub_jobs,
+          **kwargs,
           ordered: false,
           concurrency: true,
-
-          desc_prefix: 'ConcurrentBatchJob',
+          desc_prefix: 'ConcurrentBatchJob: ',
           &blk
         )
       end

-      def perform(sub_jobs,
-        self.class.make_batch(sub_jobs,
+      def perform(sub_jobs, **kwargs)
+        self.class.make_batch(sub_jobs, **kwargs)
       end
     end
   end
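With the widened signature, extra keywords (for example `context:` or the new `preflight_check:`) pass straight through to `ManagedBatchJob.make_batch`, and sub-job hashes use the same `:args`/`:kwargs` shape as chains. A hedged sketch (`SomeJob` and `OtherJob` are placeholders):

```ruby
# Sketch only; job hashes follow the :job/:args/:kwargs shape used throughout this diff.
sub_jobs = [
  { job: SomeJob,  args: [1], kwargs: {} },
  { job: OtherJob, args: [2], kwargs: { dry_run: true } },
]

CanvasSync::JobBatches::ConcurrentBatchJob.make_batch(sub_jobs, context: { tenant: "example" })
```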
data/lib/canvas_sync/job_batches/jobs/managed_batch_job.rb
CHANGED

@@ -3,7 +3,7 @@ require_relative './base_job'
 module CanvasSync
   module JobBatches
     class ManagedBatchJob < BaseJob
-      def self.make_batch(sub_jobs, ordered: true, concurrency: nil, context: nil, desc_prefix: nil, &blk)
+      def self.make_batch(sub_jobs, ordered: true, concurrency: nil, context: nil, preflight_check: nil, desc_prefix: nil, &blk)
         desc_prefix ||= ''

         if concurrency == 0 || concurrency == nil || concurrency == true

@@ -13,6 +13,7 @@ module CanvasSync
         end

         root_batch = Batch.new
+        man_batch_id = nil

         if concurrency < sub_jobs.count
           man_batch_id = SecureRandom.urlsafe_base64(10)

@@ -22,6 +23,7 @@ module CanvasSync
             r.hset("MNGBID-#{man_batch_id}", "root_bid", root_batch.bid)
             r.hset("MNGBID-#{man_batch_id}", "ordered", ordered ? 1 : 0)
             r.hset("MNGBID-#{man_batch_id}", "concurrency", concurrency)
+            r.hset("MNGBID-#{man_batch_id}", "preflight_check", preflight_check) if preflight_check.present?
             r.expire("MNGBID-#{man_batch_id}", Batch::BID_EXPIRE_TTL)

             mapped_sub_jobs = sub_jobs.each_with_index.map do |j, i|

@@ -48,12 +50,14 @@ module CanvasSync

         blk.call(ManagedBatchProxy.new(root_batch)) if blk.present?

-        root_batch.description = "#{desc_prefix}
+        root_batch.description = "#{desc_prefix}#{root_batch.description || 'Root'}"
+
+        root_batch.context["managed_batch_bid"] = man_batch_id if man_batch_id

         if concurrency < sub_jobs.count
           root_batch.jobs {}
           concurrency.times do
-            perform_next_sequence_job(man_batch_id)
+            perform_next_sequence_job(man_batch_id, skip_preflight: true)
           end
         else
           root_batch.jobs do

@@ -62,10 +66,12 @@ module CanvasSync
             end
           end
         end
+
+        root_batch
       end

-      def perform(sub_jobs,
-        self.class.make_batch(sub_jobs,
+      def perform(sub_jobs, **kwargs)
+        self.class.make_batch(sub_jobs, **kwargs)
       end

       def self.cleanup_redis(status, options)

@@ -85,37 +91,63 @@ module CanvasSync

       protected

-      def self.perform_next_sequence_job(man_batch_id)
-        root_bid, ordered = Batch.redis do |r|
+      def self.perform_next_sequence_job(man_batch_id, skip_preflight: false)
+        root_bid, ordered, preflight_check = Batch.redis do |r|
           r.multi do |r|
             r.hget("MNGBID-#{man_batch_id}", "root_bid")
             r.hget("MNGBID-#{man_batch_id}", "ordered")
+            r.hget("MNGBID-#{man_batch_id}", "preflight_check")
           end
         end

-
-
-
-
-            r.lpop("MNGBID-#{man_batch_id}-jobs")
+        if !skip_preflight && preflight_check.present?
+          if preflight_check.include?(".")
+            clazz, method_name = preflight_check.split('.')
+            clazz = clazz.constantize
           else
-
+            clazz = Object
+            method_name = preflight_check
           end
+          preflight_check = ->(*args) { clazz.send(method_name, *args) }
+        else
+          preflight_check = ->(*args) { true }
         end

-
+        ordered = CanvasSync::MiscHelper.to_boolean(ordered)
+
+        loop do
+          next_job_json = Batch.redis do |r|
+            if ordered
+              r.lpop("MNGBID-#{man_batch_id}-jobs")
+            else
+              r.spop("MNGBID-#{man_batch_id}-jobs")
+            end
+          end

-
-        next_job = ::ActiveJob::Arguments.deserialize(next_job)[0]
+          break unless next_job_json.present?

-
-
-
-
-
-
+          next_job = JSON.parse(next_job_json)
+          next_job = ::ActiveJob::Arguments.deserialize(next_job)[0]
+
+          preflight_result = preflight_check.call(next_job)
+          if preflight_result == :abort
+            cleanup_redis(nil, { "managed_batch_id" => man_batch_id })
+            break
+          elsif !preflight_check
+            next
+          end
+
+          Batch.new(root_bid).jobs do
+            Batch.new.tap do |batch|
+              batch.description = "Managed Batch Fiber (#{man_batch_id})"
+              batch.on(:success, "#{self.to_s}.job_succeeded_callback", managed_batch_id: man_batch_id)
+              batch.jobs do
+                ChainBuilder.enqueue_job(next_job)
+              end
             end
           end
+
+          break
         end
       end

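The new `preflight_check:` option stores a method reference (a `"Module.method"` string, or a bare method name resolved against `Object`) that is called with each dequeued job definition before it is enqueued; returning `:abort` cleans up and stops the managed sequence, while any other result lets the job run. A hedged sketch (`SyncGate` and `SomeJob` are placeholders):

```ruby
# Sketch only; the preflight receiver and the jobs are hypothetical.
class SyncGate
  def self.allow?(job_def)
    return :abort if Rails.cache.read("halt_syncs")
    true
  end
end

sub_jobs = 3.times.map { |i| { job: SomeJob, args: [i], kwargs: {} } }

CanvasSync::JobBatches::ManagedBatchJob.make_batch(
  sub_jobs,
  ordered: true,
  concurrency: 1,
  preflight_check: "SyncGate.allow?"
)
```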
data/lib/canvas_sync/job_batches/jobs/serial_batch_job.rb
CHANGED

@@ -3,19 +3,19 @@ require_relative './base_job'
 module CanvasSync
   module JobBatches
     class SerialBatchJob < BaseJob
-      def self.make_batch(sub_jobs,
+      def self.make_batch(sub_jobs, **kwargs, &blk)
         ManagedBatchJob.make_batch(
           sub_jobs,
+          **kwargs,
           ordered: true,
           concurrency: false,
-
-          desc_prefix: 'SerialBatchJob',
+          desc_prefix: 'SerialBatchJob: ',
           &blk
         )
       end

-      def perform(sub_jobs,
-        self.class.make_batch(sub_jobs,
+      def perform(sub_jobs, **kwargs)
+        self.class.make_batch(sub_jobs, **kwargs)
       end
     end
   end
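Because `make_batch` now returns the root batch (the `root_batch` return value added in managed_batch_job.rb above), a serial batch can be created and then tracked by its id. A hedged sketch (`StepOneJob` and `StepTwoJob` are placeholders):

```ruby
# Sketch only; relies on make_batch returning the root Batch, as added in this release.
steps = [
  { job: StepOneJob, args: [], kwargs: {} },
  { job: StepTwoJob, args: [], kwargs: {} },
]

batch = CanvasSync::JobBatches::SerialBatchJob.make_batch(steps, context: { tenant: "example" })
batch.bid # track or log the batch id
```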