canvas_sync 0.18.12 → 0.19.0.beta2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,7 +3,7 @@ module CanvasSync
   class Pool
     include RedisModel

-    HINCR_MAX = RedisScript.new(Pathname.new(__FILE__) + "../hincr_max.lua")
+    POOL_REFILL = RedisScript.new(Pathname.new(__FILE__) + "../pool_refill.lua")

     attr_reader :pid
     redis_attr :description
@@ -36,7 +36,7 @@ module CanvasSync
       add_jobs([job_desc])
     end

-    def add_jobs(job_descs)
+    def add_jobs(job_descs, skip_refill: false)
       job_descs.each do |job_desc|
         wrapper = Batch.new
         wrapper.description = "Pool Job Wrapper (PID: #{pid})"
@@ -52,7 +52,7 @@ module CanvasSync

         push_job_to_pool(job_desc)
       end
-      refill_allotment
+      refill_allotment unless skip_refill
     end

     def keep_open!
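
The new skip_refill keyword lets a caller queue several groups of jobs and pay for a single refill at the end. A minimal usage sketch (the pool variable and job groups are illustrative, not taken from this diff):

    # Hypothetical usage: defer the allotment refill until the last call.
    pool.add_jobs(first_group, skip_refill: true)  # jobs are wrapped and pushed, no refill yet
    pool.add_jobs(second_group)                    # refill_allotment runs once here
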
@@ -64,19 +64,13 @@ module CanvasSync
           let_close!
         end
       else
-        redis.hset(redis_key, 'keep_open', true)
+        redis.hset(redis_key, 'keep_open', 'true')
       end
     end

     def let_close!
-      _, active_count = redis.multi do |r|
-        r.hset(redis_key, 'keep_open', false)
-        r.hincrby(redis_key, "active_count", 0)
-      end
-
-      if active_count == 0 && pending_count == 0
-        cleanup_redis if clean_when_empty
-      end
+      redis.hset(redis_key, 'keep_open', 'false')
+      cleanup_if_empty
     end

     def cleanup_redis
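
The flag is now written as the string 'true' because Redis hash values always read back as strings; the new cleanup_if_empty compares against the strings 'true' and 'false'. A small sketch of the round-trip, assuming a redis-rb connection:

    redis.hset(redis_key, 'keep_open', 'true')
    redis.hget(redis_key, 'keep_open')  # => "true" -- always a String, never a Boolean
    # hence the string comparisons: keep_open == 'true' || clean_when_empty == 'false'
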
@@ -90,8 +84,26 @@ module CanvasSync
       end
     end

-    def active_count
-      redis.hincrby(redis_key, "active_count", 0)
+    def cleanup_if_empty
+      self.order
+
+      activec, pactivec, pendingc, clean_when_empty, keep_open = redis.multi do |r|
+        r.scard("#{redis_key}-active")
+        r.hincrby(redis_key, "_active_count", 0)
+        pending_count(r)
+        r.hget(redis_key, 'clean_when_empty')
+        r.hget(redis_key, 'keep_open')
+      end
+
+      return if keep_open == 'true' || clean_when_empty == 'false'
+
+      if activec <= 0 && pactivec <= 0 && pendingc <= 0
+        cleanup_redis
+      end
+    end
+
+    def active_count(r = redis)
+      r.scard("#{redis_key}-active") + r.hincrby(redis_key, "_active_count", 0)
     end

     def pending_count(r = redis)
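
Active-job accounting is now split across two structures: the "#{redis_key}-active" set holds the wrapper-batch BIDs of jobs that have actually been enqueued, while the _active_count hash field holds slots temporarily reserved by pool_refill.lua before those jobs land in the set. A sketch of the combined read (key names taken from the hunk above):

    running  = redis.scard("#{redis_key}-active")           # wrapper batches already enqueued
    reserved = redis.hget(redis_key, "_active_count").to_i  # slots reserved by the Lua script
    active   = running + reserved                           # what active_count now returns
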
@@ -108,24 +120,9 @@ module CanvasSync
     end

     def job_checked_in(status, options)
-      active_count, pending_count = redis do |r|
-        return unless r.exists?(redis_key)
-
-        # Make sure this is loaded outside of the pipeline
-        self.order
-
-        redis.multi do |r|
-          r.hincrby(redis_key, "active_count", -1)
-          self.pending_count(r)
-        end
-      end
-
-      added_count = refill_allotment
-      if active_count == 0 && added_count == 0 && pending_count == 0
-        if clean_when_empty && redis.hget(redis_key, 'keep_open') != 'true'
-          cleanup_redis
-        end
-      end
+      redis.srem("#{redis_key}-active", status.bid)
+      active_count = refill_allotment
+      cleanup_if_empty unless active_count > 0
     end

     def self.job_checked_in(status, options)
@@ -133,6 +130,14 @@ module CanvasSync
       from_pid(pid).job_checked_in(status, options)
     end

+    # Administrative/console method to cleanup expired pools from the WebUI
+    def self.cleanup_redis_index!
+      suffixes = ["", "-active", "-jobs"]
+      r.zrangebyscore("pools", "0", Batch::BID_EXPIRE_TTL.seconds.ago.to_i).each do |pid|
+        r.zrem("pools", pid) if Batch.cleanup_redis_index_for("POOLID-#{pid}", suffixes)
+      end
+    end
+
     protected

     def redis_key
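
As the comment says, this is an administrative helper for pruning pools whose keys have outlived Batch::BID_EXPIRE_TTL. A hypothetical console invocation, assuming the class lives at CanvasSync::JobBatches::Pool as the other hunks suggest:

    # Rails console / maintenance task (illustrative)
    CanvasSync::JobBatches::Pool.cleanup_redis_index!
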
@@ -140,31 +145,52 @@ module CanvasSync
     end

     def refill_allotment
-      jobs_added = 0
-      limit = concurrency.to_i
-      redis do |r|
-        current_count = 0
-        while true
-          current_count = HINCR_MAX.call(r, [redis_key], ["active_count", limit]).to_i
-          if current_count < limit
-            job_desc = pop_job_from_pool
-            if job_desc.present?
-              Batch.new(job_desc[:pool_wrapper_batch]).jobs do
-                ChainBuilder.enqueue_job(job_desc)
-              end
-              jobs_added += 1
-            else
-              r.hincrby(redis_key, "active_count", -1)
-              break
-            end
-          else
-            break
+      active_count, job_descs = POOL_REFILL.call(redis, [redis_key, "#{redis_key}-jobs", "#{redis_key}-active"], [])
+      return active_count if active_count < 0
+
+      pending_job_descs = job_descs.dup
+
+      added_jobs = []
+      failed_to_add_jobs = []
+      add_exception = nil
+
+      while pending_job_descs.count > 0
+        begin
+          job_json = pending_job_descs.shift
+          job_desc = ::ActiveJob::Arguments.deserialize(JSON.parse(job_json))[0]&.symbolize_keys
+
+          wbid = job_desc[:pool_wrapper_batch]
+
+          Batch.new(wbid).jobs do
+            ChainBuilder.enqueue_job(job_desc)
           end
+
+          added_jobs << wbid
+        rescue => ex
+          failed_to_add_jobs << job_json
+          add_exception = ex
         end
+      end
+
+      redis.multi do |r|
+        r.sadd("#{redis_key}-active", added_jobs) if added_jobs.count > 0
+        # Release reserved slots now that we've added the jobs to `-active`
+        r.hincrby(redis_key, "_active_count", -job_descs.count)
+
         r.expire(redis_key, Batch::BID_EXPIRE_TTL)
+        r.expire("#{redis_key}-active", Batch::BID_EXPIRE_TTL)
         r.expire("#{redis_key}-jobs", Batch::BID_EXPIRE_TTL)
       end
-      jobs_added
+
+      # If this happens, we end up in a bad state (as we don't try to re-add items to the pool or refill_allotment again), but
+      # this should be a _really_ rare case that should only occur if we've lost connection to Redis or something, so we're
+      # operating on the assumption that if we get here, any recovery logic will fail too
+      if add_exception.present?
+        Batch.logger.error {"Error popping jobs from Pool #{pid}: #{add_exception}"}
+        raise add_exception
+      end
+
+      active_count + added_jobs.count
     end

     def push_job_to_pool(job_desc)
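
refill_allotment now pops serialized job descriptors from Redis and deserializes them with ActiveJob::Arguments before enqueueing each one inside its wrapper batch. A sketch of one deserialized descriptor, with field names taken from this diff and values invented for illustration:

    job_desc = {
      job: "SomeSyncJob",                 # illustrative job class name
      args: [],                           # positional arguments
      kwargs: { period: "term" },         # illustrative keyword arguments
      priority: 10,                       # only meaningful for :priority pools
      pool_wrapper_batch: "SOMEBID",      # BID of the wrapper Batch created in add_jobs
    }
    Batch.new(job_desc[:pool_wrapper_batch]).jobs do
      ChainBuilder.enqueue_job(job_desc)  # same call the new refill_allotment makes
    end
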
@@ -183,13 +209,15 @@ module CanvasSync
         when :priority
           r.zadd(jobs_key, job_desc[:priority] || 0, job_json)
         end
+        r.expire(redis_key, Batch::BID_EXPIRE_TTL)
         r.expire(jobs_key, Batch::BID_EXPIRE_TTL)
       end
     end

+    # @deprecated
     def pop_job_from_pool
       jobs_key = "#{redis_key}-jobs"
-      order = self.order
+      order = self.order || 'fifo'

       job_json = case order.to_sym
       when :fifo
@@ -199,7 +227,7 @@ module CanvasSync
       when :random
         redis.spop(jobs_key)
       when :priority
-        redis.zpopmax(jobs_key)
+        redis.zpopmax(jobs_key)&.[](0)
       end

       return nil unless job_json.present?
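
The trailing &.[](0) is needed because redis-rb's zpopmax without a count returns a single [member, score] pair, or nil when the set is empty, and only the serialized member is wanted here. Illustrative return shapes:

    redis.zpopmax(jobs_key)         # => ["<serialized job>", 10.0] or nil
    redis.zpopmax(jobs_key)&.[](0)  # => "<serialized job>"         or nil
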
@@ -214,6 +242,7 @@ module CanvasSync

     def flush_pending_attrs
       super
+      redis.expire(redis_key, Batch::BID_EXPIRE_TTL)
       redis.zadd("pools", created_at, pid)
     end

@@ -0,0 +1,40 @@
+
+local poolkey = KEYS[1]
+local qkey = KEYS[2]
+local activekey = KEYS[3]
+
+if redis.call('EXISTS', poolkey) == 0 then
+  return { -1, {} } -- pool doesn't exist
+end
+
+local pool_type = redis.call('HGET', poolkey, "order")
+local allotment = tonumber(redis.call("HGET", poolkey, "concurrency"))
+local active = redis.call("SCARD", activekey) + (redis.call("HGET", poolkey, "_active_count") or 0)
+
+local pop_count = allotment - active
+
+local popped_items = {}
+
+if pop_count > 0 then
+  if pool_type == "fifo" then
+    popped_items = redis.call("LPOP", qkey, pop_count) or {}
+  elseif pool_type == "lifo" then
+    popped_items = redis.call("RPOP", qkey, pop_count) or {}
+  elseif pool_type == "random" then
+    popped_items = redis.call("SPOP", qkey, pop_count) or {}
+  elseif pool_type == "priority" then
+    local temp_items = redis.call("ZPOPMAX", qkey, pop_count) or {}
+    for i,v in ipairs(temp_items) do
+      if i % 2 == 1 then
+        table.insert(popped_items, v)
+      end
+    end
+  end
+end
+
+-- Reserve slots for these jobs while we return to Ruby and deserialize them
+-- This could also be inlined by just storing a key in the queue and storing parameters
+-- in a Hash, but this seems more efficient.
+redis.call('HINCRBY', poolkey, "_active_count", #popped_items)
+
+return { active, popped_items }
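
Ruby invokes this script through the POOL_REFILL constant from the first hunk; the script reserves slots via _active_count so a concurrent refill cannot over-fill the pool while the popped payloads are being deserialized in Ruby. (In Lua, ZPOPMAX with a count replies with a flat member/score array, which is why the i % 2 == 1 filter keeps only the members.) The call and reply shape, as used by refill_allotment above:

    active_count, popped_json = POOL_REFILL.call(
      redis,
      [redis_key, "#{redis_key}-jobs", "#{redis_key}-active"],  # KEYS[1..3]
      []                                                        # no ARGV
    )
    # active_count is -1 when the pool hash no longer exists;
    # popped_json is an Array of serialized job descriptors (members only, scores dropped)
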
@@ -1,6 +1,7 @@
 <table class="table table-striped table-bordered table-hover">
   <thead>
     <tr>
+      <th><%= t('JID') %></th>
       <th><%= t('Job Class') %></th>
       <th><%= t('Parameters') %></th>
     </tr>
@@ -8,6 +9,7 @@

   <% @jobs.each do |job_desc| %>
     <tr>
+      <td><%= job_desc[:jid] %></td>
       <td><%= job_desc['job'] %></td>
       <td>
         <code class="code-wrap">
@@ -60,6 +60,9 @@
       <th><%= t('Job Class') %></th>
       <th><%= t('Parameters') %></th>
       <th><%= t('Wrapper Batch BID') %></th>
+      <% if @pool.order == 'priority' %>
+        <th><%= t('Priority') %></th>
+      <% end %>
     </tr>
   </thead>

@@ -68,10 +71,16 @@
     <td><%= job_desc['job'] %></td>
     <td>
       <code class="code-wrap">
-        <div class="args-extended"><%= job_desc['parameters'].to_json %></div>
+        <div class="args-extended">
+          <%= job_desc['args']&.to_json %>
+          <%= job_desc['kwargs']&.to_json %>
+        </div>
       </code>
     </td>
     <td><a href="<%= root_path %>batches/<%= job_desc['pool_wrapper_batch'] %>"><%= job_desc['pool_wrapper_batch'] %></a></td>
+    <% if @pool.order == 'priority' %>
+      <td><%= job_desc['priority'] %></td>
+    <% end %>
   </tr>
 <% end %>
 </table>
@@ -45,7 +45,9 @@ module CanvasSync::JobBatches::Sidekiq
       @sub_batches = @sub_batches.map {|b, score| CanvasSync::JobBatches::Batch.new(b) }

       @current_jobs_page, @total_jobs_size, @jobs = page("BID-#{@batch.bid}-jids", params['job_page'], @count)
-      @jobs = @jobs.map {|jid, score| jid }
+      @jobs = @jobs.map do |jid, score|
+        { jid: jid, }
+      end

       erb(get_template(:batch))
     end
@@ -7,9 +7,9 @@ module CanvasSync::Jobs
       response = canvas_sync_client.get(progress_url)
       status = kwargs[:status_key].present? ? response[kwargs[:status_key]] : response['workflow_state'] || response['status']

-      if %w[completed complete].include? status
+      if %w[completed complete imported imported_with_messages].include? status
         InvokeCallbackWorker.perform_later(build_next_job(next_job, kwargs, response)) if next_job
-      elsif %w[failed error].include? status
+      elsif %w[failed error failed_with_messages].include? status
         if kwargs[:on_failure].is_a?(Hash)
           InvokeCallbackWorker.perform_later(build_next_job(kwargs[:on_failure], kwargs, response))
         else
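
The added states mirror Canvas SIS-import progress payloads, where a finished import reports imported or imported_with_messages and a failed one reports failed or failed_with_messages. An illustrative payload run through the status extraction above:

    response = { "workflow_state" => "imported_with_messages", "progress" => 100 }  # illustrative
    status = response['workflow_state'] || response['status']
    # => "imported_with_messages" -- now treated as success, so the next job is invoked
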
@@ -33,40 +33,8 @@ module CanvasSync::Jobs

   # This is a separate job so that, if it fails and a retry is triggered, it doesn't query the API needlessly
   class InvokeCallbackWorker < ActiveJob::Base
-    # rubocop:disable Metrics/PerceivedComplexity
     def perform(job)
-      job = job.symbolize_keys
-
-      params = job[:args] || []
-      params << job[:kwargs].symbolize_keys if job[:kwargs]
-      # params[-1] = params[-1].symbolize_keys if params[-1].is_a?(Hash)
-
-      if job[:model]
-        model_class = load_constant(job[:model])
-        find_by = job[:find_by]
-        target = find_by.is_a?(Hash) ? model_class.find_by(find_by) : model_class.find_by(id: find_by)
-        target.send(job[:method], *params)
-      elsif job[:class]
-        target = load_constant(job[:class])
-        target.send(job[:method], *params)
-      elsif job[:instance_of]
-        target = load_constant(job[:instance_of]).new
-        target.send(job[:method], *params)
-      elsif job[:job]
-        job_class = load_constant(job[:job])
-        job_class = job_class.set(job[:options]) if job[:options].present?
-        if job_class < ActiveJob::Base
-          job_class.perform_later(*params)
-        else
-          job_class.perform_async(*params)
-        end
-      end
-    end
-    # rubocop:enable Metrics/PerceivedComplexity
-
-    def load_constant(const)
-      const = const.constantize if const.is_a?(String)
-      const
+      CanvasSync::MiscHelper.invoke_task(job)
     end
   end
 end
@@ -11,5 +11,53 @@ module CanvasSync
        ActiveRecord::Type::Boolean.new.deserialize(v)
      end
    end
+
+    def self.invoke_task(job)
+      job = job.symbolize_keys
+
+      job_args = job[:args] || job[:parameters] || []
+      job_kwargs = job[:kwargs] || {}
+
+      if (mthd = job[:method]) && !(job[:class] || job[:instance_of] || job[:model])
+        if mthd.include?('#')
+          clazz, method = mthd.split("#")
+          job[:instance_of] = clazz
+          job[:method] = method
+        elsif mthd.include?('.')
+          clazz, method = mthd.split(".")
+          job[:class] = clazz
+          job[:method] = method
+        end
+      end
+
+      if job[:model]
+        # TODO Support globalid
+        model_class = load_constant(job[:model])
+        find_by = job[:find_by]
+        target = find_by.is_a?(Hash) ? model_class.find_by(find_by) : model_class.find_by(id: find_by)
+        target.send(job[:method], *job_args, **job_kwargs)
+      elsif job[:class]
+        target = load_constant(job[:class])
+        target.send(job[:method], *job_args, **job_kwargs)
+      elsif job[:instance_of]
+        target = load_constant(job[:instance_of]).new
+        target.send(job[:method], *job_args, **job_kwargs)
+      elsif job[:job]
+        job_class = load_constant(job[:job])
+        job_class = job_class.set(job[:options]) if job[:options].present?
+        if job_class < ActiveJob::Base
+          job_class.perform_later(*job_args, **job_kwargs)
+        else
+          job_args << job_kwargs.symbolize_keys if job_kwargs
+          # job_args[-1] = job_args[-1].symbolize_keys if job_args[-1].is_a?(Hash)
+          job_class.perform_async(*job_args)
+        end
+      end
+    end
+
+    def self.load_constant(const)
+      const = const.constantize if const.is_a?(String)
+      const
+    end
  end
 end
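
invoke_task accepts several descriptor forms; the new "Class.method" / "Class#method" shorthand expands into the :class / :instance_of forms handled below it. Hypothetical descriptors (class, method, and argument names are illustrative):

    CanvasSync::MiscHelper.invoke_task(method: "SomeMailer.notify", args: ["ok"])           # class-level call
    CanvasSync::MiscHelper.invoke_task(method: "ReportBuilder#generate", kwargs: { id: 1 }) # new instance, then call
    CanvasSync::MiscHelper.invoke_task(model: "Account", find_by: 42, method: :touch)       # ActiveRecord lookup
    CanvasSync::MiscHelper.invoke_task(job: "SomeJob", args: [1, 2])                        # enqueues the job
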
@@ -1,3 +1,3 @@
 module CanvasSync
-  VERSION = "0.18.12".freeze
+  VERSION = "0.19.0.beta2".freeze
 end
data/lib/canvas_sync.rb CHANGED
@@ -193,8 +193,7 @@ module CanvasSync

       term_parent_chain = current_chain

-      per_term_chain = JobBatches::ChainBuilder.new(model_job_map[:terms])
-      per_term_chain.params[:term_scope] = term_scope
+      per_term_chain = JobBatches::ChainBuilder.build(model_job_map[:terms], term_scope: term_scope)
       current_chain = per_term_chain

       term_scoped_models.each do |mdl|
@@ -226,8 +225,6 @@ module CanvasSync
       globals: {},
       &blk
     )
-      root_chain = JobBatches::ChainBuilder.new(CanvasSync::Jobs::BeginSyncChainJob)
-
       global_options = {
         legacy_support: legacy_support,
         updated_after: updated_after,
@@ -237,11 +234,7 @@ module CanvasSync
       global_options[:account_id] = account_id if account_id.present?
       global_options.merge!(globals) if globals

-      root_chain.params[1] = global_options
-
-      root_chain.apply_block(&blk)
-
-      root_chain
+      JobBatches::ChainBuilder.build(CanvasSync::Jobs::BeginSyncChainJob, [], global_options, &blk)
     end

     def group_by_job_options(model_list, options_hash, only_split: nil, default_key: :provisioning)
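
Both call sites now go through ChainBuilder.build, which constructs the chain, assigns its parameters, and applies the configuration block in one step instead of mutating params on a ChainBuilder instance. A before/after sketch built from the lines above:

    # 0.18.x style (removed above)
    root_chain = JobBatches::ChainBuilder.new(CanvasSync::Jobs::BeginSyncChainJob)
    root_chain.params[1] = global_options
    root_chain.apply_block(&blk)
    root_chain

    # 0.19.x style
    JobBatches::ChainBuilder.build(CanvasSync::Jobs::BeginSyncChainJob, [], global_options, &blk)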