canvas_sync 0.17.2 → 0.17.3.beta1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,7 +17,7 @@ module CanvasSync
17
17
  def local_bid
18
18
  bid = @bid_stack[-1]
19
19
  while bid.present?
20
- bhash = reolve_hash(bid)
20
+ bhash = resolve_hash(bid)
21
21
  return bid if bhash
22
22
  bid = get_parent_bid(bid)
23
23
  end
@@ -49,7 +49,7 @@ module CanvasSync
49
49
  def [](key)
50
50
  bid = @bid_stack[-1]
51
51
  while bid.present?
52
- bhash = reolve_hash(bid)
52
+ bhash = resolve_hash(bid)
53
53
  return bhash[key] if bhash&.key?(key)
54
54
  bid = get_parent_bid(bid)
55
55
  end
@@ -94,7 +94,7 @@ module CanvasSync
94
94
  private
95
95
 
96
96
  def get_parent_hash(bid)
97
- reolve_hash(get_parent_bid(bid)).freeze
97
+ resolve_hash(get_parent_bid(bid)).freeze
98
98
  end
99
99
 
100
100
  def get_parent_bid(bid)
@@ -105,13 +105,15 @@ module CanvasSync
105
105
  if index >= 0
106
106
  @bid_stack[index]
107
107
  else
108
- pbid = Batch.redis { |r| r.hget("BID-#{bid}", "parent_bid") }
108
+ pbid = Batch.redis do |r|
109
+ r.hget("BID-#{bid}", "parent_bid") || r.hget("BID-#{bid}", "callback_for")
110
+ end
109
111
  @bid_stack.unshift(pbid)
110
112
  pbid
111
113
  end
112
114
  end
113
115
 
114
- def reolve_hash(bid)
116
+ def resolve_hash(bid)
115
117
  return nil unless bid.present?
116
118
  return @hash_map[bid] if @hash_map.key?(bid)
117
119
 
@@ -137,6 +139,7 @@ module CanvasSync
137
139
  end
138
140
 
139
141
  def load_all
142
+ resolve_hash(@bid_stack[0]).freeze
140
143
  while @bid_stack[0].present?
141
144
  get_parent_hash(@bid_stack[0])
142
145
  end
-- Atomically increment hash field ARGV[1] of KEYS[1], but only while its
-- current value is below the cap ARGV[2]. Returns the pre-increment value
-- (0 when the field did not yet exist).
local r = redis.call('HGET', KEYS[1], ARGV[1])
-- HGET yields a Lua string; compare numerically. A raw string comparison
-- is lexicographic (e.g. "9" < "10" is false), which would break the cap
-- as soon as the counter reaches double digits.
if r == false or tonumber(r) < tonumber(ARGV[2]) then
  redis.call('HINCRBY', KEYS[1], ARGV[1], 1)
end
return r or 0
require_relative './base_job'

module CanvasSync
  module JobBatches
    # Runs a set of sub-jobs with a bounded number of parallel "fibers".
    # Job descriptions are staged in Redis (list or set, depending on
    # +ordered+); each fiber is a Batch whose :success callback pulls and
    # enqueues the next job, so at most +concurrency+ jobs run at once.
    class ManagedBatchJob < BaseJob
      # @param sub_jobs [Array] job descriptions to run
      # @param context [Hash, nil] context applied to the root batch
      # @param ordered [Boolean] true => FIFO (Redis list); false => unordered (Redis set)
      # @param concurrency [Integer, Boolean, nil] max parallel fibers.
      #   0 / nil / true => one fiber per job (fully parallel);
      #   false => 1 fiber (fully serial).
      def perform(sub_jobs, context: nil, ordered: true, concurrency: nil)
        man_batch_id = SecureRandom.urlsafe_base64(10)

        if concurrency == 0 || concurrency == nil || concurrency == true
          concurrency = sub_jobs.count
        elsif concurrency == false
          concurrency = 1
        end

        root_batch = Batch.new

        Batch.redis do |r|
          r.multi do
            r.hset("MNGBID-#{man_batch_id}", "root_bid", root_batch.bid)
            r.hset("MNGBID-#{man_batch_id}", "ordered", ordered)
            r.hset("MNGBID-#{man_batch_id}", "concurrency", concurrency)
            r.expire("MNGBID-#{man_batch_id}", Batch::BID_EXPIRE_TTL)

            mapped_sub_jobs = sub_jobs.each_with_index.map do |j, i|
              j['_mngbid_index_'] = i # This allows duplicate jobs when a Redis Set is used
              j = ActiveJob::Arguments.serialize([j])
              JSON.unparse(j)
            end
            if ordered
              r.rpush("MNGBID-#{man_batch_id}-jobs", mapped_sub_jobs)
            else
              r.sadd("MNGBID-#{man_batch_id}-jobs", mapped_sub_jobs)
            end
            r.expire("MNGBID-#{man_batch_id}-jobs", Batch::BID_EXPIRE_TTL)
          end
        end

        root_batch.description = "Managed Batch Root (#{man_batch_id})"
        # Context changes are only deterministic when jobs run one at a time.
        root_batch.allow_context_changes = (concurrency == 1)
        root_batch.context = context
        root_batch.on(:success, "#{self.class.to_s}.cleanup_redis", managed_batch_id: man_batch_id)
        root_batch.jobs {}

        # Kick off one fiber per allowed concurrent job.
        concurrency.times do
          self.class.perform_next_sequence_job(man_batch_id)
        end
      end

      # :success callback of the root batch — drop the staging keys.
      def self.cleanup_redis(status, options)
        man_batch_id = options['managed_batch_id']
        Batch.redis do |r|
          r.del(
            "MNGBID-#{man_batch_id}",
            "MNGBID-#{man_batch_id}-jobs",
          )
        end
      end

      # :success callback of each fiber batch — start the next job.
      def self.job_succeeded_callback(status, options)
        man_batch_id = options['managed_batch_id']
        perform_next_sequence_job(man_batch_id)
      end

      protected

      def self.perform_next_sequence_job(man_batch_id)
        root_bid, ordered = Batch.redis do |r|
          r.multi do
            r.hget("MNGBID-#{man_batch_id}", "root_bid")
            r.hget("MNGBID-#{man_batch_id}", "ordered")
          end
        end
        # BUGFIX: HGET returns the stored boolean as a String ("true"/"false"),
        # and "false" is truthy in Ruby — so the unordered SPOP branch below
        # was unreachable. Coerce to a real boolean first.
        ordered = (ordered.to_s == "true")

        next_job_json = Batch.redis do |r|
          if ordered
            r.lpop("MNGBID-#{man_batch_id}-jobs")
          else
            r.spop("MNGBID-#{man_batch_id}-jobs")
          end
        end

        return unless next_job_json.present?

        next_job = JSON.parse(next_job_json)
        next_job = ActiveJob::Arguments.deserialize(next_job)[0]

        Batch.new(root_bid).jobs do
          Batch.new.tap do |batch|
            batch.description = "Managed Batch Fiber (#{man_batch_id})"
            batch.on(:success, "#{self.to_s}.job_succeeded_callback", managed_batch_id: man_batch_id)
            batch.jobs do
              ChainBuilder.enqueue_job(next_job)
            end
          end
        end
      end
    end
  end
end
module CanvasSync
  module JobBatches
    # Runs the given sub-jobs strictly one at a time, in order.
    # Implemented as a ManagedBatchJob constrained to a single fiber
    # (concurrency: false) over an ordered (FIFO) job list.
    class SerialBatchJob < BaseJob
      def perform(sub_jobs, context: nil)
        ManagedBatchJob.new.perform(
          sub_jobs,
          context: context,
          ordered: true,
          concurrency: false,
        )
      end
    end
  end
end
module CanvasSync
  module JobBatches
    # A Redis-backed job pool: queued job descriptions are drained with at
    # most +concurrency+ running at once. Each popped job is enqueued inside
    # a wrapper Batch whose completion callbacks return capacity to the pool
    # and trigger a refill.
    class Pool
      include RedisModel

      # Lua script: increment a hash counter only while it is below a cap,
      # returning the pre-increment value. Used to reserve an "active" slot.
      HINCR_MAX = RedisScript.new(Pathname.new(__FILE__) + "../hincr_max.lua")

      attr_reader :pid
      redis_attr :description
      redis_attr :created_at
      redis_attr :concurrency, :int
      redis_attr :order
      redis_attr :on_failed_job
      redis_attr :clean_when_empty, :bool

      # @param pool_id [String, nil] attach to an existing pool when given;
      #   otherwise create a new pool configured by the keyword options
      #   (see #initialize_new).
      def initialize(pool_id = nil, **kwargs)
        if pool_id
          @existing = true
          @pid = pool_id
        else
          @pid = SecureRandom.urlsafe_base64(10)
          initialize_new(**kwargs)
        end
      end

      def self.from_pid(pid)
        new(pid)
      end

      def <<(job_desc)
        add_job(job_desc)
      end

      def add_job(job_desc)
        add_jobs([job_desc])
      end

      # Queues the given job descriptions (each wrapped in its own Batch for
      # completion tracking) and immediately tops the pool up to its limit.
      def add_jobs(job_descs)
        job_descs.each do |job_desc|
          wrapper = Batch.new
          wrapper.description = "Pool Job Wrapper"
          wrapper.on(:complete, "#{self.class.to_s}.job_completed_callback", pool_id: pid)
          wrapper.on(:success, "#{self.class.to_s}.job_success_callback", pool_id: pid)
          wrapper.jobs {}

          job_desc = job_desc.with_indifferent_access
          job_desc = job_desc.merge!(
            job: job_desc[:job].to_s,
            pool_wrapper_batch: wrapper.bid,
          )

          push_job_to_pool(job_desc)
        end
        refill_allotment
      end

      def cleanup_redis
        Batch.logger.debug {"Cleaning redis of pool #{pid}"}
        redis do |r|
          r.del(
            "#{redis_key}",
            "#{redis_key}-jobs",
          )
        end
      end

      # Dispatches wrapper-batch callbacks. Capacity is returned on :complete,
      # unless on_failed_job is :wait, in which case only :success frees it.
      def handle_event(event, options)
        queue_next = false
        if event == :success && on_failed_job.to_sym == :wait # We trigger on the complete event, unless on_failed_job is :wait
          queue_next = true
        elsif event == :complete
          if on_failed_job.to_sym != :wait
            queue_next = true
          end
        end

        if queue_next
          active_count = redis do |r|
            r.hincrby(redis_key, "active_count", -1)
          end
          added_count = refill_allotment

          # Pool is idle and drained — optionally remove its Redis state.
          if active_count == 0 && added_count == 0
            cleanup_redis if clean_when_empty
          end
        end
      end

      def self.job_completed_callback(status, options)
        pid = options['pool_id']
        from_pid(pid).handle_event(:complete, options)
      end

      def self.job_success_callback(status, options)
        pid = options['pool_id']
        from_pid(pid).handle_event(:success, options)
      end

      protected

      def redis_key
        "POOLID-#{pid}"
      end

      # Pops and enqueues jobs until the active count reaches the concurrency
      # limit or the queue is empty. Returns the number of jobs started.
      def refill_allotment
        jobs_added = 0
        limit = concurrency.to_i
        redis do |r|
          while true
            # Reserve a slot; HINCR_MAX returns the pre-increment count.
            current_count = HINCR_MAX.call(r, [redis_key], ["active_count", limit]).to_i
            break unless current_count < limit

            job_desc = pop_job_from_pool
            if job_desc.present?
              Batch.new(job_desc['pool_wrapper_batch']).jobs do
                ChainBuilder.enqueue_job(job_desc)
              end
              jobs_added += 1
            else
              # Queue is empty — release the slot we just reserved.
              r.hincrby(redis_key, "active_count", -1)
              break
            end
          end
          r.expire(redis_key, Batch::BID_EXPIRE_TTL)
          r.expire("#{redis_key}-jobs", Batch::BID_EXPIRE_TTL)
        end
        jobs_added
      end

      def push_job_to_pool(job_desc)
        jobs_key = "#{redis_key}-jobs"
        # This allows duplicate jobs when a Redis Set is used
        job_desc['_pool_random_key_'] = SecureRandom.urlsafe_base64(10)
        job_json = JSON.unparse(ActiveJob::Arguments.serialize([job_desc]))
        order = self.order

        redis do |r|
          r.multi do
            case order.to_sym
            when :fifo, :lifo
              r.rpush(jobs_key, job_json)
            when :random
              r.sadd(jobs_key, job_json)
            when :priority
              r.zadd(jobs_key, job_desc[:priority] || 0, job_json)
            end
            r.expire(jobs_key, Batch::BID_EXPIRE_TTL)
          end
        end
      end

      def pop_job_from_pool
        jobs_key = "#{redis_key}-jobs"
        order = self.order

        job_json = nil
        redis do |r|
          job_json = case order.to_sym
          when :fifo
            r.lpop(jobs_key)
          when :lifo
            r.rpop(jobs_key)
          when :random
            r.spop(jobs_key)
          when :priority
            # BUGFIX: ZPOPMAX returns a [member, score] pair; the previous
            # code passed the whole pair to JSON.parse. Keep just the member.
            (r.zpopmax(jobs_key) || [])[0]
          end
        end

        return nil unless job_json.present?

        ActiveJob::Arguments.deserialize(JSON.parse(job_json))[0]
      end

      # Number of jobs still waiting in the pool's backing structure.
      def pending_count
        # BUGFIX: jobs_key was referenced here without ever being defined,
        # so every call raised NameError.
        jobs_key = "#{redis_key}-jobs"
        order = self.order
        redis do |r|
          case order.to_sym
          when :fifo, :lifo
            r.llen(jobs_key)
          when :random
            r.scard(jobs_key)
          when :priority
            r.zcard(jobs_key)
          end
        end
      end

      def self.redis(&blk)
        Batch.redis(&blk)
      end
      delegate :redis, to: :class

      private

      # @param order [Symbol] :fifo, :lifo, :random, or :priority
      # @param on_failed_job [Symbol] :wait => a failed job holds its slot
      #   until retried successfully; anything else frees it on :complete.
      def initialize_new(concurrency: nil, order: :fifo, clean_when_empty: true, on_failed_job: :wait)
        self.created_at = Time.now.utc.to_f
        self.order = order
        self.concurrency = concurrency
        self.clean_when_empty = clean_when_empty
        self.on_failed_job = on_failed_job
        flush_pending_attrs
      end
    end
  end
end
module CanvasSync
  module JobBatches
    # Mixin giving a class typed attributes persisted in a Redis hash at the
    # including class's +redis_key+. Writes made before the record is
    # initialized are buffered and flushed to Redis in one round trip.
    module RedisModel
      extend ActiveSupport::Concern

      class_methods do
        # Defines a typed getter/setter pair backed by the Redis hash.
        # type: :string (default), :int, :float, :bool, or :json.
        # read_only: when true, the setter raises once the record exists
        # (i.e. once @initialized or @existing is set).
        def redis_attr(key, type = :string, read_only: true)
          class_eval <<-RUBY, __FILE__, __LINE__ + 1
            def #{key}=(value)
              raise "#{key} is read-only once the batch has been started" if #{read_only.to_s} && (@initialized || @existing)
              @#{key} = value
              if :#{type} == :json
                value = JSON.unparse(value)
              end
              persist_bid_attr('#{key}', value)
            end

            def #{key}
              # Memoized; falls through to Redis only for persisted records.
              return @#{key} if defined?(@#{key})
              if (@initialized || @existing)
                value = read_bid_attr('#{key}')
                if :#{type} == :bool
                  value = value == 'true'
                elsif :#{type} == :int
                  value = value.to_i
                elsif :#{type} == :float
                  value = value.to_f
                elsif :#{type} == :json
                  value = JSON.parse(value)
                end
                @#{key} = value
              end
            end
          RUBY
        end
      end

      # Writes one attribute through to Redis (refreshing the key TTL), or
      # buffers it when the record hasn't been persisted yet.
      def persist_bid_attr(attribute, value)
        if @initialized || @existing
          redis do |r|
            r.multi do
              r.hset(redis_key, attribute, value)
              r.expire(redis_key, Batch::BID_EXPIRE_TTL)
            end
          end
        else
          @pending_attrs ||= {}
          @pending_attrs[attribute] = value
        end
      end

      def read_bid_attr(attribute)
        redis do |r|
          r.hget(redis_key, attribute)
        end
      end

      # Flushes buffered attributes in a single HMSET and marks the record
      # initialized, so subsequent writes go straight to Redis.
      def flush_pending_attrs
        # BUGFIX: flushing before any attribute was written previously passed
        # nil to mapped_hmset. Also refresh the TTL here for consistency with
        # persist_bid_attr, so a freshly-created record expires too.
        @pending_attrs ||= {}
        redis do |r|
          r.multi do
            r.mapped_hmset(redis_key, @pending_attrs) if @pending_attrs.any?
            r.expire(redis_key, Batch::BID_EXPIRE_TTL)
          end
        end
        @initialized = true
        @pending_attrs = {}
      end
    end
  end
end