canvas_sync 0.17.0.beta14 → 0.17.3.beta1
This diff shows the changes between two publicly released versions of the package as they appear in a supported public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/README.md +58 -0
- data/lib/canvas_sync.rb +24 -5
- data/lib/canvas_sync/importers/bulk_importer.rb +7 -4
- data/lib/canvas_sync/job_batches/batch.rb +75 -95
- data/lib/canvas_sync/job_batches/callback.rb +19 -29
- data/lib/canvas_sync/job_batches/context_hash.rb +8 -5
- data/lib/canvas_sync/job_batches/hincr_max.lua +5 -0
- data/lib/canvas_sync/job_batches/jobs/managed_batch_job.rb +99 -0
- data/lib/canvas_sync/job_batches/jobs/serial_batch_job.rb +6 -65
- data/lib/canvas_sync/job_batches/pool.rb +209 -0
- data/lib/canvas_sync/job_batches/redis_model.rb +67 -0
- data/lib/canvas_sync/job_batches/redis_script.rb +163 -0
- data/lib/canvas_sync/job_batches/sidekiq.rb +22 -1
- data/lib/canvas_sync/job_batches/status.rb +0 -5
- data/lib/canvas_sync/jobs/begin_sync_chain_job.rb +4 -2
- data/lib/canvas_sync/jobs/report_starter.rb +1 -0
- data/lib/canvas_sync/processors/assignment_groups_processor.rb +3 -2
- data/lib/canvas_sync/processors/assignments_processor.rb +3 -2
- data/lib/canvas_sync/processors/context_module_items_processor.rb +3 -2
- data/lib/canvas_sync/processors/context_modules_processor.rb +3 -2
- data/lib/canvas_sync/processors/normal_processor.rb +2 -1
- data/lib/canvas_sync/processors/provisioning_report_processor.rb +10 -2
- data/lib/canvas_sync/processors/submissions_processor.rb +3 -2
- data/lib/canvas_sync/version.rb +1 -1
- data/spec/dummy/log/test.log +67741 -0
- data/spec/job_batching/batch_aware_job_spec.rb +1 -0
- data/spec/job_batching/batch_spec.rb +72 -15
- data/spec/job_batching/callback_spec.rb +1 -1
- data/spec/job_batching/flow_spec.rb +0 -1
- data/spec/job_batching/integration/fail_then_succeed.rb +42 -0
- data/spec/job_batching/integration_helper.rb +6 -4
- data/spec/job_batching/sidekiq_spec.rb +1 -0
- data/spec/job_batching/status_spec.rb +1 -17
- metadata +9 -2
data/lib/canvas_sync/job_batches/callback.rb
@@ -2,6 +2,7 @@ module CanvasSync
   module JobBatches
     class Batch
       module Callback
+        mattr_accessor :worker_class
 
         VALID_CALLBACKS = %w[success complete dead].freeze
 

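The new `mattr_accessor :worker_class` makes the callback worker configurable instead of being picked at load time. A minimal sketch of setting it explicitly, assuming an app-side initializer and using the `ActiveJobCallbackWorker` default referenced later in this diff:

```ruby
# Hypothetical initializer; a replacement worker would need to respond to
# enqueue_all(args, queue) like the built-in callback workers do.
CanvasSync::JobBatches::Batch::Callback.worker_class =
  CanvasSync::JobBatches::Batch::Callback::ActiveJobCallbackWorker
```
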
@@ -47,41 +48,28 @@ module CanvasSync
           end
         end
 
-
-        class SidekiqCallbackWorker
-          include ::Sidekiq::Worker
-          include CallbackWorkerCommon
-
-          def self.enqueue_all(args, queue)
-            return if args.empty?
-
-            ::Sidekiq::Client.push_bulk(
-              'class' => self,
-              'args' => args,
-              'queue' => queue
-            )
-          end
-        end
-        Worker = SidekiqCallbackWorker
-        else
-        Worker = ActiveJobCallbackWorker
-        end
+        worker_class = ActiveJobCallbackWorker
 
         class Finalize
-          def dispatch
+          def dispatch(status, opts)
+            is_callback_batch = opts['origin'].present?
+            has_callback_batch = opts['callback_bid'].present?
+
             bid = opts["bid"]
-            callback_bid = status.bid
             event = opts["event"].to_sym
-            callback_batch = bid != callback_bid
 
-            Batch.logger.debug {"Finalize #{event} batch id: #{opts["bid"]}, callback batch id: #{callback_bid} callback_batch #{
+            Batch.logger.debug {"Finalize #{event} batch id: #{opts["bid"]}, callback batch id: #{callback_bid} callback_batch #{is_callback_batch}"}
 
             batch_status = Status.new bid
             send(event, bid, batch_status, batch_status.parent_bid)
 
-
-
-
+            if event == :success && !has_callback_batch
+              Batch.cleanup_redis(bid)
+            end
+
+            if event == :success && is_callback_batch && opts['origin']['event'].to_sym == :success
+              Batch.cleanup_redis(opts['origin']['for_bid'])
+            end
           end
 
           def success(bid, status, parent_bid)

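For orientation, the `opts` hash that `Finalize#dispatch` now receives carries the keys referenced above. A rough sketch with placeholder values (the payload is presumably assembled by `Batch.enqueue_callbacks` in batch.rb, which is not shown in this excerpt):

```ruby
opts = {
  "bid"          => "some-batch-bid",      # the batch the event fired for
  "event"        => "success",             # or "complete" / "dead"
  "callback_bid" => "callback-batch-bid",  # present when the callbacks run in their own batch
  "origin"       => {                      # present when this batch *is* a callback batch
    "event"   => "success",
    "for_bid" => "original-batch-bid",
  },
}
```
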
@@ -102,8 +90,8 @@ module CanvasSync
                 r.scard("BID-#{parent_bid}-failed")
               end
             end
-            #
-            #
+            # If the job finished successfully and parent batch is completed, call parent :complete callback
+            # Parent :success callback will be called by its :complete callback
             if complete == children && pending == failure
               Batch.logger.debug {"Finalize parent complete bid: #{parent_bid}"}
               Batch.enqueue_callbacks(:complete, parent_bid)

@@ -119,10 +107,12 @@ module CanvasSync
               end
             end
 
-            #
+            # If the batch was successful run :success callback, which will call the parent's :complete callback (if necessary)
+            # Also, only trigger the success callback if the :complete callback_batch was successful
             if pending.to_i.zero? && children == success
               Batch.enqueue_callbacks(:success, bid)
 
+            # otherwise check for a parent and call its :complete if needed
             elsif parent_bid
               # if batch was not successfull check and see if its parent is complete
               # if the parent is complete we trigger the complete callback

data/lib/canvas_sync/job_batches/context_hash.rb
@@ -17,7 +17,7 @@ module CanvasSync
       def local_bid
         bid = @bid_stack[-1]
         while bid.present?
-          bhash =
+          bhash = resolve_hash(bid)
           return bid if bhash
           bid = get_parent_bid(bid)
         end

@@ -49,7 +49,7 @@ module CanvasSync
       def [](key)
         bid = @bid_stack[-1]
         while bid.present?
-          bhash =
+          bhash = resolve_hash(bid)
           return bhash[key] if bhash&.key?(key)
           bid = get_parent_bid(bid)
         end

@@ -94,7 +94,7 @@ module CanvasSync
       private
 
       def get_parent_hash(bid)
-
+        resolve_hash(get_parent_bid(bid)).freeze
       end
 
       def get_parent_bid(bid)

@@ -105,13 +105,15 @@ module CanvasSync
         if index >= 0
           @bid_stack[index]
         else
-          pbid = Batch.redis
+          pbid = Batch.redis do |r|
+            r.hget("BID-#{bid}", "parent_bid") || r.hget("BID-#{bid}", "callback_for")
+          end
           @bid_stack.unshift(pbid)
           pbid
         end
       end
 
-      def
+      def resolve_hash(bid)
         return nil unless bid.present?
         return @hash_map[bid] if @hash_map.key?(bid)
 

@@ -137,6 +139,7 @@ module CanvasSync
       end
 
       def load_all
+        resolve_hash(@bid_stack[0]).freeze
         while @bid_stack[0].present?
           get_parent_hash(@bid_stack[0])
         end

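The parent lookup above falls back from `parent_bid` to `callback_for`, so callback batches resolve context through the batch they were created for. A quick console sketch using the same Redis fields (`bid` is a placeholder):

```ruby
# Returns the parent batch id, or the origin batch id if this is a callback batch.
parent = CanvasSync::JobBatches::Batch.redis do |r|
  r.hget("BID-#{bid}", "parent_bid") || r.hget("BID-#{bid}", "callback_for")
end
```
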
data/lib/canvas_sync/job_batches/jobs/managed_batch_job.rb
@@ -0,0 +1,99 @@
+require_relative './base_job'
+
+module CanvasSync
+  module JobBatches
+    class ManagedBatchJob < BaseJob
+      def perform(sub_jobs, context: nil, ordered: true, concurrency: nil)
+        man_batch_id = SecureRandom.urlsafe_base64(10)
+
+        if concurrency == 0 || concurrency == nil || concurrency == true
+          concurrency = sub_jobs.count
+        elsif concurrency == false
+          concurrency = 1
+        end
+
+        root_batch = Batch.new
+
+        Batch.redis do |r|
+          r.multi do
+            r.hset("MNGBID-#{man_batch_id}", "root_bid", root_batch.bid)
+            r.hset("MNGBID-#{man_batch_id}", "ordered", ordered)
+            r.hset("MNGBID-#{man_batch_id}", "concurrency", concurrency)
+            r.expire("MNGBID-#{man_batch_id}", Batch::BID_EXPIRE_TTL)
+
+            mapped_sub_jobs = sub_jobs.each_with_index.map do |j, i|
+              j['_mngbid_index_'] = i # This allows duplicate jobs when a Redis Set is used
+              j = ActiveJob::Arguments.serialize([j])
+              JSON.unparse(j)
+            end
+            if ordered
+              r.rpush("MNGBID-#{man_batch_id}-jobs", mapped_sub_jobs)
+            else
+              r.sadd("MNGBID-#{man_batch_id}-jobs", mapped_sub_jobs)
+            end
+            r.expire("MNGBID-#{man_batch_id}-jobs", Batch::BID_EXPIRE_TTL)
+          end
+        end
+
+        root_batch.description = "Managed Batch Root (#{man_batch_id})"
+        root_batch.allow_context_changes = (concurrency == 1)
+        root_batch.context = context
+        root_batch.on(:success, "#{self.class.to_s}.cleanup_redis", managed_batch_id: man_batch_id)
+        root_batch.jobs {}
+
+        concurrency.times do
+          self.class.perform_next_sequence_job(man_batch_id)
+        end
+      end
+
+      def self.cleanup_redis(status, options)
+        man_batch_id = options['managed_batch_id']
+        Batch.redis do |r|
+          r.del(
+            "MNGBID-#{man_batch_id}",
+            "MNGBID-#{man_batch_id}-jobs",
+          )
+        end
+      end
+
+      def self.job_succeeded_callback(status, options)
+        man_batch_id = options['managed_batch_id']
+        perform_next_sequence_job(man_batch_id)
+      end
+
+      protected
+
+      def self.perform_next_sequence_job(man_batch_id)
+        root_bid, ordered = Batch.redis do |r|
+          r.multi do
+            r.hget("MNGBID-#{man_batch_id}", "root_bid")
+            r.hget("MNGBID-#{man_batch_id}", "ordered")
+          end
+        end
+
+        next_job_json = Batch.redis do |r|
+          if ordered
+            r.lpop("MNGBID-#{man_batch_id}-jobs")
+          else
+            r.spop("MNGBID-#{man_batch_id}-jobs")
+          end
+        end
+
+        return unless next_job_json.present?
+
+        next_job = JSON.parse(next_job_json)
+        next_job = ActiveJob::Arguments.deserialize(next_job)[0]
+
+        Batch.new(root_bid).jobs do
+          Batch.new.tap do |batch|
+            batch.description = "Managed Batch Fiber (#{man_batch_id})"
+            batch.on(:success, "#{self.to_s}.job_succeeded_callback", managed_batch_id: man_batch_id)
+            batch.jobs do
+              ChainBuilder.enqueue_job(next_job)
+            end
+          end
+        end
+      end
+    end
+  end
+end

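A minimal usage sketch for the new `ManagedBatchJob`, assuming sub-jobs are the same job-description hashes consumed by `ChainBuilder.enqueue_job` (the job names and extra keys here are illustrative):

```ruby
CanvasSync::JobBatches::ManagedBatchJob.new.perform(
  [
    { job: "SomeJob" },
    { job: "SomeJob" },   # duplicates are allowed; see _mngbid_index_ above
    { job: "OtherJob" },
  ],
  context: { account_id: 1 },
  ordered: false,     # sub-jobs are drawn from a Redis set instead of a list
  concurrency: 2,     # two "fibers" pull from the queue of sub-jobs at once
)
```

Calling `new.perform` directly mirrors how `SerialBatchJob` delegates below; if `BaseJob` is an ActiveJob subclass, `perform_later` with the same arguments would enqueue it instead.
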
data/lib/canvas_sync/job_batches/jobs/serial_batch_job.rb
@@ -4,71 +4,12 @@ module CanvasSync
   module JobBatches
     class SerialBatchJob < BaseJob
       def perform(sub_jobs, context: nil)
-
-
-
-
-
-
-            mapped_sub_jobs = sub_jobs.map do |j|
-              j = ActiveJob::Arguments.serialize([j])
-              JSON.unparse(j)
-            end
-            r.hset("SERBID-#{serial_id}", "root_bid", root_batch.bid)
-            r.expire("SERBID-#{serial_id}", Batch::BID_EXPIRE_TTL)
-            r.rpush("SERBID-#{serial_id}-jobs", mapped_sub_jobs)
-            r.expire("SERBID-#{serial_id}-jobs", Batch::BID_EXPIRE_TTL)
-          end
-        end
-
-        root_batch.description = "Serial Batch Root (#{serial_id})"
-        root_batch.allow_context_changes = true
-        root_batch.context = context
-        root_batch.on(:success, "#{self.class.to_s}.cleanup_redis", serial_batch_id: serial_id)
-        root_batch.jobs {}
-
-        self.class.perform_next_sequence_job(serial_id)
-      end
-
-      def self.cleanup_redis(status, options)
-        serial_id = options['serial_batch_id']
-        Batch.redis do |r|
-          r.del(
-            "SERBID-#{serial_id}",
-            "SERBID-#{serial_id}-jobs",
-          )
-        end
-      end
-
-      def self.job_succeeded_callback(status, options)
-        serial_id = options['serial_batch_id']
-        perform_next_sequence_job(serial_id)
-      end
-
-      protected
-
-      def self.perform_next_sequence_job(serial_id)
-        root_bid, next_job_json = Batch.redis do |r|
-          r.multi do
-            r.hget("SERBID-#{serial_id}", "root_bid")
-            r.lpop("SERBID-#{serial_id}-jobs")
-          end
-        end
-
-        return unless next_job_json.present?
-
-        next_job = JSON.parse(next_job_json)
-        next_job = ActiveJob::Arguments.deserialize(next_job)[0]
-
-        Batch.new(root_bid).jobs do
-          Batch.new.tap do |batch|
-            batch.description = "Serial Batch Fiber (#{serial_id})"
-            batch.on(:success, "#{self.to_s}.job_succeeded_callback", serial_batch_id: serial_id)
-            batch.jobs do
-              ChainBuilder.enqueue_job(next_job)
-            end
-          end
-        end
+        ManagedBatchJob.new.perform(
+          sub_jobs,
+          context: context,
+          ordered: true,
+          concurrency: false,
+        )
       end
     end
   end

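`SerialBatchJob` is now a thin wrapper: a call like the one below (hypothetical job names, and assuming `BaseJob` is an ActiveJob so `perform_later` is available) behaves like a `ManagedBatchJob` with `ordered: true` and `concurrency: false`, i.e. the sub-jobs run one at a time, in order.

```ruby
CanvasSync::JobBatches::SerialBatchJob.perform_later(
  [{ job: "SomeJob" }, { job: "OtherJob" }],
  context: { account_id: 1 },
)
```
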
data/lib/canvas_sync/job_batches/pool.rb
@@ -0,0 +1,209 @@
+module CanvasSync
+  module JobBatches
+    class Pool
+      include RedisModel
+
+      HINCR_MAX = RedisScript.new(Pathname.new(__FILE__) + "../hincr_max.lua")
+
+      attr_reader :pid
+      redis_attr :description
+      redis_attr :created_at
+      redis_attr :concurrency, :int
+      redis_attr :order
+      redis_attr :on_failed_job
+      redis_attr :clean_when_empty, :bool
+
+      def initialize(pooolid = nil, **kwargs)
+        if pooolid
+          @existing = true
+          @pid = pooolid
+        else
+          @pid = SecureRandom.urlsafe_base64(10)
+          initialize_new(**kwargs)
+        end
+      end
+
+      def self.from_pid(pid)
+        new(pid)
+      end
+
+      def <<(job_desc)
+        add_job(job_desc)
+      end
+
+      def add_job(job_desc)
+        add_jobs([job_desc])
+      end
+
+      def add_jobs(job_descs)
+        job_descs.each do |job_desc|
+          wrapper = Batch.new
+          wrapper.description = "Pool Job Wrapper"
+          wrapper.on(:complete, "#{self.class.to_s}.job_completed_callback", pool_id: pid)
+          wrapper.on(:success, "#{self.class.to_s}.job_success_callback", pool_id: pid)
+          wrapper.jobs {}
+
+          job_desc = job_desc.with_indifferent_access
+          job_desc = job_desc.merge!(
+            job: job_desc[:job].to_s,
+            pool_wrapper_batch: wrapper.bid,
+          )
+
+          push_job_to_pool(job_desc)
+        end
+        refill_allotment
+      end
+
+      def cleanup_redis
+        Batch.logger.debug {"Cleaning redis of pool #{pid}"}
+        redis do |r|
+          r.del(
+            "#{redis_key}",
+            "#{redis_key}-jobs",
+          )
+        end
+      end
+
+      def handle_event(event, options)
+        queue_next = false
+        if event == :success && on_failed_job.to_sym == :wait # We trigger on the complete event, unless on_failed_job is :wait
+          queue_next = true
+        elsif event == :complete
+          if on_failed_job.to_sym != :wait
+            queue_next = true
+          end
+        end
+
+        if queue_next
+          active_count = redis do |r|
+            r.hincrby(redis_key, "active_count", -1)
+          end
+          added_count = refill_allotment
+
+          if active_count == 0 && added_count == 0
+            cleanup_redis if clean_when_empty
+          end
+        end
+      end
+
+      def self.job_completed_callback(status, options)
+        pid = options['pool_id']
+        from_pid(pid).handle_event(:complete, options)
+      end
+
+      def self.job_success_callback(status, options)
+        pid = options['pool_id']
+        from_pid(pid).handle_event(:success, options)
+      end
+
+      protected
+
+      def redis_key
+        "POOLID-#{pid}"
+      end
+
+      def refill_allotment
+        jobs_added = 0
+        limit = concurrency.to_i
+        redis do |r|
+          current_count = 0
+          while true
+            current_count = HINCR_MAX.call(r, [redis_key], ["active_count", limit]).to_i
+            if current_count < limit
+              job_desc = pop_job_from_pool
+              if job_desc.present?
+                Batch.new(job_desc['pool_wrapper_batch']).jobs do
+                  ChainBuilder.enqueue_job(job_desc)
+                end
+                jobs_added += 1
+              else
+                r.hincrby(redis_key, "active_count", -1)
+                break
+              end
+            else
+              break
+            end
+          end
+          r.expire(redis_key, Batch::BID_EXPIRE_TTL)
+          r.expire("#{redis_key}-jobs", Batch::BID_EXPIRE_TTL)
+        end
+        jobs_added
+      end
+
+      def push_job_to_pool(job_desc)
+        jobs_key = "#{redis_key}-jobs"
+        # This allows duplicate jobs when a Redis Set is used
+        job_desc['_pool_random_key_'] = SecureRandom.urlsafe_base64(10)
+        job_json = JSON.unparse(ActiveJob::Arguments.serialize([job_desc]))
+        order = self.order
+
+        redis do |r|
+          r.multi do
+            case order.to_sym
+            when :fifo, :lifo
+              r.rpush(jobs_key, job_json)
+            when :random
+              r.sadd(jobs_key, job_json)
+            when :priority
+              r.zadd(jobs_key, job_desc[:priority] || 0, job_json)
+            end
+            r.expire(jobs_key, Batch::BID_EXPIRE_TTL)
+          end
+        end
+      end
+
+      def pop_job_from_pool
+        jobs_key = "#{redis_key}-jobs"
+        order = self.order
+
+        job_json = nil
+        redis do |r|
+          job_json = case order.to_sym
+          when :fifo
+            r.lpop(jobs_key)
+          when :lifo
+            r.rpop(jobs_key)
+          when :random
+            r.spop(jobs_key)
+          when :priority
+            r.zpopmax(jobs_key)
+          end
+        end
+
+        return nil unless job_json.present?
+
+        ActiveJob::Arguments.deserialize(JSON.parse(job_json))[0]
+      end
+
+      def pending_count
+        order = self.order
+        redis do |r|
+          case order.to_sym
+          when :fifo, :lifo
+            r.llen(jobs_key)
+          when :random
+            r.scard(jobs_key)
+          when :priority
+            r.zcard(jobs_key)
+          end
+        end
+      end
+
+      def self.redis(&blk)
+        Batch.redis &blk
+      end
+      delegate :redis, to: :class
+
+      private
+
+      def initialize_new(concurrency: nil, order: :fifo, clean_when_empty: true, on_failed_job: :wait)
+        self.created_at = Time.now.utc.to_f
+        self.order = order
+        self.concurrency = concurrency
+        self.clean_when_empty = clean_when_empty
+        self.on_failed_job = on_failed_job
+        flush_pending_attrs
+      end
+    end
+  end
+end

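A usage sketch for the new `Pool`, based only on the API above; the job names are illustrative, and `priority` is only consulted when `order: :priority` is used:

```ruby
pool = CanvasSync::JobBatches::Pool.new(concurrency: 4, order: :priority)
pool << { job: "SomeJob", priority: 10 }
pool.add_jobs([
  { job: "OtherJob", priority: 5 },
  { job: "ThirdJob" },
])

# An existing pool can be re-opened later from its id:
CanvasSync::JobBatches::Pool.from_pid(pool.pid) << { job: "SomeJob" }
```

With the defaults shown in `initialize_new`, the pool drains FIFO, waits for each wrapped job's batch to succeed before starting the next one (`on_failed_job: :wait`), and cleans up its Redis keys once it is empty (`clean_when_empty: true`).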