workerholic 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +1 -0
- data/.rspec +1 -0
- data/Gemfile +12 -0
- data/Gemfile.lock +42 -0
- data/LICENSE +21 -0
- data/README.md +2 -0
- data/app_test/job_test.rb +20 -0
- data/app_test/run.rb +10 -0
- data/lib/server.rb +13 -0
- data/lib/workerholic.rb +47 -0
- data/lib/workerholic/adapters/active_job_adapter.rb +24 -0
- data/lib/workerholic/job.rb +49 -0
- data/lib/workerholic/job_processor.rb +29 -0
- data/lib/workerholic/job_retry.rb +32 -0
- data/lib/workerholic/job_scheduler.rb +47 -0
- data/lib/workerholic/job_serializer.rb +12 -0
- data/lib/workerholic/job_wrapper.rb +32 -0
- data/lib/workerholic/log_manager.rb +17 -0
- data/lib/workerholic/manager.rb +40 -0
- data/lib/workerholic/queue.rb +30 -0
- data/lib/workerholic/sorted_set.rb +26 -0
- data/lib/workerholic/statistics.rb +21 -0
- data/lib/workerholic/storage.rb +80 -0
- data/lib/workerholic/worker.rb +43 -0
- data/lib/workerholic/worker_balancer.rb +128 -0
- data/spec/helpers/helper_methods.rb +15 -0
- data/spec/helpers/job_tests.rb +17 -0
- data/spec/integration/dequeuing_and_job_processing_spec.rb +24 -0
- data/spec/integration/enqueuing_jobs_spec.rb +53 -0
- data/spec/job_processor_spec.rb +62 -0
- data/spec/job_retry_spec.rb +59 -0
- data/spec/job_scheduler_spec.rb +66 -0
- data/spec/job_serializer_spec.rb +28 -0
- data/spec/job_wrapper_spec.rb +27 -0
- data/spec/manager_spec.rb +26 -0
- data/spec/queue_spec.rb +16 -0
- data/spec/sorted_set.rb +25 -0
- data/spec/spec_helper.rb +18 -0
- data/spec/statistics_spec.rb +25 -0
- data/spec/storage_spec.rb +17 -0
- data/spec/worker_spec.rb +59 -0
- data/workerholic.gemspec +26 -0
- metadata +180 -0
@@ -0,0 +1,40 @@
|
|
1
|
+
module Workerholic
  # Handles polling from Redis and hands job to worker
  #
  # Boots the worker threads, the job scheduler and the worker balancer,
  # then parks the main thread until the process is interrupted.
  class Manager
    attr_reader :workers, :scheduler, :worker_balancer

    # opts[:auto_balance] - when truthy, the balancer redistributes workers
    # across queues proportionally to queue sizes instead of evenly.
    def initialize(opts = {})
      @workers = []
      Workerholic.workers_count.times { @workers << Worker.new }

      @scheduler = JobScheduler.new
      @worker_balancer = WorkerBalancer.new(workers: workers, auto_balance: opts[:auto_balance])
    end

    # Starts all components and sleeps forever; SystemExit or Interrupt
    # (e.g. Ctrl-C) triggers a graceful shutdown before exiting.
    def start
      worker_balancer.start
      workers.each(&:work)
      scheduler.start
      sleep
    rescue SystemExit, Interrupt
      puts "\nWorkerholic is now shutting down. We are letting the workers finish their current jobs..."
      shutdown
      exit
    end

    # Stops workers, balancer and scheduler; workers finish their current
    # job before their threads are joined.
    def shutdown
      workers.each(&:kill)
      worker_balancer.kill
      scheduler.kill
    end
  end
end
|
@@ -0,0 +1,30 @@
|
|
1
|
+
module Workerholic
  # Handles background job enqueueing/dequeuing functionality
  class Queue
    attr_reader :storage, :name

    # name: the Redis list key backing this queue.
    def initialize(name = 'workerholic:queue:main')
      @name = name
      @storage = Storage::RedisWrapper.new
      @logger = LogManager.new
    end

    # Pushes an already-serialized job onto the Redis list and logs it.
    def enqueue(serialized_job)
      storage.push(name, serialized_job)
      @logger.log('info', "Your job was placed in the #{name} queue on #{Time.now}.")
    end

    # Blocking pop; returns the serialized job payload, or nil on timeout.
    def dequeue
      popped = storage.pop(name)
      popped&.last
    end

    # True when no jobs are currently waiting.
    def empty?
      size.zero?
    end

    # Number of jobs currently waiting in this queue.
    def size
      storage.list_length(name)
    end
  end
end
|
@@ -0,0 +1,26 @@
|
|
1
|
+
module Workerholic
  # Redis sorted-set wrapper used to hold scheduled (delayed) jobs,
  # scored by their scheduled execution time.
  class SortedSet
    attr_reader :storage, :name

    # name: the Redis sorted-set key backing this set.
    def initialize(name = 'workerholic:scheduled_jobs')
      @name = name
      @storage = Storage::RedisWrapper.new
    end

    # Adds a serialized job under the given score.
    def add(serialized_job, score)
      storage.add_to_set(name, score, serialized_job)
    end

    # Removes every member whose score equals the given score.
    def remove(score)
      storage.remove_from_set(name, score)
    end

    # Returns the lowest-scored [member, score] pair, or nil when empty.
    def peek
      storage.peek(name)
    end

    # True when the set holds no members (set_empty? returns a count).
    def empty?
      storage.set_empty?(name).zero?
    end
  end
end
|
@@ -0,0 +1,21 @@
|
|
1
|
+
module Workerholic
  # Value object tracking a job's lifecycle timestamps, retry count and
  # accumulated errors.
  class Statistics
    attr_accessor :enqueued_at, :retry_count, :errors, :started_at, :completed_at

    # options: :enqueued_at, :retry_count, :errors, :started_at, :completed_at
    def initialize(options={})
      @enqueued_at = options[:enqueued_at]
      # fix: the :retry_count accessor was declared but never initialized
      # from options, so it silently stayed nil even when supplied.
      @retry_count = options[:retry_count]
      @errors = options[:errors] || []
      @started_at = options[:started_at]
      @completed_at = options[:completed_at]
    end

    # Serializable representation. retry_count is intentionally omitted to
    # keep the existing wire format unchanged.
    def to_hash
      {
        enqueued_at: enqueued_at,
        errors: errors,
        started_at: started_at,
        completed_at: completed_at
      }
    end
  end
end
|
@@ -0,0 +1,80 @@
|
|
1
|
+
module Workerholic
  class Storage
    # Wraps redis-rb gem methods for enqueueing/dequeuing purposes
    class RedisWrapper
      # Consecutive reconnection attempts allowed before giving up.
      MAX_RETRIES = 5

      attr_reader :redis, :retries

      def initialize
        @retries = 0
        @redis = Workerholic.redis_pool

        # Fail fast on boot if Redis is unreachable.
        redis.with do |conn|
          conn.ping
        end
      end

      # Number of elements in the list stored at key.
      def list_length(key)
        execute { |conn| conn.llen(key) }
      end

      # Appends value to the tail of the list at key.
      def push(key, value)
        execute { |conn| conn.rpush(key, value) }
      end

      # blocking pop from Redis queue; returns [key, value] or nil on timeout
      def pop(key, timeout = 1)
        execute { |conn| conn.blpop(key, timeout) }
      end

      # Adds value to the sorted set at key under the given score.
      def add_to_set(key, score, value)
        execute { |conn| conn.zadd(key, score, value) }
      end

      # Lowest-scored [member, score] pair of the sorted set, or nil.
      def peek(key)
        execute { |conn| conn.zrange(key, 0, 0, with_scores: true).first }
      end

      # Removes all members whose score equals the given score.
      def remove_from_set(key, score)
        execute { |conn| conn.zremrangebyscore(key, score, score) }
      end

      # Returns the member count of the sorted set (an Integer, not a
      # boolean, despite the ? name); callers compare against zero.
      def set_empty?(key)
        execute { |conn| conn.zcount(key, 0, '+inf') }
      end

      # Keys of all Workerholic queues currently present in Redis.
      # NOTE(review): relies on a single SCAN pass from cursor 0; a very
      # large keyspace could return a partial listing — confirm acceptable.
      def fetch_queue_names
        execute { |conn| conn.scan(0, match: 'workerholic:queue*').last }
      end

      # Raised once reconnection retries are exhausted.
      class RedisCannotRecover < Redis::CannotConnectError; end

      private

      # Runs the given block with a pooled connection, retrying on
      # connection failures with a 5s backoff, up to MAX_RETRIES times.
      def execute
        result = redis.with { |conn| yield conn }
        reset_retries
        result
      rescue Redis::CannotConnectError
        # LogManager might want to output our retries to the user
        @retries += 1
        if retries_exhausted?
          raise RedisCannotRecover, 'Redis reconnect retries exhausted. Main Workerholic thread will be terminated now.'
        end

        sleep(5)
        retry
      end

      # >= (rather than ==) guards against the counter ever skipping
      # past the limit and retrying forever.
      def retries_exhausted?
        retries >= MAX_RETRIES
      end

      def reset_retries
        @retries = 0
      end
    end
  end
end
|
@@ -0,0 +1,43 @@
|
|
1
|
+
module Workerholic
  # handles job execution in threads
  class Worker
    # The background thread running the polling loop (nil until #work is called).
    attr_reader :thread
    # alive: loop flag checked by the worker thread; queue: currently assigned Queue (may be reassigned by WorkerBalancer).
    attr_accessor :alive, :queue

    def initialize(queue=nil)
      @queue = queue
      @alive = true
      @logger = LogManager.new
    end

    # Spawns a thread that repeatedly polls the assigned queue and hands
    # each serialized job to a JobProcessor. The loop exits once #kill
    # flips `alive` to false.
    #
    # NOTE(review): this rescue catches ThreadError raised by Thread.new
    # itself (e.g. thread limit reached) — not errors raised inside the
    # spawned thread's block. Confirm this is the intended scope.
    def work
      @thread = Thread.new do
        while alive
          serialized_job = poll
          JobProcessor.new(serialized_job).process if serialized_job
        end

        #puts "DONE!"
      end
    rescue ThreadError => e
      @logger.log('info', e.message)
      # Escalate to Interrupt so Manager#start performs a graceful shutdown.
      raise Interrupt
    end

    # Signals the polling loop to stop and joins the thread, which lets
    # an in-flight job finish before the worker goes away.
    def kill
      self.alive = false
      thread.join if thread
    end

    private

    # Returns one serialized job from the assigned queue, or sleeps
    # briefly and returns nil when no queue has been assigned yet.
    def poll
      if queue
        queue.dequeue
      else
        sleep 0.1
        nil
      end
    end
  end
end
|
@@ -0,0 +1,128 @@
|
|
1
|
+
module Workerholic
  # Periodically assigns Worker instances to queues, either evenly or
  # proportionally to queue sizes ("auto" mode), from a background thread.
  class WorkerBalancer
    attr_reader :storage, :workers, :thread, :alive, :auto
    attr_accessor :queues

    # opts[:workers]      - the Worker instances to distribute.
    # opts[:auto_balance] - truthy selects size-proportional balancing.
    def initialize(opts = {})
      @storage = Storage::RedisWrapper.new
      @queues = fetch_queues
      @workers = opts[:workers] || []
      @alive = true
      @logger = LogManager.new
      @auto = opts[:auto_balance]
    end

    # Launches the balancing loop in its chosen mode.
    def start
      if auto
        auto_balance_workers
      else
        evenly_balance_workers
      end
    end

    # Stops the balancing thread immediately.
    def kill
      thread.kill
      thread.join
    end

    private

    # Every 2s: refresh queues, give each queue one worker, then spread
    # the remaining workers proportionally to each queue's job count.
    # NOTE(review): when remaining_workers_count is 0 the Float division
    # below yields Infinity/NaN, and x.5 counts are floored to avoid
    # over-allocating on rounding — verify both edge cases are intended.
    def auto_balance_workers
      @thread = Thread.new do
        while alive
          self.queues = fetch_queues

          total_workers_count = assign_one_worker_per_queue

          # One worker is held back for distribute_unassigned_worker below.
          remaining_workers_count = workers.size - (total_workers_count + 1)
          average_job_count_per_worker = total_jobs / remaining_workers_count.to_f

          queues.each do |queue|
            workers_count = queue.size / average_job_count_per_worker

            # Round half-down so the total doesn't exceed the worker pool.
            if workers_count % 1 == 0.5
              workers_count = workers_count.floor
            else
              workers_count = workers_count.round
            end

            assign_workers_to_queue(queue, workers_count, total_workers_count)

            total_workers_count += workers_count
          end

          distribute_unassigned_worker(total_workers_count)
          output_balancer_stats

          sleep 2
        end
      end
    end

    # Every 2s: refresh queues, give each queue one worker, then split the
    # remaining workers equally among the queues.
    # NOTE(review): `remaining_workers_count / queues.size` raises
    # ZeroDivisionError when no queues exist in Redis — TODO confirm the
    # loop never runs with an empty queue list.
    def evenly_balance_workers
      @thread = Thread.new do
        while alive
          self.queues = fetch_queues

          total_workers_count = assign_one_worker_per_queue

          # One worker is held back for distribute_unassigned_worker below.
          remaining_workers_count = workers.size - (total_workers_count + 1)

          queues.each do |queue|
            workers_count = remaining_workers_count / queues.size
            assign_workers_to_queue(queue, workers_count, total_workers_count)
            total_workers_count += workers_count
          end

          distribute_unassigned_worker(total_workers_count)
          output_balancer_stats

          sleep 2
        end
      end
    end

    # Hands the single leftover (reserved) worker to a random queue.
    def distribute_unassigned_worker(total_workers_count)
      workers[workers.size - 1].queue = queues.sample if workers.size - total_workers_count == 1
    end

    # Logs queue sizes and the current worker-per-queue distribution.
    def output_balancer_stats
      @logger.log('info', queues.map { |q| { name: q.name, size: q.size } })
      @logger.log('info', current_workers_count_per_queue)
    end

    # Gives queue i to worker i while both exist; returns how many
    # workers were assigned.
    def assign_one_worker_per_queue
      index = 0
      while index < queues.size && index < workers.size
        workers[index].queue = queues[index]
        index += 1
      end

      index
    end

    # Builds Queue objects for every queue key currently in Redis.
    def fetch_queues
      storage.fetch_queue_names.map { |queue_name| Queue.new(queue_name) }
    end

    # Total number of pending jobs across all known queues (0 when none).
    def total_jobs
      @queues.map(&:size).reduce(:+) || 0
    end

    # Assigns workers_count workers, starting at index total_workers_count,
    # to fresh Queue instances pointing at the given queue's key.
    def assign_workers_to_queue(queue, workers_count, total_workers_count)
      total_workers_count.upto(total_workers_count + workers_count - 1) do |i|
        workers.to_a[i].queue = Queue.new(queue.name)
      end
    end

    # Tallies how many workers are currently pointed at each queue name.
    def current_workers_count_per_queue
      workers.reduce({}) do |result, worker|
        if worker.queue
          result[worker.queue.name] = result[worker.queue.name] ? result[worker.queue.name] + 1 : 1
        end

        result
      end
    end
  end
end
|
@@ -0,0 +1,15 @@
|
|
1
|
+
TEST_QUEUE = 'workerholic:queue:_test_queue'
TEST_SCHEDULED_SORTED_SET = 'workerholic:test:scheduled_jobs'

# Polls the given block for up to duration_in_secs, returning as soon as
# it yields `target`; otherwise asserts equality once time runs out.
def expect_during(duration_in_secs, target)
  deadline = Time.now.to_f + duration_in_secs
  result = nil

  until Time.now.to_f > deadline
    result = yield
    return if result == target

    sleep(0.001)
  end

  expect(result).to eq(target)
end
|
@@ -0,0 +1,17 @@
|
|
1
|
+
# Minimal job fixture: a single-argument perform that echoes its input,
# enqueued onto the dedicated test queue.
class SimpleJobTest
  include Workerholic::Job
  job_options queue_name: TEST_QUEUE

  def perform(s)
    s
  end
end

# Job fixture taking several arguments of mixed types, used to exercise
# serialization of more complex payloads.
class ComplexJobTest
  include Workerholic::Job
  job_options queue_name: TEST_QUEUE

  def perform(arg1, arg2, arg3)
    [arg1, arg2, arg3]
  end
end
|
@@ -0,0 +1,24 @@
|
|
1
|
+
require_relative '../spec_helper'

# Integration coverage for pulling jobs off the queue and running them.
# (fix: describe string previously misspelled "processesing")
describe 'dequeuing and processing of jobs' do
  let(:redis) { Redis.new }
  # Each example starts from an empty test queue.
  before { redis.del(TEST_QUEUE) }

  xit 'successfully dequeues and process a simple job' do
    serialized_job = Workerholic::JobSerializer.serialize(
      class: SimpleJobTest,
      arguments: ['test job']
    )
    redis.rpush(TEST_QUEUE, serialized_job)
    manager = Workerholic::Manager.new

    Thread.new { manager.start }
    # The queue key disappears once the job has been consumed.
    expect_during(1, false) { redis.exists(TEST_QUEUE) }
  end

  it 'successfully dequeues and process a complex job'

  context 'user interrupts process' do
    it 'finishes executing the current job before gracefully shutting down'
  end
end
|
@@ -0,0 +1,53 @@
|
|
1
|
+
require_relative '../spec_helper'

# Integration coverage for job enqueueing against a live local Redis.
describe 'enqueuing jobs to Redis' do
  let(:redis) { Redis.new }
  # Each example starts from an empty test queue.
  before { redis.del(TEST_QUEUE) }

  context 'successfully creates a job and enqueues it in Redis' do
    it 'enqueues a simple job in redis' do
      SimpleJobTest.new.perform_async('test job')
      serialized_job = redis.lpop(TEST_QUEUE)
      job_from_redis = Workerholic::JobSerializer.deserialize(serialized_job)

      expected_job = Workerholic::JobWrapper.new(class: SimpleJobTest, arguments: ['test job'])
      # Copy the timestamp over so the comparison ignores enqueue time.
      expected_job.statistics.enqueued_at = job_from_redis.statistics.enqueued_at

      expect(job_from_redis.to_hash).to eq(expected_job.to_hash)
    end

    it 'enqueues a complex job in redis' do
      ComplexJobTest.new.perform_async('test job', { a: 1, b: 2 }, [1, 2, 3])
      serialized_job = redis.lpop(TEST_QUEUE)
      job_from_redis = Workerholic::JobSerializer.deserialize(serialized_job)

      expected_job = Workerholic::JobWrapper.new(
        class: ComplexJobTest,
        arguments: ['test job', { a: 1, b: 2 }, [1, 2, 3]]
      )
      # Copy the timestamp over so the comparison ignores enqueue time.
      expected_job.statistics.enqueued_at = job_from_redis.statistics.enqueued_at

      expect(job_from_redis.to_hash).to eq(expected_job.to_hash)
    end

    it 'enqueues a job with the right statistics' do
      SimpleJobTest.new.perform_async('test_job')
      serialized_job = redis.lpop(TEST_QUEUE)
      job_from_redis = Workerholic::JobSerializer.deserialize(serialized_job)

      # enqueued_at is recorded as a Float epoch timestamp at enqueue time.
      expect(job_from_redis.statistics.enqueued_at).to be < Time.now.to_f
    end
  end

  context 'handles user errors' do
    it 'raises an error if Redis server is not running' do
      allow(Workerholic::Storage::RedisWrapper).to receive(:new).and_raise(Redis::CannotConnectError)

      expect { SimpleJobTest.new.perform_async('test job') }.to raise_error(Redis::CannotConnectError)
    end

    it 'raises an error when wrong number of arguments is specified to perform_async' do
      expect { SimpleJobTest.new.perform_async(1, 2, 3) }.to raise_error(ArgumentError)
    end
  end
end
|