rjob 0.4.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/bin/rjob +4 -0
- data/lib/rjob.rb +38 -0
- data/lib/rjob/cli.rb +51 -0
- data/lib/rjob/context.rb +109 -0
- data/lib/rjob/job.rb +54 -0
- data/lib/rjob/job_processor.rb +80 -0
- data/lib/rjob/recurring.rb +18 -0
- data/lib/rjob/recurring_job.rb +63 -0
- data/lib/rjob/scripts.rb +45 -0
- data/lib/rjob/scripts/check_leadership.rb +28 -0
- data/lib/rjob/scripts/enqueue_job.rb +21 -0
- data/lib/rjob/scripts/enqueue_scheduled_jobs.rb +38 -0
- data/lib/rjob/scripts/redis_script.rb +13 -0
- data/lib/rjob/scripts/retry_job.rb +27 -0
- data/lib/rjob/scripts/return_job_execution.rb +19 -0
- data/lib/rjob/scripts/scan_buckets.rb +23 -0
- data/lib/rjob/scripts/schedule_job_at.rb +21 -0
- data/lib/rjob/version.rb +4 -0
- data/lib/rjob/worker.rb +17 -0
- data/lib/rjob/worker_process.rb +387 -0
- metadata +166 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: 5f95e67c5a4c2304306b39db54fbd9a38a934a5d47eb78763db9bd1485181ebc
+  data.tar.gz: e69f59a709d82b346df5dcaf3037a864a7ad509898a0ea38de70f8b41611c477
+SHA512:
+  metadata.gz: 4be5f01c00b8f5b428366d1d049c2bda14ae76aba0ccaff38567e1d89433a534d513c1195fe01aaae5562f7499d70edb8b6431e124d31fbc385f0c67671034d2
+  data.tar.gz: 97b94969dc9a440386a25e36db30f1712fff4f11d2a5eddd42809926509f7d13713f9271ded055e84f3d2390e998d51a2080a38d4903178155fbdf1fe5dbb5b2
data/bin/rjob
ADDED
data/lib/rjob.rb
ADDED
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+require 'redis'
+require 'msgpack'
+
+require 'connection_pool'
+require 'concurrent-ruby'
+require 'socket'
+require 'securerandom'
+
+require 'rjob/version'
+require 'rjob/context'
+
+require 'rjob/worker'
+
+require 'rjob/job'
+require 'rjob/job_processor'
+
+require 'rjob/scripts'
+
+module Rjob
+  def self.configure(&block)
+    ::Rjob::Context.configure(&block)
+  end
+
+  def self.enqueue(job_class, *args)
+    ::Rjob::Context.instance.enqueue_job(job_class, args)
+  end
+
+  def self.schedule_in(seconds_from_now, job_class, *args)
+    t = Time.now.to_i + seconds_from_now
+    ::Rjob::Context.instance.schedule_job_at(t, job_class, args)
+  end
+
+  def self.schedule_at(timestamp, job_class, *args)
+    ::Rjob::Context.instance.schedule_job_at(timestamp.to_i, job_class, args)
+  end
+end
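
For orientation, a minimal usage sketch of the top-level API defined above. HardWork is a hypothetical application-defined worker class, and the Redis URL is an assumption:

    # Hypothetical application code exercising Rjob's top-level API.
    require 'rjob'

    Rjob.configure do |config|
      config[:redis] = { url: 'redis://localhost:6379/0' } # passed to Redis.new
      config[:prefix] = 'myapp'
    end

    Rjob.enqueue(HardWork, 'report-42')                       # run as soon as possible
    Rjob.schedule_in(300, HardWork, 'report-42')              # run ~5 minutes from now
    Rjob.schedule_at(Time.now + 3600, HardWork, 'report-42')  # run at a specific time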
data/lib/rjob/cli.rb
ADDED
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+module Rjob
+end
+
+class Rjob::CLI
+  def self.boot; new.boot(ARGV); end
+
+  def initialize
+    @use_rails = false
+    @run_workers = false
+  end
+
+  def boot(args)
+    STDOUT.sync = true
+    STDERR.sync = true
+
+    parse_cli_args(args)
+
+    if @use_rails
+      require File.join(Dir.pwd, "config/environment")
+    end
+
+    run_workers if @run_workers
+  end
+
+  def run_workers
+    require "rjob"
+    require "rjob/worker_process"
+
+    worker = Rjob::WorkerProcess.new(Rjob::Context.instance)
+    worker.run_forever
+  end
+
+  private
+
+  def parse_cli_args(args)
+    while args.length > 0 do
+      arg = args.shift
+      if arg == "--use-rails"
+        @use_rails = true
+      elsif arg == "--run-workers"
+        @run_workers = true
+      else
+        puts "Unrecognized argument: #{arg}"
+        puts "Exiting now"
+        exit 1
+      end
+    end
+  end
+end
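
The contents of data/bin/rjob are elided in this diff. Given the class above, a plausible entry point would be the following sketch; this is an assumption, not the shipped script:

    #!/usr/bin/env ruby
    # Hypothetical bin/rjob entry point (the actual file is not shown in this diff).
    require 'rjob/cli'

    Rjob::CLI.boot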
data/lib/rjob/context.rb
ADDED
@@ -0,0 +1,109 @@
+# frozen_string_literal: true
+
+class Rjob::Context
+  attr_reader :config
+  attr_reader :prefix
+  attr_reader :bucket_count
+  attr_reader :logger
+  attr_reader :job_wrapper_proc
+  attr_reader :script_runner
+  attr_reader :recurring_jobs
+
+  def self.instance
+    return @instance if @instance
+    raise "Rjob is not configured. Please call Rjob.configure first"
+  end
+
+  def self.set_instance(instance)
+    @instance = instance
+  end
+
+  # Available options:
+  #
+  # :redis - (passed to Redis.new)
+  # :max_threads - parallelism
+  # :bucket_count - defaults to 32
+  # :redis_pool_size - redis connection pool size. Defaults to 10
+  # :prefix - defaults to "rjob"
+  # :job_wrapper_proc - defaults to none
+  #
+  def self.configure
+    raise "Already configured!: #{@instance}" if @instance
+    config = {}
+    yield(config)
+    set_instance(new(config))
+  end
+
+  def initialize(config)
+    @config = config.dup
+    @pool_size = @config.fetch(:redis_pool_size, 10)
+
+    @bucket_count = config.fetch(:bucket_count, 32)
+    @prefix = config.fetch(:prefix, 'rjob')
+    @logger = config[:logger]
+    @job_wrapper_proc = config[:job_wrapper_proc]
+    @script_runner = Rjob::Scripts::ScriptRunner.new
+    @recurring_jobs = nil
+
+    if config.key?(:recurring_jobs)
+      require "rjob/recurring"
+
+      @recurring_jobs = config[:recurring_jobs].map do |defn|
+        Rjob::RecurringJob.from_definition(self, defn)
+      end
+    end
+
+    initialize_connection_pool
+    load_redis_scripts
+  end
+
+  def redis(&block)
+    @pool.with(&block)
+  end
+
+  def enqueue_job(job_class, args)
+    redis(&method(:enqueue_job_with_redis).curry[job_class, args])
+  end
+
+  def enqueue_job_with_redis(job_class, args, r)
+    job_data = MessagePack.pack([job_class.to_s, args])
+    @script_runner.exec(r, :enqueue_job, [], [@prefix, @bucket_count, job_data])
+  end
+
+  def schedule_job_at(timestamp, job_class, args)
+    job_data = MessagePack.pack([job_class.to_s, args])
+
+    redis do |r|
+      @script_runner.exec(r, :schedule_job_at, [], [timestamp.to_s, job_data, @prefix, @bucket_count])
+    end
+  end
+
+  def fetch_worker_class(class_name:)
+    demodularize_class(class_name)
+  end
+
+  def demodularize_class(name)
+    const = Kernel
+    name.split('::').each do |n|
+      const = const.const_get(n)
+    end
+    const
+  end
+
+  def create_redis_connection
+    redis_args = @config[:redis]
+    Redis.new(redis_args)
+  end
+
+  private
+
+  def load_redis_scripts
+    @pool.with do |redis|
+      @script_runner.load_all_scripts(redis)
+    end
+  end
+
+  def initialize_connection_pool
+    @pool = ConnectionPool.new(size: @pool_size) { create_redis_connection }
+  end
+end
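
A configuration sketch exercising the options documented in the comment above. Values are illustrative assumptions; only the defaults named in that comment come from the source:

    require 'logger'

    Rjob.configure do |config|
      config[:redis] = { url: 'redis://localhost:6379/0' } # handed to Redis.new
      config[:max_threads] = 8                             # worker parallelism
      config[:bucket_count] = 32                           # default
      config[:redis_pool_size] = 10                        # default
      config[:prefix] = 'rjob'                             # default
      config[:logger] = Logger.new($stdout)
      # Wraps every job execution; receives a callable for the job body:
      config[:job_wrapper_proc] = proc { |job| job.call }
    end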
data/lib/rjob/job.rb
ADDED
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+class Rjob::Job
+  DeserializationError = Class.new(StandardError)
+
+  attr_accessor :id
+  attr_accessor :retry_num
+  attr_reader :payload
+  attr_reader :context
+
+  def initialize(context)
+    @context = context
+  end
+
+  def worker_class_name
+    @deserialized_payload[0]
+  end
+
+  def worker_class
+    @context.fetch_worker_class(class_name: worker_class_name)
+  end
+
+  def worker_args
+    @deserialized_payload[1]
+  end
+
+  def payload=(str)
+    @payload = str
+    @deserialized_payload = MessagePack.unpack(str)
+  end
+
+  def serialize
+    "#{@id}!#{@retry_num}!#{@payload}".force_encoding(Encoding::ASCII_8BIT)
+  end
+
+  def self.deserialize(context, job_str)
+    first = job_str.index('!')
+    second = job_str.index('!', first + 1)
+
+    if first == nil || second == nil
+      raise DeserializationError.new("Malformed job string: '#{job_str}'")
+    end
+
+    begin
+      new(context).tap do |job|
+        job.id = job_str[0...first]
+        job.retry_num = job_str[(first + 1)...second].to_i
+        job.payload = job_str[(second + 1)..-1]
+      end
+    rescue MessagePack::MalformedFormatError => e
+      raise DeserializationError.new("Malformed job msgpack payload: #{e.message}")
+    end
+  end
+end
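
The wire format is therefore "<id>!<retry_num>!<msgpack payload>", where the payload packs [worker_class_name, args]. A round-trip sketch, assuming a configured Rjob::Context named context and a hypothetical HardWork class:

    payload = MessagePack.pack(['HardWork', ['report-42']])
    job_str = '1451!0!'.b + payload  # msgpack output is binary, so concatenate as bytes

    job = Rjob::Job.deserialize(context, job_str)
    job.id                # => "1451"
    job.retry_num         # => 0
    job.worker_class_name # => "HardWork"
    job.worker_args       # => ["report-42"]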
data/lib/rjob/job_processor.rb
ADDED
@@ -0,0 +1,80 @@
+# frozen_string_literal: true
+
+# Processes one single job.
+class Rjob::JobProcessor
+  attr_reader :context
+  attr_reader :error
+  attr_reader :job_str
+  attr_reader :job
+
+  def initialize(context, job_str)
+    @context = context
+    @job_str = job_str
+    @error = nil
+    @force_dont_retry = false
+    @success = false
+  end
+
+  def success?
+    @success
+  end
+
+  def stop_retry?
+    @force_dont_retry
+  end
+
+  def run
+    job = Rjob::Job.deserialize(@context, @job_str)
+    @job = job
+
+    job_args = job.worker_args
+
+    worker_class = begin
+      job.worker_class
+    rescue NameError
+      @error = { message: "No worker class '#{job.worker_class_name}'" }
+      @force_dont_retry = true
+      return
+    end
+
+    begin
+      worker_instance = worker_class.new(@context, job)
+      worker_instance.perform(*job_args)
+      @success = true
+    rescue Exception => e
+      @error = { error_class: e.class, message: e.message }
+    end
+  end
+end
+
+# @thread_pool.post do
+#   begin
+#     klass, args = MessagePack.unpack(job_data)
+#     ::Rjob::WorkerThread.new(self, klass, args).run
+#   rescue Exception => e
+#     @failed_count.increment
+#     handle_failed_job(job, bucket, e, klass, args)
+#   ensure
+#     @processed_count.increment
+#   end
+# end
+# rescue Rjob::Job::DeserializationError => e
+#   @error = { exception: e }
+# end
+# end
+
+#
+# class Rjob::WorkerThread
+#   def initialize(worker, klass, args)
+#     @worker = worker
+#     @context = @worker.context
+#     @prefix = @context.prefix
+#     @job_class, @job_args = klass, args
+#   end
+#
+#   def run
+#     klass = @context.demodularize_class(@job_class)
+#     klass.perform(@job_args)
+#   end
+# end
+#
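
The processor implies a contract for worker classes: they are constructed with (context, job), expose an instance method perform, and (as handle_job_processing_failure in worker_process.rb below shows) respond to a class-level retry_options. A hypothetical worker satisfying that contract:

    class HardWork
      # Retry policy read on failure; keys inferred from
      # handle_job_processing_failure in worker_process.rb below.
      def self.retry_options
        {
          retry: true,
          exceptions: [StandardError],                   # error classes worth retrying
          max_retries: 16,
          next_retry_proc: proc { |n| 3 * n ** 4 + 15 }, # seconds until retry n
        }
      end

      def initialize(context, job)
        @context = context
        @job = job
      end

      def perform(report_name)
        puts "building #{report_name}"
      end
    end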
data/lib/rjob/recurring.rb
ADDED
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+# This file just loads dependencies needed for running
+# recurring jobs
+
+require 'openssl'
+require 'time'
+require 'date'
+
+begin
+  require 'fugit'
+rescue LoadError => e
+  puts("The gem 'fugit' is required when recurring_jobs config is set for Rjob")
+  raise(e)
+end
+
+require 'rjob'
+require 'rjob/recurring_job'
data/lib/rjob/recurring_job.rb
ADDED
@@ -0,0 +1,63 @@
+# frozen_string_literal: true
+
+class Rjob::RecurringJob
+  attr_reader :context
+  attr_reader :cron
+  attr_reader :job_class_name
+  attr_reader :job_arguments
+  attr_reader :unique_id
+
+  def initialize(context, cron, job_class_name, job_arguments, unique_id=nil)
+    @context = context
+    @cron = cron
+    @job_class_name = job_class_name
+    @job_class = nil
+    @job_arguments = job_arguments
+
+    @unique_id = unique_id
+
+    generate_unique_id! unless @unique_id
+  end
+
+  def maybe_enqueue(redis)
+    key_name = "#{@context.prefix}:recurring:1:#{@unique_id}:lastrun"
+    current_time = Time.now
+
+    last_run_str = redis.get(key_name)
+    last_run = last_run_str ? Time.parse(last_run_str) : (current_time - 1)
+
+    next_run_on = @cron.next_time(last_run)
+    should_run = (current_time >= next_run_on.to_t)
+
+    @context.enqueue_job_with_redis(job_class, job_arguments, redis) if should_run
+
+    if should_run || last_run_str == nil
+      redis.set(key_name, current_time.utc.to_s, ex: @cron.rough_frequency * 2)
+    end
+  end
+
+  def job_class
+    @job_class ||= @context.demodularize_class(@job_class_name)
+  end
+
+  def self.from_definition(context, defn)
+    new(
+      context,
+      Fugit.parse(defn[:cron]),
+      defn[:job_class].to_s,
+      defn[:arguments],
+      defn[:unique_id]
+    )
+  end
+
+  private
+
+  def generate_unique_id!
+    digest = ::OpenSSL::Digest.new('sha256')
+    digest << @job_class_name
+    digest << @cron.original
+    @job_arguments.each { |x| digest << x.to_s }
+
+    @unique_id = digest.digest
+  end
+end
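
The definition hash consumed by from_definition has the shape below (keys inferred from the reader above; the cron string follows fugit syntax). It is supplied through the :recurring_jobs config key handled in context.rb; CleanupWorker is hypothetical:

    Rjob.configure do |config|
      config[:redis] = { url: 'redis://localhost:6379/0' }
      config[:recurring_jobs] = [
        {
          cron: '*/15 * * * *',        # parsed with Fugit.parse
          job_class: 'CleanupWorker',  # hypothetical application worker
          arguments: ['tmp'],
          # unique_id: 'cleanup-tmp',  # optional; a SHA-256 digest is derived when omitted
        },
      ]
    end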
data/lib/rjob/scripts.rb
ADDED
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+module Rjob::Scripts
+  SCRIPTS = {
+    check_leadership: 'CheckLeadership',
+    enqueue_job: 'EnqueueJob',
+    schedule_job_at: 'ScheduleJobAt',
+    scan_buckets: 'ScanBuckets',
+    retry_job: 'RetryJob',
+    return_job_execution: 'ReturnJobExecution',
+    enqueue_scheduled_jobs: 'EnqueueScheduledJobs',
+  }.freeze
+
+  class ScriptRunner
+    def initialize
+      @scripts = {}
+    end
+
+    def load_all_scripts(redis)
+      SCRIPTS.each do |file_name, class_name|
+        klass = Rjob::Scripts.const_get(class_name)
+        script = klass.new
+        @scripts[file_name] = script
+        load_script(redis, script)
+      end
+    end
+
+    def exec(redis, name, *args)
+      script = @scripts[name]
+      redis.evalsha(script.sha1, *args)
+    end
+
+    private
+
+    def load_script(redis, script)
+      script.sha1 = redis.script(:load, script.lua_script)
+    end
+  end
+end
+
+require 'rjob/scripts/redis_script'
+
+Rjob::Scripts::SCRIPTS.each do |file_name, class_name|
+  require "rjob/scripts/#{file_name}"
+end
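
ScriptRunner#exec passes its trailing arguments straight through to EVALSHA, so callers supply the KEYS array and the ARGV array separately, mirroring the key_params/arg_params each script declares. A sketch, assuming a configured context and a hypothetical HardWork class:

    job_data = MessagePack.pack(['HardWork', []])
    context = Rjob::Context.instance
    context.redis do |r|
      # Equivalent to: EVALSHA <sha1> 0 rjob 32 <job_data>
      context.script_runner.exec(r, :enqueue_job, [], ['rjob', 32, job_data])
    end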
data/lib/rjob/scripts/check_leadership.rb
ADDED
@@ -0,0 +1,28 @@
+# frozen_string_literal: true
+
+class Rjob::Scripts::CheckLeadership < Rjob::Scripts::RedisScript
+  def arg_params
+    %i(worker_name time_now prefix heartbeat_timeout)
+  end
+
+  def lua_script
+    <<~LUA
+      local worker_name = ARGV[1]
+      local time_now = ARGV[2]
+      local prefix = ARGV[3]
+      local heartbeat_timeout = tonumber(ARGV[4])
+      local r = redis
+      if r.call('setnx', prefix .. ':leaderworker', worker_name) == 1 then
+        return worker_name
+      else
+        local leader = r.call('get', prefix .. ':leaderworker')
+        local last_hb = tonumber(r.call('hget', prefix .. ':worker:' .. leader, 'heartbeat'))
+        if last_hb == nil or time_now - last_hb > heartbeat_timeout then
+          r.call('set', prefix .. ':leaderworker', worker_name)
+          return worker_name
+        end
+        return leader
+      end
+    LUA
+  end
+end
data/lib/rjob/scripts/enqueue_job.rb
ADDED
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+class Rjob::Scripts::EnqueueJob < Rjob::Scripts::RedisScript
+  def arg_params
+    %i(prefix bucket_count job_data)
+  end
+
+  def lua_script
+    <<~LUA
+      local prefix = ARGV[1]
+      local bucket_count = tonumber(ARGV[2])
+      local job_data = ARGV[3]
+      local r = redis
+      local job_id = r.call('incr', prefix .. ':next')
+      local bucket = job_id % bucket_count
+      r.call('lpush', prefix .. ':jobs:' .. bucket, job_id .. '!0!' .. job_data)
+      r.call('publish', prefix .. ':jobs', bucket)
+      return job_id
+    LUA
+  end
+end
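
A worked trace of the script above, assuming prefix 'rjob', 32 buckets, and a counter currently at 1450:

    job_id = 1451          # INCR rjob:next
    bucket = job_id % 32   # => 11
    # LPUSH   rjob:jobs:11  "1451!0!<msgpack job data>"
    # PUBLISH rjob:jobs     11   (wakes workers subscribed to the jobs channel)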
data/lib/rjob/scripts/enqueue_scheduled_jobs.rb
ADDED
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+class Rjob::Scripts::EnqueueScheduledJobs < Rjob::Scripts::RedisScript
+  def arg_params
+    %i(time_now job_limit bucket_no)
+  end
+
+  def key_params
+    %i(scheduled_key dest_key jobs_key)
+  end
+
+  def lua_script
+    <<~LUA
+      local r = redis
+      local time_now = ARGV[1]
+      local job_limit = ARGV[2]
+      local bucket_no = ARGV[3]
+
+      local scheduled_key = KEYS[1]
+      local dest_key = KEYS[2]
+      local jobs_key = KEYS[3]
+
+      local jobs = r.call('zrangebyscore', scheduled_key, 0, time_now, 'limit', 0, job_limit)
+      if #jobs == 0 then
+        return 0
+      end
+
+      local i
+      for i=1, #jobs do
+        r.call('lpush', dest_key, jobs[i])
+      end
+      r.call('zrem', scheduled_key, unpack(jobs))
+      r.call('publish', jobs_key, bucket_no)
+
+      return #jobs
+    LUA
+  end
+end
data/lib/rjob/scripts/retry_job.rb
ADDED
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+class Rjob::Scripts::RetryJob < Rjob::Scripts::RedisScript
+  def arg_params
+    %i(next_retry_at retry_num bucket job_id job_payload prefix)
+  end
+
+  def lua_script
+    <<~LUA
+      local timestamp = ARGV[1]
+      local retry_num = ARGV[2]
+      local bucket = ARGV[3]
+      local job_id = ARGV[4]
+      local job_payload = ARGV[5]
+      local prefix = ARGV[6]
+      local r = redis
+
+      local curr_job = job_id .. '!' .. retry_num .. '!' .. job_payload
+      local new_job = job_id .. '!' .. (retry_num + 1) .. '!' .. job_payload
+
+      r.call('lrem', prefix .. ':jobs:' .. bucket .. ':working', 1, curr_job)
+      r.call('zadd', prefix .. ':scheduled:' .. bucket, timestamp, new_job)
+
+      return job_id
+    LUA
+  end
+end
data/lib/rjob/scripts/return_job_execution.rb
ADDED
@@ -0,0 +1,19 @@
+# frozen_string_literal: true
+
+class Rjob::Scripts::ReturnJobExecution < Rjob::Scripts::RedisScript
+  def arg_params
+    %i(job bucket prefix)
+  end
+
+  def lua_script
+    <<~LUA
+      local job = ARGV[1]
+      local bucket = ARGV[2]
+      local prefix = ARGV[3]
+      local r = redis
+      r.call('lrem', prefix .. ':jobs:' .. bucket .. ':working', 1, job)
+      r.call('rpush', prefix .. ':jobs:' .. bucket, job)
+      return 1
+    LUA
+  end
+end
data/lib/rjob/scripts/scan_buckets.rb
ADDED
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+class Rjob::Scripts::ScanBuckets < Rjob::Scripts::RedisScript
+  def arg_params
+    %i(prefix bucket_count)
+  end
+
+  def lua_script
+    <<~LUA
+      local prefix = ARGV[1]
+      local bucket_count = ARGV[2]
+      local r = redis
+      local i
+      for i=0,bucket_count-1 do
+        local len = r.call('llen', prefix .. ':jobs:' .. i)
+        if len > 0 then
+          r.call('publish', prefix .. ':jobs', i)
+        end
+      end
+      return 1
+    LUA
+  end
+end
data/lib/rjob/scripts/schedule_job_at.rb
ADDED
@@ -0,0 +1,21 @@
+# frozen_string_literal: true
+
+class Rjob::Scripts::ScheduleJobAt < Rjob::Scripts::RedisScript
+  def arg_params
+    %i(timestamp job prefix bucket_count)
+  end
+
+  def lua_script
+    <<~LUA
+      local timestamp = ARGV[1]
+      local job = ARGV[2]
+      local prefix = ARGV[3]
+      local bucket_count = tonumber(ARGV[4])
+      local r = redis
+      local job_id = r.call('incr', prefix .. ':next')
+      local bucket = job_id % bucket_count
+      r.call('zadd', prefix .. ':scheduled:' .. bucket, timestamp, job_id .. '!0!' .. job)
+      return job_id
+    LUA
+  end
+end
data/lib/rjob/version.rb
ADDED
data/lib/rjob/worker.rb
ADDED
data/lib/rjob/worker_process.rb
ADDED
@@ -0,0 +1,387 @@
+# frozen_string_literal: true
+
+# TODO: find a mechanism to recover from jobs that went to working but never returned
+
+class Rjob::WorkerProcess
+  ITERATION_TIMEOUT = 2
+  HEARTBEAT_TIMEOUT = 15
+
+  StopSubscription = Class.new(StandardError)
+
+  attr_reader :context
+  attr_reader :worker_name
+  attr_reader :state
+  attr_reader :leader
+
+  def initialize(context)
+    @context = context
+    @prefix = @context.prefix
+    @pubsub_redis = @context.create_redis_connection
+
+    init_worker_name
+
+    @iteration_no = 0
+    @max_queue_size = 20
+    max_threads = @context.config.fetch(:max_threads, 2)
+
+    @subscription_thread = nil
+    @thread_pool = Concurrent::ThreadPoolExecutor.new(
+      min_threads: [2, max_threads].min,
+      max_threads: max_threads,
+      max_queue: @max_queue_size,
+      fallback_policy: :abort # Concurrent::RejectedExecutionError
+    )
+
+    @processed_count = Concurrent::AtomicFixnum.new
+    @failed_count = Concurrent::AtomicFixnum.new
+    @returned_count = Concurrent::AtomicFixnum.new
+
+    @leader = nil
+    @state = :new
+  end
+
+  def run_forever
+    register_worker
+
+    Signal.trap("INT") do
+      if @state == :exiting
+        puts "Force exit requested. Exiting immediately"
+        exit 1
+      else
+        @state = :exiting
+        puts "Exiting..."
+      end
+    end
+
+    @state = :running
+    loop do
+      break if @state == :exited
+      run_iteration
+    end
+  ensure
+    unregister_worker
+  end
+
+  private
+
+  def disable_subscription_thread
+    return unless @subscription_thread
+    @subscription_thread.raise(StopSubscription.new)
+    @subscription_thread = nil
+  end
+
+  def enable_subscription_thread
+    return if @subscription_thread
+
+    @subscription_thread = Thread.new do
+      begin
+        @pubsub_redis.subscribe("#{@prefix}:jobs") do |on|
+          on.message do |_, bucket_no|
+            loop do
+              break unless @state == :running
+              break unless start_processing_message_from_bucket(bucket_no)
+            end
+          end
+        end
+      rescue StopSubscription => e
+        @pubsub_redis.disconnect rescue nil
+      rescue StandardError => e
+        puts "staaahp -> #{e}"
+        raise e
+        exit 1
+      end
+    end
+    @subscription_thread.run
+  end
+
+  def run_iteration
+    begin
+      stop_threshold = (@max_queue_size * 0.7).to_i
+      if @thread_pool.queue_length >= stop_threshold || @state != :running
+        disable_subscription_thread
+      elsif @state == :running
+        if !@subscription_thread
+          enable_subscription_thread
+          sleep(ITERATION_TIMEOUT)
+          scan_buckets
+        end
+      end
+
+      if @state == :exiting
+        if @thread_pool.shutdown?
+          @state = :exited
+        elsif !@thread_pool.shuttingdown?
+          @thread_pool.shutdown
+        else
+          puts "Waiting shutdown..."
+        end
+      end
+
+      report_stats
+
+      check_leadership
+
+      if leader? && @state == :running
+        exercise_leadership if @iteration_no % 2 == 0
+      end
+
+      @iteration_no += 1
+      sleep(ITERATION_TIMEOUT) unless @state == :exited
+    rescue StandardError => e
+      raise e
+    end
+  end
+
+  def check_leadership
+    @context.redis do |r|
+      if leader? && @state == :exiting
+        r.call('del', "#{@prefix}:leaderworker")
+        return
+      end
+
+      @leader = @context.script_runner.exec(r, :check_leadership,
+        [], [
+          @worker_name,
+          Time.now.to_i,
+          @prefix,
+          HEARTBEAT_TIMEOUT
+        ])
+    end
+  end
+
+  def leader?
+    @leader && @leader == @worker_name
+  end
+
+  def report_stats
+    key_prefix = "#{@prefix}:worker:#{@worker_name}"
+    state_data = {
+      heartbeat: Time.now.to_i,
+      queue_length: @thread_pool.queue_length,
+      processed: @processed_count.value,
+      failed: @failed_count.value,
+      returned: @returned_count.value,
+      state: @state
+    }
+
+    @context.redis do |r|
+      r.pipelined do
+        state_data.each do |k, v|
+          r.hset(key_prefix, k, v.to_s)
+        end
+      end
+    end
+  end
+
+  def scan_buckets
+    @context.redis do |r|
+      @context.script_runner.exec(r, :scan_buckets, [], [@prefix, @context.bucket_count])
+    end
+  end
+
+  def start_processing_message_from_bucket(bucket)
+    job_str = @context.redis do |r|
+      r.rpoplpush("#{@prefix}:jobs:#{bucket}", "#{@prefix}:jobs:#{bucket}:working")
+    end
+
+    return false if job_str == nil
+
+    # move to inside thread
+    job_processor = Rjob::JobProcessor.new(context, job_str)
+
+    begin
+      @thread_pool.post do
+
+        using_app_wrapper do
+          job_processor.run
+        end
+
+        if !job_processor.success?
+          @failed_count.increment
+          handle_job_processing_failure(bucket, job_processor)
+        else
+          remove_job_from_working(job_str, bucket)
+        end
+      end
+    rescue Concurrent::RejectedExecutionError => e
+      @returned_count.increment
+      return_job_execution(job_str, bucket)
+    ensure
+      @processed_count.increment
+    end
+  end
+
+  def remove_job_from_working(job_str, bucket)
+    @context.redis do |r|
+      r.lrem("#{@prefix}:jobs:#{bucket}:working", 1, job_str)
+    end
+  end
+
+  def retry_job(job, bucket, next_retry_at)
+    @context.redis do |r|
+      @context.script_runner.exec(r, :retry_job, [],
+        [
+          next_retry_at.to_s,
+          job.retry_num,
+          bucket,
+          job.id.to_s,
+          job.payload,
+          @prefix
+        ])
+    end
+  end
+
+  def handle_job_processing_failure(bucket, job_processor)
+    job = job_processor.job
+    error = job_processor.error
+
+    if !error
+      error = { message: "Unknown error" }
+    end
+
+    if @context.logger.respond_to?(:info)
+      @context.logger.info("Job '#{job.worker_class_name}' with args '#{job.worker_args}' failed: #{error}")
+    end
+
+    if job_processor.stop_retry?
+      move_job_to_dead(job_processor.job_str, bucket, error)
+      return
+    end
+
+    retry_options = job.worker_class.retry_options
+
+    if retry_options[:retry]
+      exceptions = retry_options.fetch(:exceptions, [StandardError])
+      should_handle = exceptions.any? { |e| e >= error[:error_class] }
+
+      retry_proc = retry_options[:next_retry_proc] || (proc { |x| 3 * x ** 4 + 15 })
+      max_retries = retry_options.fetch(:max_retries, 16) # retry for ~2 days
+
+      new_retry_num = job.retry_num + 1
+
+      if should_handle && new_retry_num <= max_retries
+        next_retry_at = Time.now.to_i + retry_proc.call(new_retry_num)
+        retry_job(job, bucket, next_retry_at)
+        return
+      end
+    end
+
+    move_job_to_dead(job_processor.job_str, bucket, error)
+  end
+
+  # TODO: this should probably be in a single redis pipelined operation
+  def move_job_to_dead(job_str, bucket, error)
+    push_job_to_dead(job_str, bucket, error)
+    remove_job_from_working(job_str, bucket)
+  end
+
+  def push_job_to_dead(job_str, bucket, error)
+    error_payload = MessagePack.pack({
+      when: Time.now.to_i,
+      error_class: error[:error_class].to_s,
+      full_message: error[:message],
+      job: job_str
+    })
+
+    @context.redis do |r|
+      r.lpush("#{@prefix}:dead", error_payload)
+    end
+  end
+
+  # When a job previously went to working state and we want to
+  # put it back (re-enqueue it).
+  #
+  # This mostly happens when we picked a job for processing but realized
+  # that we don't actually have the resources to process it at the moment.
+  def return_job_execution(job, bucket)
+    @context.redis do |r|
+      @context.script_runner.exec(r, :return_job_execution, [], [job, bucket, @prefix])
+    end
+  end
+
+  def register_worker
+    report_stats
+
+    @context.redis do |r|
+      r.lpush("#{@prefix}:workers", @worker_name)
+    end
+  end
+
+  def unregister_worker
+    @context.redis do |r|
+      r.lrem("#{@prefix}:workers", 1, @worker_name)
+      r.del("#{@prefix}:worker:#{@worker_name}")
+    end
+  end
+
+  def init_worker_name
+    host = Socket.gethostname
+    rand_factor = SecureRandom.alphanumeric(24)
+    @worker_name = [host, rand_factor].join('-')
+  end
+
+  def exercise_leadership
+    enqueue_scheduled_jobs
+
+    scan_buckets
+
+    enqueue_recurring_jobs
+  end
+
+  def enqueue_recurring_jobs
+    recurring_jobs = @context.recurring_jobs
+    return unless recurring_jobs
+
+    # Make sure all classes are loaded without error
+    recurring_jobs.each(&:job_class)
+
+    @context.redis do |redis|
+      recurring_jobs.each do |rj|
+        rj.maybe_enqueue(redis)
+      end
+    end
+  end
+
+  def enqueue_scheduled_jobs
+    time_now = Time.now.to_i
+    job_limit = 100
+
+    # Let's not be caught in an infinite loop. Thus, loop max 10 times
+    10.times do
+      re_run = false
+
+      @context.redis do |r|
+        (0...@context.bucket_count).each do |bucket|
+          num_jobs = @context.script_runner.exec(r, :enqueue_scheduled_jobs,
+            [
+              "#{@prefix}:scheduled:#{bucket}",
+              "#{@prefix}:jobs:#{bucket}",
+              "#{@prefix}:jobs"
+            ], [
+              time_now, job_limit, bucket
+            ])
+
+          re_run = true if num_jobs == job_limit
+        end
+      end
+
+      break unless re_run
+    end
+  end
+
+  def using_app_wrapper(&blk)
+    call_block = if @context.job_wrapper_proc != nil
+      proc do
+        @context.job_wrapper_proc.call(blk)
+      end
+    else
+      blk
+    end
+
+    if defined?(::Rails)
+      ::Rails.application.executor.wrap(&call_block)
+    else
+      call_block.call
+    end
+  end
+end
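
With the default next_retry_proc above, the delay before retry n is 3n**4 + 15 seconds. A quick sketch of the schedule it produces for the default 16 retries:

    retry_proc = proc { |n| 3 * n ** 4 + 15 }
    (1..16).map { |n| retry_proc.call(n) }
    # => [18, 63, 258, 783, 1890, 3903, 7218, 12303, 19698, 30015,
    #     43938, 62223, 85698, 115263, 151890, 196623]
    # The gap before the 16th (default max) retry alone is 196623 seconds, about 2.3 days.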
metadata
ADDED
@@ -0,0 +1,166 @@
+--- !ruby/object:Gem::Specification
+name: rjob
+version: !ruby/object:Gem::Version
+  version: 0.4.3
+platform: ruby
+authors:
+- André D. Piske
+autorequire:
+bindir: bin
+cert_chain: []
+date: 2021-03-27 00:00:00.000000000 Z
+dependencies:
+- !ruby/object:Gem::Dependency
+  name: oj
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '4'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '4'
+- !ruby/object:Gem::Dependency
+  name: multi_json
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '2'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '2'
+- !ruby/object:Gem::Dependency
+  name: redis
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '4.1'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '4.1'
+- !ruby/object:Gem::Dependency
+  name: msgpack
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.3'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.3'
+- !ruby/object:Gem::Dependency
+  name: connection_pool
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 2.2.2
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '3'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 2.2.2
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '3'
+- !ruby/object:Gem::Dependency
+  name: concurrent-ruby
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 1.1.6
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 1.1.6
+- !ruby/object:Gem::Dependency
+  name: rake
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '13.0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '13.0'
+description: 'RJob: asynchronous job processing'
+email: andrepiske@gmail.com
+executables:
+- rjob
+extensions: []
+extra_rdoc_files: []
+files:
+- bin/rjob
+- lib/rjob.rb
+- lib/rjob/cli.rb
+- lib/rjob/context.rb
+- lib/rjob/job.rb
+- lib/rjob/job_processor.rb
+- lib/rjob/recurring.rb
+- lib/rjob/recurring_job.rb
+- lib/rjob/scripts.rb
+- lib/rjob/scripts/check_leadership.rb
+- lib/rjob/scripts/enqueue_job.rb
+- lib/rjob/scripts/enqueue_scheduled_jobs.rb
+- lib/rjob/scripts/redis_script.rb
+- lib/rjob/scripts/retry_job.rb
+- lib/rjob/scripts/return_job_execution.rb
+- lib/rjob/scripts/scan_buckets.rb
+- lib/rjob/scripts/schedule_job_at.rb
+- lib/rjob/version.rb
+- lib/rjob/worker.rb
+- lib/rjob/worker_process.rb
+homepage: https://gitlab.com/andrepiske/rjob
+licenses: []
+metadata: {}
+post_install_message:
+rdoc_options: []
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubygems_version: 3.2.15
+signing_key:
+specification_version: 4
+summary: Asynchronous job processing
+test_files: []