jobba 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +10 -0
- data/.rspec +2 -0
- data/.ruby-gemset +1 -0
- data/.ruby-version +1 -0
- data/.travis.yml +15 -0
- data/Gemfile +4 -0
- data/LICENSE.txt +21 -0
- data/README.md +377 -0
- data/Rakefile +6 -0
- data/bin/console +14 -0
- data/bin/setup +7 -0
- data/jobba.gemspec +30 -0
- data/lib/jobba/clause.rb +44 -0
- data/lib/jobba/clause_factory.rb +82 -0
- data/lib/jobba/common.rb +17 -0
- data/lib/jobba/configuration.rb +10 -0
- data/lib/jobba/exceptions.rb +5 -0
- data/lib/jobba/query.rb +84 -0
- data/lib/jobba/state.rb +59 -0
- data/lib/jobba/status.rb +304 -0
- data/lib/jobba/statuses.rb +74 -0
- data/lib/jobba/time.rb +21 -0
- data/lib/jobba/utils.rb +25 -0
- data/lib/jobba/version.rb +3 -0
- data/lib/jobba.rb +44 -0
- metadata +167 -0
data/lib/jobba/query.rb
ADDED
@@ -0,0 +1,84 @@
```ruby
require 'jobba/clause'
require 'jobba/clause_factory'

class Jobba::Query

  include Jobba::Common

  def where(options)
    options.each do |kk,vv|
      clauses.push(Jobba::ClauseFactory.new_clause(kk,vv))
    end

    self
  end

  def count
    run(&COUNT_STATUSES)
  end

  def empty?
    count == 0
  end

  # At the end of a chain of `where`s, the user will call methods that expect
  # to run on the result of the executed `where`s. So if we don't know what
  # the method is, execute the `where`s and pass the method to its output.

  def method_missing(method_name, *args)
    if Jobba::Statuses.instance_methods.include?(method_name)
      run(&GET_STATUSES).send(method_name, *args)
    else
      super
    end
  end

  def respond_to?(method_name)
    Jobba::Statuses.instance_methods.include?(method_name) || super
  end

  protected

  attr_accessor :clauses

  def initialize
    @clauses = []
  end

  GET_STATUSES = ->(working_set) {
    ids = Jobba.redis.zrange(working_set, 0, -1)
    Jobba::Statuses.new(ids)
  }

  COUNT_STATUSES = ->(working_set) {
    Jobba.redis.zcard(working_set)
  }

  def run(&working_set_block)

    # TODO PUT IN MULTI BLOCKS WHERE WE CAN!

    load_default_clause if clauses.empty?
    working_set = nil

    clauses.each_with_index do |clause, ii|
      clause_set = clause.to_new_set

      if working_set.nil?
        working_set = clause_set
      else
        redis.zinterstore(working_set, [working_set, clause_set], weights: [0, 0])
        redis.del(clause_set)
      end
    end

    working_set_block.call(working_set).tap do
      redis.del(working_set)
    end
  end

  def load_default_clause
    where(state: Jobba::State::ALL.collect(&:name))
  end

end
```
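For orientation, a minimal usage sketch (not taken from the gem itself): `where` clauses accumulate on the query and are only intersected in Redis when a terminal call runs. The accepted clause keys come from `jobba/clause_factory.rb`, which is not reproduced here, so the `state:` key below is the same one `load_default_clause` uses internally.

```ruby
# Hypothetical usage; assumes a reachable Redis behind Jobba.redis.
query = Jobba.where(state: ['succeeded', 'failed'])   # Jobba.where is defined in lib/jobba.rb below
query.count                                           # runs COUNT_STATUSES on the intersected working set
query.each { |status| puts status.id }                # method_missing delegates to a Jobba::Statuses
```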
data/lib/jobba/state.rb
ADDED
@@ -0,0 +1,59 @@
```ruby
class Jobba::State

  attr_reader :name, :timestamp_name

  def initialize(name, timestamp_name)
    @name = name
    @timestamp_name = timestamp_name
  end

  def self.from_name(state_name)
    ALL.select{|state| state.name == state_name}.first
  end

  UNQUEUED  = new('unqueued', 'recorded_at')
  QUEUED    = new('queued', 'queued_at')
  WORKING   = new('working', 'started_at')
  SUCCEEDED = new('succeeded', 'succeeded_at')
  FAILED    = new('failed', 'failed_at')
  KILLED    = new('killed', 'killed_at')
  UNKNOWN   = new('unknown', 'recorded_at')

  ALL = [
    UNQUEUED,
    QUEUED,
    WORKING,
    SUCCEEDED,
    FAILED,
    KILLED,
    UNKNOWN
  ].freeze

  COMPLETED = [
    SUCCEEDED,
    FAILED
  ].freeze

  INCOMPLETE = [
    UNQUEUED,
    QUEUED,
    WORKING,
    KILLED
  ].freeze

  ENTERABLE = [
    UNQUEUED,
    QUEUED,
    WORKING,
    SUCCEEDED,
    FAILED,
    KILLED,
    UNKNOWN
  ].freeze

end
```
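These constants double as Redis key names elsewhere in the diff: `state.name` is the membership set and `state.timestamp_name` the sorted set of entry times. A small sketch of how they are looked up:

```ruby
state = Jobba::State.from_name('succeeded')
state.timestamp_name                        # => "succeeded_at"
Jobba::State::COMPLETED.include?(state)     # => true
Jobba::State::INCOMPLETE.include?(state)    # => false
```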
data/lib/jobba/status.rb
ADDED
@@ -0,0 +1,304 @@
```ruby
require 'json'
require 'ostruct'

module Jobba
  class Status

    include Jobba::Common

    def self.create!
      create(state: State::UNQUEUED)
    end

    # Finds the job with the specified ID and returns it. If no such ID
    # exists in the store, returns a job with 'unknown' state and sets it
    # in the store
    def self.find!(id)
      find(id) || create(id: id)
    end

    # Finds the job with the specified ID and returns it. If no such ID
    # exists in the store, returns nil.
    def self.find(id)
      if (hash = raw_redis_hash(id))
        new(raw: hash)
      else
        nil
      end
    end

    def self.local_attrs
      %w(id state progress errors data kill_requested_at job_name job_args) +
        State::ALL.collect(&:timestamp_name)
    end

    def reload!
      @json_encoded_attrs = self.class.raw_redis_hash(id)
      self.class.local_attrs.each{|aa| send("#{aa}=",nil)}
      self
    end

    # If the attributes are nil, the attribute accessors lazily parse their values
    # from the JSON retrieved from redis. That way there's no parsing that isn't used.
    # As an extra step, convert state names into State objects.

    local_attrs.each do |attribute|
      class_eval <<-eoruby
        def #{attribute}
          @#{attribute} ||= load_from_json_encoded_attrs('#{attribute}')
        end

        protected

        attr_writer :#{attribute}
      eoruby
    end

    State::ENTERABLE.each do |state|
      define_method("#{state.name}!") do
        return self if state == self.state

        redis.multi do
          leave_current_state!
          enter_state!(state)
        end

        self
      end
    end

    State::ALL.each do |state|
      define_method("#{state.name}?") do
        state == self.state
      end
    end

    def completed?
      failed? || succeeded?
    end

    def incomplete?
      !completed?
    end

    def request_kill!
      time, usec_int = now
      if redis.hsetnx(job_key, :kill_requested_at, usec_int)
        @kill_requested_at = time
      end
    end

    def kill_requested?
      !kill_requested_at.nil?
    end

    def set_progress(at, out_of = nil)
      progress = compute_fractional_progress(at, out_of)
      set(progress: progress)
    end

    def set_job_name(job_name)
      raise ArgumentError, "`job_name` must not be blank" if job_name.nil? || job_name.empty?
      raise StandardError, "`job_name` can only be set once" if !self.job_name.nil?

      redis.multi do
        set(job_name: job_name)
        redis.sadd(job_name_key, id)
      end
    end

    def add_job_arg(arg_name, arg)
      raise ArgumentError, "`arg_name` must not be blank" if arg_name.nil? || arg_name.empty?
      raise ArgumentError, "`arg` must not be blank" if arg.nil? || arg.empty?

      redis.multi do
        self.job_args[arg_name.to_sym] = arg
        redis.hset(job_args_key, arg_name, arg)
        redis.sadd(job_arg_key(arg), id)
      end
    end

    # def add_error(error, options = { })
    #   options = { is_fatal: false }.merge(options)
    #   @errors << { is_fatal: options[:is_fatal],
    #                code: error.code,
    #                message: error.message,
    #                data: error.data }
    #   set(errors: @errors)
    # end

    def save(data)
      set(data: data)
    end

    def delete
      completed? ?
        delete! :
        raise(NotCompletedError, "This status cannot be deleted because it " \
                                 "isn't complete. Use `delete!` if you want to " \
                                 "delete anyway.")
    end

    def delete!
      redis.multi do
        redis.del(job_key)

        State::ALL.each do |state|
          redis.srem(state.name, id)
          redis.zrem(state.timestamp_name, id)
        end

        redis.srem(job_name_key, id)

        redis.del(job_args_key)
        job_args.marshal_dump.values.each do |arg|
          redis.srem(job_arg_key(arg), id)
        end
      end
    end

    protected

    def self.create(attrs)
      new(attrs.merge!(persist: true))
    end

    def self.raw_redis_hash(id)
      main_hash, job_args_hash = redis.multi do
        redis.hgetall(job_key(id))
        redis.hgetall(job_args_key(id))
      end

      return nil if main_hash.empty?

      main_hash['job_args'] = job_args_hash.to_json if !job_args_hash.nil?
      main_hash
    end

    def leave_current_state!
      redis.srem(state.name, id)
    end

    def enter_state!(state)
      time, usec_int = now
      set(state: state.name, state.timestamp_name => usec_int)
      self.state = state
      self.send("#{state.timestamp_name}=",time)
      redis.zadd(state.timestamp_name, usec_int, id)
      redis.sadd(state.name, id)
    end

    def initialize(attrs = {})
      # If we get a raw hash, don't parse the attributes until they are requested

      @json_encoded_attrs = attrs[:raw]

      if !@json_encoded_attrs.nil? && !@json_encoded_attrs.empty?
        @json_encoded_attrs['data'] ||= "{}"
        @json_encoded_attrs['job_args'] ||= "{}"
      else
        @id = attrs[:id] || attrs['id'] || SecureRandom.uuid
        @state = attrs[:state] || attrs['state'] || State::UNKNOWN
        @progress = attrs[:progress] || attrs['progress'] || 0
        @errors = attrs[:errors] || attrs['errors'] || []
        @data = attrs[:data] || attrs['data'] || {}
        @job_args = OpenStruct.new # TODO need this and the above job args init?

        if attrs[:persist]
          redis.multi do
            set({
              id: id,
              progress: progress,
              errors: errors
            })
            enter_state!(state)
          end
        end
      end
    end

    def load_from_json_encoded_attrs(attribute_name)
      json = (@json_encoded_attrs || {})[attribute_name]
      attribute = json.nil? ? nil : JSON.parse(json, quirks_mode: true)

      case attribute_name
      when 'state'
        State.from_name(attribute)
      when /.*_at/
        attribute.nil? ? nil : Jobba::Utils.time_from_usec_int(attribute.to_i)
      when 'job_args'
        OpenStruct.new(attribute)
      else
        attribute
      end
    end

    def set(incoming_hash)
      apply_consistency_rules!(incoming_hash)
      set_hash_locally(incoming_hash)
      set_hash_in_redis(incoming_hash)
    end

    def apply_consistency_rules!(hash)
      hash[:progress] = 1.0 if hash[:state] == State::SUCCEEDED
    end

    def set_hash_locally(hash)
      hash.each{ |key, value| self.send("#{key}=", value) }
    end

    def set_hash_in_redis(hash)
      redis_key_value_array =
        hash.to_a
            .collect{|kv_array| [kv_array[0], kv_array[1].to_json]}
            .flatten(1)

      Jobba.redis.hmset(job_key, *redis_key_value_array)
    end

    def job_name_key
      "job_name:#{job_name}"
    end

    def job_key
      self.class.job_key(id)
    end

    def self.job_key(id)
      raise(ArgumentError, "`id` cannot be nil") if id.nil?
      "id:#{id}"
    end

    def job_args_key
      self.class.job_args_key(id)
    end

    def self.job_args_key(id)
      raise(ArgumentError, "`id` cannot be nil") if id.nil?
      "job_args:#{id}"
    end

    def job_arg_key(arg)
      "job_arg:#{arg}"
    end

    def compute_fractional_progress(at, out_of)
      if at.nil?
        raise ArgumentError, "Must specify at least `at` argument to `progress` call"
      elsif at < 0
        raise ArgumentError, "progress cannot be negative (at=#{at})"
      elsif out_of && out_of < at
        raise ArgumentError, "`out_of` must be greater than `at` in `progress` calls"
      elsif out_of.nil? && (at < 0 || at > 1)
        raise ArgumentError, "If `out_of` not specified, `at` must be in the range [0.0, 1.0]"
      end

      at.to_f / (out_of || 1).to_f
    end

    def now
      [time = Jobba::Time.now, Utils.time_to_usec_int(time)]
    end

  end
end
```
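A sketch of the lifecycle this class implements, using only the methods defined above; the bang methods (`queued!`, `working!`, ...) are generated per `ENTERABLE` state and move the id between the per-state Redis sets and timestamp sorted sets. The data values are hypothetical.

```ruby
status = Jobba::Status.create!    # persisted in the 'unqueued' state
status.queued!
status.working!
status.set_progress(5, 10)        # stored as the fraction 0.5
status.save(answer: 42)           # arbitrary data hash, serialized to JSON
status.succeeded!
status.completed?                 # => true
Jobba::Status.find(status.id)     # re-reads the hash from Redis; nil for an unknown id
```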
data/lib/jobba/statuses.rb
ADDED
@@ -0,0 +1,74 @@
```ruby
class Jobba::Statuses

  include Jobba::Common
  extend Forwardable

  attr_reader :ids

  def all
    load
  end

  def_delegator :@ids, :empty?
  def_delegators :all, :first, :any?, :none?, :all?, :each, :each_with_index,
                 :map, :collect, :select, :count

  def delete
    if any?(&:incomplete?)
      raise(Jobba::NotCompletedError,
            "This status cannot be deleted because it isn't complete. Use " \
            "`delete!` if you want to delete anyway.")
    end

    delete!
  end

  def delete!
    load
    redis.multi do
      @cache.each(&:delete!)
    end
    @cache = []
    @ids = []
  end

  def request_kill!
    load
    redis.multi do
      @cache.each(&:request_kill!)
    end
  end

  def multi(&block)
    load
    redis.multi do
      @cache.each{|status| block.call(status, redis)}
    end
  end

  protected

  def load
    @cache ||= get_all!
  end

  def get_all!
    id_keys = @ids.collect{|id| "id:#{id}"}

    raw_statuses = redis.pipelined do
      id_keys.each do |key|
        redis.hgetall(key)
      end
    end

    raw_statuses.collect do |raw_status|
      Jobba::Status.new(raw: raw_status)
    end
  end

  def initialize(*ids)
    @ids = [ids].flatten.compact
    @cache = nil
  end

end
```
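`Jobba::Statuses` is normally reached through a `Jobba::Query` rather than built directly; a sketch with hypothetical ids:

```ruby
statuses = Jobba::Statuses.new('id-1', 'id-2')   # ids are made up for illustration
statuses.ids                                     # => ["id-1", "id-2"]
statuses.any?(&:incomplete?)                     # lazily pipelines HGETALLs and caches the Status objects
statuses.request_kill!                           # one redis.multi covering every cached status
```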
data/lib/jobba/time.rb
ADDED
@@ -0,0 +1,21 @@
```ruby
class Jobba::Time

  # We can only accurately record times in redis up to microseconds. Some
  # platforms, e.g. Mac OS, give Time up to microseconds while others, e.g.
  # Linux, give it up to nanoseconds. To make our specs happy and to gel
  # with what redis is giving us, Jobba uses this Time class to enforce
  # rounding away precision beyond microseconds.

  def self.new(*args)
    Time.new(*args).round(6)
  end

  def self.now
    Time.new.round(6)
  end

  def self.at(*args)
    Time.at(*args).round(6)
  end

end
```
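A quick illustration of the rounding behaviour described in the comment above:

```ruby
t = Jobba::Time.now      # a ::Time value rounded to whole microseconds
t.nsec % 1_000           # => 0, even on platforms with nanosecond clocks
```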
data/lib/jobba/utils.rb
ADDED
@@ -0,0 +1,25 @@
```ruby
module Jobba::Utils

  # Represent time as an integer number of us since epoch
  # (helps avoid redis precision issues)
  def self.time_to_usec_int(time)
    case time
    when ::Time
      time.strftime("%s%6N").to_i
    when Float
      # assuming that time is the number of seconds since epoch
      # to avoid precision issues, convert to a string, remove
      # the decimal, and convert back to an integer
      sprintf("%0.6f", time.to_f).gsub(/\./,'').to_i
    when Integer
      time
    when String
      time.to_i
    end
  end

  def self.time_from_usec_int(int)
    Jobba::Time.at(int / 1000000, int % 1000000)
  end

end
```
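These helpers are how timestamps get stored as integer microseconds in Redis; a sketch of the round trip:

```ruby
t    = Jobba::Time.now
usec = Jobba::Utils.time_to_usec_int(t)      # integer microseconds since epoch
Jobba::Utils.time_from_usec_int(usec) == t   # => true; both sides carry microsecond precision
```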
data/lib/jobba.rb
ADDED
@@ -0,0 +1,44 @@
```ruby
require "redis"
require "redis-namespace"

require "jobba/version"
require "jobba/exceptions"
require "jobba/time"
require "jobba/utils"
require "jobba/configuration"
require "jobba/common"
require "jobba/state"
require "jobba/status"
require "jobba/statuses"
require "jobba/query"

module Jobba

  def self.where(*args)
    Query.new.where(*args)
  end

  def self.all
    Query.new.all
  end

  def self.count
    Query.new.count
  end

  def self.configure
    yield configuration
  end

  def self.configuration
    @configuration ||= Configuration.new
  end

  def self.redis
    @redis ||= Redis::Namespace.new(
      configuration.namespace,
      redis: Redis.new(configuration.redis_options || {})
    )
  end

end
```
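Finally, a configuration sketch. `jobba/configuration.rb` is listed in this release but not shown in this excerpt, so the writer methods and option shape below are assumptions; the reader attributes `namespace` and `redis_options` are the ones `Jobba.redis` consumes above.

```ruby
Jobba.configure do |config|
  config.redis_options = { url: 'redis://localhost:6379/0' }  # assumed writer; hash is passed to Redis.new
  config.namespace     = 'jobba'                              # assumed writer; used by Redis::Namespace
end

Jobba.all.count   # all statuses, via the query's default "any state" clause
Jobba.count
```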