litestack 0.1.7 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/BENCHMARKS.md +1 -1
- data/CHANGELOG.md +20 -3
- data/README.md +28 -1
- data/assets/litecable_logo_teal.png +0 -0
- data/bench/bench_cache_raw.rb +18 -2
- data/bench/bench_jobs_rails.rb +20 -14
- data/bench/bench_jobs_raw.rb +0 -2
- data/lib/action_cable/subscription_adapter/litecable.rb +36 -0
- data/lib/active_job/queue_adapters/litejob_adapter.rb +14 -10
- data/lib/litestack/litecable.rb +138 -0
- data/lib/litestack/litecable.sql.yml +24 -0
- data/lib/litestack/litecache.rb +56 -62
- data/lib/litestack/litecache.sql.yml +28 -0
- data/lib/litestack/litecache.yml +7 -0
- data/lib/litestack/litejob.rb +20 -11
- data/lib/litestack/litejobqueue.rb +122 -44
- data/lib/litestack/litemetric.rb +228 -0
- data/lib/litestack/litemetric.sql.yml +69 -0
- data/lib/litestack/litequeue.rb +57 -29
- data/lib/litestack/litequeue.sql.yml +34 -0
- data/lib/litestack/litesupport.rb +155 -6
- data/lib/litestack/metrics_app.rb +5 -0
- data/lib/litestack/version.rb +1 -1
- data/lib/litestack.rb +19 -10
- metadata +13 -6
- data/bench/bench_rails.rb +0 -81
- data/bench/bench_raw.rb +0 -72
- data/lib/active_job/queue_adapters/ultralite_adapter.rb +0 -49
data/lib/litestack/litecache.rb
CHANGED
```diff
@@ -2,6 +2,7 @@
 
 # all components should require the support module
 require_relative 'litesupport'
+require_relative 'litemetric'
 
 ##
 #Litecache is a caching library for Ruby applications that is built on top of SQLite. It is designed to be simple to use, very fast, and feature-rich, providing developers with a reliable and efficient way to cache data.
@@ -16,6 +17,9 @@ require_relative 'litesupport'
 
 class Litecache
 
+  include Litesupport::Liteconnection
+  include Litemetric::Measurable
+
   # the default options for the cache
   # can be overriden by passing new options in a hash
   # to Litecache.new
@@ -29,12 +33,15 @@ class Litecache
 
   DEFAULT_OPTIONS = {
     path: "./cache.db",
+    config_path: "./litecache.yml",
+    sync: 0,
     expiry: 60 * 60 * 24 * 30, # one month
     size: 128 * 1024 * 1024, #128MB
     mmap_size: 128 * 1024 * 1024, #128MB
-    min_size:
+    min_size: 8 * 1024 * 1024, #16MB
     return_full_record: false, #only return the payload
-    sleep_interval: 1 # 1 second
+    sleep_interval: 1, # 1 second
+    metrics: false
   }
 
   # creates a new instance of Litecache
@@ -56,36 +63,20 @@ class Litecache
   # litecache.close # optional, you can safely kill the process
 
   def initialize(options = {})
-
-
-    @sql = {
-      :pruner => "DELETE FROM data WHERE expires_in <= $1",
-      :extra_pruner => "DELETE FROM data WHERE id IN (SELECT id FROM data ORDER BY last_used ASC LIMIT (SELECT CAST((count(*) * $1) AS int) FROM data))",
-      :limited_pruner => "DELETE FROM data WHERE id IN (SELECT id FROM data ORDER BY last_used asc limit $1)",
-      :toucher => "UPDATE data SET last_used = unixepoch('now') WHERE id = $1",
-      :setter => "INSERT into data (id, value, expires_in, last_used) VALUES ($1, $2, unixepoch('now') + $3, unixepoch('now')) on conflict(id) do UPDATE SET value = excluded.value, last_used = excluded.last_used, expires_in = excluded.expires_in",
-      :inserter => "INSERT into data (id, value, expires_in, last_used) VALUES ($1, $2, unixepoch('now') + $3, unixepoch('now')) on conflict(id) do UPDATE SET value = excluded.value, last_used = excluded.last_used, expires_in = excluded.expires_in WHERE id = $1 and expires_in <= unixepoch('now')",
-      :finder => "SELECT id FROM data WHERE id = $1",
-      :getter => "SELECT id, value, expires_in FROM data WHERE id = $1",
-      :deleter => "delete FROM data WHERE id = $1 returning value",
-      :incrementer => "INSERT into data (id, value, expires_in, last_used) VALUES ($1, $2, unixepoch('now') + $3, unixepoch('now')) on conflict(id) do UPDATE SET value = cast(value AS int) + cast(excluded.value as int), last_used = excluded.last_used, expires_in = excluded.expires_in",
-      :counter => "SELECT count(*) FROM data",
-      :sizer => "SELECT size.page_size * count.page_count FROM pragma_page_size() AS size, pragma_page_count() AS count"
-    }
-    @cache = Litesupport::Pool.new(1){create_db}
-    @stats = {hit: 0, miss: 0}
+    options[:size] = DEFAULT_OPTIONS[:min_size] if options[:size] && options[:size] < DEFAULT_OPTIONS[:min_size]
+    init(options)
     @last_visited = {}
-
-    @bgthread = spawn_worker
+    collect_metrics if @options[:metrics]
   end
 
   # add a key, value pair to the cache, with an optional expiry value (number of seconds)
   def set(key, value, expires_in = nil)
     key = key.to_s
     expires_in = @options[:expires_in] if expires_in.nil? or expires_in.zero?
-    @
+    @conn.acquire do |cache|
       begin
         cache.stmts[:setter].execute!(key, value, expires_in)
+        capture(:write, key)
       rescue SQLite3::FullException
         cache.stmts[:extra_pruner].execute!(0.2)
         cache.execute("vacuum")
@@ -100,12 +91,13 @@ class Litecache
     key = key.to_s
     expires_in = @options[:expires_in] if expires_in.nil? or expires_in.zero?
     changes = 0
-    @
+    @conn.acquire do |cache|
       begin
-        transaction(:immediate) do
+        cache.transaction(:immediate) do
           cache.stmts[:inserter].execute!(key, value, expires_in)
-          changes =
+          changes = cache.changes
        end
+        capture(:write, key)
       rescue SQLite3::FullException
         cache.stmts[:extra_pruner].execute!(0.2)
         cache.execute("vacuum")
@@ -119,19 +111,19 @@ class Litecache
   # if the key doesn't exist or it is expired then null will be returned
   def get(key)
     key = key.to_s
-    if record = @
+    if record = @conn.acquire{|cache| cache.stmts[:getter].execute!(key)[0] }
       @last_visited[key] = true
-
+      capture(:hit, key)
       return record[1]
     end
-
+    capture(:miss, key)
     nil
   end
 
   # delete a key, value pair from the cache
   def delete(key)
     changes = 0
-    @
+    @conn.acquire do |cache|
       cache.stmts[:deleter].execute!(key)
       changes = cache.changes
     end
@@ -141,7 +133,7 @@ class Litecache
   # increment an integer value by amount, optionally add an expiry value (in seconds)
   def increment(key, amount, expires_in = nil)
     expires_in = @expires_in unless expires_in
-    @
+    @conn.acquire{|cache| cache.stmts[:incrementer].execute!(key.to_s, amount, expires_in) }
   end
 
   # decrement an integer value by amount, optionally add an expiry value (in seconds)
@@ -151,7 +143,7 @@ class Litecache
 
   # delete all entries in the cache up limit (ordered by LRU), if no limit is provided approximately 20% of the entries will be deleted
   def prune(limit=nil)
-    @
+    @conn.acquire do |cache|
       if limit and limit.is_a? Integer
         cache.stmts[:limited_pruner].execute!(limit)
       elsif limit and limit.is_a? Float
@@ -164,42 +156,34 @@ class Litecache
 
   # return the number of key, value pairs in the cache
   def count
-
+    run_stmt(:counter)[0][0]
   end
 
   # return the actual size of the cache file
   def size
-
+    run_stmt(:sizer)[0][0]
   end
 
   # delete all key, value pairs in the cache
   def clear
-
+    run_sql("delete FROM data")
   end
 
   # close the connection to the cache file
   def close
     @running = false
-
-    @cache.acquire{|cache| cache.close }
-    #end
+    super
   end
 
   # return the maximum size of the cache
   def max_size
-
+    run_sql("SELECT s.page_size * c.max_page_count FROM pragma_page_size() as s, pragma_max_page_count() as c")[0][0]
   end
 
-  # hits and misses for get operations performed over this particular connection (not cache wide)
-  #
-  # litecache.stats # => {hit: 543, miss: 31}
-  def stats
-    @stats
-  end
-
   # low level access to SQLite transactions, use with caution
-  def transaction(mode)
-
+  def transaction(mode, acquire=true)
+    return cache.transaction(mode){yield} unless acquire
+    @conn.acquire do |cache|
      cache.transaction(mode) do
        yield
      end
@@ -208,10 +192,15 @@ class Litecache
 
  private
 
+  def setup
+    super # create connection
+    @bgthread = spawn_worker # create backgroud pruner thread
+  end
+
   def spawn_worker
     Litesupport.spawn do
       while @running
-        @
+        @conn.acquire do |cache|
          begin
            cache.transaction(:immediate) do
              @last_visited.delete_if do |k| # there is a race condition here, but not a serious one
@@ -232,19 +221,24 @@ class Litecache
    end
  end
 
-  def
-
-
-
-
-
-
-
-
-
-
-
+  def create_connection
+    conn = super
+    conn.cache_size = 2000
+    conn.journal_size_limit = [(@options[:size]/2).to_i, @options[:min_size]].min
+    conn.max_page_count = (@options[:size] / conn.page_size).to_i
+    conn.case_sensitive_like = true
+    sql = YAML.load_file("#{__dir__}/litecache.sql.yml")
+    version = conn.get_first_value("PRAGMA user_version")
+    sql["schema"].each_pair do |v, obj|
+      if v > version
+        conn.transaction do
+          obj.each{|k, s| conn.execute(s)}
+          conn.user_version = v
+        end
+      end
+    end
+    sql["stmts"].each { |k, v| conn.stmts[k.to_sym] = conn.prepare(v) }
+    conn
  end
 
 end
```
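The net effect of the litecache.rb changes is that connection pooling, statement preparation, and schema setup now come from Litesupport::Liteconnection and the bundled litecache.sql.yml, while Litemetric::Measurable adds optional metrics capture. The public cache API itself is unchanged, so usage stays along these lines (an illustrative sketch, not taken from the package's docs; the option names are the ones shown in DEFAULT_OPTIONS above):

```ruby
# Illustrative sketch of the Litecache API shown in the diff above.
# Assumes the litestack gem (~> 0.2.0) is installed; option names come from DEFAULT_OPTIONS.
require 'litestack'

cache = Litecache.new(path: "./cache.db", metrics: false) # metrics: true reports to Litemetric
cache.set("user:1", "adam", 300)   # store for 300 seconds
cache.get("user:1")                # => "adam" (captured as a :hit when metrics are on)
cache.increment("visits", 1)       # integer counter stored in the same data table
cache.delete("user:1")
cache.close                        # stops the background pruner and closes the connection
```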
data/lib/litestack/litecache.sql.yml
ADDED
```diff
@@ -0,0 +1,28 @@
+schema:
+  1:
+    create_table_data: >
+      CREATE table if not exists data(id text primary key, value text, expires_in integer, last_used integer)
+    create_expiry_index: >
+      CREATE index if not exists expiry_index on data (expires_in)
+    create_last_used_index: >
+      CREATE index if not exists last_used_index on data (last_used)
+
+stmts:
+  pruner: DELETE FROM data WHERE expires_in <= $1
+  extra_pruner: DELETE FROM data WHERE id IN (SELECT id FROM data ORDER BY last_used ASC LIMIT (SELECT CAST((count(*) * $1) AS int) FROM data))
+  limited_pruner: DELETE FROM data WHERE id IN (SELECT id FROM data ORDER BY last_used asc limit $1)
+  toucher: UPDATE data SET last_used = unixepoch('now') WHERE id = $1
+  setter: >
+    INSERT into data (id, value, expires_in, last_used) VALUES ($1, $2, unixepoch('now') + $3, unixepoch('now')) on conflict(id) do
+    UPDATE SET value = excluded.value, last_used = excluded.last_used, expires_in = excluded.expires_in
+  inserter: >
+    INSERT into data (id, value, expires_in, last_used) VALUES ($1, $2, unixepoch('now') + $3, unixepoch('now')) on conflict(id) do
+    UPDATE SET value = excluded.value, last_used = excluded.last_used, expires_in = excluded.expires_in WHERE id = $1 and expires_in <= unixepoch('now')
+  finder: SELECT id FROM data WHERE id = $1
+  getter: SELECT id, value, expires_in FROM data WHERE id = $1
+  deleter: delete FROM data WHERE id = $1 returning value
+  incrementer: >
+    INSERT into data (id, value, expires_in, last_used) VALUES ($1, $2, unixepoch('now') + $3, unixepoch('now')) on conflict(id) do
+    UPDATE SET value = cast(value AS int) + cast(excluded.value as int), last_used = excluded.last_used, expires_in = excluded.expires_in
+  counter: SELECT count(*) FROM data
+  sizer: SELECT size.page_size * count.page_count FROM pragma_page_size() AS size, pragma_page_count() AS count
```
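This YAML layout is what create_connection in the litecache.rb diff consumes: the "schema" section is keyed by an integer version and applied only when that version is greater than the database's PRAGMA user_version, and the "stmts" section is turned into prepared statements keyed by symbol. A generic, hedged sketch of that migration pattern using the plain sqlite3 gem (the file names and their contents here are hypothetical, not part of litestack):

```ruby
# Hedged sketch of the versioned-schema pattern used by litecache.sql.yml above.
# Assumes the sqlite3 gem; "example.sql.yml" and "example.db" are made-up names.
require 'sqlite3'
require 'yaml'

db  = SQLite3::Database.new("example.db")
sql = YAML.load_file("example.sql.yml") # => { "schema" => { 1 => {...} }, "stmts" => {...} }

version = db.get_first_value("PRAGMA user_version") # 0 on a fresh database
sql["schema"].each_pair do |v, statements|
  next unless v > version
  db.transaction do
    statements.each_value { |ddl| db.execute(ddl) }
    db.execute("PRAGMA user_version = #{v}")        # record that version v is applied
  end
end

# named prepared statements, keyed by symbol, ready to execute
stmts = sql["stmts"].to_h { |name, s| [name.to_sym, db.prepare(s)] }
```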
data/lib/litestack/litejob.rb
CHANGED
```diff
@@ -52,28 +52,37 @@ module Litejob
   end
 
   def perform_at(time, *params)
-    delay = time - Time.now.to_i
+    delay = time.to_i - Time.now.to_i
     get_jobqueue.push(self.name, params, delay, queue)
   end
 
-  def
+  def perform_in(delay, *params)
     get_jobqueue.push(self.name, params, delay, queue)
   end
-
-  def
-
+
+  def perform_after(delay, *params)
+    perform_in(delay, *params)
   end
-
-  def
-
+
+  def process_jobs
+    get_jobqueue
   end
-
+
+  def delete(id, queue_name=nil)
+    queue_name ||= queue
+    get_jobqueue.delete(id, queue)
+  end
+
   def queue
-    @@
+    @@queue ||= "default"
   end
 
   def queue=(queue_name)
-    @@
+    @@queue = queue_name.to_s
+  end
+
+  def options
+    @options ||= self::DEFAULT_OPTIONS rescue {}
   end
 
   def get_jobqueue
```
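For context, the class-level helpers added above are meant to be called on a job class that includes Litejob. The sketch below is illustrative only; EasyJob is the hypothetical example class used in the litejobqueue.rb comments, and its perform body is made up here:

```ruby
# Illustrative use of the Litejob class methods shown in the diff above.
# EasyJob is hypothetical; it includes Litejob and defines #perform.
class EasyJob
  include Litejob

  self.queue = "urgent"        # uses the queue= writer added above; otherwise "default"

  def perform(name)
    puts "Hello #{name}"
  end
end

EasyJob.perform_in(5, "Ruby")             # enqueue with a 5 second delay
EasyJob.perform_after(5, "Ruby")          # same behaviour as perform_in
EasyJob.perform_at(Time.now + 60, "Ruby") # absolute time, converted to a delay via time.to_i
```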
data/lib/litestack/litejobqueue.rb
CHANGED
```diff
@@ -1,8 +1,7 @@
 # frozen_stringe_literal: true
-
-require 'oj'
-require 'yaml'
+
 require_relative './litequeue'
+require_relative './litemetric'
 
 ##
 #Litejobqueue is a job queueing and processing system designed for Ruby applications. It is built on top of SQLite, which is an embedded relational database management system that is #lightweight and fast.
@@ -12,7 +11,9 @@ require_relative './litequeue'
 #Litejobqueue also integrates well with various I/O frameworks like Async and Polyphony, making it a great choice for Ruby applications that use these frameworks. It provides a #simple and easy-to-use API for adding jobs to the queue and for processing them.
 #
 #Overall, LiteJobQueue is an excellent choice for Ruby applications that require a lightweight, embedded job queueing and processing system that is fast, efficient, and easy to use.
-class Litejobqueue
+class Litejobqueue < Litequeue
+
+  include Litemetric::Measurable
 
   # the default options for the job queue
   # can be overriden by passing new options in a hash
@@ -34,12 +35,22 @@ class Litejobqueue
     path: "./queue.db",
     queues: [["default", 1]],
     workers: 5,
-
-
+    retries: 5,
+    retry_delay: 60,
+    retry_delay_multiplier: 10,
+    dead_job_retention: 10 * 24 * 3600,
+    gc_sleep_interval: 7200,
+    logger: 'STDOUT',
+    sleep_intervals: [0.001, 0.005, 0.025, 0.125, 0.625, 1.0, 2.0],
+    metrics: false
   }
 
   @@queue = nil
 
+  attr_reader :running
+
+  alias_method :_push, :push
+
   # a method that returns a single instance of the job queue
   # for use by Litejob
   def self.jobqueue(options = {})
@@ -57,19 +68,10 @@ class Litejobqueue
   # jobqueue = Litejobqueue.new
   #
   def initialize(options = {})
-
-
-
-
-      config.delete k
-    end
-    @options.merge!(config)
-    @queue = Litequeue.new(@options) # create a new queue object
-    if @options[:logger].respond_to? :info
-      @logger = @options[:logger]
-    else
-      @logger = Logger.new(@options[:logger])
-    end
+
+    @queues = [] # a place holder to allow workers to process
+    super(options)
+
     # group and order queues according to their priority
     pgroups = {}
     @options[:queues].each do |q|
@@ -77,7 +79,6 @@ class Litejobqueue
       pgroups[q[1]] << [q[0], q[2] == "spawn"]
     end
     @queues = pgroups.keys.sort.reverse.collect{|p| [p, pgroups[p]]}
-    @workers = @options[:workers].times.collect{ create_worker }
   end
 
   # push a job to the queue
@@ -89,10 +90,17 @@ class Litejobqueue
   # jobqueue = Litejobqueue.new
   # jobqueue.push(EasyJob, params) # the job will be performed asynchronously
   def push(jobclass, params, delay=0, queue=nil)
-    payload = Oj.dump(
-
-
-    @logger.info("[litejob]:[ENQ]
+    payload = Oj.dump({klass: jobclass, params: params, retries: @options[:retries], queue: queue})
+    res = super(payload, delay, queue)
+    capture(:enqueue, queue)
+    @logger.info("[litejob]:[ENQ] queue:#{res[1]} class:#{jobclass} job:#{res[0]}")
+    res
+  end
+
+  def repush(id, job, delay=0, queue=nil)
+    res = super(id, Oj.dump(job), delay, queue)
+    capture(:enqueue, queue)
+    @logger.info("[litejob]:[ENQ] queue:#{res[0]} class:#{job[:klass]} job:#{id}")
     res
   end
 
@@ -106,13 +114,61 @@ class Litejobqueue
   # id = jobqueue.push(EasyJob, params, 10) # queue for processing in 10 seconds
   # jobqueue.delete(id)
   def delete(id)
-    job =
+    job = super(id)
     @logger.info("[litejob]:[DEL] job: #{job}")
-    Oj.load(job) if job
+    job = Oj.load(job[0]) if job
     job
   end
 
+  # delete all jobs in a certain named queue
+  # or delete all jobs if the queue name is nil
+  #def clear(queue=nil)
+    #@queue.clear(queue)
+  #end
+
+  # stop the queue object (does not delete the jobs in the queue)
+  # specifically useful for testing
+  def stop
+    @running = false
+    #@@queue = nil
+    close
+  end
+
+
   private
+
+  def exit_callback
+    @running = false # stop all workers
+    puts "--- Litejob detected an exit, cleaning up"
+    index = 0
+    while @jobs_in_flight > 0 and index < 30 # 3 seconds grace period for jobs to finish
+      puts "--- Waiting for #{@jobs_in_flight} jobs to finish"
+      sleep 0.1
+      index += 1
+    end
+    puts " --- Exiting with #{@jobs_in_flight} jobs in flight"
+  end
+
+  def setup
+    super
+    @jobs_in_flight = 0
+    @workers = @options[:workers].times.collect{ create_worker }
+    @gc = create_garbage_collector
+    @mutex = Litesupport::Mutex.new
+  end
+
+  def job_started
+    Litesupport.synchronize(@mutex){@jobs_in_flight += 1}
+  end
+
+  def job_finished
+    Litesupport.synchronize(@mutex){@jobs_in_flight -= 1}
+  end
+
+  # return a hash encapsulating the info about the current jobqueue
+  def snapshot
+    info
+  end
 
   # optionally run a job in its own context
   def schedule(spawn = false, &block)
@@ -126,41 +182,47 @@ class Litejobqueue
   # create a worker according to environment
   def create_worker
     Litesupport.spawn do
-      if @options[:logger].respond_to? :info
-        logger = @options[:logger]
-      else
-        logger = Logger.new(@options[:logger])
-      end
       worker_sleep_index = 0
-
-      loop do
+      while @running do
        processed = 0
        @queues.each do |level| # iterate through the levels
          level[1].each do |q| # iterate through the queues in the level
            index = 0
            max = level[0]
-            while index < max && payload =
+            while index < max && payload = pop(q[0], 1) # fearlessly use the same queue object
+              capture(:dequeue, q[0])
              processed += 1
              index += 1
              begin
                id, job = payload[0], payload[1]
                job = Oj.load(job)
-                logger.info "[litejob]:[DEQ]
-                klass = eval(job[
+                @logger.info "[litejob]:[DEQ] queue:#{q[0]} class:#{job[:klass]} job:#{id}"
+                klass = eval(job[:klass])
                schedule(q[1]) do # run the job in a new context
+                  job_started #(Litesupport.current_context)
                  begin
-                    klass.new.perform(*job[
-                    logger.info "[litejob]:[END]
+                    measure(:perform, q[0]){ klass.new.perform(*job[:params]) }
+                    @logger.info "[litejob]:[END] queue:#{q[0]} class:#{job[:klass]} job:#{id}"
                  rescue Exception => e
-
-
-
+                    # we can retry the failed job now
+                    capture(:fail, q[0])
+                    if job[:retries] == 0
+                      @logger.error "[litejob]:[ERR] queue:#{q[0]} class:#{job[:klass]} job:#{id} failed with #{e}:#{e.message}, retries exhausted, moved to _dead queue"
+                      repush(id, job, @options[:dead_job_retention], '_dead')
+                    else
+                      capture(:retry, q[0])
+                      retry_delay = @options[:retry_delay_multiplier].pow(@options[:retries] - job[:retries]) * @options[:retry_delay]
+                      job[:retries] -= 1
+                      @logger.error "[litejob]:[ERR] queue:#{q[0]} class:#{job[:klass]} job:#{id} failed with #{e}:#{e.message}, retrying in #{retry_delay} seconds"
+                      repush(id, job, retry_delay, q[0])
+                    end
                  end
+                  job_finished #(Litesupport.current_context)
                end
              rescue Exception => e
-
-
-
+                # this is an error in the extraction of job info, retrying here will not be useful
+                @logger.error "[litejob]:[ERR] failed to extract job info for: #{payload} with #{e}:#{e.message}"
+                job_finished #(Litesupport.current_context)
              end
              Litesupport.switch #give other contexts a chance to run here
            end
@@ -176,4 +238,20 @@ class Litejobqueue
     end
   end
 
+  # create a gc for dead jobs
+  def create_garbage_collector
+    Litesupport.spawn do
+      while @running do
+        while jobs = pop('_dead', 100)
+          if jobs[0].is_a? Array
+            @logger.info "[litejob]:[DEL] garbage collector deleted #{jobs.length} dead jobs"
+          else
+            @logger.info "[litejob]:[DEL] garbage collector deleted 1 dead job"
+          end
+        end
+        sleep @options[:gc_sleep_interval]
+      end
+    end
+  end
+
 end
```
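A note on the retry schedule introduced in create_worker above: on each failure the delay is retry_delay_multiplier raised to the number of attempts already made, times retry_delay, and job[:retries] is then decremented until the job is parked in the _dead queue. With the DEFAULT_OPTIONS values added in this release (retries: 5, retry_delay: 60, retry_delay_multiplier: 10) that works out as in this small sketch:

```ruby
# Back-of-the-envelope check of the retry delays implied by the worker loop above.
# Uses the DEFAULT_OPTIONS values from the diff; this is not an API of the gem.
retries                = 5
retry_delay            = 60   # seconds
retry_delay_multiplier = 10

(1..retries).each do |attempt|
  delay     = retry_delay_multiplier.pow(attempt - 1) * retry_delay
  remaining = retries - attempt # value of job[:retries] after this failure
  puts "failure ##{attempt}: retry in #{delay}s, #{remaining} retries left"
end
# failure #1: retry in 60s, 4 retries left
# failure #2: retry in 600s, 3 retries left
# failure #3: retry in 6000s, 2 retries left
# failure #4: retry in 60000s, 1 retries left
# failure #5: retry in 600000s, 0 retries left
# a sixth failure finds job[:retries] == 0 and repushes the job to the _dead queue
```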