litestack 0.2.6 → 0.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/BENCHMARKS.md +11 -0
- data/CHANGELOG.md +19 -0
- data/Gemfile +2 -0
- data/README.md +1 -1
- data/assets/event_page.png +0 -0
- data/assets/index_page.png +0 -0
- data/assets/topic_page.png +0 -0
- data/bench/bench_jobs_rails.rb +1 -1
- data/bench/bench_jobs_raw.rb +1 -1
- data/bench/uljob.rb +1 -1
- data/lib/action_cable/subscription_adapter/litecable.rb +1 -11
- data/lib/active_support/cache/litecache.rb +1 -1
- data/lib/generators/litestack/install/templates/database.yml +5 -1
- data/lib/litestack/liteboard/liteboard.rb +172 -35
- data/lib/litestack/liteboard/views/index.erb +52 -20
- data/lib/litestack/liteboard/views/layout.erb +189 -38
- data/lib/litestack/liteboard/views/litecable.erb +118 -0
- data/lib/litestack/liteboard/views/litecache.erb +144 -0
- data/lib/litestack/liteboard/views/litedb.erb +168 -0
- data/lib/litestack/liteboard/views/litejob.erb +151 -0
- data/lib/litestack/litecable.rb +27 -37
- data/lib/litestack/litecable.sql.yml +1 -1
- data/lib/litestack/litecache.rb +7 -18
- data/lib/litestack/litedb.rb +17 -2
- data/lib/litestack/litejob.rb +2 -3
- data/lib/litestack/litejobqueue.rb +51 -48
- data/lib/litestack/litemetric.rb +46 -69
- data/lib/litestack/litemetric.sql.yml +14 -12
- data/lib/litestack/litemetric_collector.sql.yml +4 -4
- data/lib/litestack/litequeue.rb +9 -20
- data/lib/litestack/litescheduler.rb +84 -0
- data/lib/litestack/litesearch/index.rb +230 -0
- data/lib/litestack/litesearch/model.rb +178 -0
- data/lib/litestack/litesearch/schema.rb +193 -0
- data/lib/litestack/litesearch/schema_adapters/backed_adapter.rb +147 -0
- data/lib/litestack/litesearch/schema_adapters/basic_adapter.rb +128 -0
- data/lib/litestack/litesearch/schema_adapters/contentless_adapter.rb +17 -0
- data/lib/litestack/litesearch/schema_adapters/standalone_adapter.rb +33 -0
- data/lib/litestack/litesearch/schema_adapters.rb +9 -0
- data/lib/litestack/litesearch.rb +37 -0
- data/lib/litestack/litesupport.rb +55 -125
- data/lib/litestack/version.rb +1 -1
- data/lib/litestack.rb +2 -1
- data/lib/sequel/adapters/litedb.rb +3 -2
- metadata +20 -3
data/lib/litestack/litejobqueue.rb CHANGED

```diff
@@ -54,7 +54,7 @@ class Litejobqueue < Litequeue
   # a method that returns a single instance of the job queue
   # for use by Litejob
   def self.jobqueue(options = {})
-    @@queue ||= Litesupport.synchronize{self.new(options)}
+    @@queue ||= Litescheduler.synchronize{self.new(options)}
   end

   def self.new(options = {})
```
```diff
@@ -95,7 +95,7 @@ class Litejobqueue < Litequeue
   # jobqueue = Litejobqueue.new
   # jobqueue.push(EasyJob, params) # the job will be performed asynchronously
   def push(jobclass, params, delay=0, queue=nil)
-    payload = Oj.dump({klass: jobclass, params: params, retries: @options[:retries], queue: queue})
+    payload = Oj.dump({klass: jobclass, params: params, retries: @options[:retries], queue: queue}, mode: :strict)
     res = super(payload, delay, queue)
     capture(:enqueue, queue)
     @logger.info("[litejob]:[ENQ] queue:#{res[1]} class:#{jobclass} job:#{res[0]}")
```
```diff
@@ -103,7 +103,7 @@ class Litejobqueue < Litequeue
   end

   def repush(id, job, delay=0, queue=nil)
-    res = super(id, Oj.dump(job), delay, queue)
+    res = super(id, Oj.dump(job, mode: :strict), delay, queue)
     capture(:enqueue, queue)
     @logger.info("[litejob]:[ENQ] queue:#{res[0]} class:#{job[:klass]} job:#{id}")
     res
```
```diff
@@ -121,7 +121,7 @@ class Litejobqueue < Litequeue
   def delete(id)
     job = super(id)
     @logger.info("[litejob]:[DEL] job: #{job}")
-    job = Oj.load(job[0]) if job
+    job = Oj.load(job[0], symbol_keys: true) if job
     job
   end

```
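Taken together, the `Oj` changes above pin down how job payloads round-trip: `mode: :strict` restricts dumping to JSON-native types, and `symbol_keys: true` restores symbol-keyed hashes on load. A minimal sketch of the round-trip (the job class name and option values are illustrative):

```ruby
require "oj"

# Strict mode emits plain JSON: symbol keys become strings, and any
# non-JSON-native value raises instead of being tagged as a Ruby type.
payload = Oj.dump({klass: "EasyJob", params: [1, 2], retries: 5, queue: nil}, mode: :strict)
# => '{"klass":"EasyJob","params":[1,2],"retries":5,"queue":null}'

# symbol_keys: true gives back a hash addressable as job[:klass], job[:retries], ...
job = Oj.load(payload, symbol_keys: true)
job[:klass]   # => "EasyJob"
job[:retries] # => 5
```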
```diff
@@ -163,17 +163,17 @@ class Litejobqueue < Litequeue
   end

   def job_started
-    Litesupport.synchronize(@mutex){@jobs_in_flight += 1}
+    Litescheduler.synchronize(@mutex){@jobs_in_flight += 1}
   end

   def job_finished
-    Litesupport.synchronize(@mutex){@jobs_in_flight -= 1}
+    Litescheduler.synchronize(@mutex){@jobs_in_flight -= 1}
   end

   # optionally run a job in its own context
   def schedule(spawn = false, &block)
     if spawn
-      Litesupport.spawn &block
+      Litescheduler.spawn &block
     else
       yield
     end
```
```diff
@@ -181,50 +181,23 @@ class Litejobqueue < Litequeue

   # create a worker according to environment
   def create_worker
-    Litesupport.spawn do
+    Litescheduler.spawn do
       worker_sleep_index = 0
       while @running do
         processed = 0
-        @queues.each do |
-          …
-          …
-          …
-          while
-            capture(:dequeue,
+        @queues.each do |priority, queues| # iterate through the levels
+          queues.each do |queue, spawns| # iterate through the queues in the level
+            batched = 0
+
+            while (batched < priority) && (payload = pop(queue, 1)) # fearlessly use the same queue object
+              capture(:dequeue, queue)
               processed += 1
-              …
-              …
-              …
-              …
-              …
-              …
-              schedule(q[1]) do # run the job in a new context
-                job_started #(Litesupport.current_context)
-                begin
-                  measure(:perform, q[0]){ klass.new.perform(*job[:params]) }
-                  @logger.info "[litejob]:[END] queue:#{q[0]} class:#{job[:klass]} job:#{id}"
-                rescue Exception => e
-                  # we can retry the failed job now
-                  capture(:fail, q[0])
-                  if job[:retries] == 0
-                    @logger.error "[litejob]:[ERR] queue:#{q[0]} class:#{job[:klass]} job:#{id} failed with #{e}:#{e.message}, retries exhausted, moved to _dead queue"
-                    repush(id, job, @options[:dead_job_retention], '_dead')
-                  else
-                    capture(:retry, q[0])
-                    retry_delay = @options[:retry_delay_multiplier].pow(@options[:retries] - job[:retries]) * @options[:retry_delay]
-                    job[:retries] -= 1
-                    @logger.error "[litejob]:[ERR] queue:#{q[0]} class:#{job[:klass]} job:#{id} failed with #{e}:#{e.message}, retrying in #{retry_delay} seconds"
-                    repush(id, job, retry_delay, q[0])
-                  end
-                end
-                job_finished #(Litesupport.current_context)
-              end
-            rescue Exception => e
-              # this is an error in the extraction of job info, retrying here will not be useful
-              @logger.error "[litejob]:[ERR] failed to extract job info for: #{payload} with #{e}:#{e.message}"
-              job_finished #(Litesupport.current_context)
-            end
-            Litesupport.switch #give other contexts a chance to run here
+              batched += 1
+
+              id, serialized_job = payload
+              process_job(queue, id, serialized_job, spawns)
+
+              Litescheduler.switch # give other contexts a chance to run here
           end
         end
       end
```
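The rewritten worker loop batches by priority level: `@queues` groups queues under a numeric priority, and each sweep pops at most `priority` jobs from a queue before moving on, so high-priority queues drain faster without starving the rest. A toy model of that sweep (the queue shape and backlog are illustrative, not litestack's actual state):

```ruby
# Each priority level lists [queue_name, spawn_flag] pairs; the level's number
# doubles as the per-sweep batch size, as implied by `while batched < priority`.
queues = {
  5 => [["default", false]], # up to 5 pops per sweep
  1 => [["low", true]]       # up to 1 pop per sweep
}
backlog = {"default" => %w[j1 j2 j3], "low" => %w[j4 j5]}

queues.each do |priority, qs|
  qs.each do |queue, _spawns|
    batched = 0
    while batched < priority && (job = backlog[queue].shift)
      batched += 1
      puts "#{queue}: popped #{job} (#{batched}/#{priority} this sweep)"
    end
  end
end
# "low" still holds j5 after one sweep; it drains one job per pass.
```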
```diff
@@ -240,7 +213,7 @@ class Litejobqueue < Litequeue

   # create a gc for dead jobs
   def create_garbage_collector
-    Litesupport.spawn do
+    Litescheduler.spawn do
       while @running do
         while jobs = pop('_dead', 100)
           if jobs[0].is_a? Array
```
```diff
@@ -254,4 +227,34 @@ class Litejobqueue < Litequeue
     end
   end

+  def process_job(queue, id, serialized_job, spawns)
+    job = Oj.load(serialized_job)
+    @logger.info "[litejob]:[DEQ] queue:#{queue} class:#{job["klass"]} job:#{id}"
+    klass = Object.const_get(job["klass"])
+    schedule(spawns) do # run the job in a new context
+      job_started # (Litesupport.current_context)
+      begin
+        measure(:perform, queue) { klass.new.perform(*job["params"]) }
+        @logger.info "[litejob]:[END] queue:#{queue} class:#{job["klass"]} job:#{id}"
+      rescue Exception => e # standard:disable Lint/RescueException
+        # we can retry the failed job now
+        capture(:fail, queue)
+        if job["retries"] == 0
+          @logger.error "[litejob]:[ERR] queue:#{queue} class:#{job["klass"]} job:#{id} failed with #{e}:#{e.message}, retries exhausted, moved to _dead queue"
+          repush(id, job, @options[:dead_job_retention], "_dead")
+        else
+          capture(:retry, queue)
+          retry_delay = @options[:retry_delay_multiplier].pow(@options[:retries] - job["retries"]) * @options[:retry_delay]
+          job["retries"] -= 1
+          @logger.error "[litejob]:[ERR] queue:#{queue} class:#{job["klass"]} job:#{id} failed with #{e}:#{e.message}, retrying in #{retry_delay} seconds"
+          repush(id, job, retry_delay, queue)
+        end
+      end
+      job_finished # (Litesupport.current_context)
+    end
+  rescue Exception => e # standard:disable Lint/RescueException
+    # this is an error in the extraction of job info, retrying here will not be useful
+    @logger.error "[litejob]:[ERR] failed to extract job info for: #{serialized_job} with #{e}:#{e.message}"
+    job_finished # (Litesupport.current_context)
+  end
 end
```
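The retry branch in `process_job` backs off exponentially: the delay is the multiplier raised to the number of attempts already spent, times the base delay. A worked example (these option values are assumed for illustration, not read from litestack's defaults):

```ruby
max_retries = 5  # @options[:retries]                (assumed)
base_delay  = 60 # @options[:retry_delay]            (assumed)
multiplier  = 10 # @options[:retry_delay_multiplier] (assumed)

retries_left = max_retries
until retries_left == 0
  delay = multiplier**(max_retries - retries_left) * base_delay
  puts "fail with #{retries_left} retries left -> retry in #{delay}s"
  retries_left -= 1 # mirrors job["retries"] -= 1
end
# 60s, 600s, 6_000s, 60_000s, 600_000s, then the job moves to _dead.
```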
data/lib/litestack/litemetric.rb CHANGED

```diff
@@ -55,9 +55,12 @@ class Litemetric
   ## event capturing
   ##################

+  def current_time_slot
+    (Time.now.to_i / 300) * 300
+  end

   def capture(topic, event, key=event, value=nil)
-    @collector.capture(topic, event, key, value)
+    @collector.capture(topic, event, key, value, current_time_slot)
   end

   def capture_snapshot(topic, state)
```
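`current_time_slot` floors the wall clock to a 300-second boundary, so every event captured inside the same five-minute window shares one slot; this mirrors the `unixepoch()/300*300` default on `created_at` in the schema further down. Worked out:

```ruby
t = Time.utc(2023, 1, 1, 10, 7, 42).to_i # => 1672567662
slot = (t / 300) * 300                   # => 1672567500
Time.at(slot).utc # => 2023-01-01 10:05:00 UTC, the enclosing 5-minute boundary
```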
```diff
@@ -83,18 +86,18 @@ class Litemetric
   def events_summaries(topic, resolution, order, dir, search, count)
     search = "%#{search}%" if search
     if dir.downcase == "desc"
-      …
+      run_stmt_hash(:events_summaries, topic, resolution, order, search, count)
     else
-      …
+      run_stmt_hash(:events_summaries_asc, topic, resolution, order, search, count)
     end
   end

   def keys_summaries(topic, event, resolution, order, dir, search, count)
     search = "%#{search}%" if search
     if dir.downcase == "desc"
-      …
+      run_stmt_hash(:keys_summaries, topic, event, resolution, order, search, count).to_a
     else
-      …
+      run_stmt_hash(:keys_summaries_asc, topic, event, resolution, order, search, count).to_a
     end
   end

```
```diff
@@ -103,11 +106,11 @@ class Litemetric
   end

   def event_data_points(step, count, resolution, topic, event)
-    …
+    run_stmt_hash(:event_data_points, step, count, resolution, topic, event).to_a
   end

   def key_data_points(step, count, resolution, topic, event, key)
-    …
+    run_stmt_hash(:key_data_points, step, count, resolution, topic, event, key).to_a
   end

   def snapshot(topic)
```
```diff
@@ -130,7 +133,21 @@ class Litemetric
   ###################

   private
-
+
+  def run_stmt_hash(stmt, *args)
+    res = run_stmt(stmt, *args)
+    cols = run_stmt_method(stmt, :columns)
+    hashes = []
+    res.each do | row |
+      hash = {}
+      row.each_with_index do |field, i|
+        hash[cols[i]] = field
+      end
+      hashes << hash
+    end
+    hashes
+  end
+
   def exit_callback
     STDERR.puts "--- Litemetric detected an exit, flushing metrics"
     @running = false
```
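`run_stmt_hash` pairs each row of a statement's result set with the statement's column names, returning hashes instead of positional arrays. Its core transformation on sample data (not a real prepared statement):

```ruby
cols = ["key", "rcount", "ravg"]
rows = [["default", 42, 1.5], ["low", 3, 0.2]]

# Equivalent to the each_with_index loop above, condensed with zip:
hashes = rows.map { |row| cols.zip(row).to_h }
# => [{"key"=>"default", "rcount"=>42, "ravg"=>1.5},
#     {"key"=>"low",     "rcount"=>3,  "ravg"=>0.2}]
```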
```diff
@@ -156,29 +173,13 @@ class Litemetric
   end

   def create_connection
-    …
-    …
-    …
-    version = conn.get_first_value("PRAGMA user_version")
-    sql["schema"].each_pair do |v, obj|
-      if v > version
-        begin
-          conn.transaction do
-            obj.each{|k, s| conn.execute(s)}
-            conn.user_version = v
-          end
-        rescue Exception => e
-          STDERR.puts e.message
-          raise e
-        end
-      end
-    end
-    sql["stmts"].each { |k, v| conn.stmts[k.to_sym] = conn.prepare(v) }
-    conn
+    super("#{__dir__}/litemetric.sql.yml") do |conn|
+      conn.wal_autocheckpoint = 10000 # checkpoint after 10000 pages are written
+    end
   end

   def create_flusher
-    Litesupport.spawn do
+    Litescheduler.spawn do
       while @running do
         sleep @options[:flush_interval]
         flush
```
```diff
@@ -187,7 +188,7 @@ class Litemetric
   end

   def create_summarizer
-    Litesupport.spawn do
+    Litescheduler.spawn do
       while @running do
         sleep @options[:summarize_interval]
         summarize
```
```diff
@@ -210,7 +211,7 @@ class Litemetric
   end

   def create_snapshotter
-    Litesupport.spawn do
+    Litescheduler.spawn do
       while @running do
         sleep @litemetric.options[:snapshot_interval]
         capture_snapshot
```
```diff
@@ -228,13 +229,16 @@ class Litemetric
   end

   def measure(event, key=event)
-    …
+    unless @litemetric
+      yield
+      return 0
+    end
     t1 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
-    …
+    yield
     t2 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
-    value =
+    value = t2 - t1
     capture(event, key, value)
-    …
+    value # return value so other events can reuse it
   end

   def capture_snapshot
```
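`measure` now short-circuits when no collector is attached and otherwise returns the elapsed time so callers can reuse the sample. The timing core is just a monotonic-clock delta, which is immune to system clock adjustments:

```ruby
def timed
  t1 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
  yield
  t2 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
  t2 - t1 # elapsed seconds as a Float
end

timed { sleep 0.05 } # => ~0.05
```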
```diff
@@ -273,16 +277,16 @@ class Litemetric
     init(options)
   end

-  def capture(topic, event, key, value=nil)
+  def capture(topic, event, key, value=nil, time=nil)
     if key.is_a? Array
-      key.each{|k| capture_single_key(topic, event, k, value)}
+      key.each{|k| capture_single_key(topic, event, k, value, time)}
     else
-      capture_single_key(topic, event, key, value)
+      capture_single_key(topic, event, key, value, time)
     end
   end

-  def capture_single_key(topic, event, key, value)
-    run_stmt(:capture_event, topic.to_s, event.to_s, key.to_s,
+  def capture_single_key(topic, event, key, value, time=nil)
+    run_stmt(:capture_event, topic.to_s, event.to_s, key.to_s, time ,1, value)
   end

   def flush
```
```diff
@@ -302,37 +306,10 @@ class Litemetric
   end

   def create_connection
-    …
-    …
-    …
-    sql = YAML.load_file("#{__dir__}/litemetric_collector.sql.yml")
-    version = conn.get_first_value("PRAGMA user_version")
-    sql["schema"].each_pair do |v, obj|
-      if v > version
-        conn.transaction do
-          obj.each do |k, s|
-            begin
-              conn.execute(s)
-            rescue Exception => e
-              STDERR.puts "Error parsing #{k}"
-              STDERR.puts s
-              raise e
-            end
-          end
-          conn.user_version = v
-        end
-      end
-    end
-    sql["stmts"].each do |k, v|
-      begin
-        conn.stmts[k.to_sym] = conn.prepare(v)
-      rescue Exception => e
-        STDERR.puts "Error parsing #{k}"
-        STDERR.puts v
-        raise e
-      end
+    super("#{__dir__}/litemetric_collector.sql.yml") do |conn|
+      conn.execute("ATTACH ? as m", @options[:dbpath].to_s)
+      conn.wal_autocheckpoint = 10000
     end
-    conn
   end

 end
```
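With this change both `Litemetric` and its collector (and `Litequeue` below) reduce `create_connection` to a `super` call that passes the SQL file path plus a block for class-specific setup; the deleted bodies show exactly what moved into the shared superclass: YAML loading, `PRAGMA user_version`-gated schema migration, and statement preparation. A sketch of what that shared helper presumably looks like, reconstructed from the deleted code (not the actual litesupport implementation; `open_database_connection` is a hypothetical stand-in):

```ruby
require "yaml"

# Hypothetical shared helper in the connection-owning base class.
# `conn` is assumed to expose stmts, user_version and transaction,
# as the deleted per-class versions did.
def create_connection(path_to_sql_file = nil)
  conn = open_database_connection # hypothetical: build the raw SQLite connection
  return conn unless path_to_sql_file

  sql = YAML.load_file(path_to_sql_file)
  version = conn.get_first_value("PRAGMA user_version")
  sql["schema"].each_pair do |v, obj|
    if v > version
      conn.transaction do
        obj.each { |_name, stmt| conn.execute(stmt) }
        conn.user_version = v
      end
    end
  end
  sql["stmts"].each { |k, v| conn.stmts[k.to_sym] = conn.prepare(v) }
  yield conn if block_given? # per-class setup: ATTACH, wal_autocheckpoint, ...
  conn
end
```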
data/lib/litestack/litemetric.sql.yml CHANGED

```diff
@@ -13,13 +13,13 @@ schema:
       name TEXT DEFAULT('___') NOT NULL ON CONFLICT REPLACE,
       key TEXT DEFAULT('___') NOT NULL ON CONFLICT REPLACE,
       count INTEGER DEFAULT(0) NOT NULL ON CONFLICT REPLACE,
-      value
-      minimum
-      maximum
+      value REAL,
+      minimum REAL,
+      maximum REAL,
       created_at INTEGER DEFAULT((unixepoch()/300*300)) NOT NULL,
       resolution TEXT DEFAULT('minute') NOT NULL,
       PRIMARY KEY(resolution, created_at, topic, name, key)
-    );
+    ) STRICT;

   create_topic_index_on_events: CREATE INDEX events_topic_index ON events (resolution, created_at, topic) WHERE name = '___';
   create_event_index_on_events: CREATE INDEX events_event_index ON events (resolution, created_at , topic, name) WHERE key = '___';
```
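Spelling out `REAL` and adding the `STRICT` keyword makes SQLite (3.37+) enforce declared column types instead of silently storing whatever arrives. A quick illustration with the sqlite3 gem (table and values are illustrative):

```ruby
require "sqlite3"

db = SQLite3::Database.new(":memory:")
db.execute("CREATE TABLE t(value REAL) STRICT")

db.execute("INSERT INTO t(value) VALUES (?)", [1.5]) # fine
begin
  # under STRICT, a non-numeric TEXT value can't be coerced into REAL
  db.execute("INSERT INTO t(value) VALUES (?)", ["abc"])
rescue SQLite3::ConstraintException => e
  e.message # "cannot store TEXT value in REAL column ..."
end
```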
```diff
@@ -226,7 +226,7 @@ stmts:
       FROM events
       WHERE topic = ?1 AND name = ?2 AND resolution = ?3
       AND created_at <= unixepoch()
-      AND created_at >= unixepoch()
+      AND created_at >= unixepoch() - ?6
       AND key != '___'
       AND iif(length(?5) > 0, rowid IN (SELECT rowid FROM events_search WHERE key LIKE ?5), TRUE)
       GROUP BY key
```
```diff
@@ -251,7 +251,7 @@ stmts:
       FROM events
       WHERE topic = ?1 AND name = ?2 AND resolution = ?3
       AND created_at <= unixepoch()
-      AND created_at >= unixepoch()
+      AND created_at >= unixepoch() - ?6
       AND key != '___'
       AND iif(length(?5) > 0, rowid IN (SELECT rowid FROM events_search WHERE key LIKE ?5), TRUE)
       GROUP BY key
```
```diff
@@ -272,8 +272,8 @@ stmts:
       SELECT
         datetime(ts.slot, 'unixepoch') AS rtime,
         ts.slot AS unixtime,
-        …
-        …
+        sum(events.count) AS rcount,
+        avg(events.value/events.count) AS ravg
       FROM (
         WITH RECURSIVE timeslot(x) AS (
           SELECT (unixepoch()/(?1))*(?1)
```
```diff
@@ -294,8 +294,9 @@ stmts:
       SELECT
         datetime(ts.slot, 'unixepoch') AS rtime,
         ts.slot AS unixtime,
-        …
-        …
+        events.count AS rcount,
+        events.value / events.count AS ravg,
+        events.value AS rtotal
       FROM (
         WITH RECURSIVE timeslot(x) AS (
           SELECT (unixepoch()/(?1))*(?1)
```
```diff
@@ -317,8 +318,9 @@ stmts:
       SELECT
         datetime(ts.slot, 'unixepoch') AS rtime,
         ts.slot AS unixtime,
-        …
-        …
+        events.count AS rcount,
+        events.value / events.count AS ravg,
+        events.value AS rtotal
       FROM (
         WITH RECURSIVE timeslot(x) AS (
           SELECT (unixepoch()/(?1))*(?1)
```
data/lib/litestack/litemetric_collector.sql.yml CHANGED

```diff
@@ -7,13 +7,13 @@ schema:
       name TEXT DEFAULT('___') NOT NULL ON CONFLICT REPLACE,
       key TEXT DEFAULT('___') NOT NULL ON CONFLICT REPLACE,
       count INTEGER DEFAULT(0) NOT NULL ON CONFLICT REPLACE,
-      value
-      minimum
-      maximum
+      value REAL,
+      minimum REAL,
+      maximum REAL,
       created_at INTEGER DEFAULT((unixepoch()/300*300)) NOT NULL ON CONFLICT REPLACE,
       resolution TEXT DEFAULT('minute') NOT NULL,
       PRIMARY KEY(resolution, created_at, topic, name, key)
-    );
+    ) STRICT;

 stmts:

```
data/lib/litestack/litequeue.rb CHANGED

```diff
@@ -78,12 +78,12 @@ class Litequeue

   # deletes all the entries in all queues, or if a queue name is given, deletes all entries in that specific queue
   def clear(queue=nil)
-    run_sql("DELETE FROM queue WHERE iif(
+    run_sql("DELETE FROM queue WHERE iif(?1 IS NOT NULL, name = ?1, TRUE)", queue)
   end

   # returns a count of entries in all queues, or if a queue name is given, reutrns the count of entries in that queue
   def count(queue=nil)
-    run_sql("SELECT count(*) FROM queue WHERE iif(
+    run_sql("SELECT count(*) FROM queue WHERE iif(?1 IS NOT NULL, name = ?1, TRUE)", queue)[0][0]
   end

   # return the size of the queue file on disk
```
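Both rewritten statements lean on SQLite's `iif` so one prepared statement serves the filtered and unfiltered cases alike: with a NULL bind the predicate collapses to `TRUE`, otherwise it becomes `name = ?1`. The same pattern in isolation (illustrative table and rows):

```ruby
require "sqlite3"

db = SQLite3::Database.new(":memory:")
db.execute("CREATE TABLE queue(name TEXT, value TEXT)")
db.execute("INSERT INTO queue VALUES ('default', 'a'), ('low', 'b')")

sql = "SELECT count(*) FROM queue WHERE iif(?1 IS NOT NULL, name = ?1, TRUE)"
db.get_first_value(sql, [nil])       # => 2, NULL bind means no filter
db.get_first_value(sql, ["default"]) # => 1, filter to a single queue
```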
```diff
@@ -116,27 +116,16 @@ class Litequeue
   private

   def create_connection
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-          obj.each{|k, s| conn.execute(s)}
-          conn.user_version = v
+    super("#{__dir__}/litequeue.sql.yml") do |conn|
+      conn.wal_autocheckpoint = 10000
+      # check if there is an old database and convert entries to the new format
+      if conn.get_first_value("select count(*) from sqlite_master where name = '_ul_queue_'") == 1
+        conn.transaction(:immediate) do
+          conn.execute("INSERT INTO queue(fire_at, name, value, created_at) SELECT fire_at, queue, value, created_at FROM _ul_queue_")
+          conn.execute("DROP TABLE _ul_queue_")
         end
       end
-    end
-    sql["stmts"].each { |k, v| conn.stmts[k.to_sym] = conn.prepare(v) }
-    # check if there is an old database and convert entries to the new format
-    if conn.get_first_value("select count(*) from sqlite_master where name = '_ul_queue_'") == 1
-      conn.transaction(:immediate) do
-        conn.execute("INSERT INTO queue(fire_at, name, value, created_at) SELECT fire_at, queue, value, created_at FROM _ul_queue_")
-        conn.execute("DROP TABLE _ul_queue_")
-      end
-    end
-    conn
   end

 end
```
data/lib/litestack/litescheduler.rb ADDED

```diff
@@ -0,0 +1,84 @@
+# frozen_stringe_literal: true
+
+module Litescheduler
+  # cache the scheduler we are running in
+  # it is an error to change the scheduler for a process
+  # or for a child forked from that process
+  def self.backend
+    @backend ||= if Fiber.scheduler
+      :fiber
+    elsif defined? Polyphony
+      :polyphony
+    elsif defined? Iodine
+      :iodine
+    else
+      :threaded
+    end
+  end
+
+  # spawn a new execution context
+  def self.spawn(&block)
+    if backend == :fiber
+      Fiber.schedule(&block)
+    elsif backend == :polyphony
+      spin(&block)
+    elsif backend == :threaded or backend == :iodine
+      Thread.new(&block)
+    end
+    # we should never reach here
+  end
+
+  def self.storage
+    if backend == :fiber || backend == :poylphony
+      Fiber.current.storage
+    else
+      Thread.current
+    end
+  end
+
+  def self.current
+    if backend == :fiber || backend == :poylphony
+      Fiber.current
+    else
+      Thread.current
+    end
+  end
+
+  # switch the execution context to allow others to run
+  def self.switch
+    if backend == :fiber
+      Fiber.scheduler.yield
+      true
+    elsif backend == :polyphony
+      Fiber.current.schedule
+      Thread.current.switch_fiber
+      true
+    else
+      #Thread.pass
+      false
+    end
+  end
+
+  # bold assumption, we will only synchronize threaded code!
+  # If some code explicitly wants to synchronize a fiber
+  # they must send (true) as a parameter to this method
+  # else it is a no-op for fibers
+  def self.synchronize(fiber_sync = false, &block)
+    if backend == :fiber or backend == :polyphony
+      yield # do nothing, just run the block as is
+    else
+      self.mutex.synchronize(&block)
+    end
+  end
+
+  def self.max_contexts
+    return 50 if backend == :fiber || backend == :polyphony
+    5
+  end
+
+  # mutex initialization
+  def self.mutex
+    # a single mutex per process (is that ok?)
+    @@mutex ||= Mutex.new
+  end
+end
```