litestack 0.1.8 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +17 -4
- data/README.md +27 -0
- data/assets/litecable_logo_teal.png +0 -0
- data/bench/bench_cache_raw.rb +16 -5
- data/lib/action_cable/subscription_adapter/litecable.rb +36 -0
- data/lib/active_job/queue_adapters/litejob_adapter.rb +11 -11
- data/lib/litestack/litecable.rb +138 -0
- data/lib/litestack/litecable.sql.yml +24 -0
- data/lib/litestack/litecache.rb +56 -62
- data/lib/litestack/litecache.sql.yml +28 -0
- data/lib/litestack/litecache.yml +7 -0
- data/lib/litestack/litejob.rb +6 -2
- data/lib/litestack/litejobqueue.rb +68 -74
- data/lib/litestack/litemetric.rb +228 -0
- data/lib/litestack/litemetric.sql.yml +69 -0
- data/lib/litestack/litequeue.rb +51 -31
- data/lib/litestack/litequeue.sql.yml +34 -0
- data/lib/litestack/litesupport.rb +131 -1
- data/lib/litestack/metrics_app.rb +5 -0
- data/lib/litestack/version.rb +1 -1
- data/lib/litestack.rb +19 -10
- metadata +13 -6
- data/bench/bench_rails.rb +0 -81
- data/bench/bench_raw.rb +0 -72
- data/lib/active_job/queue_adapters/ultralite_adapter.rb +0 -49
data/lib/litestack/litejob.rb
CHANGED
@@ -43,7 +43,7 @@ module Litejob
   private
   def self.included(klass)
     klass.extend(ClassMethods)
-
+    klass.get_jobqueue
   end

   module ClassMethods
@@ -80,9 +80,13 @@ module Litejob
     def queue=(queue_name)
      @@queue = queue_name.to_s
     end
+
+    def options
+      @options ||= self::DEFAULT_OPTIONS rescue {}
+    end

     def get_jobqueue
-      Litejobqueue.jobqueue
+      Litejobqueue.jobqueue(options)
     end
   end

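With this change a class that includes Litejob can carry its own queue configuration: the new options class method reads a DEFAULT_OPTIONS constant (falling back to an empty hash) and get_jobqueue forwards it to Litejobqueue.jobqueue. A minimal sketch of how a job class might use it; EasyJob, its option values, and the perform_async call are illustrative, not taken from this diff:

require 'litestack'

class EasyJob
  include Litejob

  # read by the new Litejob::ClassMethods#options and passed to Litejobqueue.jobqueue
  DEFAULT_OPTIONS = { retries: 3, workers: 2 } # illustrative values

  def perform(params)
    # do the actual work
  end
end

EasyJob.perform_async('some params') # enqueue for asynchronous processing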
data/lib/litestack/litejobqueue.rb
CHANGED
@@ -1,8 +1,7 @@
 # frozen_stringe_literal: true
-
-require 'oj'
-require 'yaml'
+
 require_relative './litequeue'
+require_relative './litemetric'

 ##
 #Litejobqueue is a job queueing and processing system designed for Ruby applications. It is built on top of SQLite, which is an embedded relational database management system that is #lightweight and fast.
@@ -12,7 +11,9 @@ require_relative './litequeue'
 #Litejobqueue also integrates well with various I/O frameworks like Async and Polyphony, making it a great choice for Ruby applications that use these frameworks. It provides a #simple and easy-to-use API for adding jobs to the queue and for processing them.
 #
 #Overall, LiteJobQueue is an excellent choice for Ruby applications that require a lightweight, embedded job queueing and processing system that is fast, efficient, and easy to use.
-class Litejobqueue
+class Litejobqueue < Litequeue
+
+  include Litemetric::Measurable

   # the default options for the job queue
   # can be overriden by passing new options in a hash
@@ -40,13 +41,16 @@ class Litejobqueue
     dead_job_retention: 10 * 24 * 3600,
     gc_sleep_interval: 7200,
     logger: 'STDOUT',
-    sleep_intervals: [0.001, 0.005, 0.025, 0.125, 0.625, 1.0, 2.0]
+    sleep_intervals: [0.001, 0.005, 0.025, 0.125, 0.625, 1.0, 2.0],
+    metrics: false
   }

   @@queue = nil

   attr_reader :running

+  alias_method :_push, :push
+
   # a method that returns a single instance of the job queue
   # for use by Litejob
   def self.jobqueue(options = {})
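The new metrics: false default pairs with the Litemetric::Measurable include above; presumably setting it to true opts a queue instance into metric collection, though this hunk itself does not show where the flag is consumed. A hedged configuration sketch, with illustrative option values:

# assumption: the :metrics option toggles Litemetric collection for this queue
jobqueue = Litejobqueue.jobqueue(metrics: true, workers: 2)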
@@ -64,31 +68,10 @@ class Litejobqueue
   # jobqueue = Litejobqueue.new
   #
   def initialize(options = {})
-    @options = DEFAULT_OPTIONS.merge(options)
-    config = YAML.load_file(@options[:config_path]) rescue {} # an empty hash won't hurt
-    config.keys.each do |k| # symbolize keys
-      config[k.to_sym] = config[k]
-      config.delete k
-    end
-    @options.merge!(config)
-    @options.merge!(options) # make sure options passed to initialize trump everything else

-    @
+    @queues = [] # a place holder to allow workers to process
+    super(options)

-    # create logger
-    if @options[:logger].respond_to? :info
-      @logger = @options[:logger]
-    elsif @options[:logger] == 'STDOUT'
-      @logger = Logger.new(STDOUT)
-    elsif @options[:logger] == 'STDERR'
-      @logger = Logger.new(STDERR)
-    elsif @options[:logger].nil?
-      @logger = Logger.new(IO::NULL)
-    elsif @options[:logger].is_a? String
-      @logger = Logger.new(@options[:logger])
-    else
-      @logger = Logger.new(IO::NULL)
-    end
     # group and order queues according to their priority
     pgroups = {}
     @options[:queues].each do |q|
@@ -96,25 +79,6 @@ class Litejobqueue
       pgroups[q[1]] << [q[0], q[2] == "spawn"]
     end
     @queues = pgroups.keys.sort.reverse.collect{|p| [p, pgroups[p]]}
-    @running = true
-    @workers = @options[:workers].times.collect{ create_worker }
-
-    @gc = create_garbage_collector
-    @jobs_in_flight = 0
-    @mutex = Litesupport::Mutex.new
-
-    at_exit do
-      @running = false
-      puts "--- Litejob detected an exit attempt, cleaning up"
-      index = 0
-      while @jobs_in_flight > 0 and index < 5
-        puts "--- Waiting for #{@jobs_in_flight} jobs to finish"
-        sleep 1
-        index += 1
-      end
-      puts " --- Exiting with #{@jobs_in_flight} jobs in flight"
-    end
-
   end

   # push a job to the queue
@@ -127,8 +91,16 @@ class Litejobqueue
   # jobqueue.push(EasyJob, params) # the job will be performed asynchronously
   def push(jobclass, params, delay=0, queue=nil)
     payload = Oj.dump({klass: jobclass, params: params, retries: @options[:retries], queue: queue})
-    res =
-
+    res = super(payload, delay, queue)
+    capture(:enqueue, queue)
+    @logger.info("[litejob]:[ENQ] queue:#{res[1]} class:#{jobclass} job:#{res[0]}")
+    res
+  end
+
+  def repush(id, job, delay=0, queue=nil)
+    res = super(id, Oj.dump(job), delay, queue)
+    capture(:enqueue, queue)
+    @logger.info("[litejob]:[ENQ] queue:#{res[0]} class:#{job[:klass]} job:#{id}")
     res
   end

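push now delegates storage to Litequeue#push via super, captures an :enqueue metric and logs the assigned id and queue; repush does the same for jobs that are being retried or moved to the _dead queue. A short usage sketch (EasyJob and the params hash are illustrative):

jobqueue = Litejobqueue.jobqueue

# returns the id and queue reported by the underlying Litequeue row
id, queue_name = jobqueue.push(EasyJob, { user_id: 5 })

# schedule the same job class to run 30 seconds from now on the 'default' queue
jobqueue.push(EasyJob, { user_id: 5 }, 30, 'default')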
@@ -140,9 +112,9 @@ class Litejobqueue
   # end
   # jobqueue = Litejobqueue.new
   # id = jobqueue.push(EasyJob, params, 10) # queue for processing in 10 seconds
-  # jobqueue.delete(id
-  def delete(id
-    job =
+  # jobqueue.delete(id)
+  def delete(id)
+    job = super(id)
     @logger.info("[litejob]:[DEL] job: #{job}")
     job = Oj.load(job[0]) if job
     job
@@ -150,23 +122,40 @@ class Litejobqueue

   # delete all jobs in a certain named queue
   # or delete all jobs if the queue name is nil
-  def clear(queue=nil)
-
-  end
+  #def clear(queue=nil)
+    #@queue.clear(queue)
+  #end

   # stop the queue object (does not delete the jobs in the queue)
   # specifically useful for testing
   def stop
     @running = false
-
+    #@@queue = nil
+    close
   end


-  def count(queue=nil)
-    @queue.count(queue)
-  end
-
   private
+
+  def exit_callback
+    @running = false # stop all workers
+    puts "--- Litejob detected an exit, cleaning up"
+    index = 0
+    while @jobs_in_flight > 0 and index < 30 # 3 seconds grace period for jobs to finish
+      puts "--- Waiting for #{@jobs_in_flight} jobs to finish"
+      sleep 0.1
+      index += 1
+    end
+    puts " --- Exiting with #{@jobs_in_flight} jobs in flight"
+  end
+
+  def setup
+    super
+    @jobs_in_flight = 0
+    @workers = @options[:workers].times.collect{ create_worker }
+    @gc = create_garbage_collector
+    @mutex = Litesupport::Mutex.new
+  end

   def job_started
     Litesupport.synchronize(@mutex){@jobs_in_flight += 1}
@@ -176,6 +165,11 @@ class Litejobqueue
     Litesupport.synchronize(@mutex){@jobs_in_flight -= 1}
   end

+  # return a hash encapsulating the info about the current jobqueue
+  def snapshot
+    info
+  end
+
   # optionally run a job in its own context
   def schedule(spawn = false, &block)
     if spawn
@@ -195,40 +189,40 @@ class Litejobqueue
       level[1].each do |q| # iterate through the queues in the level
         index = 0
         max = level[0]
-        while index < max && payload =
+        while index < max && payload = pop(q[0], 1) # fearlessly use the same queue object
+          capture(:dequeue, q[0])
           processed += 1
           index += 1
           begin
             id, job = payload[0], payload[1]
             job = Oj.load(job)
-
-            job[:id] = id if job[:retries].to_i == @options[:retries].to_i
-            @logger.info "[litejob]:[DEQ] job:#{job}"
+            @logger.info "[litejob]:[DEQ] queue:#{q[0]} class:#{job[:klass]} job:#{id}"
             klass = eval(job[:klass])
             schedule(q[1]) do # run the job in a new context
               job_started #(Litesupport.current_context)
               begin
-                klass.new.perform(*job[:params])
-                @logger.info "[litejob]:[END]
+                measure(:perform, q[0]){ klass.new.perform(*job[:params]) }
+                @logger.info "[litejob]:[END] queue:#{q[0]} class:#{job[:klass]} job:#{id}"
               rescue Exception => e
                 # we can retry the failed job now
+                capture(:fail, q[0])
                 if job[:retries] == 0
-                  @logger.error "[litejob]:[ERR] job:
-
+                  @logger.error "[litejob]:[ERR] queue:#{q[0]} class:#{job[:klass]} job:#{id} failed with #{e}:#{e.message}, retries exhausted, moved to _dead queue"
+                  repush(id, job, @options[:dead_job_retention], '_dead')
                 else
+                  capture(:retry, q[0])
                   retry_delay = @options[:retry_delay_multiplier].pow(@options[:retries] - job[:retries]) * @options[:retry_delay]
                   job[:retries] -= 1
-                  @logger.error "[litejob]:[ERR] job:
-
-                  @logger.info "[litejob]:[ENQ] job: #{job} enqueued"
+                  @logger.error "[litejob]:[ERR] queue:#{q[0]} class:#{job[:klass]} job:#{id} failed with #{e}:#{e.message}, retrying in #{retry_delay} seconds"
+                  repush(id, job, retry_delay, q[0])
                 end
               end
               job_finished #(Litesupport.current_context)
             end
           rescue Exception => e
-            # this is an error in the extraction of job info
-            # retrying here will not be useful
+            # this is an error in the extraction of job info, retrying here will not be useful
             @logger.error "[litejob]:[ERR] failed to extract job info for: #{payload} with #{e}:#{e.message}"
+            job_finished #(Litesupport.current_context)
           end
           Litesupport.switch #give other contexts a chance to run here
         end
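The retry branch backs off exponentially: the delay is retry_delay_multiplier raised to the number of attempts already consumed, multiplied by the base retry_delay, and the job is then repushed to its own queue (or to _dead with dead_job_retention once retries run out). A worked sketch with illustrative option values (these are not defaults shown in this diff):

retries                = 5     # @options[:retries]
retry_delay            = 5     # @options[:retry_delay], in seconds
retry_delay_multiplier = 10    # @options[:retry_delay_multiplier]

job_retries_left = 3                            # job[:retries] when the failure happens
attempts_used    = retries - job_retries_left   # => 2

delay = retry_delay_multiplier.pow(attempts_used) * retry_delay
# => 10 ** 2 * 5 = 500 seconds before repush(id, job, delay, queue) runs the job again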
@@ -248,7 +242,7 @@ class Litejobqueue
   def create_garbage_collector
     Litesupport.spawn do
       while @running do
-        while jobs =
+        while jobs = pop('_dead', 100)
           if jobs[0].is_a? Array
             @logger.info "[litejob]:[DEL] garbage collector deleted #{jobs.length} dead jobs"
           else
data/lib/litestack/litemetric.rb
ADDED
@@ -0,0 +1,228 @@
+# frozen_stringe_literal: true
+
+require 'singleton'
+
+require_relative './litesupport'
+
+# this class is a singleton
+# and should remain so
+class Litemetric
+
+  include Singleton
+  include Litesupport::Liteconnection
+
+  DEFAULT_OPTIONS = {
+    config_path: "./litemetric.yml",
+    path: "./metrics.db",
+    sync: 1,
+    mmap_size: 16 * 1024 * 1024, # 16MB of memory to easily process 1 year worth of data
+    flush_interval: 10, # flush data every 1 minute
+    summarize_interval: 10 # summarize data every 1 minute
+  }
+
+  RESOLUTIONS = {
+    minute: 300, # 5 minutes (highest resolution)
+    hour: 3600, # 1 hour
+    day: 24*3600, # 1 day
+    week: 7*24*3600 # 1 week (lowest resolution)
+  }
+
+  # :nodoc:
+  def initialize(options = {})
+    init(options)
+  end
+
+  # registers a class for metrics to be collected
+  def register(identifier)
+    @registered[identifier] = true
+    @metrics[identifier] = {} unless @metrics[identifier]
+    run_stmt(:register_topic, identifier) # it is safe to call register topic multiple times with the same identifier
+  end
+
+  ## event capturing
+  ##################
+
+  def capture(topic, event, key=event, value=nil)
+    if key.is_a? Array
+      key.each{|k| capture_single_key(topic, event, k, value)}
+    else
+      capture_single_key(topic, event, key, value)
+    end
+  end
+
+  def capture_single_key(topic, event, key=event, value=nil)
+    @mutex.synchronize do
+      time_slot = current_time_slot # should that be 5 minutes?
+      topic_slot = @metrics[topic]
+      if event_slot = topic_slot[event]
+        if key_slot = event_slot[key]
+          if key_slot[time_slot]
+            key_slot[time_slot][:count] += 1
+            key_slot[time_slot][:value] += value unless value.nil?
+          else # new time slot
+            key_slot[time_slot] = {count: 1, value: value}
+          end
+        else
+          event_slot[key] = {time_slot => {count: 1, value: value}}
+        end
+      else # new event
+        topic_slot[event] = {key => {time_slot => {count: 1, value: value}}}
+      end
+    end
+  end
+
+
+  ## event reporting
+  ##################
+
+  def topics
+    run_stmt(:list_topics).to_a
+  end
+
+  def event_names(resolution, topic)
+    run_stmt(:list_event_names, resolution, topic).to_a
+  end
+
+  def keys(resolution, topic, event_name)
+    run_stmt(:list_event_keys, resolution, topic, event_name).to_a
+  end
+
+  def event_data(resolution, topic, event_name, key)
+    run_stmt(:list_events_by_key, resolution, topic, event_name, key).to_a
+  end
+
+  ## summarize data
+  #################
+
+  def summarize
+    run_stmt(:summarize_events, RESOLUTIONS[:hour], "hour", "minute")
+    run_stmt(:summarize_events, RESOLUTIONS[:day], "day", "hour")
+    run_stmt(:summarize_events, RESOLUTIONS[:week], "week", "day")
+    run_stmt(:delete_events, "minute", RESOLUTIONS[:hour]*1)
+    run_stmt(:delete_events, "hour", RESOLUTIONS[:day]*1)
+    run_stmt(:delete_events, "day", RESOLUTIONS[:week]*1)
+  end
+
+  ## background stuff
+  ###################
+
+  private
+
+  def exit_callback
+    puts "--- Litemetric detected an exit, flushing metrics"
+    @running = false
+    flush
+  end
+
+  def setup
+    super
+    @metrics = {}
+    @registered = {}
+    @flusher = create_flusher
+    @summarizer = create_summarizer
+    @mutex = Litesupport::Mutex.new
+  end
+
+  def current_time_slot
+    (Time.now.to_i / 300) * 300 # every 5 minutes
+  end
+
+  def flush
+    to_delete = []
+    @conn.acquire do |conn|
+      conn.transaction(:immediate) do
+        @metrics.each_pair do |topic, event_hash|
+          event_hash.each_pair do |event, key_hash|
+            key_hash.each_pair do |key, time_hash|
+              time_hash.each_pair do |time, data|
+                conn.stmts[:capture_event].execute!(topic, event.to_s, key, time, data[:count], data[:value]) if data
+                time_hash[time] = nil
+                to_delete << [topic, event, key, time]
+              end
+            end
+          end
+        end
+      end
+    end
+    to_delete.each do |r|
+      @metrics[r[0]][r[1]][r[2]].delete(r[3])
+      @metrics[r[0]][r[1]].delete(r[2]) if @metrics[r[0]][r[1]][r[2]].empty?
+      @metrics[r[0]].delete(r[1]) if @metrics[r[0]][r[1]].empty?
+    end
+  end
+
+  def create_connection
+    conn = super
+    conn.wal_autocheckpoint = 10000
+    sql = YAML.load_file("#{__dir__}/litemetric.sql.yml")
+    version = conn.get_first_value("PRAGMA user_version")
+    sql["schema"].each_pair do |v, obj|
+      if v > version
+        conn.transaction do
+          obj.each{|k, s| conn.execute(s)}
+          conn.user_version = v
+        end
+      end
+    end
+    sql["stmts"].each { |k, v| conn.stmts[k.to_sym] = conn.prepare(v) }
+    conn
+  end
+
+  def create_flusher
+    Litesupport.spawn do
+      while @running do
+        sleep @options[:flush_interval]
+        @mutex.synchronize do
+          flush
+        end
+      end
+    end
+  end
+
+  def create_summarizer
+    Litesupport.spawn do
+      while @running do
+        sleep @options[:summarize_interval]
+        summarize
+      end
+    end
+  end
+
+end
+
+## Measurable Module
+####################
+
+class Litemetric
+  module Measurable
+
+    def collect_metrics
+      @litemetric = Litemetric.instance
+      @litemetric.register(metrics_identifier)
+    end
+
+    def metrics_identifier
+      self.class.name # override in included classes
+    end
+
+    def capture(event, key=event, value=nil)
+      return unless @litemetric
+      @litemetric.capture(metrics_identifier, event, key, value)
+    end
+
+    def measure(event, key=event)
+      return yield unless @litemetric
+      t1 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+      res = yield
+      t2 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+      value = (( t2 - t1 ) * 1000).round # capture time in milliseconds
+      capture(event, key, value)
+      res
+    end
+
+    def snapshot
+      raise Litestack::NotImplementedError
+    end
+
+  end
+end
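Litemetric::Measurable is the mixin Litejobqueue uses above to report into the metrics database. Wiring it into one's own class might look like the following sketch, which assumes the including class calls collect_metrics itself; the Crawler class, its topic name, and its events are illustrative:

class Crawler
  include Litemetric::Measurable

  def initialize
    collect_metrics # grab the Litemetric singleton and register this topic
  end

  def metrics_identifier
    'Crawler' # topic under which this class's events are stored
  end

  def fetch(url)
    capture(:request, url)          # count an event, keyed by the url
    measure(:fetch_time, url) do    # record the block's duration in milliseconds
      # ... do the actual fetching here ...
    end
  end
end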
data/lib/litestack/litemetric.sql.yml
ADDED
@@ -0,0 +1,69 @@
+schema:
+  1:
+    create_topics: >
+      CREATE TABLE IF NOT EXISTS topics(
+        name text PRIMARY KEY NOT NULL
+      ) WITHOUT ROWID;
+    create_events: >
+      CREATE TABLE IF NOT EXISTS events(
+        topic text NOT NULL references topics(name) ON DELETE CASCADE,
+        name TEXT NOT NULL,
+        key TEXT NOT NULL,
+        count INTEGER DEFAULT(0) NOT NULL ON CONFLICT REPLACE,
+        value INTEGER,
+        minimum INTEGER,
+        maximum INTEGER,
+        created_at INTEGER DEFAULT((unixepoch()/300*300)) NOT NULL,
+        resolution TEXT DEFAULT('minute') NOT NULL,
+        PRIMARY KEY(resolution, topic, name, key, created_at)
+      ) WITHOUT ROWID;
+    create_index_on_event: CREATE INDEX IF NOT EXISTS events_by_resolution ON events(resolution, created_at);
+
+stmts:
+  # register topic
+  register_topic: INSERT INTO topics VALUES (?) ON CONFLICT DO NOTHING;
+
+  capture_event: >
+    INSERT INTO events(topic, name, key, created_at, count, value, minimum, maximum) VALUES ($1, $2, $3, $4, $5, $6, $6, $6)
+    ON CONFLICT DO
+    UPDATE SET count = count + EXCLUDED.count, value = value + EXCLUDED.value, minimum = min(minimum, EXCLUDED.minimum), maximum = max(maximum, EXCLUDED.maximum)
+
+  # requires an index on (resolution, created_at)
+  summarize_events: >
+    INSERT INTO events (topic, name, key, count, value, minimum, maximum, created_at, resolution ) SELECT
+      topic,
+      name,
+      key,
+      sum(count) as count,
+      sum(value) as value,
+      min(minimum) as minimum,
+      max(maximum) as maximum,
+      (created_at/$1)*$1 as created,
+      $2
+    FROM events WHERE resolution = $3 AND created_at < (unixepoch()/$1)*$1 GROUP BY topic, name, key, created ON CONFLICT DO UPDATE
+    SET count = count + EXCLUDED.count, value = value + EXCLUDED.value, minimum = min(minimum, EXCLUDED.minimum), maximum = max(maximum, EXCLUDED.maximum);
+
+  # requires an index on (resolution, created_at)
+  delete_events: DELETE FROM events WHERE resolution = $3 AND created_at < (unixepoch() - $4);
+
+  # select topics from the topics table
+  list_topics: SELECT name FROM topics;
+
+  # requires an index on (resolution, topic, name)
+  list_event_names: >
+    SELECT name, sum(count) as count, count(distinct name) as name, sum(value) as value, min(minimum), max(maximum)
+    FROM events WHERE resolution = ? AND topic = ? GROUP BY name ORDER BY count;
+
+  # requires an index on (resolution, topic, name, key)
+  list_event_keys: >
+    SELECT key, sum(count) as count, sum(value) as value, min(minimum), max(maximum)
+    FROM events WHERE resolution = ? AND topic = ? AND name = ? GROUP BY key ORDER BY count;
+
+  # requires an index on (resolution, topic, name, key, created_at)
+  list_events_by_key: >
+    SELECT * FROM events WHERE resolution = $1 AND topic = $2 AND name = $3 AND key = $4 ORDER BY created_at ASC;
+
+  # requires an index on (resolution, topic, name, key, created_at)
+  list_all_events: >
+    SELECT * FROM events WHERE resolution = ? AND topic = ? ORDER BY name, key, created_at ASC;
+
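The list_* statements above back Litemetric's reporting methods (topics, event_names, keys, event_data). Reading back what a job queue has recorded might look like this sketch; the resolution strings come from the events table default and the summarizer, while the topic and event names are the ones Litejobqueue captures:

metrics = Litemetric.instance

metrics.topics                                        # registered topics, e.g. [["Litejobqueue"]]
metrics.event_names("minute", "Litejobqueue")         # per-event counts at 5-minute resolution
metrics.keys("minute", "Litejobqueue", "enqueue")     # queue names used as keys for :enqueue
metrics.event_data("hour", "Litejobqueue", "perform", "default") # hourly rows for one key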