litestack 0.1.8 → 0.2.1
This diff shows the changes between publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +17 -4
- data/README.md +27 -0
- data/assets/litecable_logo_teal.png +0 -0
- data/bench/bench_cache_raw.rb +16 -5
- data/lib/action_cable/subscription_adapter/litecable.rb +36 -0
- data/lib/active_job/queue_adapters/litejob_adapter.rb +11 -11
- data/lib/litestack/litecable.rb +138 -0
- data/lib/litestack/litecable.sql.yml +24 -0
- data/lib/litestack/litecache.rb +56 -62
- data/lib/litestack/litecache.sql.yml +28 -0
- data/lib/litestack/litecache.yml +7 -0
- data/lib/litestack/litejob.rb +6 -2
- data/lib/litestack/litejobqueue.rb +68 -74
- data/lib/litestack/litemetric.rb +228 -0
- data/lib/litestack/litemetric.sql.yml +69 -0
- data/lib/litestack/litequeue.rb +51 -31
- data/lib/litestack/litequeue.sql.yml +34 -0
- data/lib/litestack/litesupport.rb +131 -1
- data/lib/litestack/metrics_app.rb +5 -0
- data/lib/litestack/version.rb +1 -1
- data/lib/litestack.rb +19 -10
- metadata +13 -6
- data/bench/bench_rails.rb +0 -81
- data/bench/bench_raw.rb +0 -72
- data/lib/active_job/queue_adapters/ultralite_adapter.rb +0 -49
data/lib/litestack/litequeue.rb
CHANGED
@@ -3,6 +3,8 @@
 # all components should require the support module
 require_relative 'litesupport'

+#require 'securerandom'
+
 ##
 #Litequeue is a simple queueing system for Ruby applications that allows you to push and pop values from a queue. It provides a straightforward API for creating and managing named queues, and for adding and removing values from those queues. Additionally, it offers options for scheduling pops at a certain time in the future, which can be useful for delaying processing until a later time.
 #
@@ -18,10 +20,12 @@ class Litequeue
   # mmap_size: 128 * 1024 * 1024 -> 128MB to be held in memory
   # sync: 1 -> sync only when checkpointing

+  include Litesupport::Liteconnection
+
   DEFAULT_OPTIONS = {
     path: "./queue.db",
     mmap_size: 32 * 1024,
-    sync:
+    sync: 0
   }

   # create a new instance of the litequeue object
@@ -33,8 +37,7 @@ class Litequeue
   # queue.pop # => "somevalue"

   def initialize(options = {})
-
-    @queue = Litesupport::Pool.new(1){create_db} # delegate the db creation to the litepool
+    init(options)
   end

   # push an item to the queue, optionally specifying the queue name (defaults to default) and after how many seconds it should be ready to pop (defaults to zero)
@@ -45,15 +48,20 @@ class Litequeue
     # also bring back the synchronize block, to prevent
     # a race condition if a thread hits the busy handler
     # before the current thread proceeds after a backoff
-
-
+    #id = SecureRandom.uuid # this is somehow expensive, can we improve?
+    run_stmt(:push, queue, delay, value)[0]
+  end
+
+  def repush(id, value, delay=0, queue='default')
+    run_stmt(:repush, id, queue, delay, value)[0]
   end

   alias_method :"<<", :push
+  alias_method :"<<<", :repush

   # pop an item from the queue, optionally with a specific queue name (default queue name is 'default')
   def pop(queue='default', limit = 1)
-    res =
+    res = run_stmt(:pop, queue, limit)
     return res[0] if res.length == 1
     return nil if res.empty?
     res
@@ -64,49 +72,61 @@ class Litequeue
   # id = queue.push("somevalue")
   # queue.delete(id) # => "somevalue"
   # queue.pop # => nil
-  def delete(id
-
-    result = @queue.acquire{|q| q.stmts[:delete].execute!(queue, fire_at.to_i, id)[0] }
+  def delete(id)
+    result = run_stmt(:delete, id)[0]
   end

   # deletes all the entries in all queues, or if a queue name is given, deletes all entries in that specific queue
   def clear(queue=nil)
-
+    run_sql("DELETE FROM queue WHERE iif(?, name = ?, 1)", queue)
   end

   # returns a count of entries in all queues, or if a queue name is given, reutrns the count of entries in that queue
   def count(queue=nil)
-
+    run_sql("SELECT count(*) FROM queue WHERE iif(?, name = ?, 1)", queue)[0][0]
   end

   # return the size of the queue file on disk
   def size
-
+    run_sql("SELECT size.page_size * count.page_count FROM pragma_page_size() AS size, pragma_page_count() AS count")[0][0]
+  end
+
+  def queues_info
+    run_sql("SELECT name, count(*) AS count, avg(unixepoch() - created_at), min(unixepoch() - created_at), max(unixepoch() - created_at) FROM queue GROUP BY name ORDER BY count DESC ")
   end

-  def
-
-
-
+  def info
+    counts = {}
+    queues_info.each do |qc|
+      counts[qc[0]] = {count: qc[1], time_in_queue: {avg: qc[2], min: qc[3], max: qc[4]}}
    end
+    {size: size, count: count, info: counts}
   end

   private
-
-  def
-
-
-
-
-
-
-
-
+
+  def create_connection
+    conn = super
+    conn.wal_autocheckpoint = 10000
+    sql = YAML.load_file("#{__dir__}/litequeue.sql.yml")
+    version = conn.get_first_value("PRAGMA user_version")
+    sql["schema"].each_pair do |v, obj|
+      if v > version
+        conn.transaction(:immediate) do
+          obj.each{|k, s| conn.execute(s)}
+          conn.user_version = v
+        end
+      end
+    end
+    sql["stmts"].each { |k, v| conn.stmts[k.to_sym] = conn.prepare(v) }
+    # check if there is an old database and convert entries to the new format
+    if conn.get_first_value("select count(*) from sqlite_master where name = '_ul_queue_'") == 1
+      conn.transaction(:immediate) do
+        conn.execute("INSERT INTO queue(fire_at, name, value, created_at) SELECT fire_at, queue, value, created_at FROM _ul_queue_")
+        conn.execute("DROP TABLE _ul_queue_")
+      end
+    end
+    conn
   end

-
 end
-
-
-
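Taken together, the litequeue.rb changes swap the hand-rolled Litesupport::Pool setup for the shared Litesupport::Liteconnection plumbing and add repush, queues_info and info. Below is a minimal usage sketch based only on the methods and SQL visible in this diff; the push signature is assumed to mirror the repush signature shown above, and the return values follow the RETURNING clauses in litequeue.sql.yml below.

require 'litestack'

queue = Litequeue.new(path: "./queue.db")   # unspecified options fall back to DEFAULT_OPTIONS / an optional config file

id, _name = queue.push("somevalue")         # enqueue on the 'default' queue, ready immediately
queue.push("send email", 60, "mailers")     # ready to pop in 60 seconds, on the 'mailers' queue

popped_id, value = queue.pop                # => [id, "somevalue"], per the pop statement's RETURNING id, value
queue.repush(popped_id, value, 5)           # put it back, ready again in 5 seconds (also aliased as <<<)

queue.count("mailers")                      # entries in one queue; queue.count counts all queues
queue.info                                  # {size:, count:, info: {queue_name => {count:, time_in_queue: {avg:, min:, max:}}}}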
data/lib/litestack/litequeue.sql.yml
ADDED
@@ -0,0 +1,34 @@
+schema:
+  1:
+    create_table_queue: >
+      CREATE TABLE IF NOT EXISTS queue(
+        id TEXT PRIMARY KEY DEFAULT(hex(randomblob(32))) NOT NULL ON CONFLICT REPLACE,
+        name TEXT DEFAULT('default') NOT NULL ON CONFLICT REPLACE,
+        fire_at INTEGER DEFAULT(unixepoch()) NOT NULL ON CONFLICT REPLACE,
+        value TEXT,
+        created_at INTEGER DEFAULT(unixepoch()) NOT NULL ON CONFLICT REPLACE
+      ) WITHOUT ROWID
+
+    create_index_queue_by_name: >
+      CREATE INDEX IF NOT EXISTS idx_queue_by_name ON queue(name, fire_at ASC)
+
+stmts:
+
+  push: INSERT INTO queue(id, name, fire_at, value) VALUES (hex(randomblob(32)), $1, (unixepoch() + $2), $3) RETURNING id, name
+
+  repush: INSERT INTO queue(id, name, fire_at, value) VALUES (?, ?, (unixepoch() + ?), ?) RETURNING name
+
+  pop: >
+    DELETE FROM queue
+    WHERE (name, fire_at, id)
+    IN (
+      SELECT name, fire_at, id FROM queue
+      WHERE name = ifnull($1, 'default')
+      AND fire_at <= (unixepoch())
+      ORDER BY fire_at ASC
+      LIMIT ifnull($2, 1)
+    )
+    RETURNING id, value
+
+  delete: DELETE FROM queue WHERE id = $1 RETURNING value
+
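The *.sql.yml convention introduced here keeps DDL and prepared statements out of the Ruby code: create_connection (shown in the litequeue.rb diff above) applies every schema entry whose integer key is newer than PRAGMA user_version, then prepares everything under stmts. The following standalone sketch consumes the file directly to show how the positional parameters line up; it is not how litestack loads it, the path is illustrative, and it assumes a SQLite build new enough to support RETURNING.

require 'yaml'
require 'sqlite3'

sql = YAML.load_file("litequeue.sql.yml")               # illustrative path
db  = SQLite3::Database.new(":memory:")

sql["schema"][1].each_value { |ddl| db.execute(ddl) }   # apply version 1 of the schema

push = db.prepare(sql["stmts"]["push"])                 # $1 = queue name, $2 = delay in seconds, $3 = value
pop  = db.prepare(sql["stmts"]["pop"])                  # $1 = queue name (nil => 'default'), $2 = limit (nil => 1)

id, _name = push.execute("default", 0, "somevalue").first
p pop.execute("default", 1).first                       # => [id, "somevalue"]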
data/lib/litestack/litesupport.rb
CHANGED
@@ -1,4 +1,9 @@
+# frozen_stringe_literal: true
+
 require 'sqlite3'
+require 'logger'
+require 'oj'
+require 'yaml'

 module Litesupport

@@ -90,7 +95,7 @@ module Litesupport
   # common db object options
   def self.create_db(path)
     db = SQLite3::Database.new(path)
-    db.busy_handler{ switch || sleep(0.
+    db.busy_handler{ switch || sleep(0.0001) }
     db.journal_mode = "WAL"
     db.instance_variable_set(:@stmts, {})
     class << db
@@ -167,5 +172,130 @@ module Litesupport
     end

  end
+
+  module ForkListener
+    def self.listeners
+      @listeners ||= []
+    end
+
+    def self.listen(&block)
+      listeners << block
+    end
+  end
+
+  module Forkable
+
+    def _fork(*args)
+      ppid = Process.pid
+      result = super
+      if Process.pid != ppid && [:threaded, :iodine].include?(Litesupport.environment)
+        ForkListener.listeners.each{|l| l.call }
+      end
+      result
+    end
+
+  end

+  module Liteconnection
+
+    include Forkable
+
+    # close, setup, run_stmt and run_sql assume a single connection was created
+    def close
+      @running = false
+      @conn.acquire do |q|
+        q.stmts.each_pair {|k, v| q.stmts[k].close }
+        q.close
+      end
+    end
+
+    private # all methods are private
+
+    def init(options = {})
+      #c configure the object, loading options from the appropriate location
+      configure(options)
+      # setup connections and background threads
+      setup
+      # handle process exiting
+      at_exit do
+        exit_callback
+      end
+      # handle forking (restart connections and background threads)
+      Litesupport::ForkListener.listen do
+        setup
+      end
+    end
+
+    def configure(options = {})
+      # detect environment (production, development, etc.)
+      env = "development"
+      if defined? Rails
+        env = ENV["RAILS_ENV"]
+      elsif ENV["RACK_ENV"]
+        env = ENV["RACK_ENV"]
+      elsif ENV["APP_ENV"]
+        env = ENV["RACK_ENV"]
+      end
+      defaults = self.class::DEFAULT_OPTIONS rescue {}
+      @options = defaults.merge(options)
+      config = YAML.load_file(@options[:config_path]) rescue {} # an empty hash won't hurt
+      config = config[env] if config[env] # if there is a config for the current environment defined then use it, otherwise use the top level declaration
+      config.keys.each do |k| # symbolize keys
+        config[k.to_sym] = config[k]
+        config.delete k
+      end
+      @options.merge!(config)
+      @options.merge!(options) # make sure options passed to initialize trump everything else
+    end
+
+    def setup
+      @conn = create_pooled_connection
+      @logger = create_logger
+      @running = true
+    end
+
+    def create_logger
+      @options[:logger] = nil unless @options[:logger]
+      return @options[:logger] if @options[:logger].respond_to? :info
+      return Logger.new(STDOUT) if @options[:logger] == 'STDOUT'
+      return Logger.new(STDERR) if @options[:logger] == 'STDERR'
+      return Logger.new(@options[:logger]) if @options[:logger].is_a? String
+      return Logger.new(IO::NULL)
+    end
+
+    def exit_callback
+      close
+    end
+
+    def run_stmt(stmt, *args)
+      @conn.acquire{|q| q.stmts[stmt].execute!(*args) }
+    end
+
+    def run_sql(sql, *args)
+      @conn.acquire{|q| q.execute(sql, *args) }
+    end
+
+    def create_pooled_connection(count = 1)
+      Litesupport::Pool.new(1){create_connection}
+    end
+
+    # common db object options
+    def create_connection
+      conn = SQLite3::Database.new(@options[:path])
+      conn.busy_handler{ Litesupport.switch || sleep(rand * 0.002) }
+      conn.journal_mode = "WAL"
+      conn.synchronous = @options[:sync] || 1
+      conn.mmap_size = @options[:mmap_size] || 0
+      conn.instance_variable_set(:@stmts, {})
+      class << conn
+        attr_reader :stmts
+      end
+      conn
+    end
+
+  end
+
 end
+
+Process.singleton_class.prepend(Litesupport::Forkable)
+
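Litesupport::Liteconnection is the new base that Litequeue (and, by the look of the other new files, the other components) build on: init runs configure (class DEFAULT_OPTIONS, then an optional YAML file at config_path with per-environment sections, then the options passed to the constructor), sets up a pooled connection plus a logger, and re-runs setup after a fork via ForkListener. A hedged sketch of a hypothetical component using it follows; the class name, table, and option values are invented for illustration and are not part of litestack.

require 'litestack'

class Litecounter
  include Litesupport::Liteconnection   # provides init, configure, setup, run_stmt, run_sql, close

  DEFAULT_OPTIONS = {
    path: "./counter.db",
    config_path: "./config/litecounter.yml",  # optional; per-environment keys there override these defaults
    sync: 1,
    mmap_size: 0
  }

  def initialize(options = {})
    init(options)  # configure + setup + at_exit/fork hooks, as defined in Liteconnection
  end

  def increment(key)
    run_sql("INSERT INTO counters(key, value) VALUES (?, 1) ON CONFLICT(key) DO UPDATE SET value = value + 1", key)
  end

  private

  # Liteconnection#create_connection opens the database with the WAL, sync and mmap_size
  # settings from @options; a component can override it (as Litequeue does) to create
  # tables and prepared statements before handing the connection back.
  def create_connection
    conn = super
    conn.execute("CREATE TABLE IF NOT EXISTS counters(key TEXT PRIMARY KEY, value INTEGER)")
    conn
  end
end

A config/litecounter.yml for such a component could carry the same keys either at the top level or under environment names (development:, production:), which configure merges over DEFAULT_OPTIONS before the constructor arguments are applied last.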
data/lib/litestack/version.rb
CHANGED
data/lib/litestack.rb
CHANGED
@@ -1,15 +1,24 @@
 # frozen_string_literal: true

 # load core classes
-
-require_relative "litestack/litesupport"
-
-require_relative "litestack/
-require_relative "litestack/
+require_relative "./litestack/version"
+require_relative "./litestack/litesupport"
+require_relative "./litestack/litemetric"
+require_relative "./litestack/litedb"
+require_relative "./litestack/litecache"
+require_relative "./litestack/litejob"
+require_relative "./litestack/litecable"

 # conditionally load integration with other libraries
-
-
-require_relative "
-require_relative "
-
+require_relative "./sequel/adapters/litedb" if defined? Sequel
+require_relative "./active_record/connection_adapters/litedb_adapter" if defined? ActiveRecord
+require_relative "./railties/rails/commands/dbconsole" if defined? Rails && defined? ActiveRecord
+require_relative "./active_support/cache/litecache" if defined? ActiveSupport
+require_relative "./active_job/queue_adapters/litejob_adapter" if defined? ActiveJob
+require_relative "./action_cable/subscription_adapter/litecable" if defined? ActionCable
+
+module Litestack
+  class NotImplementedError < Exception; end
+  class TimeoutError < Exception; end
+  class DeadlockError < Exception; end
+end
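Because the integrations are now required only when their host framework is already loaded, a Rails application that requires the litestack gem picks up the ActiveRecord, ActiveSupport::Cache, ActiveJob and ActionCable adapters automatically. The configuration sketch below is an assumption: the adapter symbols are inferred from the adapter file names listed in this diff rather than stated in it, so check the gem's README for the authoritative settings.

# config/application.rb (sketch; adapter names inferred from the files added in this release)
module MyApp
  class Application < Rails::Application
    config.active_job.queue_adapter = :litejob   # lib/active_job/queue_adapters/litejob_adapter.rb
    config.cache_store = :litecache              # lib/active_support/cache/litecache.rb
    # config/cable.yml would point its adapter at litecable
    # (lib/action_cable/subscription_adapter/litecable.rb) in the same way.
  end
end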
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: litestack
 version: !ruby/object:Gem::Version
-  version: 0.1
+  version: 0.2.1
 platform: ruby
 authors:
 - Mohamed Hassan
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-
+date: 2023-05-08 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: sqlite3
@@ -52,6 +52,7 @@ files:
 - README.md
 - Rakefile
 - WHYLITESTACK.md
+- assets/litecable_logo_teal.png
 - assets/litecache_logo_teal.png
 - assets/litedb_logo_teal.png
 - assets/litejob_logo_teal.png
@@ -63,22 +64,28 @@ files:
 - bench/bench_jobs_rails.rb
 - bench/bench_jobs_raw.rb
 - bench/bench_queue.rb
-- bench/bench_rails.rb
-- bench/bench_raw.rb
 - bench/rails_job.rb
 - bench/skjob.rb
 - bench/uljob.rb
+- lib/action_cable/subscription_adapter/litecable.rb
 - lib/active_job/queue_adapters/litejob_adapter.rb
-- lib/active_job/queue_adapters/ultralite_adapter.rb
 - lib/active_record/connection_adapters/litedb_adapter.rb
 - lib/active_support/cache/litecache.rb
 - lib/litestack.rb
+- lib/litestack/litecable.rb
+- lib/litestack/litecable.sql.yml
 - lib/litestack/litecache.rb
+- lib/litestack/litecache.sql.yml
+- lib/litestack/litecache.yml
 - lib/litestack/litedb.rb
 - lib/litestack/litejob.rb
 - lib/litestack/litejobqueue.rb
+- lib/litestack/litemetric.rb
+- lib/litestack/litemetric.sql.yml
 - lib/litestack/litequeue.rb
+- lib/litestack/litequeue.sql.yml
 - lib/litestack/litesupport.rb
+- lib/litestack/metrics_app.rb
 - lib/litestack/version.rb
 - lib/railties/rails/commands/dbconsole.rb
 - lib/sequel/adapters/litedb.rb
@@ -108,7 +115,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.4.
+rubygems_version: 3.4.8
 signing_key:
 specification_version: 4
 summary: A SQLite based, lightning fast, super efficient and dead simple to setup
data/bench/bench_rails.rb
DELETED
@@ -1,81 +0,0 @@
-require 'ultralite'
-require 'active_support'
-require './bench'
-
-cache = ActiveSupport::Cache.lookup_store(:ultralite_cache_store, {})
-mem = ActiveSupport::Cache.lookup_store(:ultralite_cache_store, {path: ":memory:"})
-redis = ActiveSupport::Cache.lookup_store(:redis_cache_store, {})
-
-values = []
-keys = []
-count = 1000
-
-[10, 100, 1000, 10000].each do |size|
-  count.times do
-    keys << random_str(10)
-    values << random_str(size)
-  end
-
-  random_keys = keys.shuffle
-  puts "Benchmarks for values of size #{size} bytes"
-  puts "=========================================================="
-  puts "== Writes =="
-  bench("Ultralite cache writes", count) do |i|
-    cache.write(keys[i], values[i])
-  end
-
-  bench("Ultralite memory cache writes", count) do |i|
-    mem.write(keys[i], values[i])
-  end
-
-  bench("Redis writes", count) do |i|
-    redis.write(keys[i], values[i])
-  end
-
-  puts "== Reads =="
-  bench("Ultralite cache reads", count) do |i|
-    cache.read(random_keys[i])
-  end
-
-  bench("Ultralite memory cache reads", count) do |i|
-    mem.read(random_keys[i])
-  end
-
-  bench("Redis reads", count) do |i|
-    redis.read(random_keys[i])
-  end
-  puts "=========================================================="
-
-
-  keys = []
-  values = []
-end
-
-
-cache.write("somekey", 1, raw: true)
-#puts cache.read("somekey", raw: true)
-
-mem.write("somekey", 1, raw: true)
-#puts mem.read("somekey", raw: true)
-
-redis.write("somekey", 1, raw: true)
-#puts redis.read("somekey", raw: true)
-
-puts "Benchmarks for incrementing integer values"
-puts "=========================================================="
-
-bench("Ultralite cache increment", count) do
-  cache.increment("somekey", 1, raw: true)
-end
-
-bench("Ultralite memory cache increment", count) do
-  mem.increment("somekey", 1, raw: true)
-end
-
-bench("Redis increment", count) do
-  redis.increment("somekey", 1, raw: true )
-end
-
-cache.clear
-redis.clear
-
data/bench/bench_raw.rb
DELETED
@@ -1,72 +0,0 @@
-require 'ultralite'
-require './bench'
-require 'redis'
-require 'sqlite3'
-
-cache = Ultralite::Cache.new # default settings
-#mem = Ultralite::Cache.new(path: ":memory:") # default settings
-redis = Redis.new # default settings
-
-values = []
-keys = []
-count = 1000
-count.times { keys << random_str(10) }
-
-[10, 100, 1000, 10000].each do |size|
-  count.times do
-    values << random_str(size)
-  end
-
-  random_keys = keys.shuffle
-  puts "Benchmarks for values of size #{size} bytes"
-  puts "=========================================================="
-  puts "== Writes =="
-  bench("Ultralite cache writes", count) do |i|
-    cache.set(keys[i], values[i])
-  end
-
-  # bench("Ultralite memory cache writes", count) do |i|
-  #   mem.set(keys[i], values[i])
-  # end
-
-  bench("Redis writes", count) do |i|
-    redis.set(keys[i], values[i])
-  end
-
-  puts "== Reads =="
-  bench("Ultralite cache reads", count) do |i|
-    cache.get(random_keys[i])
-  end
-
-  # bench("Ultralite memory cache reads", count) do |i|
-  #   cache.get(random_keys[i])
-  # end
-
-  bench("Redis reads", count) do |i|
-    redis.get(random_keys[i])
-  end
-  puts "=========================================================="
-
-  values = []
-end
-
-
-cache.set("somekey", 1)
-#mem.set("somekey", 1)
-redis.set("somekey", 1)
-
-bench("Ultralite cache increment") do
-  cache.increment("somekey", 1)
-end
-
-#bench("Ultralite memory cache increment") do
-#  mem.increment("somekey", 1)
-#end
-
-bench("Redis increment") do
-  redis.incr("somekey")
-end
-
-cache.clear
-redis.flushdb
-
data/lib/active_job/queue_adapters/ultralite_adapter.rb
DELETED
@@ -1,49 +0,0 @@
-# frozen_string_literal: true
-
-require_relative '../../ultralite/job.rb'
-require "active_support/core_ext/enumerable"
-require "active_support/core_ext/array/access"
-require "active_job"
-
-module ActiveJob
-  module QueueAdapters
-    # == Ultralite adapter for Active Job
-    #
-    #
-    #   Rails.application.config.active_job.queue_adapter = :ultralite
-    class UltraliteAdapter
-
-      DEFAULT_OPTIONS = {
-        config_path: "./config/ultrajob.yml",
-        path: "../db/queue.db",
-        queues: [["default", 1, "spawn"]],
-        workers: 1
-      }
-
-      DEFAULT_CONFIG_PATH = "./config/ultrajob.yml"
-
-      def initialize(options={})
-        Job.options = DEFAULT_OPTIONS.merge(options)
-      end
-
-      def enqueue(job) # :nodoc:
-        Job.queue = job.queue_name
-        Job.perform_async(job.serialize)
-      end
-
-      def enqueue_at(job, timestamp) # :nodoc:
-        Job.queue = job.queue_name
-        Job.perform_at(timestamp, job.serialize)
-      end
-
-      class Job # :nodoc:
-
-        include ::Ultralite::Job
-
-        def perform(job_data)
-          Base.execute job_data
-        end
-      end
-    end
-  end
-end