litestack 0.1.1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/CHANGELOG.md +5 -0
- data/Gemfile +8 -0
- data/LICENSE.txt +21 -0
- data/README.md +166 -0
- data/Rakefile +12 -0
- data/WHYLITESTACK.md +26 -0
- data/assets/litecache_logo_teal.png +0 -0
- data/assets/litedb_logo_teal.png +0 -0
- data/assets/litejob_logo_teal.png +0 -0
- data/assets/litestack_logo_teal.png +0 -0
- data/assets/litestack_logo_teal_large.png +0 -0
- data/bench/bench.rb +23 -0
- data/bench/bench_cache_rails.rb +67 -0
- data/bench/bench_cache_raw.rb +68 -0
- data/bench/bench_jobs_rails.rb +38 -0
- data/bench/bench_jobs_raw.rb +27 -0
- data/bench/bench_queue.rb +16 -0
- data/bench/bench_rails.rb +81 -0
- data/bench/bench_raw.rb +72 -0
- data/bench/rails_job.rb +18 -0
- data/bench/skjob.rb +13 -0
- data/bench/uljob.rb +15 -0
- data/lib/active_job/queue_adapters/litejob_adapter.rb +47 -0
- data/lib/active_job/queue_adapters/ultralite_adapter.rb +49 -0
- data/lib/active_record/connection_adapters/litedb_adapter.rb +102 -0
- data/lib/active_support/cache/litecache.rb +100 -0
- data/lib/active_support/cache/ultralite_cache_store.rb +100 -0
- data/lib/litestack/litecache.rb +254 -0
- data/lib/litestack/litedb.rb +47 -0
- data/lib/litestack/litejob.rb +84 -0
- data/lib/litestack/litejobqueue.rb +161 -0
- data/lib/litestack/litequeue.rb +105 -0
- data/lib/litestack/litesupport.rb +74 -0
- data/lib/litestack/version.rb +5 -0
- data/lib/litestack.rb +15 -0
- data/lib/railties/rails/commands/dbconsole.rb +87 -0
- data/lib/sequel/adapters/litedb.rb +43 -0
- data/samples/ultrajob.yaml +2 -0
- metadata +115 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA256:
|
3
|
+
metadata.gz: 025c78f9b04863bfce9f09dae2181b735187d0ff8852ff0a474ce59fc4f3b7fc
|
4
|
+
data.tar.gz: f4d950cbfef0e9b0e6642196b2efa0953dd8d988de54168ff9934bb257d6c729
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: 29bb82387bac7b66cf2e335fc072ff79613de006ead064cbbf01746b1c57687b47c5bf875af7e173273ca8ba961dfc561595dfb9fd1527f92989444e294be4ea
|
7
|
+
data.tar.gz: ae523a47ebeed583670541bec755c39559e6a24230aaf7fa074e262a7410e64dd6e5c4f82af9b9d40dc8ef39748bac6bc38656a00a2a0066957a10af02573a9e
|
data/CHANGELOG.md
ADDED
data/Gemfile
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
|
|
1
|
+
The MIT License (MIT)
|
2
|
+
|
3
|
+
Copyright (c) 2022 TODO: Write your name
|
4
|
+
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
7
|
+
in the Software without restriction, including without limitation the rights
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
10
|
+
furnished to do so, subject to the following conditions:
|
11
|
+
|
12
|
+
The above copyright notice and this permission notice shall be included in
|
13
|
+
all copies or substantial portions of the Software.
|
14
|
+
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
21
|
+
THE SOFTWARE.
|
data/README.md
ADDED
@@ -0,0 +1,166 @@
|
|
1
|
+
![litestack](https://github.com/oldmoe/litestack/blob/master/assets/litestack_logo_teal_large.png?raw=true)
|
2
|
+
|
3
|
+
|
4
|
+
litestack is a revolutionary gem for Ruby and Ruby on Rails that provides an all-in-one solution for web application development. It exploits the power and embeddedness of SQLite to include a full-fledged SQL database, a fast cache, a robust job queue, and a simple yet performant full-text search all in a single package.
|
5
|
+
|
6
|
+
Compared to conventional approaches that require separate servers and databases, LiteStack offers superior performance, efficiency, ease of use, and cost savings. Its embedded database and cache reduce memory and CPU usage, while its simple interface streamlines the development process. Overall, LiteStack sets a new standard for web application development and is an excellent choice for those who demand speed, efficiency, and simplicity.
|
7
|
+
|
8
|
+
litestack provides integration with popular libraries, including:
|
9
|
+
|
10
|
+
- Rack
|
11
|
+
- Sequel
|
12
|
+
- Rails
|
13
|
+
- ActiveRecord
|
14
|
+
- ActiveSupport::Cache
|
15
|
+
- ActiveJob
|
16
|
+
|
17
|
+
With litestack you only need to add a single gem to your app which would replace a host of other gems and services, for example, a typical Rails app using litestack will no longer need the following services:
|
18
|
+
|
19
|
+
- PostgreSQL
|
20
|
+
- Redis
|
21
|
+
- Sidekiq
|
22
|
+
|
23
|
+
To make it even more efficient, litestack will detect the presence of Fiber based IO frameworks like Async (e.g. when you use the Falcon web server) or Polyphony. It will then switch its background workers for caches and queues to fibers (using the semantics of the existing framework). This is done transparently and will generally lead to lower CPU and memory utilization.
|
24
|
+
|
25
|
+
Litestack is still pretty young and under heavy development, but you are welcome to give it a try today!
|
26
|
+
|
27
|
+
## Installation
|
28
|
+
|
29
|
+
Add this line to your application's Gemfile:
|
30
|
+
|
31
|
+
```ruby
|
32
|
+
gem 'litestack'
|
33
|
+
```
|
34
|
+
|
35
|
+
And then execute:
|
36
|
+
|
37
|
+
$ bundle install
|
38
|
+
|
39
|
+
Or install it yourself as:
|
40
|
+
|
41
|
+
$ gem install litestack
|
42
|
+
|
43
|
+
## Usage
|
44
|
+
|
45
|
+
litestack currently offers three main components
|
46
|
+
|
47
|
+
- litedb
|
48
|
+
- litecache
|
49
|
+
- litejob
|
50
|
+
|
51
|
+
> ![litedb](https://github.com/oldmoe/litestack/blob/master/assets/litedb_logo_teal.png?raw=true)
|
52
|
+
|
53
|
+
litedb is a wrapper around SQLite3, offering a better default configuration that is tuned for concurrency and performance. Out of the box, litedb works seamlessly between multiple processes without database locking errors. litedb can be used in multiple ways, including:
|
54
|
+
|
55
|
+
#### Direct litedb usage
|
56
|
+
|
57
|
+
litedb can be used exactly as the SQLite3 gem, since litedb inherits from SQLite3
|
58
|
+
|
59
|
+
```ruby
|
60
|
+
require 'litestack'
|
61
|
+
db = Litedb.new(path_to_db)
|
62
|
+
db.execute("create table users(id integer primary key, name text)")
|
63
|
+
db.execute("insert into users(name) values (?)", "Hamada")
|
64
|
+
db.get_first_value("select count(*) from users") # => 1
|
65
|
+
```
|
66
|
+
|
67
|
+
#### ActiveRecord
|
68
|
+
|
69
|
+
litedb provides tight Rails/ActiveRecord integration and can be configured as follows
|
70
|
+
|
71
|
+
In database.yml
|
72
|
+
|
73
|
+
```yaml
|
74
|
+
adapter: litedb
|
75
|
+
# normal sqlite3 configuration follows
|
76
|
+
```
|
77
|
+
|
78
|
+
#### Sequel
|
79
|
+
|
80
|
+
litedb offers integration with the Sequel database toolkit and can be configured as follows
|
81
|
+
|
82
|
+
```ruby
|
83
|
+
DB = Sequel.connect("litedb://path_to_db_file")
|
84
|
+
```
|
85
|
+
|
86
|
+
|
87
|
+
> ![litecache](https://github.com/oldmoe/litestack/blob/master/assets/litecache_logo_teal.png?raw=true)
|
88
|
+
|
89
|
+
litecache is a high speed, low overhead caching library that uses SQLite as its backend. litecache can be accessed from multiple processes on the same machine seamlessly. It also has features like key expiry, LRU based eviction and increment/decrement of integer values.
|
90
|
+
|
91
|
+
#### Direct litecache usage
|
92
|
+
|
93
|
+
```ruby
|
94
|
+
require 'litestack'
|
95
|
+
cache = Litecache.new(path: "path_to_file")
|
96
|
+
cache.set("key", "value")
|
97
|
+
cache.get("key") #=> "value"
|
98
|
+
```
|
99
|
+
|
100
|
+
#### ActiveResource::Cache
|
101
|
+
|
102
|
+
In your desired environment file (e.g. production.rb)
|
103
|
+
|
104
|
+
```ruby
|
105
|
+
config.cache_store = :litecache, {path: './path/to/your/cache/file'}
|
106
|
+
```
|
107
|
+
This provides a transparent integration that uses the Rails caching interface
|
108
|
+
|
109
|
+
litecache spawns a background thread for cleanup purposes. In case it detects that the current environment has *Fiber::Scheduler* or *Polyphony* loaded it will spawn a fiber instead, saving on both memory and CPU cycles.
|
110
|
+
|
111
|
+
> ![litejob](https://github.com/oldmoe/litestack/blob/master/assets/litejob_logo_teal.png?raw=true)
|
112
|
+
|
113
|
+
litejob is a fast and very efficient job queue processor for Ruby applications. It builds on top of SQLite as well, which provides transactional guarantees, persistence and exceptional performance.
|
114
|
+
|
115
|
+
#### Direct litejob usage
|
116
|
+
```ruby
|
117
|
+
require 'litestack'
|
118
|
+
# define your job class
|
119
|
+
class MyJob
|
120
|
+
include ::Litejob
|
121
|
+
|
122
|
+
queue = :default
|
123
|
+
|
124
|
+
# must implement perform, with any number of params
|
125
|
+
def perform(params)
|
126
|
+
# do stuff
|
127
|
+
end
|
128
|
+
end
|
129
|
+
|
130
|
+
#schedule a job asynchronously
|
131
|
+
MyJob.perform_async(params)
|
132
|
+
|
133
|
+
#schedule a job at a certain time
|
134
|
+
MyJob.perform_at(time, params)
|
135
|
+
|
136
|
+
#schedule a job after a certain delay
|
137
|
+
MyJob.perform_after(delay, params)
|
138
|
+
```
|
139
|
+
|
140
|
+
#### ActiveJob
|
141
|
+
|
142
|
+
In your desired environment file (e.g. production.rb)
|
143
|
+
|
144
|
+
```ruby
|
145
|
+
config.active_job.queue_adapter = :litejob
|
146
|
+
```
|
147
|
+
#### Configuration file
|
148
|
+
You can add more configuration in litejob.yml (or config/litejob.yml if you are integrating with Rails)
|
149
|
+
|
150
|
+
```yaml
|
151
|
+
queues:
|
152
|
+
- [default 1]
|
153
|
+
- [urgent 5]
|
154
|
+
- [critical 10 "spawn"]
|
155
|
+
```
|
156
|
+
|
157
|
+
The queues need to include a name and a priority (a number between 1 and 10) and can also optionally add the token "spawn", which means every job will run in its own concurrency context (thread or fiber)
|
158
|
+
|
159
|
+
|
160
|
+
## Contributing
|
161
|
+
|
162
|
+
Bug reports are welcome on GitHub at https://github.com/oldmoe/litestack. Please note that this is not an open contribution project and that we don't accept pull requests.
|
163
|
+
|
164
|
+
## License
|
165
|
+
|
166
|
+
The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
|
data/Rakefile
ADDED
data/WHYLITESTACK.md
ADDED
@@ -0,0 +1,26 @@
|
|
1
|
+
# Why Litestack?
|
2
|
+
|
3
|
+
If you're developing a Ruby web application, you may be wondering which database, caching, and job queueing solution is right for you. While there are many options out there, Litestack stands out as a top choice for its performance benefits, ease of setup and administration, resource efficiency, and potential cost savings.
|
4
|
+
|
5
|
+
## Standing on the shoulder of a (figurative) giant!
|
6
|
+
At its core, Litestack is built on top of SQLite, a highly regarded open-source relational database engine. This means that Litestack is highly efficient and resource-friendly, making it an excellent choice for small to medium-sized web applications. Additionally, because SQLite is a file-based database, it is incredibly easy to set up and manage, requiring no separate server installation or configuration.
|
7
|
+
|
8
|
+
## Performance and Efficiency
|
9
|
+
One of the most significant advantages of Litestack is its performance benefits. SQLite has a small memory footprint and is highly optimized, meaning that it can deliver fast and reliable database access. Additionally, Litestack's job queueing and caching functionality provide additional performance benefits, enabling you to execute tasks asynchronously and store frequently accessed data in memory for faster access.
|
10
|
+
|
11
|
+
## Dead simple Rails integration
|
12
|
+
In addition to its performance benefits and ease of administration, Litestack also offers a remarkably simple setup process for Ruby on Rails applications. With just a single gem install and three lines of configuration changes, you can have a fully functional database, caching, and job queueing solution up and running in no time.
|
13
|
+
|
14
|
+
## Simple by design
|
15
|
+
This ease of setup and configuration is due to Litestack's design philosophy, which prioritizes simplicity and ease of use. Rather than requiring developers to make significant code changes or configure complex infrastructure, Litestack offers a turnkey solution that can be set up with minimal effort.
|
16
|
+
|
17
|
+
This simplicity is a significant advantage for developers, as it allows them to focus on writing code and delivering features rather than dealing with the complexities of database and infrastructure management. By simplifying the setup process, Litestack can help reduce the time and cost required to get your application up and running, enabling you to focus on delivering value to your users.
|
18
|
+
|
19
|
+
## Simple can be cutting edge
|
20
|
+
Another benefit of Litestack is its deep integration with state of the art Ruby IO libraries like Async and Polyphony. This allows for even greater performance improvements and improved efficiency, as these libraries can help manage concurrency and parallelism in your web application.
|
21
|
+
|
22
|
+
## DevOps? Think DevNops!
|
23
|
+
Because Litestack is easy to set up and maintain, it can potentially reduce development costs and time to market. With fewer dependencies and simpler configuration requirements, developers can focus on writing code and delivering features rather than worrying about complex infrastructure and administration.
|
24
|
+
|
25
|
+
## Conclusion
|
26
|
+
In conclusion, if you're looking for a reliable, performant, and easy-to-use database, caching, and job queueing solution for your Ruby or Rails web application, Litestack is an excellent choice. With its built-in integration with SQLite and major IO libraries, Litestack can provide significant performance benefits while also reducing development costs and time to market.
|
Binary file
|
Binary file
|
Binary file
|
Binary file
|
Binary file
|
data/bench/bench.rb
ADDED
@@ -0,0 +1,23 @@
|
|
1
|
+
require 'sqlite3'
|
2
|
+
|
3
|
+
def bench(msg, iterations=1000)
|
4
|
+
GC.start
|
5
|
+
GC.compact
|
6
|
+
print "Starting #{iterations} iterations of #{msg} ... "
|
7
|
+
t1 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
|
8
|
+
iterations.times do |i|
|
9
|
+
yield i
|
10
|
+
end
|
11
|
+
t2 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
|
12
|
+
time = ((t2 - t1)*1000).to_i.to_f / 1000 rescue 0
|
13
|
+
ips = ((iterations/(t2-t1))*100).to_i.to_f / 100 rescue "infinity?"
|
14
|
+
#{m: msg, t: time, ips: iterations/time, i: iterations}
|
15
|
+
puts "finished in #{time} seconds (#{ips} ips)"
|
16
|
+
end
|
17
|
+
|
18
|
+
@db = SQLite3::Database.new(":memory:") # sqlite database for fast random string generation
|
19
|
+
|
20
|
+
def random_str(size)
|
21
|
+
@db.get_first_value("select hex(randomblob(?))", size)
|
22
|
+
end
|
23
|
+
|
@@ -0,0 +1,67 @@
|
|
1
|
+
require 'active_support'
|
2
|
+
require_relative '../lib/litestack'
|
3
|
+
require_relative './bench'
|
4
|
+
|
5
|
+
cache = ActiveSupport::Cache::Litecache.new({path: '../db/rails_cache.db'})
|
6
|
+
|
7
|
+
#can only use the lookup method when the gem is installed
|
8
|
+
#cache = ActiveSupport::Cache.lookup_store(:litecache, {path: '../db/rails_cache.db'})
|
9
|
+
|
10
|
+
redis = ActiveSupport::Cache.lookup_store(:redis_cache_store, {})
|
11
|
+
|
12
|
+
values = []
|
13
|
+
keys = []
|
14
|
+
count = 1000
|
15
|
+
|
16
|
+
[10, 100, 1000, 10000].each do |size|
|
17
|
+
count.times do
|
18
|
+
keys << random_str(10)
|
19
|
+
values << random_str(size)
|
20
|
+
end
|
21
|
+
|
22
|
+
random_keys = keys.shuffle
|
23
|
+
puts "Benchmarks for values of size #{size} bytes"
|
24
|
+
puts "=========================================================="
|
25
|
+
puts "== Writes =="
|
26
|
+
bench("litecache writes", count) do |i|
|
27
|
+
cache.write(keys[i], values[i])
|
28
|
+
end
|
29
|
+
|
30
|
+
bench("Redis writes", count) do |i|
|
31
|
+
redis.write(keys[i], values[i])
|
32
|
+
end
|
33
|
+
|
34
|
+
puts "== Reads =="
|
35
|
+
bench("litecache reads", count) do |i|
|
36
|
+
cache.read(random_keys[i])
|
37
|
+
end
|
38
|
+
|
39
|
+
bench("Redis reads", count) do |i|
|
40
|
+
redis.read(random_keys[i])
|
41
|
+
end
|
42
|
+
puts "=========================================================="
|
43
|
+
|
44
|
+
|
45
|
+
keys = []
|
46
|
+
values = []
|
47
|
+
end
|
48
|
+
|
49
|
+
|
50
|
+
cache.write("somekey", 1, raw: true)
|
51
|
+
|
52
|
+
redis.write("somekey", 1, raw: true)
|
53
|
+
|
54
|
+
puts "Benchmarks for incrementing integer values"
|
55
|
+
puts "=========================================================="
|
56
|
+
|
57
|
+
bench("litecache increment", count) do
|
58
|
+
cache.increment("somekey", 1, raw: true)
|
59
|
+
end
|
60
|
+
|
61
|
+
bench("Redis increment", count) do
|
62
|
+
redis.increment("somekey", 1, raw: true )
|
63
|
+
end
|
64
|
+
|
65
|
+
cache.clear
|
66
|
+
redis.clear
|
67
|
+
|
@@ -0,0 +1,68 @@
|
|
1
|
+
require 'redis'
|
2
|
+
require 'sqlite3'
|
3
|
+
require_relative './bench'
|
4
|
+
|
5
|
+
#require 'polyphony'
|
6
|
+
require 'async/scheduler'
|
7
|
+
|
8
|
+
Fiber.set_scheduler Async::Scheduler.new
|
9
|
+
Fiber.scheduler.run
|
10
|
+
|
11
|
+
require_relative '../lib/litestack'
|
12
|
+
|
13
|
+
|
14
|
+
cache = Litecache.new({path: '../db/cache.db'}) # default settings
|
15
|
+
redis = Redis.new # default settings
|
16
|
+
|
17
|
+
values = []
|
18
|
+
keys = []
|
19
|
+
count = 1000
|
20
|
+
count.times { keys << random_str(10) }
|
21
|
+
|
22
|
+
[10, 100, 1000, 10000].each do |size|
|
23
|
+
count.times do
|
24
|
+
values << random_str(size)
|
25
|
+
end
|
26
|
+
|
27
|
+
random_keys = keys.shuffle
|
28
|
+
puts "Benchmarks for values of size #{size} bytes"
|
29
|
+
puts "=========================================================="
|
30
|
+
puts "== Writes =="
|
31
|
+
bench("litecache writes", count) do |i|
|
32
|
+
cache.set(keys[i], values[i])
|
33
|
+
end
|
34
|
+
|
35
|
+
bench("Redis writes", count) do |i|
|
36
|
+
redis.set(keys[i], values[i])
|
37
|
+
end
|
38
|
+
|
39
|
+
puts "== Reads =="
|
40
|
+
bench("litecache reads", count) do |i|
|
41
|
+
cache.get(random_keys[i])
|
42
|
+
end
|
43
|
+
|
44
|
+
bench("Redis reads", count) do |i|
|
45
|
+
redis.get(random_keys[i])
|
46
|
+
end
|
47
|
+
puts "=========================================================="
|
48
|
+
|
49
|
+
values = []
|
50
|
+
end
|
51
|
+
|
52
|
+
|
53
|
+
cache.set("somekey", 1)
|
54
|
+
redis.set("somekey", 1)
|
55
|
+
|
56
|
+
bench("litecache increment") do
|
57
|
+
cache.increment("somekey", 1)
|
58
|
+
end
|
59
|
+
|
60
|
+
bench("Redis increment") do
|
61
|
+
redis.incr("somekey")
|
62
|
+
end
|
63
|
+
|
64
|
+
cache.clear
|
65
|
+
redis.flushdb
|
66
|
+
|
67
|
+
sleep
|
68
|
+
|
@@ -0,0 +1,38 @@
|
|
1
|
+
require './bench'
|
2
|
+
require 'async/scheduler'
|
3
|
+
|
4
|
+
#ActiveJob::Base.logger = Logger.new(IO::NULL)
|
5
|
+
|
6
|
+
|
7
|
+
Fiber.set_scheduler Async::Scheduler.new
|
8
|
+
|
9
|
+
require_relative '../lib/active_job/queue_adapters/litejob_adapter'
|
10
|
+
|
11
|
+
ActiveSupport::IsolatedExecutionState.isolation_level = :fiber
|
12
|
+
|
13
|
+
require './rails_job.rb'
|
14
|
+
|
15
|
+
|
16
|
+
puts Litesupport.environment
|
17
|
+
|
18
|
+
count = 1000
|
19
|
+
|
20
|
+
RailsJob.queue_adapter = :sidekiq
|
21
|
+
t = Time.now.to_f
|
22
|
+
puts "Make sure sidekiq is started with -c ./rails_job.rb"
|
23
|
+
bench("enqueuing sidekiq jobs", count) do
|
24
|
+
RailsJob.perform_later(count, t)
|
25
|
+
end
|
26
|
+
|
27
|
+
puts "Don't forget to check the sidekiq log for processing time conclusion"
|
28
|
+
|
29
|
+
RailsJob.queue_adapter = :litejob
|
30
|
+
t = Time.now.to_f
|
31
|
+
bench("enqueuing litejobs", count) do
|
32
|
+
RailsJob.perform_later(count, t)
|
33
|
+
end
|
34
|
+
|
35
|
+
Fiber.scheduler.run
|
36
|
+
|
37
|
+
|
38
|
+
sleep
|
@@ -0,0 +1,27 @@
|
|
1
|
+
#require 'polyphony'
|
2
|
+
require 'async/scheduler'
|
3
|
+
require './bench'
|
4
|
+
require './skjob.rb'
|
5
|
+
require './uljob.rb'
|
6
|
+
|
7
|
+
Fiber.set_scheduler Async::Scheduler.new
|
8
|
+
|
9
|
+
count = 1000
|
10
|
+
|
11
|
+
t = Time.now.to_f
|
12
|
+
# make sure sidekiq is started with skjob.rb as the job-
|
13
|
+
bench("enqueuing sidekiq jobs", count) do |i|
|
14
|
+
SidekiqJob.perform_async(count, t)
|
15
|
+
end
|
16
|
+
|
17
|
+
puts "Don't forget to check the sidekiq log for processing time conclusion"
|
18
|
+
|
19
|
+
t = Time.now.to_f
|
20
|
+
bench("enqueuing litejobs", count) do |i|
|
21
|
+
MyJob.perform_async(count, t)
|
22
|
+
end
|
23
|
+
|
24
|
+
Fiber.scheduler.run
|
25
|
+
|
26
|
+
sleep
|
27
|
+
|
@@ -0,0 +1,81 @@
|
|
1
|
+
require 'ultralite'
|
2
|
+
require 'active_support'
|
3
|
+
require './bench'
|
4
|
+
|
5
|
+
cache = ActiveSupport::Cache.lookup_store(:ultralite_cache_store, {})
|
6
|
+
mem = ActiveSupport::Cache.lookup_store(:ultralite_cache_store, {path: ":memory:"})
|
7
|
+
redis = ActiveSupport::Cache.lookup_store(:redis_cache_store, {})
|
8
|
+
|
9
|
+
values = []
|
10
|
+
keys = []
|
11
|
+
count = 1000
|
12
|
+
|
13
|
+
[10, 100, 1000, 10000].each do |size|
|
14
|
+
count.times do
|
15
|
+
keys << random_str(10)
|
16
|
+
values << random_str(size)
|
17
|
+
end
|
18
|
+
|
19
|
+
random_keys = keys.shuffle
|
20
|
+
puts "Benchmarks for values of size #{size} bytes"
|
21
|
+
puts "=========================================================="
|
22
|
+
puts "== Writes =="
|
23
|
+
bench("Ultralite cache writes", count) do |i|
|
24
|
+
cache.write(keys[i], values[i])
|
25
|
+
end
|
26
|
+
|
27
|
+
bench("Ultralite memory cache writes", count) do |i|
|
28
|
+
mem.write(keys[i], values[i])
|
29
|
+
end
|
30
|
+
|
31
|
+
bench("Redis writes", count) do |i|
|
32
|
+
redis.write(keys[i], values[i])
|
33
|
+
end
|
34
|
+
|
35
|
+
puts "== Reads =="
|
36
|
+
bench("Ultralite cache reads", count) do |i|
|
37
|
+
cache.read(random_keys[i])
|
38
|
+
end
|
39
|
+
|
40
|
+
bench("Ultralite memory cache reads", count) do |i|
|
41
|
+
mem.read(random_keys[i])
|
42
|
+
end
|
43
|
+
|
44
|
+
bench("Redis reads", count) do |i|
|
45
|
+
redis.read(random_keys[i])
|
46
|
+
end
|
47
|
+
puts "=========================================================="
|
48
|
+
|
49
|
+
|
50
|
+
keys = []
|
51
|
+
values = []
|
52
|
+
end
|
53
|
+
|
54
|
+
|
55
|
+
cache.write("somekey", 1, raw: true)
|
56
|
+
#puts cache.read("somekey", raw: true)
|
57
|
+
|
58
|
+
mem.write("somekey", 1, raw: true)
|
59
|
+
#puts mem.read("somekey", raw: true)
|
60
|
+
|
61
|
+
redis.write("somekey", 1, raw: true)
|
62
|
+
#puts redis.read("somekey", raw: true)
|
63
|
+
|
64
|
+
puts "Benchmarks for incrementing integer values"
|
65
|
+
puts "=========================================================="
|
66
|
+
|
67
|
+
bench("Ultralite cache increment", count) do
|
68
|
+
cache.increment("somekey", 1, raw: true)
|
69
|
+
end
|
70
|
+
|
71
|
+
bench("Ultralite memory cache increment", count) do
|
72
|
+
mem.increment("somekey", 1, raw: true)
|
73
|
+
end
|
74
|
+
|
75
|
+
bench("Redis increment", count) do
|
76
|
+
redis.increment("somekey", 1, raw: true )
|
77
|
+
end
|
78
|
+
|
79
|
+
cache.clear
|
80
|
+
redis.clear
|
81
|
+
|
data/bench/bench_raw.rb
ADDED
@@ -0,0 +1,72 @@
|
|
1
|
+
require 'ultralite'
|
2
|
+
require './bench'
|
3
|
+
require 'redis'
|
4
|
+
require 'sqlite3'
|
5
|
+
|
6
|
+
cache = Ultralite::Cache.new # default settings
|
7
|
+
#mem = Ultralite::Cache.new(path: ":memory:") # default settings
|
8
|
+
redis = Redis.new # default settings
|
9
|
+
|
10
|
+
values = []
|
11
|
+
keys = []
|
12
|
+
count = 1000
|
13
|
+
count.times { keys << random_str(10) }
|
14
|
+
|
15
|
+
[10, 100, 1000, 10000].each do |size|
|
16
|
+
count.times do
|
17
|
+
values << random_str(size)
|
18
|
+
end
|
19
|
+
|
20
|
+
random_keys = keys.shuffle
|
21
|
+
puts "Benchmarks for values of size #{size} bytes"
|
22
|
+
puts "=========================================================="
|
23
|
+
puts "== Writes =="
|
24
|
+
bench("Ultralite cache writes", count) do |i|
|
25
|
+
cache.set(keys[i], values[i])
|
26
|
+
end
|
27
|
+
|
28
|
+
# bench("Ultralite memory cache writes", count) do |i|
|
29
|
+
# mem.set(keys[i], values[i])
|
30
|
+
# end
|
31
|
+
|
32
|
+
bench("Redis writes", count) do |i|
|
33
|
+
redis.set(keys[i], values[i])
|
34
|
+
end
|
35
|
+
|
36
|
+
puts "== Reads =="
|
37
|
+
bench("Ultralite cache reads", count) do |i|
|
38
|
+
cache.get(random_keys[i])
|
39
|
+
end
|
40
|
+
|
41
|
+
# bench("Ultralite memory cache reads", count) do |i|
|
42
|
+
# cache.get(random_keys[i])
|
43
|
+
# end
|
44
|
+
|
45
|
+
bench("Redis reads", count) do |i|
|
46
|
+
redis.get(random_keys[i])
|
47
|
+
end
|
48
|
+
puts "=========================================================="
|
49
|
+
|
50
|
+
values = []
|
51
|
+
end
|
52
|
+
|
53
|
+
|
54
|
+
cache.set("somekey", 1)
|
55
|
+
#mem.set("somekey", 1)
|
56
|
+
redis.set("somekey", 1)
|
57
|
+
|
58
|
+
bench("Ultralite cache increment") do
|
59
|
+
cache.increment("somekey", 1)
|
60
|
+
end
|
61
|
+
|
62
|
+
#bench("Ultralite memory cache increment") do
|
63
|
+
# mem.increment("somekey", 1)
|
64
|
+
#end
|
65
|
+
|
66
|
+
bench("Redis increment") do
|
67
|
+
redis.incr("somekey")
|
68
|
+
end
|
69
|
+
|
70
|
+
cache.clear
|
71
|
+
redis.flushdb
|
72
|
+
|
data/bench/rails_job.rb
ADDED
@@ -0,0 +1,18 @@
|
|
1
|
+
require 'active_job'
|
2
|
+
|
3
|
+
class RailsJob < ActiveJob::Base
|
4
|
+
|
5
|
+
queue_as :default
|
6
|
+
|
7
|
+
@@count = 0
|
8
|
+
|
9
|
+
def perform(count, time)
|
10
|
+
#sleep 1
|
11
|
+
@@count += 1
|
12
|
+
if @@count == count
|
13
|
+
puts "[litejob] Finished in #{Time.now.to_f - time} seconds (#{count / (Time.now.to_f - time)} jps)"
|
14
|
+
@@count = 0
|
15
|
+
end
|
16
|
+
end
|
17
|
+
|
18
|
+
end
|