litestack 0.4.1 → 0.4.3
- checksums.yaml +4 -4
- data/.standard.yml +3 -0
- data/BENCHMARKS.md +23 -7
- data/CHANGELOG.md +35 -0
- data/Gemfile +1 -7
- data/README.md +124 -6
- data/ROADMAP.md +45 -0
- data/Rakefile +3 -1
- data/WHYLITESTACK.md +1 -1
- data/assets/litecache_metrics.png +0 -0
- data/assets/litedb_metrics.png +0 -0
- data/assets/litemetric_logo_teal.png +0 -0
- data/assets/litesearch_logo_teal.png +0 -0
- data/bench/bench.rb +17 -10
- data/bench/bench_cache_rails.rb +45 -14
- data/bench/bench_cache_raw.rb +44 -28
- data/bench/bench_jobs_rails.rb +18 -12
- data/bench/bench_jobs_raw.rb +17 -10
- data/bench/bench_queue.rb +4 -6
- data/bench/rails_job.rb +5 -7
- data/bench/skjob.rb +4 -4
- data/bench/uljob.rb +6 -6
- data/bin/liteboard +2 -1
- data/lib/action_cable/subscription_adapter/litecable.rb +5 -8
- data/lib/active_job/queue_adapters/litejob_adapter.rb +6 -8
- data/lib/active_record/connection_adapters/litedb_adapter.rb +72 -84
- data/lib/active_support/cache/litecache.rb +61 -41
- data/lib/generators/litestack/install/install_generator.rb +3 -3
- data/lib/generators/litestack/install/templates/cable.yml +0 -3
- data/lib/generators/litestack/install/templates/database.yml +7 -1
- data/lib/litestack/liteboard/liteboard.rb +269 -149
- data/lib/litestack/litecable.rb +41 -37
- data/lib/litestack/litecable.sql.yml +22 -11
- data/lib/litestack/litecache.rb +118 -93
- data/lib/litestack/litecache.sql.yml +83 -22
- data/lib/litestack/litecache.yml +1 -1
- data/lib/litestack/litedb.rb +35 -40
- data/lib/litestack/litejob.rb +30 -29
- data/lib/litestack/litejobqueue.rb +63 -65
- data/lib/litestack/litemetric.rb +80 -92
- data/lib/litestack/litemetric.sql.yml +244 -234
- data/lib/litestack/litemetric_collector.sql.yml +38 -41
- data/lib/litestack/litequeue.rb +39 -41
- data/lib/litestack/litequeue.sql.yml +39 -31
- data/lib/litestack/litescheduler.rb +24 -18
- data/lib/litestack/litesearch/index.rb +93 -63
- data/lib/litestack/litesearch/model.rb +66 -65
- data/lib/litestack/litesearch/schema.rb +53 -56
- data/lib/litestack/litesearch/schema_adapters/backed_adapter.rb +46 -50
- data/lib/litestack/litesearch/schema_adapters/basic_adapter.rb +44 -35
- data/lib/litestack/litesearch/schema_adapters/contentless_adapter.rb +3 -6
- data/lib/litestack/litesearch/schema_adapters/standalone_adapter.rb +7 -9
- data/lib/litestack/litesearch/schema_adapters.rb +4 -9
- data/lib/litestack/litesearch.rb +6 -9
- data/lib/litestack/litesupport.rb +78 -87
- data/lib/litestack/railtie.rb +1 -1
- data/lib/litestack/version.rb +2 -2
- data/lib/litestack.rb +6 -4
- data/lib/railties/rails/commands/dbconsole.rb +16 -20
- data/lib/sequel/adapters/litedb.rb +16 -21
- data/lib/sequel/adapters/shared/litedb.rb +168 -168
- data/scripts/build_metrics.rb +91 -0
- data/scripts/test_cable.rb +30 -0
- data/scripts/test_job_retry.rb +33 -0
- data/scripts/test_metrics.rb +60 -0
- data/template.rb +2 -2
- metadata +115 -7
data/lib/litestack/litedb.rb
CHANGED

@@ -1,23 +1,22 @@
 # all components should require the support module
-require_relative
+require_relative "litesupport"
 
 # all measurable components should require the litemetric class
-require_relative
+require_relative "litemetric"
 
 # litedb in particular gets access to litesearch
-require_relative
+require_relative "litesearch"
 
 # Litedb inherits from the SQLite3::Database class and adds a few initialization options
 class Litedb < ::SQLite3::Database
-
   # add litemetric support
   include Litemetric::Measurable
-
+
   # add litesearch support
   include Litesearch
-
+
   # overrride the original initilaizer to allow for connection configuration
-  def initialize(file, options = {}, zfs = nil
+  def initialize(file, options = {}, zfs = nil)
     if block_given?
       super(file, options, zfs) do |db|
         init unless options[:noinit] == true
@@ -41,15 +40,15 @@ class Litedb < ::SQLite3::Database
     super(mode)
   end
 
-  # return the size of the database file
+  # return the size of the database file
   def size
     execute("SELECT s.page_size * c.page_count FROM pragma_page_size() AS s, pragma_page_count() AS c")[0][0]
   end
-
+
   def schema_object_count(type = nil)
     execute("SELECT count(*) FROM SQLITE_MASTER WHERE iif(?1 IS NOT NULL, type = ?1, TRUE)", type)[0][0]
   end
-
+
   # collect snapshot information
   def snapshot
     {
@@ -58,8 +57,8 @@ class Litedb < ::SQLite3::Database
         journal_mode: journal_mode,
         synchronous: synchronous,
         size: size.to_f / (1024 * 1024),
-        tables: schema_object_count(
-        indexes: schema_object_count(
+        tables: schema_object_count("table"),
+        indexes: schema_object_count("index")
       }
     }
   end
@@ -74,42 +73,41 @@ class Litedb < ::SQLite3::Database
     ensure
       stmt.close unless stmt.closed?
     end
-  end
+  end
 
-  # override execute to capture metrics
+  # override execute to capture metrics
   def execute(sql, bind_vars = [], *args, &block)
     if bind_vars.nil? || !args.empty?
-      if args.empty?
-
+      bind_vars = if args.empty?
+        []
       else
-
+        [bind_vars] + args
       end
     end
-
+
     prepare(sql) do |stmt|
       measure(stmt.stmt_type, stmt.sql) do
         stmt.bind_params(bind_vars)
         stmt = SQLite3::ResultSet.new self, stmt
       end
-      if
+      if block
        stmt.each do |row|
          yield row
        end
      else
        stmt.to_a
      end
-    end
-
+    end
   end
 
   private
 
-  # default connection configuration values
+  # default connection configuration values
   def init
     # version 3.37 is required for strict typing support and the newest json operators
     raise Litesupport::Error if SQLite3::SQLITE_VERSION_NUMBER < 3037000
     # time to wait to obtain a write lock before raising an exception
-
+    busy_handler { |i| sleep 0.001 }
     # level of database durability, 2 = "FULL" (sync on every write), other values include 1 = "NORMAL" (sync every 1000 written pages) and 0 = "NONE"
     self.synchronous = 1
     # Journal mode WAL allows for greater concurrency (many readers + one writer)
@@ -117,16 +115,14 @@ class Litedb < ::SQLite3::Database
     # impose a limit on the WAL file to prevent unlimited growth (with a negative impact on read performance as well)
     self.journal_size_limit = 64 * 1024 * 1024
     # set the global memory map so all processes can share data
-    self.mmap_size = 128 * 1024 * 1024
+    self.mmap_size = 128 * 1024 * 1024
     # increase the local connection cache to 2000 pages
     self.cache_size = 2000
   end
-
 end
 
 # the Litedb::Statement also inherits from SQLite3::Statement
-class Litedb::Statement < SQLite3::Statement
-
+class Litedb::Statement < SQLite3::Statement
   include Litemetric::Measurable
 
   attr_accessor :sql
@@ -139,32 +135,32 @@ class Litedb::Statement < SQLite3::Statement
   def metrics_identifier
     "Litedb" # overridden to match the parent class
   end
-
-  # return the type of the statement
+
+  # return the type of the statement
   def stmt_type
     @stmt_type ||= detect_stmt_type
   end
-
+
   def detect_stmt_type
-    if @sql.start_with?("SEL"
+    if @sql.start_with?("SEL", "WITH")
       "Read"
-    elsif @sql.start_with?("CRE"
+    elsif @sql.start_with?("CRE", "ALT", "DRO")
       "Schema change"
-    elsif @sql.start_with?("PRA")
+    elsif @sql.start_with?("PRA")
       "Pragma"
     else
       "Write"
-    end
+    end
   end
-
-  # overriding each to measure the query time (plus the processing time as well, sadly)
+
+  # overriding each to measure the query time (plus the processing time as well, sadly)
   def each
     measure(stmt_type, @sql) do
       super
-    end
+    end
   end
-
-  # overriding execute to measure the query time
+
+  # overriding execute to measure the query time
   def execute(*bind_vars)
     res = nil
     measure(stmt_type, @sql) do
@@ -172,5 +168,4 @@ class Litedb::Statement < SQLite3::Statement
     end
     res
   end
-
 end
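As context for the Litedb changes above, here is a minimal usage sketch based only on the methods visible in this diff; the file name and query are illustrative, and loading everything via `require "litestack"` is an assumption:

require "litestack"

# opening a connection runs init, which applies the defaults shown above
# (WAL journaling, synchronous = 1, 128MB mmap) unless noinit: true is passed
db = Litedb.new("app.sqlite3")

# execute now normalizes bind_vars and classifies WITH queries as "Read"
rows = db.execute("WITH c(n) AS (SELECT 1) SELECT n FROM c")

db.size                          # database file size in bytes (page_size * page_count)
db.schema_object_count("table")  # count a single schema object type
db.snapshot                      # now reports tables: and indexes: counts separately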
data/lib/litestack/litejob.rb
CHANGED

@@ -1,36 +1,36 @@
 # frozen_stringe_literal: true
 
-require_relative
+require_relative "./litejobqueue"
 
 ##
-#Litejob is a Ruby module that enables seamless integration of the Litejobqueue job queueing system into Ruby applications. By including the Litejob module in a class and implementing the #perform method, developers can easily enqueue and process jobs asynchronously.
+# Litejob is a Ruby module that enables seamless integration of the Litejobqueue job queueing system into Ruby applications. By including the Litejob module in a class and implementing the #perform method, developers can easily enqueue and process jobs asynchronously.
 #
-#When a job is enqueued, Litejob creates a new instance of the class and passes it any necessary arguments. The class's #perform method is then called asynchronously to process the job. This allows the application to continue running without waiting for the job to finish, improving overall performance and responsiveness.
+# When a job is enqueued, Litejob creates a new instance of the class and passes it any necessary arguments. The class's #perform method is then called asynchronously to process the job. This allows the application to continue running without waiting for the job to finish, improving overall performance and responsiveness.
 #
-#One of the main benefits of using Litejob is its simplicity. Because it integrates directly with Litejobqueue, developers do not need to worry about managing job queues or processing logic themselves. Instead, they can focus on implementing the #perform method to handle the specific job tasks.
+# One of the main benefits of using Litejob is its simplicity. Because it integrates directly with Litejobqueue, developers do not need to worry about managing job queues or processing logic themselves. Instead, they can focus on implementing the #perform method to handle the specific job tasks.
 #
-#Litejob also provides a number of useful features, including the ability to set job priorities, retry failed jobs, and limit the number of retries. These features can be configured using simple configuration options in the class that includes the Litejob module.
+# Litejob also provides a number of useful features, including the ability to set job priorities, retry failed jobs, and limit the number of retries. These features can be configured using simple configuration options in the class that includes the Litejob module.
 #
-#Overall, Litejob is a powerful and flexible module that allows developers to easily integrate Litejobqueue job queueing into their Ruby applications. By enabling asynchronous job processing, Litejob can help improve application performance and scalability, while simplifying the development and management of background job processing logic.
+# Overall, Litejob is a powerful and flexible module that allows developers to easily integrate Litejobqueue job queueing into their Ruby applications. By enabling asynchronous job processing, Litejob can help improve application performance and scalability, while simplifying the development and management of background job processing logic.
 # class EasyJob
 #   include ::Litejob
-#
+#
 #   def perform(params)
 #     # do stuff
 #   end
 # end
 #
-#Then later you can perform a job asynchronously:
+# Then later you can perform a job asynchronously:
 #
 # EasyJob.perform_async(params) # perform a job synchronously
-#Or perform it at a specific time:
+# Or perform it at a specific time:
 # EasyJob.perform_at(time, params) # perform a job at a specific time
-#Or perform it after a certain delay:
+# Or perform it after a certain delay:
 # EasyJob.perform_in(delay, params) # perform a job after a certain delay
-#You can also specify a specific queue to be used
+# You can also specify a specific queue to be used
 # class EasyJob
 #   include ::Litejob
-#
+#
 #   self.queue = :urgent
 #
 #   def perform(params)
@@ -39,25 +39,23 @@ require_relative './litejobqueue'
 #   end
 # end
 #
 module Litejob
-
-  private
   def self.included(klass)
     klass.extend(ClassMethods)
     klass.get_jobqueue
   end
-
+
   module ClassMethods
     def perform_async(*params)
-      get_jobqueue.push(
+      get_jobqueue.push(name, params, 0, queue)
     end
-
+
     def perform_at(time, *params)
       delay = time.to_i - Time.now.to_i
-      get_jobqueue.push(
+      get_jobqueue.push(name, params, delay, queue)
     end
-
+
     def perform_in(delay, *params)
-      get_jobqueue.push(
+      get_jobqueue.push(name, params, delay, queue)
     end
 
     def perform_after(delay, *params)
@@ -67,26 +65,29 @@ module Litejob
     def process_jobs
       get_jobqueue
     end
-
-    def delete(id)
+
+    def delete(id)
       get_jobqueue.delete(id)
-    end
-
+    end
+
     def queue
       @queue_name ||= "default"
     end
-
+
     def queue=(queue_name)
       @queue_name = queue_name.to_s
     end
 
     def options
-      @options ||=
+      @options ||= begin
+        self::DEFAULT_OPTIONS
+      rescue
+        {}
+      end
     end
-
+
     def get_jobqueue
       Litejobqueue.jobqueue(options)
     end
   end
-
-end
+end
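The usage pattern documented in the Litejob comments above, assembled as a runnable sketch; the job class, queue name, and arguments are illustrative:

class EasyJob
  include ::Litejob

  # optional; jobs go to the "default" queue unless a queue is named
  self.queue = :urgent

  def perform(params)
    # do stuff
  end
end

params = {"user_id" => 42}                 # any Oj-serializable arguments
EasyJob.perform_async(params)              # enqueue for immediate processing
EasyJob.perform_at(Time.now + 60, params)  # run at a specific time
EasyJob.perform_in(5, params)              # run after a delay in seconds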
data/lib/litestack/litejobqueue.rb
CHANGED

@@ -1,152 +1,150 @@
 # frozen_stringe_literal: true
 
-require_relative
-require_relative
+require_relative "./litequeue"
+require_relative "./litemetric"
 
 ##
-#Litejobqueue is a job queueing and processing system designed for Ruby applications. It is built on top of SQLite, which is an embedded relational database management system that is #lightweight and fast.
+# Litejobqueue is a job queueing and processing system designed for Ruby applications. It is built on top of SQLite, which is an embedded relational database management system that is #lightweight and fast.
 #
-#One of the main benefits of Litejobqueue is that it is very low on resources, making it an ideal choice for applications that need to manage a large number of jobs without incurring #high resource costs. In addition, because it is built on SQLite, it is easy to use and does not require any additional configuration or setup.
+# One of the main benefits of Litejobqueue is that it is very low on resources, making it an ideal choice for applications that need to manage a large number of jobs without incurring #high resource costs. In addition, because it is built on SQLite, it is easy to use and does not require any additional configuration or setup.
 #
-#Litejobqueue also integrates well with various I/O frameworks like Async and Polyphony, making it a great choice for Ruby applications that use these frameworks. It provides a #simple and easy-to-use API for adding jobs to the queue and for processing them.
+# Litejobqueue also integrates well with various I/O frameworks like Async and Polyphony, making it a great choice for Ruby applications that use these frameworks. It provides a #simple and easy-to-use API for adding jobs to the queue and for processing them.
 #
-#Overall, LiteJobQueue is an excellent choice for Ruby applications that require a lightweight, embedded job queueing and processing system that is fast, efficient, and easy to use.
+# Overall, LiteJobQueue is an excellent choice for Ruby applications that require a lightweight, embedded job queueing and processing system that is fast, efficient, and easy to use.
 class Litejobqueue < Litequeue
-
   include Litemetric::Measurable
 
   # the default options for the job queue
-  # can be overriden by passing new options in a hash
+  # can be overriden by passing new options in a hash
   # to Litejobqueue.new, it will also be then passed to the underlying Litequeue object
   # config_path: "./litejob.yml" -> were to find the configuration file (if any)
   # path: "./db/queue.db"
   # mmap_size: 128 * 1024 * 1024 -> 128MB to be held in memory
   # sync: 1 -> sync only when checkpointing
-  # queues: [["default", 1, "spawn"]] -> an array of queues to process
+  # queues: [["default", 1, "spawn"]] -> an array of queues to process
   # workers: 1 -> number of job processing workers
   # sleep_intervals: [0.001, 0.005, 0.025, 0.125, 0.625, 3.125] -> sleep intervals for workers
   # queues will be processed according to priority, such that if the queues are as such
   # queues: [["default", 1, "spawn"], ["urgent", 10]]
   # it means that roughly, if the queues are full, for each 10 urgent jobs, 1 default job will be processed
-  # the priority value is mandatory. The optional "spawn" parameter tells the job workers to spawn a separate execution context (thread or fiber, based on environment) for each job.
-  # This can be particularly useful for long running, IO bound jobs. It is not recommended though for threaded environments, as it can result in creating many threads that may consudme a lot of memory.
+  # the priority value is mandatory. The optional "spawn" parameter tells the job workers to spawn a separate execution context (thread or fiber, based on environment) for each job.
+  # This can be particularly useful for long running, IO bound jobs. It is not recommended though for threaded environments, as it can result in creating many threads that may consudme a lot of memory.
   DEFAULT_OPTIONS = {
     config_path: "./litejob.yml",
     path: Litesupport.root.join("queue.sqlite3"),
     queues: [["default", 1]],
     workers: 5,
-    retries: 5,
+    retries: 5,
     retry_delay: 60,
     retry_delay_multiplier: 10,
     dead_job_retention: 10 * 24 * 3600,
-    gc_sleep_interval: 7200,
-    logger:
+    gc_sleep_interval: 7200,
+    logger: "STDOUT",
     sleep_intervals: [0.001, 0.005, 0.025, 0.125, 0.625, 1.0, 2.0],
-    metrics: false
+    metrics: false
   }
-
+
   @@queue = nil
-
+
   attr_reader :running
-
+
   alias_method :_push, :push
-
+
   # a method that returns a single instance of the job queue
   # for use by Litejob
   def self.jobqueue(options = {})
-    @@queue ||= Litescheduler.synchronize{
+    @@queue ||= Litescheduler.synchronize { new(options) }
   end
 
   def self.new(options = {})
     return @@queue if @@queue
     @@queue = allocate
     @@queue.send(:initialize, options)
-    @@queue
+    @@queue
   end
 
   # create new queue instance (only once instance will be created in the process)
   # jobqueue = Litejobqueue.new
-  #
+  #
   def initialize(options = {})
-
     @queues = [] # a place holder to allow workers to process
     super(options)
-
+
     # group and order queues according to their priority
     pgroups = {}
     @options[:queues].each do |q|
       pgroups[q[1]] = [] unless pgroups[q[1]]
       pgroups[q[1]] << [q[0], q[2] == "spawn"]
     end
-    @queues = pgroups.keys.sort.reverse.collect{|p| [p, pgroups[p]]}
+    @queues = pgroups.keys.sort.reverse.collect { |p| [p, pgroups[p]] }
     collect_metrics if @options[:metrics]
   end
 
   def metrics_identifier
     "Litejob" # overrides default identifier
   end
-
+
   # push a job to the queue
   # class EasyJob
   #   def perform(any, number, of_params)
   #     # do anything
-  #   end
+  #   end
   # end
   # jobqueue = Litejobqueue.new
   # jobqueue.push(EasyJob, params) # the job will be performed asynchronously
-  def push(jobclass, params, delay=0, queue=nil)
+  def push(jobclass, params, delay = 0, queue = nil)
     payload = Oj.dump({klass: jobclass, params: params, retries: @options[:retries], queue: queue}, mode: :strict)
     res = super(payload, delay, queue)
     capture(:enqueue, queue)
     @logger.info("[litejob]:[ENQ] queue:#{res[1]} class:#{jobclass} job:#{res[0]}")
     res
   end
-
-  def repush(id, job, delay=0, queue=nil)
+
+  def repush(id, job, delay = 0, queue = nil)
     res = super(id, Oj.dump(job, mode: :strict), delay, queue)
     capture(:enqueue, queue)
     @logger.info("[litejob]:[ENQ] queue:#{res[0]} class:#{job[:klass]} job:#{id}")
     res
   end
-
+
   # delete a job from the job queue
   # class EasyJob
   #   def perform(any, number, of_params)
   #     # do anything
-  #   end
+  #   end
   # end
   # jobqueue = Litejobqueue.new
   # id = jobqueue.push(EasyJob, params, 10) # queue for processing in 10 seconds
-  # jobqueue.delete(id)
+  # jobqueue.delete(id)
   def delete(id)
     job = super(id)
     @logger.info("[litejob]:[DEL] job: #{job}")
     job = Oj.load(job[0], symbol_keys: true) if job
     job
   end
-
+
   # delete all jobs in a certain named queue
   # or delete all jobs if the queue name is nil
-  #def clear(queue=nil)
-
-  #end
-
+  # def clear(queue=nil)
+  #   @queue.clear(queue)
+  # end
+
   # stop the queue object (does not delete the jobs in the queue)
   # specifically useful for testing
   def stop
     @running = false
-
+    # @@queue = nil
     close
   end
-
-
+
   private
 
   def exit_callback
     @running = false # stop all workers
+    return unless @jobs_in_flight > 0
     puts "--- Litejob detected an exit, cleaning up"
     index = 0
-    while @jobs_in_flight > 0
+    while @jobs_in_flight > 0 && index < 30 # 3 seconds grace period for jobs to finish
       puts "--- Waiting for #{@jobs_in_flight} jobs to finish"
       sleep 0.1
       index += 1
@@ -157,76 +155,76 @@ class Litejobqueue < Litequeue
   def setup
     super
     @jobs_in_flight = 0
-    @workers = @options[:workers].times.collect{ create_worker }
+    @workers = @options[:workers].times.collect { create_worker }
     @gc = create_garbage_collector
-    @mutex = Litesupport::Mutex.new
+    @mutex = Litesupport::Mutex.new
   end
-
+
   def job_started
-    Litescheduler.synchronize(@mutex){@jobs_in_flight += 1}
+    Litescheduler.synchronize(@mutex) { @jobs_in_flight += 1 }
   end
-
+
   def job_finished
-    Litescheduler.synchronize(@mutex){@jobs_in_flight -= 1}
+    Litescheduler.synchronize(@mutex) { @jobs_in_flight -= 1 }
   end
-
+
   # optionally run a job in its own context
   def schedule(spawn = false, &block)
     if spawn
-      Litescheduler.spawn
+      Litescheduler.spawn(&block)
     else
       yield
     end
-  end
-
+  end
+
   # create a worker according to environment
   def create_worker
     Litescheduler.spawn do
       worker_sleep_index = 0
-      while @running
+      while @running
         processed = 0
         @queues.each do |priority, queues| # iterate through the levels
           queues.each do |queue, spawns| # iterate through the queues in the level
             batched = 0
-
+
             while (batched < priority) && (payload = pop(queue, 1)) # fearlessly use the same queue object
               capture(:dequeue, queue)
               processed += 1
              batched += 1
-
+
              id, serialized_job = payload
              process_job(queue, id, serialized_job, spawns)
-
+
              Litescheduler.switch # give other contexts a chance to run here
            end
          end
        end
-        if processed == 0
-          sleep @options[:sleep_intervals][worker_sleep_index]
-          worker_sleep_index += 1 if worker_sleep_index < @options[:sleep_intervals].length - 1
+        if processed == 0
+          sleep @options[:sleep_intervals][worker_sleep_index]
+          worker_sleep_index += 1 if worker_sleep_index < @options[:sleep_intervals].length - 1
        else
          worker_sleep_index = 0 # reset the index
        end
      end
    end
-  end
-
+  end
+
   # create a gc for dead jobs
   def create_garbage_collector
     Litescheduler.spawn do
-      while @running
-        while jobs = pop(
+      while @running
+        while (jobs = pop("_dead", 100))
          if jobs[0].is_a? Array
            @logger.info "[litejob]:[DEL] garbage collector deleted #{jobs.length} dead jobs"
          else
            @logger.info "[litejob]:[DEL] garbage collector deleted 1 dead job"
          end
        end
-        sleep @options[:gc_sleep_interval]
+        sleep @options[:gc_sleep_interval]
      end
    end
  end
-
+
   def process_job(queue, id, serialized_job, spawns)
     job = Oj.load(serialized_job)
     @logger.info "[litejob]:[DEQ] queue:#{queue} class:#{job["klass"]} job:#{id}"