solid_queue_lite 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +10 -0
- data/MIT-LICENSE +20 -0
- data/README.md +142 -0
- data/Rakefile +3 -0
- data/app/assets/stylesheets/soliq_queue_lite/application.css +15 -0
- data/app/controllers/concerns/solid_queue_lite/approximate_countable.rb +10 -0
- data/app/controllers/solid_queue_lite/application_controller.rb +4 -0
- data/app/controllers/solid_queue_lite/dashboards_controller.rb +61 -0
- data/app/controllers/solid_queue_lite/jobs_controller.rb +129 -0
- data/app/controllers/solid_queue_lite/processes_controller.rb +39 -0
- data/app/controllers/solid_queue_lite/queues_controller.rb +31 -0
- data/app/helpers/solid_queue_lite/application_helper.rb +27 -0
- data/app/jobs/solid_queue_lite/application_job.rb +4 -0
- data/app/jobs/solid_queue_lite/telemetry_sampler_job.rb +11 -0
- data/app/models/solid_queue_lite/application_record.rb +5 -0
- data/app/models/solid_queue_lite/stat.rb +7 -0
- data/app/views/layouts/solid_queue_lite/application.html.erb +383 -0
- data/app/views/solid_queue_lite/dashboards/show.html.erb +573 -0
- data/config/routes.rb +30 -0
- data/db/migrate/20260406000000_create_solid_queue_lite_stats.rb +16 -0
- data/lib/solid_queue_lite/approximate_counter.rb +87 -0
- data/lib/solid_queue_lite/engine.rb +20 -0
- data/lib/solid_queue_lite/install.rb +107 -0
- data/lib/solid_queue_lite/jobs.rb +236 -0
- data/lib/solid_queue_lite/processes.rb +156 -0
- data/lib/solid_queue_lite/telemetry.rb +201 -0
- data/lib/solid_queue_lite/version.rb +3 -0
- data/lib/solid_queue_lite.rb +46 -0
- data/lib/tasks/solid_queue_lite_tasks.rake +14 -0
- metadata +116 -0
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
require "json"

module SolidQueueLite
  # Cheap, approximate COUNT(*) replacements for dashboard pagination.
  # Exact counts on large job tables are expensive, so these helpers use
  # planner/catalog statistics on PostgreSQL and MySQL, and fall back to an
  # exact count on SQLite (where tables are expected to be small).
  module ApproximateCounter
    module_function

    # Returns an Integer estimate of the relation's row count.
    #
    # Unfiltered relations use whole-table statistics; filtered relations
    # fall back to the planner's EXPLAIN row estimate. A nil estimate
    # (missing catalog row, unparseable plan) becomes 0 via the final to_i.
    #
    # Raises NotImplementedError for unsupported adapters.
    def count(relation)
      adapter_name = relation.connection.adapter_name.downcase

      case adapter_name
      when /postgres/
        simple_relation?(relation) ? postgresql_table_estimate(relation) : postgresql_explain_estimate(relation)
      when /mysql/, /trilogy/
        simple_relation?(relation) ? mysql_table_estimate(relation) : mysql_explain_estimate(relation)
      when /sqlite/
        relation.except(:select, :order).count
      else
        raise NotImplementedError, "Unsupported adapter for approximate counts: #{relation.connection.adapter_name}"
      end.to_i
    end

    # True when the relation has no filtering/joining/grouping/limiting
    # clauses, meaning a whole-table statistic is a valid estimate.
    def simple_relation?(relation)
      relation.where_clause.empty? &&
        relation.joins_values.empty? &&
        relation.left_outer_joins_values.empty? &&
        relation.group_values.empty? &&
        relation.having_clause.empty? &&
        relation.limit_value.nil? &&
        relation.offset_value.nil? &&
        !relation.distinct_value
    end

    # Whole-table estimate from pg_class.reltuples.
    #
    # GREATEST clamps the value at 0: reltuples is -1 for tables that have
    # never been vacuumed or analyzed (PostgreSQL 14+), which would
    # otherwise surface as a negative count in the UI.
    def postgresql_table_estimate(relation)
      relation.connection.select_value(<<~SQL.squish)&.to_i
        SELECT GREATEST(reltuples, 0)
        FROM pg_class
        WHERE oid = #{relation.connection.quote(relation.table_name)}::regclass
      SQL
    end

    # Whole-table estimate from information_schema.tables.table_rows
    # (a rough, periodically refreshed statistic on InnoDB).
    def mysql_table_estimate(relation)
      relation.connection.select_value(<<~SQL.squish)&.to_i
        SELECT COALESCE(table_rows, 0)
        FROM information_schema.tables
        WHERE table_schema = DATABASE()
          AND table_name = #{relation.connection.quote(relation.table_name)}
      SQL
    end

    # Planner row estimate for a filtered relation on PostgreSQL, taken from
    # the root node of the JSON EXPLAIN output.
    def postgresql_explain_estimate(relation)
      plan_json = relation.connection.select_value(
        "EXPLAIN (FORMAT JSON) #{relation.except(:select, :order).to_sql}"
      )

      JSON.parse(plan_json).dig(0, "Plan", "Plan Rows")
    end

    # Planner row estimate for a filtered relation on MySQL/Trilogy.
    def mysql_explain_estimate(relation)
      explain_json = relation.connection.select_value(
        "EXPLAIN FORMAT=JSON #{relation.except(:select, :order).to_sql}"
      )

      extract_mysql_row_estimate(JSON.parse(explain_json))
    end

    # Depth-first search through MySQL's EXPLAIN JSON for the first
    # row-count field, preferring the most specific key when several are
    # present on one node. Returns nil when the plan contains no
    # recognizable estimate.
    def extract_mysql_row_estimate(node)
      case node
      when Hash
        return node["rows_produced_per_join"] if node.key?("rows_produced_per_join")
        return node["rows_examined_per_scan"] if node.key?("rows_examined_per_scan")
        return node["rows"] if node.key?("rows")

        node.each_value do |value|
          estimate = extract_mysql_row_estimate(value)
          return estimate if estimate
        end
      when Array
        node.each do |value|
          estimate = extract_mysql_row_estimate(value)
          return estimate if estimate
        end
      end

      nil
    end
  end
end
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
module SolidQueueLite
  # Rails engine that isolates Solid Queue Lite's routes, models and views
  # under the SolidQueueLite namespace.
  class Engine < ::Rails::Engine
    isolate_namespace SolidQueueLite

    # Touch the configuration during boot so its defaults are materialized
    # before anything else reads them.
    initializer "solid_queue_lite.configuration" do
      SolidQueueLite.configuration
    end

    # Best-effort telemetry backfill at boot. Each guard bails quietly when
    # backfill is disabled, no database connection is established yet, or
    # the stats table has not been migrated.
    config.after_initialize do
      next unless SolidQueueLite.configuration.telemetry_backfill_on_boot
      next unless ActiveRecord::Base.connected?
      next unless ActiveRecord::Base.connection.data_source_exists?("solid_queue_lite_stats")

      # NOTE(review): the bare constant reference presumably forces the
      # Stat model to autoload inside this rescue — confirm intent.
      SolidQueueLite::Stat
      SolidQueueLite::Telemetry.backfill!
    rescue StandardError
      # Backfill is strictly optional; never let it block application boot.
      nil
    end
  end
end
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
require "fileutils"
require "pathname"

module SolidQueueLite
  # Installs Solid Queue Lite into a host Rails application: copies the
  # engine's migrations, writes a default initializer, optionally runs
  # `db:migrate`, and prints follow-up instructions.
  class Install
    INITIALIZER_RELATIVE_PATH = "config/initializers/solid_queue_lite.rb"

    # host_root: root directory of the host application. Defaults to
    #   Rails.root, but the default is only evaluated when the argument is
    #   omitted, so tests can pass a plain path without Rails loaded.
    # stdout: IO-like object receiving progress messages (injectable).
    def initialize(host_root: Rails.root, stdout: $stdout)
      @host_root = Pathname(host_root)
      @stdout = stdout
    end

    # Runs the full install. When migrate: is true, also invokes db:migrate.
    def run!(migrate: false)
      install_migrations
      install_initializer
      run_migrations if migrate
      print_next_steps(migrate: migrate)
    end

    private

    attr_reader :host_root, :stdout

    # Copies the engine's migrations into the host app via the standard
    # railties task, which selects the source engine from ENV["FROM"].
    def install_migrations
      with_env("FROM" => "solid_queue_lite") do
        invoke_task("railties:install:migrations")
      end
    end

    # Writes the default initializer unless the host already has one;
    # an existing file is never overwritten.
    def install_initializer
      if initializer_path.exist?
        say "Skipped #{relative_initializer_path}; file already exists"
        return
      end

      FileUtils.mkdir_p(initializer_path.dirname)
      initializer_path.write(initializer_template)
      say "Created #{relative_initializer_path}"
    end

    def run_migrations
      invoke_task("db:migrate")
    end

    # Prints post-install guidance, including a migrate reminder when
    # migrations were not run as part of this install.
    def print_next_steps(migrate:)
      say ""
      say "Solid Queue Lite install complete."

      unless migrate
        say "Run `bin/rails db:migrate` or rerun with `bin/rails solid_queue_lite:install MIGRATE=1`."
      end

      say "Mount the engine inside your host application's auth boundary, for example:"
      say ""
      say "authenticate :user, ->(user) { user.admin? } do"
      say "  mount SolidQueueLite::Engine => \"/ops/jobs\""
      say "end"
      say ""
      say "Schedule `SolidQueueLite::TelemetrySamplerJob` in `config/recurring.yml` if you want historical charts."
    end

    # Re-enables the task first so repeated installs within one process
    # still execute it (Rake tasks only run once by default).
    def invoke_task(task_name)
      task = Rake::Task[task_name]
      task.reenable
      task.invoke
    end

    # Temporarily sets the given ENV entries for the duration of the block,
    # restoring the previous values — including deleting keys that were
    # originally unset — even if the block raises.
    def with_env(updates)
      previous_values = updates.keys.to_h { |key| [key, ENV[key]] }
      updates.each { |key, value| ENV[key] = value }

      yield
    ensure
      previous_values.each do |key, value|
        ENV[key] = value
      end
    end

    def initializer_path
      host_root.join(relative_initializer_path)
    end

    def relative_initializer_path
      INITIALIZER_RELATIVE_PATH
    end

    # Default initializer contents written into the host application.
    def initializer_template
      <<~RUBY
        SolidQueueLite.configure do |config|
          config.tenant_scope = lambda do |relation|
            relation
          end

          config.telemetry_backfill_on_boot = true
        end
      RUBY
    end

    def say(message)
      stdout.puts(message)
    end
  end
end
|
|
@@ -0,0 +1,236 @@
|
|
|
1
|
+
module SolidQueueLite
  # Tenant-scoped query and bulk-action helpers around SolidQueue::Job.
  module Jobs
    # Hard cap on pagination depth: deep OFFSETs on large job tables are
    # expensive, so the UI never pages beyond MAX_PAGES.
    MAX_PAGES = 10
    DEFAULT_PER_PAGE = 50
    MAX_PER_PAGE = 100

    # User-facing state keys mapped to internal execution states;
    # "in_progress" is an alias of "claimed".
    SUPPORTED_STATES = {
      "ready" => :ready,
      "in_progress" => :claimed,
      "claimed" => :claimed,
      "scheduled" => :scheduled,
      "failed" => :failed,
      "recurring" => :recurring
    }.freeze

    # Execution associations eager-loaded when full job details are needed.
    DETAIL_ASSOCIATIONS = %i[
      ready_execution
      claimed_execution
      failed_execution
      scheduled_execution
      blocked_execution
      recurring_execution
    ].freeze

    # Join target per internal state, used to filter jobs by state.
    STATE_JOINS = {
      ready: :ready_execution,
      claimed: :claimed_execution,
      scheduled: :scheduled_execution,
      failed: :failed_execution,
      recurring: :recurring_execution
    }.freeze

    module_function

    # Lists jobs for a state (optionally narrowed to one queue), newest
    # first, returning the jobs plus filter and pagination metadata.
    # When include_details is false, rows are plucked without instantiating
    # ActiveRecord models for speed.
    def list(state_key:, page: 1, per_page: DEFAULT_PER_PAGE, queue_name: nil, include_details: false)
      state = resolve_state(state_key)
      relation = filtered_jobs_relation(state: state, queue_name: queue_name)
      approximate_total = SolidQueueLite::ApproximateCounter.count(relation)
      total_pages = estimated_total_pages(approximate_total, per_page)

      listed_relation = relation
        .reorder(created_at: :desc, id: :desc)
        .limit(per_page)
        .offset((page - 1) * per_page)

      jobs =
        if include_details
          listed_relation.includes(*DETAIL_ASSOCIATIONS).map { |job| serialize_list_job(job) }
        else
          listed_relation
            .pluck(:id, :class_name, :queue_name)
            .map do |id, class_name, queue_name_value|
              { id: id, class_name: class_name, queue_name: queue_name_value, state: state.to_s }
            end
        end

      {
        jobs: jobs,
        selected_state: state_key,
        selected_queue_name: queue_name.presence,
        state_options: SUPPORTED_STATES.keys,
        pagination: {
          page: page,
          per_page: per_page,
          total_pages: total_pages,
          approximate_total_count: approximate_total,
          max_pages: MAX_PAGES
        }
      }
    end

    # Finds one tenant-scoped job with all executions preloaded.
    # Raises ActiveRecord::RecordNotFound when missing or out of scope.
    def find(id)
      scoped_jobs(::SolidQueue::Job.includes(*DETAIL_ASSOCIATIONS)).find(id)
    end

    # Re-enqueues a failed job; raises when the job is not failed.
    def retry!(id)
      job = scoped_jobs(::SolidQueue::Job.includes(:failed_execution)).find(id)
      raise StandardError, "Only failed jobs can be retried" unless job.failed_execution

      job.failed_execution.retry
      job
    end

    # Discards a job; returns the job and the state it was in beforehand.
    def discard!(id)
      job = find(id)
      previous_state = job.status
      job.discard
      [ job, previous_state ]
    end

    # Retries a selection of failed jobs at once and returns how many were
    # selected. Only the "failed" state supports bulk retry.
    def bulk_retry!(job_ids:, state_key: "failed")
      raise StandardError, "Bulk retry is only available for failed jobs" unless resolve_state(state_key) == :failed

      jobs = selected_jobs(job_ids)
      ::SolidQueue::FailedExecution.retry_all(jobs)
      jobs.size
    end

    # Discards a selection of jobs in the given state; returns the count.
    def bulk_discard!(job_ids:, state_key:)
      jobs = selected_jobs(job_ids)
      bulk_discard_execution_class(state_key).discard_all_from_jobs(jobs)
      jobs.size
    end

    # Full job representation used by detail views and JSON responses.
    def serialize(job)
      {
        id: job.id,
        active_job_id: job.active_job_id,
        class_name: job.class_name,
        queue_name: job.queue_name,
        priority: job.priority,
        scheduled_at: job.scheduled_at,
        finished_at: job.finished_at,
        created_at: job.created_at,
        updated_at: job.updated_at,
        concurrency_key: job.concurrency_key,
        arguments: job.arguments,
        state: job.status,
        failed_execution: serialize_failed_execution(job.failed_execution),
        recurring_execution: serialize_recurring_execution(job.recurring_execution)
      }
    end

    # List-row representation: full serialization plus display labels.
    # error_label falls back to the humanized state when there is no
    # failed execution to name an exception class.
    def serialize_list_job(job)
      serialize(job).merge(
        state: job.status.to_s,
        state_label: job.status.to_s.humanize,
        error_label: job.failed_execution&.exception_class || job.status.to_s.humanize
      )
    end

    # Maps a user-facing state key to its internal symbol; raises
    # ActionController::BadRequest for unknown keys.
    def resolve_state(state_key)
      SUPPORTED_STATES.fetch(state_key)
    rescue KeyError
      raise ::ActionController::BadRequest, "state must be one of: #{SUPPORTED_STATES.keys.join(', ')}"
    end

    # Validates a page number against the 1..MAX_PAGES window.
    def normalize_page(page)
      value = page.to_i
      raise ::ActionController::BadRequest, "page must be between 1 and #{MAX_PAGES}" if value < 1 || value > MAX_PAGES

      value
    end

    # Coerces a per-page value: non-positive input falls back to the
    # default, anything above MAX_PER_PAGE is capped.
    def normalize_per_page(per_page)
      value = per_page.to_i
      return DEFAULT_PER_PAGE if value <= 0

      value > MAX_PER_PAGE ? MAX_PER_PAGE : value
    end

    # Resolves selected job ids into tenant-scoped records, rejecting empty
    # selections and selections entirely outside the tenant scope.
    def selected_jobs(job_ids)
      ids = Array(job_ids).map(&:to_i).uniq
      raise ::ActionController::BadRequest, "At least one job must be selected" if ids.empty?

      jobs = scoped_jobs(::SolidQueue::Job.where(id: ids)).to_a
      raise ::ActionController::BadRequest, "Selected jobs could not be found" if jobs.empty?

      jobs
    end

    # Query params to carry over when redirecting back to the jobs list.
    def jobs_redirect_params(params, default_state: "failed")
      {
        state: params[:state].presence || default_state,
        queue_name: params[:queue_name].presence,
        per_page: params[:per_page].presence,
        page: params[:page].presence
      }.compact
    end

    # Applies the host application's tenant scope to a jobs relation.
    def scoped_jobs(relation = ::SolidQueue::Job.all)
      SolidQueueLite.apply_tenant_scope(relation)
    end

    # Tenant-scoped jobs relation filtered by state (via the matching
    # execution join) and, optionally, by queue name.
    def filtered_jobs_relation(state:, queue_name: nil)
      relation = scoped_jobs(::SolidQueue::Job.all)
      relation = relation.where(queue_name: queue_name) if queue_name.present?

      association = STATE_JOINS[state]
      raise ::ActionController::BadRequest, "Unsupported state filter: #{state}" unless association

      relation.joins(association)
    end

    def serialize_failed_execution(failed_execution)
      return unless failed_execution

      {
        id: failed_execution.id,
        message: failed_execution.message,
        exception_class: failed_execution.exception_class,
        backtrace: failed_execution.backtrace,
        created_at: failed_execution.created_at
      }
    end

    def serialize_recurring_execution(recurring_execution)
      return unless recurring_execution

      {
        id: recurring_execution.id,
        task_key: recurring_execution.task_key,
        run_at: recurring_execution.run_at,
        created_at: recurring_execution.created_at
      }
    end

    # Pages implied by an approximate total, at least 1 and capped at
    # MAX_PAGES.
    def estimated_total_pages(approximate_total, per_page)
      pages = (approximate_total.to_f / per_page).ceil
      pages = 1 if pages.zero?
      [ pages, MAX_PAGES ].min
    end

    # Execution class used for bulk discards of the given state key; only
    # ready, scheduled and failed support bulk discard.
    def bulk_discard_execution_class(state_key)
      case resolve_state(state_key)
      when :ready
        ::SolidQueue::ReadyExecution
      when :scheduled
        ::SolidQueue::ScheduledExecution
      when :failed
        ::SolidQueue::FailedExecution
      else
        raise ::ActionController::BadRequest, "Bulk discard is supported only for ready, scheduled, and failed jobs"
      end
    end
  end
end
|
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
module SolidQueueLite
  # Read-model and queue-management helpers for Solid Queue processes
  # (workers, dispatchers, supervisors) and their queues.
  module Processes
    # Workers configured with "*" consume every queue.
    WILDCARD_QUEUE_NAME = "*"

    module_function

    # Builds the data for the processes dashboard: every registered process
    # with a derived health status, the heartbeat thresholds used to derive
    # it, and per-queue statistics.
    def index_data
      stale_cutoff = ::SolidQueue.process_alive_threshold.ago
      # A process counts as dead after missing two heartbeat windows.
      dead_cutoff = (::SolidQueue.process_alive_threshold * 2).ago
      available_queue_names = queue_names
      processes = ::SolidQueue::Process
        .order(:kind, last_heartbeat_at: :desc)
        .pluck(:id, :kind, :name, :pid, :hostname, :last_heartbeat_at, :supervisor_id, :metadata)
        .map do |id, kind, name, pid, hostname, last_heartbeat_at, supervisor_id, metadata|
          status = process_status(last_heartbeat_at, stale_cutoff: stale_cutoff, dead_cutoff: dead_cutoff)

          {
            id: id,
            kind: kind,
            name: name,
            pid: pid,
            hostname: hostname,
            last_heartbeat_at: last_heartbeat_at,
            supervisor_id: supervisor_id,
            stale: status != "active",
            status: status,
            metadata: metadata || {},
            queue_names: queue_names_from_metadata(metadata, fallback_queue_names: available_queue_names)
          }
        end

      {
        processes: processes,
        heartbeat: {
          stale_after_seconds: ::SolidQueue.process_alive_threshold,
          stale_cutoff: stale_cutoff,
          dead_cutoff: dead_cutoff
        },
        queues: queue_rows(queue_names: available_queue_names)
      }
    end

    # Removes processes with expired heartbeats and returns how many were
    # prunable beforehand.
    # NOTE(review): the count filters by kind while `prune` does not, so
    # the returned number may not match what was actually pruned — confirm.
    def prune!
      prunable_before = ::SolidQueue::Process.prunable.where(kind: [ "Worker", "Dispatcher", "Supervisor", "Supervisor(fork)", "Scheduler" ]).count
      ::SolidQueue::Process.prune
      prunable_before
    end

    def pause_queue!(queue_name)
      queue(queue_name).pause
    end

    def resume_queue!(queue_name)
      queue(queue_name).resume
    end

    def clear_queue!(queue_name)
      queue(queue_name).clear
    end

    # Per-queue statistics rows (exact counts per state, latency) for the
    # dashboard.
    def queue_rows(queue_names: self.queue_names)
      queue_names.map do |queue_name|
        queue_record = queue(queue_name)

        {
          name: queue_name,
          paused: queue_record.paused?,
          ready_estimate: exact_queue_count(queue_name, :ready),
          in_progress_count: exact_queue_count(queue_name, :claimed),
          failed_count: exact_queue_count(queue_name, :failed),
          scheduled_count: exact_queue_count(queue_name, :scheduled),
          recurring_count: exact_queue_count(queue_name, :recurring),
          total_jobs_count: exact_total_jobs_count(queue_name),
          latency_seconds: queue_record.latency,
          human_latency: queue_record.human_latency
        }
      end
    end

    def queue(queue_name)
      ::SolidQueue::Queue.find_by_name(queue_name)
    end

    # Exact (not approximate) count of jobs in the given state for a queue.
    def exact_queue_count(queue_name, state)
      relation = SolidQueueLite::Jobs.filtered_jobs_relation(state: state, queue_name: queue_name)
      relation.except(:select, :order).count
    end

    def exact_total_jobs_count(queue_name)
      SolidQueueLite::Jobs.scoped_jobs(::SolidQueue::Job.where(queue_name: queue_name)).count
    end

    # Classifies a heartbeat timestamp as "active", "stale" or "dead".
    # A missing heartbeat is treated as dead.
    def process_status(last_heartbeat_at, stale_cutoff:, dead_cutoff:)
      return "dead" unless last_heartbeat_at
      return "dead" if last_heartbeat_at <= dead_cutoff
      return "stale" if last_heartbeat_at <= stale_cutoff

      "active"
    end

    # Queue names a process serves, read from its metadata. A wildcard
    # entry expands to the full set of known queue names.
    def queue_names_from_metadata(metadata, fallback_queue_names: [])
      return [] unless metadata.is_a?(Hash)

      value = metadata["queues"] || metadata[:queues] || metadata["queue_names"] || metadata[:queue_names]
      queue_names = extract_queue_names(value)

      if queue_names.include?(WILDCARD_QUEUE_NAME)
        (queue_names - [ WILDCARD_QUEUE_NAME ] + fallback_queue_names).uniq
      else
        queue_names
      end
    end

    # Union of queue names seen in the queues table, the static
    # configuration, and live process metadata; wildcards excluded,
    # deduplicated and sorted for display.
    def queue_names
      (
        ::SolidQueue::Queue.all.map(&:name) +
        configured_queue_names +
        configured_process_queue_names
      ).reject { |queue_name| wildcard_queue_name?(queue_name) }.uniq.sort
    end

    # Queue names from the static Solid Queue configuration (workers only).
    # Best-effort: any configuration parsing error yields an empty list.
    def configured_queue_names
      ::SolidQueue::Configuration.new.configured_processes.filter_map do |configured_process|
        next unless configured_process.kind.to_sym == :worker

        configured_process.attributes[:queues]
      end.flat_map { |value| extract_queue_names(value) }
    rescue StandardError
      []
    end

    # Queue names advertised in live process metadata rows.
    def configured_process_queue_names
      ::SolidQueue::Process.pluck(:metadata).flat_map do |metadata|
        next [] unless metadata.is_a?(Hash)

        value = metadata["queues"] || metadata[:queues] || metadata["queue_names"] || metadata[:queue_names]
        extract_queue_names(value)
      end
    end

    # Normalizes a queue-name specification (comma/space separated string,
    # possibly nested arrays) into a flat array of non-empty names.
    def extract_queue_names(value)
      case value
      when String
        # reject(&:empty?) drops the blank token split yields for leading
        # separators; plain Ruby, no ActiveSupport #presence needed here.
        value.split(/[\s,]+/).reject(&:empty?)
      when Array
        value.flat_map { |entry| extract_queue_names(entry) }
      else
        []
      end
    end

    def wildcard_queue_name?(queue_name)
      queue_name == WILDCARD_QUEUE_NAME
    end
  end
end
|