prometheus_exporter 0.8.0 → 2.0.0
This diff compares the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +36 -25
- data/CHANGELOG +51 -26
- data/README.md +151 -52
- data/bin/prometheus_exporter +19 -6
- data/lib/prometheus_exporter/client.rb +13 -6
- data/lib/prometheus_exporter/instrumentation/active_record.rb +5 -5
- data/lib/prometheus_exporter/instrumentation/process.rb +1 -1
- data/lib/prometheus_exporter/instrumentation/puma.rb +12 -12
- data/lib/prometheus_exporter/instrumentation/resque.rb +7 -7
- data/lib/prometheus_exporter/instrumentation/sidekiq.rb +27 -7
- data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb +58 -0
- data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb +27 -13
- data/lib/prometheus_exporter/instrumentation/sidekiq_stats.rb +43 -0
- data/lib/prometheus_exporter/instrumentation/unicorn.rb +4 -4
- data/lib/prometheus_exporter/instrumentation.rb +2 -0
- data/lib/prometheus_exporter/metric/base.rb +11 -9
- data/lib/prometheus_exporter/metric/gauge.rb +4 -0
- data/lib/prometheus_exporter/metric/histogram.rb +13 -1
- data/lib/prometheus_exporter/middleware.rb +8 -4
- data/lib/prometheus_exporter/server/collector.rb +2 -0
- data/lib/prometheus_exporter/server/delayed_job_collector.rb +17 -18
- data/lib/prometheus_exporter/server/puma_collector.rb +7 -7
- data/lib/prometheus_exporter/server/resque_collector.rb +6 -6
- data/lib/prometheus_exporter/server/runner.rb +11 -2
- data/lib/prometheus_exporter/server/sidekiq_collector.rb +1 -1
- data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +46 -0
- data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb +1 -1
- data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb +46 -0
- data/lib/prometheus_exporter/server/unicorn_collector.rb +3 -3
- data/lib/prometheus_exporter/server/web_collector.rb +17 -17
- data/lib/prometheus_exporter/server/web_server.rb +6 -8
- data/lib/prometheus_exporter/server.rb +2 -0
- data/lib/prometheus_exporter/version.rb +1 -1
- data/prometheus_exporter.gemspec +1 -1
- metadata +10 -6
data/lib/prometheus_exporter/client.rb

@@ -2,6 +2,7 @@
 
 require 'socket'
 require 'thread'
+require 'logger'
 
 module PrometheusExporter
   class Client
@@ -53,14 +54,20 @@ module PrometheusExporter
     MAX_SOCKET_AGE = 25
     MAX_QUEUE_SIZE = 10_000
 
+    attr_reader :logger
+
     def initialize(
      host: ENV.fetch('PROMETHEUS_EXPORTER_HOST', 'localhost'),
      port: ENV.fetch('PROMETHEUS_EXPORTER_PORT', PrometheusExporter::DEFAULT_PORT),
      max_queue_size: nil,
      thread_sleep: 0.5,
      json_serializer: nil,
-     custom_labels: nil
+     custom_labels: nil,
+     logger: Logger.new(STDERR),
+     log_level: Logger::WARN
    )
+      @logger = logger
+      @logger.level = log_level
      @metrics = []
 
      @queue = Queue.new
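The client now owns a logger instead of writing straight to STDERR. A minimal sketch of passing a custom one (the destination and level here are illustrative, not gem defaults):

```ruby
require 'prometheus_exporter/client'
require 'logger'

# Route client-side warnings/errors somewhere other than the
# Logger.new(STDERR) / Logger::WARN defaults shown in the diff above.
client = PrometheusExporter::Client.new(
  logger: Logger.new($stdout),
  log_level: Logger::INFO
)
PrometheusExporter::Client.default = client
```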
@@ -72,7 +79,7 @@
       max_queue_size ||= MAX_QUEUE_SIZE
       max_queue_size = max_queue_size.to_i
 
-      if max_queue_size.zero?
+      if max_queue_size <= 0
         raise ArgumentError, "max_queue_size must be larger than 0"
       end
 
@@ -125,7 +132,7 @@ module PrometheusExporter
     def send(str)
       @queue << str
       if @queue.length > @max_queue_size
-        STDERR.puts "Prometheus Exporter client is dropping message cause queue is full"
+        logger.warn "Prometheus Exporter client is dropping message cause queue is full"
         @queue.pop
       end
 
@@ -143,7 +150,7 @@ module PrometheusExporter
       @socket.write(message)
       @socket.write("\r\n")
     rescue => e
-      STDERR.puts "Prometheus Exporter is dropping a message: #{e}"
+      logger.warn "Prometheus Exporter is dropping a message: #{e}"
       @socket = nil
       raise
     end
@@ -168,7 +175,7 @@ module PrometheusExporter
       close_socket_if_old!
       process_queue
     rescue => e
-      STDERR.puts "Prometheus Exporter, failed to send message #{e}"
+      logger.error "Prometheus Exporter, failed to send message #{e}"
     end
 
     def ensure_worker_thread!
@@ -186,7 +193,7 @@ module PrometheusExporter
       end
     rescue ThreadError => e
       raise unless e.message =~ /can't alloc thread/
-      STDERR.puts "Prometheus Exporter, failed to send message ThreadError #{e}"
+      logger.error "Prometheus Exporter, failed to send message ThreadError #{e}"
     end
 
     def close_socket!
data/lib/prometheus_exporter/instrumentation/active_record.rb

@@ -7,9 +7,11 @@ module PrometheusExporter::Instrumentation
 
     def self.start(client: nil, frequency: 30, custom_labels: {}, config_labels: [])
 
-      # Not all rails versions support connection pool stats
+      client ||= PrometheusExporter::Client.default
+
+      # Not all rails versions support connection pool stats
       unless ::ActiveRecord::Base.connection_pool.respond_to?(:stat)
-        STDERR.puts("ActiveRecord connection pool stats not supported in your rails version")
+        client.logger.error("ActiveRecord connection pool stats not supported in your rails version")
         return
       end
 
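Given the `self.start` signature above, a hypothetical invocation (the label names and values are examples, not defaults):

```ruby
# e.g. in a Rails initializer (illustrative location)
require 'prometheus_exporter/instrumentation'

PrometheusExporter::Instrumentation::ActiveRecord.start(
  custom_labels: { type: 'web' },    # attached to every reported metric
  config_labels: [:database, :host]  # picked out of the AR connection config
)
```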
@@ -18,8 +20,6 @@ module PrometheusExporter::Instrumentation
 
       active_record_collector = new(custom_labels, config_labels)
 
-      client ||= PrometheusExporter::Client.default
-
       stop if @thread
 
       @thread = Thread.new do
@@ -28,7 +28,7 @@ module PrometheusExporter::Instrumentation
            metrics = active_record_collector.collect
            metrics.each { |metric| client.send_json metric }
          rescue => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Process Stats #{e}")
+            client.logger.error("Prometheus Exporter Failed To Collect Process Stats #{e}")
          ensure
            sleep frequency
          end
data/lib/prometheus_exporter/instrumentation/process.rb

@@ -27,7 +27,7 @@ module PrometheusExporter::Instrumentation
            metric = process_collector.collect
            client.send_json metric
          rescue => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Process Stats #{e}")
+            client.logger.error("Prometheus Exporter Failed To Collect Process Stats #{e}")
          ensure
            sleep frequency
          end
data/lib/prometheus_exporter/instrumentation/puma.rb

@@ -14,7 +14,7 @@ module PrometheusExporter::Instrumentation
            metric = puma_collector.collect
            client.send_json metric
          rescue => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Puma Stats #{e}")
+            client.logger.error("Prometheus Exporter Failed To Collect Puma Stats #{e}")
          ensure
            sleep frequency
          end
@@ -46,9 +46,9 @@ module PrometheusExporter::Instrumentation
 
       if stats.key?("workers")
         metric[:phase] = stats["phase"]
-        metric[:workers_total] = stats["workers"]
-        metric[:booted_workers_total] = stats["booted_workers"]
-        metric[:old_workers_total] = stats["old_workers"]
+        metric[:workers] = stats["workers"]
+        metric[:booted_workers] = stats["booted_workers"]
+        metric[:old_workers] = stats["old_workers"]
 
         stats["worker_status"].each do |worker|
           next if worker["last_status"].empty?
@@ -62,15 +62,15 @@ module PrometheusExporter::Instrumentation
     private
 
     def collect_worker_status(metric, status)
-      metric[:request_backlog_total] ||= 0
-      metric[:running_threads_total] ||= 0
-      metric[:thread_pool_capacity_total] ||= 0
-      metric[:max_threads_total] ||= 0
+      metric[:request_backlog] ||= 0
+      metric[:running_threads] ||= 0
+      metric[:thread_pool_capacity] ||= 0
+      metric[:max_threads] ||= 0
 
-      metric[:request_backlog_total] += status["backlog"]
-      metric[:running_threads_total] += status["running"]
-      metric[:thread_pool_capacity_total] += status["pool_capacity"]
-      metric[:max_threads_total] += status["max_threads"]
+      metric[:request_backlog] += status["backlog"]
+      metric[:running_threads] += status["running"]
+      metric[:thread_pool_capacity] += status["pool_capacity"]
+      metric[:max_threads] += status["max_threads"]
     end
   end
 end
data/lib/prometheus_exporter/instrumentation/resque.rb

@@ -11,7 +11,7 @@ module PrometheusExporter::Instrumentation
          begin
            client.send_json(resque_collector.collect)
          rescue => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Resque Stats #{e}")
+            client.logger.error("Prometheus Exporter Failed To Collect Resque Stats #{e}")
          ensure
            sleep frequency
          end
@@ -29,12 +29,12 @@ module PrometheusExporter::Instrumentation
     def collect_resque_stats(metric)
       info = ::Resque.info
 
-      metric[:processed_jobs_total] = info[:processed]
-      metric[:failed_jobs_total] = info[:failed]
-      metric[:pending_jobs_total] = info[:pending]
-      metric[:queues_total] = info[:queues]
-      metric[:worker_total] = info[:workers]
-      metric[:working_total] = info[:working]
+      metric[:processed_jobs] = info[:processed]
+      metric[:failed_jobs] = info[:failed]
+      metric[:pending_jobs] = info[:pending]
+      metric[:queues] = info[:queues]
+      metric[:worker] = info[:workers]
+      metric[:working] = info[:working]
     end
   end
 end
data/lib/prometheus_exporter/instrumentation/sidekiq.rb

@@ -15,16 +15,24 @@ module PrometheusExporter::Instrumentation
      -> (job, ex) do
        job_is_fire_and_forget = job["retry"] == false
 
+        worker_class = Object.const_get(job["class"])
+        worker_custom_labels = self.get_worker_custom_labels(worker_class)
+
        unless job_is_fire_and_forget
          PrometheusExporter::Client.default.send_json(
            type: "sidekiq",
            name: job["class"],
            dead: true,
+            custom_labels: worker_custom_labels
          )
        end
      end
    end
 
+    def self.get_worker_custom_labels(worker_class)
+      worker_class.respond_to?(:custom_labels) ? worker_class.custom_labels : {}
+    end
+
    def initialize(client: nil)
      @client = client || PrometheusExporter::Client.default
    end
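Since `get_worker_custom_labels` only calls `custom_labels` when the worker class responds to it, a worker opts in with a plain class method. A sketch (the class and label values are made up):

```ruby
class BillingWorker
  include Sidekiq::Worker

  # Picked up via get_worker_custom_labels, both for completed jobs and
  # for the death handler, and merged into the job's metric labels.
  def self.custom_labels
    { service: 'billing' }
  end

  def perform(invoice_id)
    # ...
  end
end
```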
@@ -47,7 +55,8 @@ module PrometheusExporter::Instrumentation
        queue: queue,
        success: success,
        shutdown: shutdown,
-        duration: duration
+        duration: duration,
+        custom_labels: self.class.get_worker_custom_labels(worker.class)
      )
    end
 
@@ -69,19 +78,30 @@ module PrometheusExporter::Instrumentation
    end
 
    def get_delayed_name(msg, class_name)
-      # fallback to class_name since we're relying on the internal implementation
-      # of the delayed extensions
-      # https://github.com/mperham/sidekiq/blob/master/lib/sidekiq/extensions/class_methods.rb
      begin
-        (target, method_name, _args) = YAML.load(msg['args'].first)
+        # fallback to class_name since we're relying on the internal implementation
+        # of the delayed extensions
+        # https://github.com/mperham/sidekiq/blob/master/lib/sidekiq/extensions/class_methods.rb
+        (target, method_name, _args) = YAML.load(msg['args'].first) # rubocop:disable Security/YAMLLoad
        if target.class == Class
          "#{target.name}##{method_name}"
        else
          "#{target.class.name}##{method_name}"
        end
-      rescue
-        class_name
+      rescue Psych::DisallowedClass, ArgumentError
+        parsed = Psych.parse(msg['args'].first)
+        children = parsed.root.children
+        target = (children[0].value || children[0].tag).sub('!', '')
+        method_name = (children[1].value || children[1].tag).sub(':', '')
+
+        if target && method_name
+          "#{target}##{method_name}"
+        else
+          class_name
+        end
      end
+    rescue
+      class_name
    end
  end
end
data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb (new file)

@@ -0,0 +1,58 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Instrumentation
+  class SidekiqProcess
+    def self.start(client: nil, frequency: 30)
+      client ||= PrometheusExporter::Client.default
+      sidekiq_process_collector = new
+
+      Thread.new do
+        loop do
+          begin
+            client.send_json(sidekiq_process_collector.collect)
+          rescue StandardError => e
+            STDERR.puts("Prometheus Exporter Failed To Collect Sidekiq Processes metrics #{e}")
+          ensure
+            sleep frequency
+          end
+        end
+      end
+    end
+
+    def initialize
+      @pid = ::Process.pid
+      @hostname = Socket.gethostname
+    end
+
+    def collect
+      {
+        type: 'sidekiq_process',
+        process: collect_stats
+      }
+    end
+
+    def collect_stats
+      process = current_process
+      return {} unless process
+
+      {
+        busy: process['busy'],
+        concurrency: process['concurrency'],
+        labels: {
+          labels: process['labels'].sort.join(','),
+          queues: process['queues'].sort.join(','),
+          quiet: process['quiet'],
+          tag: process['tag'],
+          hostname: process['hostname'],
+          identity: process['identity'],
+        }
+      }
+    end
+
+    def current_process
+      ::Sidekiq::ProcessSet.new.find do |sp|
+        sp['hostname'] == @hostname && sp['pid'] == @pid
+      end
+    end
+  end
+end
data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb

@@ -2,16 +2,16 @@
 
 module PrometheusExporter::Instrumentation
   class SidekiqQueue
-    def self.start(client: nil, frequency: 30)
+    def self.start(client: nil, frequency: 30, all_queues: false)
       client ||= PrometheusExporter::Client.default
-      sidekiq_queue_collector = new
+      sidekiq_queue_collector = new(all_queues: all_queues)
 
       Thread.new do
         loop do
           begin
             client.send_json(sidekiq_queue_collector.collect)
           rescue StandardError => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Sidekiq Queue metrics #{e}")
+            client.logger.error("Prometheus Exporter Failed To Collect Sidekiq Queue metrics #{e}")
           ensure
             sleep frequency
           end
@@ -19,6 +19,12 @@ module PrometheusExporter::Instrumentation
       end
     end
 
+    def initialize(all_queues: false)
+      @all_queues = all_queues
+      @pid = ::Process.pid
+      @hostname = Socket.gethostname
+    end
+
     def collect
       {
         type: 'sidekiq_queue',
@@ -27,24 +33,32 @@ module PrometheusExporter::Instrumentation
     end
 
     def collect_queue_stats
-      hostname = Socket.gethostname
-      pid = ::Process.pid
-      ps = ::Sidekiq::ProcessSet.new
+      sidekiq_queues = ::Sidekiq::Queue.all
 
-      process = ps.find do |sp|
-        sp['hostname'] == hostname && sp['pid'] == pid
+      unless @all_queues
+        queues = collect_current_process_queues
+        sidekiq_queues.select! { |sidekiq_queue| queues.include?(sidekiq_queue.name) }
       end
 
-      queues = process.nil? ? [] : process['queues']
-
-      ::Sidekiq::Queue.all.map do |queue|
-        next unless queues.include? queue.name
+      sidekiq_queues.map do |queue|
         {
-          backlog_total: queue.size,
+          backlog: queue.size,
           latency_seconds: queue.latency.to_i,
           labels: { queue: queue.name }
         }
       end.compact
     end
+
+    private
+
+    def collect_current_process_queues
+      ps = ::Sidekiq::ProcessSet.new
+
+      process = ps.find do |sp|
+        sp['hostname'] == @hostname && sp['pid'] == @pid
+      end
+
+      process.nil? ? [] : process['queues']
+    end
   end
 end
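With the new flag, one process can report every Sidekiq queue rather than only the queues it consumes:

```ruby
# all_queues: false (the default) keeps the old behaviour of reporting
# only the queues attached to the current process.
PrometheusExporter::Instrumentation::SidekiqQueue.start(all_queues: true)
```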
data/lib/prometheus_exporter/instrumentation/sidekiq_stats.rb (new file)

@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Instrumentation
+  class SidekiqStats
+    def self.start(client: nil, frequency: 30)
+      client ||= PrometheusExporter::Client.default
+      sidekiq_stats_collector = new
+
+      Thread.new do
+        loop do
+          begin
+            client.send_json(sidekiq_stats_collector.collect)
+          rescue StandardError => e
+            STDERR.puts("Prometheus Exporter Failed To Collect Sidekiq Stats metrics #{e}")
+          ensure
+            sleep frequency
+          end
+        end
+      end
+    end
+
+    def collect
+      {
+        type: 'sidekiq_stats',
+        stats: collect_stats
+      }
+    end
+
+    def collect_stats
+      stats = ::Sidekiq::Stats.new
+      {
+        'dead_size' => stats.dead_size,
+        'enqueued' => stats.enqueued,
+        'failed' => stats.failed,
+        'processed' => stats.processed,
+        'processes_size' => stats.processes_size,
+        'retry_size' => stats.retry_size,
+        'scheduled_size' => stats.scheduled_size,
+        'workers_size' => stats.workers_size,
+      }
+    end
+  end
+end
data/lib/prometheus_exporter/instrumentation/unicorn.rb

@@ -18,7 +18,7 @@ module PrometheusExporter::Instrumentation
            metric = unicorn_collector.collect
            client.send_json metric
          rescue StandardError => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Unicorn Stats #{e}")
+            client.logger.error("Prometheus Exporter Failed To Collect Unicorn Stats #{e}")
          ensure
            sleep frequency
          end
@@ -42,9 +42,9 @@ module PrometheusExporter::Instrumentation
     def collect_unicorn_stats(metric)
       stats = listener_address_stats
 
-      metric[:active_workers_total] = stats.active
-      metric[:request_backlog_total] = stats.queued
-      metric[:workers_total] = worker_process_count
+      metric[:active_workers] = stats.active
+      metric[:request_backlog] = stats.queued
+      metric[:workers] = worker_process_count
     end
 
     private
data/lib/prometheus_exporter/instrumentation.rb

@@ -5,6 +5,8 @@ require_relative "instrumentation/process"
 require_relative "instrumentation/method_profiler"
 require_relative "instrumentation/sidekiq"
 require_relative "instrumentation/sidekiq_queue"
+require_relative "instrumentation/sidekiq_process"
+require_relative "instrumentation/sidekiq_stats"
 require_relative "instrumentation/delayed_job"
 require_relative "instrumentation/puma"
 require_relative "instrumentation/hutch"
data/lib/prometheus_exporter/metric/base.rb

@@ -5,6 +5,7 @@ module PrometheusExporter::Metric
 
     @default_prefix = nil if !defined?(@default_prefix)
     @default_labels = nil if !defined?(@default_labels)
+    @default_aggregation = nil if !defined?(@default_aggregation)
 
     # prefix applied to all metrics
     def self.default_prefix=(name)
@@ -23,6 +24,14 @@ module PrometheusExporter::Metric
       @default_labels || {}
     end
 
+    def self.default_aggregation=(aggregation)
+      @default_aggregation = aggregation
+    end
+
+    def self.default_aggregation
+      @default_aggregation ||= Summary
+    end
+
     attr_accessor :help, :name, :data
 
     def initialize(name, help)
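Collectors that build their summaries through `Base.default_aggregation` (see the delayed_job collector below) can now be switched wholesale from summaries to histograms:

```ruby
# Must be set before the server/collectors create their metrics;
# defaults to PrometheusExporter::Metric::Summary per the diff above.
PrometheusExporter::Metric::Base.default_aggregation = PrometheusExporter::Metric::Histogram
```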
@@ -97,15 +106,8 @@ module PrometheusExporter::Metric
       end
     end
 
-    # when we drop Ruby 2.3 we can drop this
-    if "".respond_to?(:match?)
-      def needs_escape?(str)
-        str.match?(/[\n"\\]/m)
-      end
-    else
-      def needs_escape?(str)
-        !!str.match(/[\n"\\]/m)
-      end
+    def needs_escape?(str)
+      str.match?(/[\n"\\]/m)
     end
 
   end
data/lib/prometheus_exporter/metric/histogram.rb

@@ -5,9 +5,21 @@ module PrometheusExporter::Metric
 
     DEFAULT_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5.0, 10.0].freeze
 
+    @default_buckets = nil if !defined?(@default_buckets)
+
+    def self.default_buckets
+      @default_buckets || DEFAULT_BUCKETS
+    end
+
+    def self.default_buckets=(buckets)
+      @default_buckets = buckets
+    end
+
+    attr_reader :buckets
+
     def initialize(name, help, opts = {})
       super(name, help)
-      @buckets = (opts[:buckets] || DEFAULT_BUCKETS).sort.reverse
+      @buckets = (opts[:buckets] || self.class.default_buckets).sort.reverse
       reset!
     end
 
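Bucket boundaries can now be overridden globally instead of per metric:

```ruby
# Applies to any Histogram created without an explicit :buckets option;
# an explicit opts[:buckets] still takes precedence, per initialize above.
PrometheusExporter::Metric::Histogram.default_buckets = [0.1, 0.5, 1.0, 5.0, 10.0]
```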
data/lib/prometheus_exporter/middleware.rb

@@ -36,11 +36,12 @@ class PrometheusExporter::Middleware
 
     result
   ensure
-
+    status = (result && result[0]) || -1
     obj = {
       type: "web",
       timings: info,
       queue_time: queue_time,
+      status: status,
       default_labels: default_labels(env, result)
     }
     labels = custom_labels(env)
@@ -52,18 +53,21 @@ class PrometheusExporter::Middleware
   end
 
   def default_labels(env, result)
-    status = (result && result[0]) || -1
     params = env["action_dispatch.request.parameters"]
     action = controller = nil
     if params
       action = params["action"]
       controller = params["controller"]
+    elsif (cors = env["rack.cors"]) && cors.respond_to?(:preflight?) && cors.preflight?
+      # if the Rack CORS Middleware identifies the request as a preflight request,
+      # the stack doesn't get to the point where controllers/actions are defined
+      action = "preflight"
+      controller = "preflight"
     end
 
     {
       action: action || "other",
-      controller: controller || "other",
-      status: status
+      controller: controller || "other"
     }
   end
 
data/lib/prometheus_exporter/server/collector.rb

@@ -14,6 +14,8 @@ module PrometheusExporter::Server
       register_collector(ProcessCollector.new)
       register_collector(SidekiqCollector.new)
       register_collector(SidekiqQueueCollector.new)
+      register_collector(SidekiqProcessCollector.new)
+      register_collector(SidekiqStatsCollector.new)
       register_collector(DelayedJobCollector.new)
       register_collector(PumaCollector.new)
       register_collector(HutchCollector.new)
data/lib/prometheus_exporter/server/delayed_job_collector.rb

@@ -19,22 +19,21 @@ module PrometheusExporter::Server
     end
 
     def collect(obj)
-      default_labels = { job_name: obj['name'] }
-      custom_labels = obj['custom_labels']
-
-      labels = custom_labels.nil? ? default_labels : default_labels.merge(custom_labels)
+      custom_labels = obj['custom_labels'] || {}
+      gauge_labels = { queue_name: obj['queue_name'] }.merge(custom_labels)
+      counter_labels = gauge_labels.merge(job_name: obj['name'])
 
       ensure_delayed_job_metrics
-      @delayed_job_duration_seconds.observe(obj["duration"], labels)
-      @delayed_jobs_total.observe(1, labels)
-      @delayed_failed_jobs_total.observe(1, labels) if !obj["success"]
-      @delayed_jobs_max_attempts_reached_total.observe(1, labels) if obj["attempts"] >= obj["max_attempts"]
-      @delayed_job_duration_seconds_summary.observe(obj["duration"], labels)
-      @delayed_job_duration_seconds_summary.observe(obj["duration"], labels.merge(status: "success")) if obj["success"]
-      @delayed_job_duration_seconds_summary.observe(obj["duration"], labels.merge(status: "failed")) if !obj["success"]
-      @delayed_job_attempts_summary.observe(obj["attempts"], labels) if obj["success"]
-      @delayed_jobs_enqueued.observe(obj["enqueued"], labels)
-      @delayed_jobs_pending.observe(obj["pending"], labels)
+      @delayed_job_duration_seconds.observe(obj["duration"], counter_labels)
+      @delayed_jobs_total.observe(1, counter_labels)
+      @delayed_failed_jobs_total.observe(1, counter_labels) if !obj["success"]
+      @delayed_jobs_max_attempts_reached_total.observe(1, counter_labels) if obj["attempts"] >= obj["max_attempts"]
+      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels)
+      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels.merge(status: "success")) if obj["success"]
+      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels.merge(status: "failed")) if !obj["success"]
+      @delayed_job_attempts_summary.observe(obj["attempts"], counter_labels) if obj["success"]
+      @delayed_jobs_enqueued.observe(obj["enqueued"], gauge_labels)
+      @delayed_jobs_pending.observe(obj["pending"], gauge_labels)
     end
 
     def metrics
@@ -77,12 +76,12 @@
           "delayed_jobs_max_attempts_reached_total", "Total number of delayed jobs that reached max attempts.")
 
       @delayed_job_duration_seconds_summary =
-        PrometheusExporter::Metric::Summary.new("delayed_job_duration_seconds_summary",
-                                                "Summary of the time it takes jobs to execute.")
+        PrometheusExporter::Metric::Base.default_aggregation.new("delayed_job_duration_seconds_summary",
+                                                                 "Summary of the time it takes jobs to execute.")
 
       @delayed_job_attempts_summary =
-        PrometheusExporter::Metric::Summary.new("delayed_job_attempts_summary",
-                                                "Summary of the amount of attempts it takes delayed jobs to succeed.")
+        PrometheusExporter::Metric::Base.default_aggregation.new("delayed_job_attempts_summary",
+                                                                 "Summary of the amount of attempts it takes delayed jobs to succeed.")
     end
   end
 end