prometheus_exporter 0.7.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci.yml +82 -25
  3. data/Appraisals +7 -3
  4. data/CHANGELOG +104 -24
  5. data/Dockerfile +9 -0
  6. data/README.md +258 -51
  7. data/bin/prometheus_exporter +19 -6
  8. data/examples/custom_collector.rb +1 -1
  9. data/gemfiles/ar_70.gemfile +5 -0
  10. data/lib/prometheus_exporter/client.rb +48 -23
  11. data/lib/prometheus_exporter/instrumentation/active_record.rb +11 -29
  12. data/lib/prometheus_exporter/instrumentation/delayed_job.rb +5 -2
  13. data/lib/prometheus_exporter/instrumentation/good_job.rb +30 -0
  14. data/lib/prometheus_exporter/instrumentation/method_profiler.rb +63 -23
  15. data/lib/prometheus_exporter/instrumentation/periodic_stats.rb +62 -0
  16. data/lib/prometheus_exporter/instrumentation/process.rb +5 -21
  17. data/lib/prometheus_exporter/instrumentation/puma.rb +34 -27
  18. data/lib/prometheus_exporter/instrumentation/resque.rb +35 -0
  19. data/lib/prometheus_exporter/instrumentation/sidekiq.rb +53 -23
  20. data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb +52 -0
  21. data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb +32 -24
  22. data/lib/prometheus_exporter/instrumentation/sidekiq_stats.rb +37 -0
  23. data/lib/prometheus_exporter/instrumentation/unicorn.rb +10 -15
  24. data/lib/prometheus_exporter/instrumentation.rb +5 -0
  25. data/lib/prometheus_exporter/metric/base.rb +12 -10
  26. data/lib/prometheus_exporter/metric/gauge.rb +4 -0
  27. data/lib/prometheus_exporter/metric/histogram.rb +15 -3
  28. data/lib/prometheus_exporter/middleware.rb +45 -19
  29. data/lib/prometheus_exporter/server/active_record_collector.rb +9 -12
  30. data/lib/prometheus_exporter/server/collector.rb +4 -0
  31. data/lib/prometheus_exporter/server/delayed_job_collector.rb +24 -18
  32. data/lib/prometheus_exporter/server/good_job_collector.rb +52 -0
  33. data/lib/prometheus_exporter/server/metrics_container.rb +66 -0
  34. data/lib/prometheus_exporter/server/process_collector.rb +8 -13
  35. data/lib/prometheus_exporter/server/puma_collector.rb +14 -12
  36. data/lib/prometheus_exporter/server/resque_collector.rb +50 -0
  37. data/lib/prometheus_exporter/server/runner.rb +14 -3
  38. data/lib/prometheus_exporter/server/sidekiq_collector.rb +1 -1
  39. data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +43 -0
  40. data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb +6 -7
  41. data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb +48 -0
  42. data/lib/prometheus_exporter/server/type_collector.rb +2 -0
  43. data/lib/prometheus_exporter/server/unicorn_collector.rb +32 -33
  44. data/lib/prometheus_exporter/server/web_collector.rb +17 -17
  45. data/lib/prometheus_exporter/server/web_server.rb +72 -41
  46. data/lib/prometheus_exporter/server.rb +4 -0
  47. data/lib/prometheus_exporter/version.rb +1 -1
  48. data/lib/prometheus_exporter.rb +12 -13
  49. data/prometheus_exporter.gemspec +6 -6
  50. metadata +53 -14
data/lib/prometheus_exporter/client.rb

@@ -1,7 +1,7 @@
 # frozen_string_literal: true

-require 'socket'
-require 'thread'
+require "socket"
+require "logger"

 module PrometheusExporter
   class Client
@@ -24,7 +24,9 @@ module PrometheusExporter
         keys: keys,
         value: value
       }
-      values[:prometheus_exporter_action] = prometheus_exporter_action if prometheus_exporter_action
+      values[
+        :prometheus_exporter_action
+      ] = prometheus_exporter_action if prometheus_exporter_action
       values[:opts] = @opts if @opts
       values
     end
@@ -53,14 +55,23 @@ module PrometheusExporter
     MAX_SOCKET_AGE = 25
     MAX_QUEUE_SIZE = 10_000

+    attr_reader :logger
+
     def initialize(
-      host: ENV.fetch('PROMETHEUS_EXPORTER_HOST', 'localhost'),
-      port: ENV.fetch('PROMETHEUS_EXPORTER_PORT', PrometheusExporter::DEFAULT_PORT),
+      host: ENV.fetch("PROMETHEUS_EXPORTER_HOST", "localhost"),
+      port: ENV.fetch(
+        "PROMETHEUS_EXPORTER_PORT",
+        PrometheusExporter::DEFAULT_PORT
+      ),
       max_queue_size: nil,
       thread_sleep: 0.5,
       json_serializer: nil,
-      custom_labels: nil
+      custom_labels: nil,
+      logger: Logger.new(STDERR),
+      log_level: Logger::WARN
     )
+      @logger = logger
+      @logger.level = log_level
       @metrics = []

       @queue = Queue.new
@@ -72,7 +83,7 @@ module PrometheusExporter
       max_queue_size ||= MAX_QUEUE_SIZE
       max_queue_size = max_queue_size.to_i

-      if max_queue_size.to_i <= 0
+      if max_queue_size <= 0
         raise ArgumentError, "max_queue_size must be larger than 0"
       end

@@ -83,7 +94,8 @@ module PrometheusExporter
       @mutex = Mutex.new
       @thread_sleep = thread_sleep

-      @json_serializer = json_serializer == :oj ? PrometheusExporter::OjCompat : JSON
+      @json_serializer =
+        json_serializer == :oj ? PrometheusExporter::OjCompat : JSON

       @custom_labels = custom_labels
     end
@@ -93,7 +105,14 @@ module PrometheusExporter
     end

     def register(type, name, help, opts = nil)
-      metric = RemoteMetric.new(type: type, name: name, help: help, client: self, opts: opts)
+      metric =
+        RemoteMetric.new(
+          type: type,
+          name: name,
+          help: help,
+          client: self,
+          opts: opts
+        )
       @metrics << metric
       metric
     end
@@ -125,7 +144,7 @@ module PrometheusExporter
     def send(str)
       @queue << str
       if @queue.length > @max_queue_size
-        STDERR.puts "Prometheus Exporter client is dropping message cause queue is full"
+        logger.warn "Prometheus Exporter client is dropping message cause queue is full"
         @queue.pop
       end

@@ -143,7 +162,7 @@ module PrometheusExporter
       @socket.write(message)
       @socket.write("\r\n")
     rescue => e
-      STDERR.puts "Prometheus Exporter is dropping a message: #{e}"
+      logger.warn "Prometheus Exporter is dropping a message: #{e}"
       @socket = nil
       raise
     end
@@ -154,9 +173,7 @@ module PrometheusExporter
       @mutex.synchronize do
         wait_for_empty_queue_with_timeout(wait_timeout_seconds)
        @worker_thread&.kill
-        while @worker_thread&.alive?
-          sleep 0.001
-        end
+        sleep 0.001 while @worker_thread&.alive?
        @worker_thread = nil
        close_socket!
      end
@@ -168,7 +185,7 @@ module PrometheusExporter
       close_socket_if_old!
       process_queue
     rescue => e
-      STDERR.puts "Prometheus Exporter, failed to send message #{e}"
+      logger.error "Prometheus Exporter, failed to send message #{e}"
     end

     def ensure_worker_thread!
@@ -176,14 +193,18 @@ module PrometheusExporter
       @mutex.synchronize do
         return if @worker_thread&.alive?

-        @worker_thread = Thread.new do
-          while true
-            worker_loop
-            sleep @thread_sleep
+        @worker_thread =
+          Thread.new do
+            while true
+              worker_loop
+              sleep @thread_sleep
+            end
           end
-        end
       end
     end
+  rescue ThreadError => e
+    raise unless e.message =~ /can't alloc thread/
+    logger.error "Prometheus Exporter, failed to send message ThreadError #{e}"
   end

   def close_socket!
@@ -202,7 +223,8 @@ module PrometheusExporter
     end

     def close_socket_if_old!
-      if @socket_pid == Process.pid && @socket && @socket_started && ((@socket_started + MAX_SOCKET_AGE) < Time.now.to_f)
+      if @socket_pid == Process.pid && @socket && @socket_started &&
+           ((@socket_started + MAX_SOCKET_AGE) < Time.now.to_f)
         close_socket!
       end
     end
@@ -230,7 +252,7 @@ module PrometheusExporter
       end

       nil
-    rescue
+    rescue StandardError
       @socket = nil
       @socket_started = nil
       @socket_pid = nil
@@ -240,7 +262,10 @@ module PrometheusExporter
     def wait_for_empty_queue_with_timeout(timeout_seconds)
       start_time = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
       while @queue.length > 0
-        break if start_time + timeout_seconds < ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+        if start_time + timeout_seconds <
+             ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+          break
+        end
         sleep(0.05)
       end
     end
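The client now owns a configurable `Logger` (defaulting to `STDERR` at `WARN` level), and the former `STDERR.puts` calls route through it. A minimal sketch of wiring it up, assuming a running exporter server on the default port; the log path and level here are illustrative:

```ruby
require "prometheus_exporter/client"

# Send client diagnostics to a file at DEBUG level instead of STDERR at WARN.
client = PrometheusExporter::Client.new(
  logger: Logger.new("log/prometheus_exporter.log"), # illustrative path
  log_level: Logger::DEBUG
)

# Metrics registered on the client are forwarded to the exporter server;
# queue overflows and socket errors now show up in the log file above.
counter = client.register(:counter, "page_views", "Total page views")
counter.observe(1, route: "home")
```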
data/lib/prometheus_exporter/instrumentation/active_record.rb

@@ -2,14 +2,15 @@

 # collects stats from currently running process
 module PrometheusExporter::Instrumentation
-  class ActiveRecord
+  class ActiveRecord < PeriodicStats
     ALLOWED_CONFIG_LABELS = %i(database username host port)

     def self.start(client: nil, frequency: 30, custom_labels: {}, config_labels: [])
+      client ||= PrometheusExporter::Client.default

-      # Not all rails versions support coonection pool stats
+      # Not all rails versions support connection pool stats
       unless ::ActiveRecord::Base.connection_pool.respond_to?(:stat)
-        STDERR.puts("ActiveRecord connection pool stats not supported in your rails version")
+        client.logger.error("ActiveRecord connection pool stats not supported in your rails version")
         return
       end

@@ -18,22 +19,12 @@ module PrometheusExporter::Instrumentation

       active_record_collector = new(custom_labels, config_labels)

-      client ||= PrometheusExporter::Client.default
-
-      stop if @thread
-
-      @thread = Thread.new do
-        while true
-          begin
-            metrics = active_record_collector.collect
-            metrics.each { |metric| client.send_json metric }
-          rescue => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Process Stats #{e}")
-          ensure
-            sleep frequency
-          end
-        end
+      worker_loop do
+        metrics = active_record_collector.collect
+        metrics.each { |metric| client.send_json metric }
       end
+
+      super
     end

     def self.validate_config_labels(config_labels)
@@ -41,13 +32,6 @@ module PrometheusExporter::Instrumentation
       raise "Invalid Config Labels, available options #{ALLOWED_CONFIG_LABELS}" if (config_labels - ALLOWED_CONFIG_LABELS).size > 0
     end

-    def self.stop
-      if t = @thread
-        t.kill
-        @thread = nil
-      end
-    end
-
     def initialize(metric_labels, config_labels)
       @metric_labels = metric_labels
       @config_labels = config_labels
@@ -81,15 +65,13 @@ module PrometheusExporter::Instrumentation
     private

     def labels(pool)
-      if pool.respond_to?(:spec) # ActiveRecord <= 6.0
+      if ::ActiveRecord.version < Gem::Version.new("6.1.0.rc1")
         @metric_labels.merge(pool_name: pool.spec.name).merge(pool.spec.config
           .select { |k, v| @config_labels.include? k }
           .map { |k, v| [k.to_s.dup.prepend("dbconfig_"), v] }.to_h)
-      elsif pool.respond_to?(:db_config) # ActiveRecord >= 6.1.rc1
+      else
         @metric_labels.merge(pool_name: pool.db_config.name).merge(
           @config_labels.each_with_object({}) { |l, acc| acc["dbconfig_#{l}"] = pool.db_config.public_send(l) })
-      else
-        raise "Unsupported connection pool"
       end
     end
   end
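Usage is unchanged apart from the lifecycle now being inherited from `PeriodicStats`; a sketch of starting the collector, e.g. from a Rails initializer (label values are illustrative):

```ruby
require "prometheus_exporter/instrumentation"

# Reports connection pool stats every 30s, tagged with the pool's database
# and host config values plus a custom type label.
PrometheusExporter::Instrumentation::ActiveRecord.start(
  custom_labels: { type: "puma_worker" }, # illustrative label
  config_labels: [:database, :host]
)
```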
data/lib/prometheus_exporter/instrumentation/delayed_job.rb

@@ -13,8 +13,8 @@ module PrometheusExporter::Instrumentation
     callbacks do |lifecycle|
       lifecycle.around(:invoke_job) do |job, *args, &block|
         max_attempts = Delayed::Worker.max_attempts
-        enqueued_count = Delayed::Job.count
-        pending_count = Delayed::Job.where(attempts: 0, locked_at: nil).count
+        enqueued_count = Delayed::Job.where(queue: job.queue).count
+        pending_count = Delayed::Job.where(attempts: 0, locked_at: nil, queue: job.queue).count
         instrumenter.call(job, max_attempts, enqueued_count, pending_count, *args, &block)
       end
     end
@@ -31,6 +31,7 @@ module PrometheusExporter::Instrumentation
     def call(job, max_attempts, enqueued_count, pending_count, *args, &block)
       success = false
       start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      latency = Time.current - job.run_at
       attempts = job.attempts + 1 # Increment because we're adding the current attempt
       result = block.call(job, *args)
       success = true
@@ -41,8 +42,10 @@ module PrometheusExporter::Instrumentation
       @client.send_json(
         type: "delayed_job",
         name: job.handler.to_s.match(JOB_CLASS_REGEXP).to_a[1].to_s,
+        queue_name: job.queue,
         success: success,
         duration: duration,
+        latency: latency,
         attempts: attempts,
         max_attempts: max_attempts,
         enqueued: enqueued_count,
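With these changes the `enqueued`/`pending` counts are scoped to the job's queue and each payload carries `queue_name` and `latency`. The instrumenter is installed the same way as before, via the `register_plugin` hook whose callbacks block is shown above; a sketch, assuming a Delayed::Job worker process:

```ruby
# e.g. in an initializer that runs inside Delayed::Job worker processes
require "prometheus_exporter/instrumentation"

PrometheusExporter::Instrumentation::DelayedJob.register_plugin(
  client: PrometheusExporter::Client.default
)
```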
data/lib/prometheus_exporter/instrumentation/good_job.rb (new file)

@@ -0,0 +1,30 @@
+# frozen_string_literal: true
+
+# collects stats from GoodJob
+module PrometheusExporter::Instrumentation
+  class GoodJob < PeriodicStats
+    def self.start(client: nil, frequency: 30)
+      good_job_collector = new
+      client ||= PrometheusExporter::Client.default
+
+      worker_loop do
+        client.send_json(good_job_collector.collect)
+      end
+
+      super
+    end
+
+    def collect
+      {
+        type: "good_job",
+        scheduled: ::GoodJob::Job.scheduled.size,
+        retried: ::GoodJob::Job.retried.size,
+        queued: ::GoodJob::Job.queued.size,
+        running: ::GoodJob::Job.running.size,
+        finished: ::GoodJob::Job.finished.size,
+        succeeded: ::GoodJob::Job.succeeded.size,
+        discarded: ::GoodJob::Job.discarded.size
+      }
+    end
+  end
+end
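A sketch of starting the new GoodJob collector; it assumes the `good_job` gem is already loaded, e.g. in a Rails initializer:

```ruby
require "prometheus_exporter/instrumentation"

# Polls the GoodJob::Job scopes every 30s and ships one "good_job" payload
# with scheduled/retried/queued/running/finished/succeeded/discarded counts.
PrometheusExporter::Instrumentation::GoodJob.start(frequency: 30)
```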
data/lib/prometheus_exporter/instrumentation/method_profiler.rb

@@ -4,29 +4,14 @@
 module PrometheusExporter::Instrumentation; end

 class PrometheusExporter::Instrumentation::MethodProfiler
-  def self.patch(klass, methods, name)
-    patches = methods.map do |method_name|
-      <<~RUBY
-      unless defined?(#{method_name}__mp_unpatched)
-        alias_method :#{method_name}__mp_unpatched, :#{method_name}
-        def #{method_name}(*args, &blk)
-          unless prof = Thread.current[:_method_profiler]
-            return #{method_name}__mp_unpatched(*args, &blk)
-          end
-          begin
-            start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
-            #{method_name}__mp_unpatched(*args, &blk)
-          ensure
-            data = (prof[:#{name}] ||= {duration: 0.0, calls: 0})
-            data[:duration] += Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
-            data[:calls] += 1
-          end
-        end
-      end
-      RUBY
-    end.join("\n")
-
-    klass.class_eval patches
+  def self.patch(klass, methods, name, instrument:)
+    if instrument == :alias_method
+      patch_using_alias_method(klass, methods, name)
+    elsif instrument == :prepend
+      patch_using_prepend(klass, methods, name)
+    else
+      raise ArgumentError, "instrument must be :alias_method or :prepend"
+    end
   end

   def self.transfer
@@ -54,4 +39,59 @@ class PrometheusExporter::Instrumentation::MethodProfiler
     end
     data
   end
+
+  def self.define_methods_on_module(klass, methods, name)
+    patch_source_line = __LINE__ + 3
+    patches = methods.map do |method_name|
+      <<~RUBY
+      def #{method_name}(*args, &blk)
+        unless prof = Thread.current[:_method_profiler]
+          return super
+        end
+        begin
+          start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+          super
+        ensure
+          data = (prof[:#{name}] ||= {duration: 0.0, calls: 0})
+          data[:duration] += Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
+          data[:calls] += 1
+        end
+      end
+      RUBY
+    end.join("\n")
+
+    klass.module_eval patches, __FILE__, patch_source_line
+  end
+
+  def self.patch_using_prepend(klass, methods, name)
+    prepend_instrument = Module.new
+    define_methods_on_module(prepend_instrument, methods, name)
+    klass.prepend(prepend_instrument)
+  end
+
+  def self.patch_using_alias_method(klass, methods, name)
+    patch_source_line = __LINE__ + 3
+    patches = methods.map do |method_name|
+      <<~RUBY
+      unless defined?(#{method_name}__mp_unpatched)
+        alias_method :#{method_name}__mp_unpatched, :#{method_name}
+        def #{method_name}(*args, &blk)
+          unless prof = Thread.current[:_method_profiler]
+            return #{method_name}__mp_unpatched(*args, &blk)
+          end
+          begin
+            start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+            #{method_name}__mp_unpatched(*args, &blk)
+          ensure
+            data = (prof[:#{name}] ||= {duration: 0.0, calls: 0})
+            data[:duration] += Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
+            data[:calls] += 1
+          end
+        end
+      end
+      RUBY
+    end.join("\n")
+
+    klass.class_eval patches, __FILE__, patch_source_line
+  end
 end
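Callers must now choose an instrumentation strategy through the required `instrument:` keyword. A sketch with a stand-in class; `ImageOptimizer` is illustrative, and the `start`/`stop` bracketing assumes those class methods behave as in earlier releases (timings are only recorded while `Thread.current[:_method_profiler]` is set):

```ruby
require "prometheus_exporter/instrumentation"

class ImageOptimizer # stand-in class for illustration
  def optimize
    sleep 0.01 # placeholder for real work
  end
end

# :prepend wraps the method via a prepended module; :alias_method keeps the
# old rename-based monkey patch for code that depends on it.
PrometheusExporter::Instrumentation::MethodProfiler.patch(
  ImageOptimizer, [:optimize], "image_optimizer", instrument: :prepend
)

PrometheusExporter::Instrumentation::MethodProfiler.start
ImageOptimizer.new.optimize
timings = PrometheusExporter::Instrumentation::MethodProfiler.stop
# timings[:image_optimizer] => { duration: 0.01..., calls: 1 }
```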
data/lib/prometheus_exporter/instrumentation/periodic_stats.rb (new file)

@@ -0,0 +1,62 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Instrumentation
+  class PeriodicStats
+
+    def self.start(*args, frequency:, client: nil, **kwargs)
+      client ||= PrometheusExporter::Client.default
+
+      if !(Numeric === frequency)
+        raise ArgumentError.new("Expected frequency to be a number")
+      end
+
+      if frequency < 0
+        raise ArgumentError.new("Expected frequency to be a positive number")
+      end
+
+      if !@worker_loop
+        raise ArgumentError.new("Worker loop was not set")
+      end
+
+      klass = self
+
+      stop
+
+      @stop_thread = false
+
+      @thread = Thread.new do
+        while !@stop_thread
+          begin
+            @worker_loop.call
+          rescue => e
+            client.logger.error("#{klass} Prometheus Exporter Failed To Collect Stats #{e}")
+          ensure
+            sleep frequency
+          end
+        end
+      end
+
+    end
+
+    def self.started?
+      !!@thread&.alive?
+    end
+
+    def self.worker_loop(&blk)
+      @worker_loop = blk
+    end
+
+    def self.stop
+      # to avoid a warning
+      @thread = nil if !defined?(@thread)
+
+      if @thread&.alive?
+        @stop_thread = true
+        @thread.wakeup
+        @thread.join
+      end
+      @thread = nil
+    end
+
+  end
+end
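`PeriodicStats` is the base class the per-gem collectors above now share: `worker_loop` stores the polling block, `start` (reached via `super`) validates `frequency` and runs the block on a supervised thread, and `stop` joins the thread cleanly rather than killing it. A hypothetical custom collector built on the same pattern; `QueueDepth` and `MyQueue` are illustrative names, not part of the gem:

```ruby
module PrometheusExporter::Instrumentation
  class QueueDepth < PeriodicStats # hypothetical collector
    def self.start(client: nil, frequency: 30)
      client ||= PrometheusExporter::Client.default

      worker_loop do
        # MyQueue is a stand-in for whatever you want to measure
        client.send_json(type: "queue_depth", depth: MyQueue.depth)
      end

      super # validates frequency and spawns the polling thread
    end
  end
end
```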
data/lib/prometheus_exporter/instrumentation/process.rb

@@ -2,8 +2,7 @@

 # collects stats from currently running process
 module PrometheusExporter::Instrumentation
-  class Process
-    @thread = nil if !defined?(@thread)
+  class Process < PeriodicStats

     def self.start(client: nil, type: "ruby", frequency: 30, labels: nil)

@@ -19,27 +18,12 @@ module PrometheusExporter::Instrumentation
       process_collector = new(metric_labels)
       client ||= PrometheusExporter::Client.default

-      stop if @thread
-
-      @thread = Thread.new do
-        while true
-          begin
-            metric = process_collector.collect
-            client.send_json metric
-          rescue => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Process Stats #{e}")
-          ensure
-            sleep frequency
-          end
-        end
+      worker_loop do
+        metric = process_collector.collect
+        client.send_json metric
       end
-    end

-    def self.stop
-      if t = @thread
-        t.kill
-        @thread = nil
-      end
+      super
     end

     def initialize(metric_labels)
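Usage is unchanged; a sketch of starting the process collector (the `type` value is whatever distinguishes this process in your dashboards):

```ruby
require "prometheus_exporter/instrumentation"

# Report stats for the current process every 30s, labelled type="web".
PrometheusExporter::Instrumentation::Process.start(type: "web")
```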
data/lib/prometheus_exporter/instrumentation/puma.rb

@@ -4,39 +4,46 @@ require "json"

 # collects stats from puma
 module PrometheusExporter::Instrumentation
-  class Puma
-    def self.start(client: nil, frequency: 30)
-      puma_collector = new
+  class Puma < PeriodicStats
+    def self.start(client: nil, frequency: 30, labels: {})
+      puma_collector = new(labels)
       client ||= PrometheusExporter::Client.default
-      Thread.new do
-        while true
-          begin
-            metric = puma_collector.collect
-            client.send_json metric
-          rescue => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Puma Stats #{e}")
-          ensure
-            sleep frequency
-          end
-        end
+
+      worker_loop do
+        metric = puma_collector.collect
+        client.send_json metric
       end
+
+      super
+    end
+
+    def initialize(metric_labels = {})
+      @metric_labels = metric_labels
     end

     def collect
-      metric = {}
-      metric[:type] = "puma"
+      metric = {
+        pid: pid,
+        type: "puma",
+        hostname: ::PrometheusExporter.hostname,
+        metric_labels: @metric_labels
+      }
       collect_puma_stats(metric)
       metric
     end

+    def pid
+      @pid = ::Process.pid
+    end
+
     def collect_puma_stats(metric)
       stats = JSON.parse(::Puma.stats)

       if stats.key?("workers")
         metric[:phase] = stats["phase"]
-        metric[:workers_total] = stats["workers"]
-        metric[:booted_workers_total] = stats["booted_workers"]
-        metric[:old_workers_total] = stats["old_workers"]
+        metric[:workers] = stats["workers"]
+        metric[:booted_workers] = stats["booted_workers"]
+        metric[:old_workers] = stats["old_workers"]

         stats["worker_status"].each do |worker|
           next if worker["last_status"].empty?
@@ -50,15 +57,15 @@ module PrometheusExporter::Instrumentation
     private

     def collect_worker_status(metric, status)
-      metric[:request_backlog_total] ||= 0
-      metric[:running_threads_total] ||= 0
-      metric[:thread_pool_capacity_total] ||= 0
-      metric[:max_threads_total] ||= 0
+      metric[:request_backlog] ||= 0
+      metric[:running_threads] ||= 0
+      metric[:thread_pool_capacity] ||= 0
+      metric[:max_threads] ||= 0

-      metric[:request_backlog_total] += status["backlog"]
-      metric[:running_threads_total] += status["running"]
-      metric[:thread_pool_capacity_total] += status["pool_capacity"]
-      metric[:max_threads_total] += status["max_threads"]
+      metric[:request_backlog] += status["backlog"]
+      metric[:running_threads] += status["running"]
+      metric[:thread_pool_capacity] += status["pool_capacity"]
+      metric[:max_threads] += status["max_threads"]
     end
   end
 end
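Besides the `_total` suffixes being dropped from the metric keys, the collector now accepts custom labels and reports `pid` and `hostname`. A sketch for a Puma config file; the label value is illustrative:

```ruby
# in config/puma.rb
require "prometheus_exporter/instrumentation"

PrometheusExporter::Instrumentation::Puma.start(
  labels: { environment: "production" } # illustrative label
)
```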
data/lib/prometheus_exporter/instrumentation/resque.rb (new file)

@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+# collects stats from resque
+module PrometheusExporter::Instrumentation
+  class Resque < PeriodicStats
+    def self.start(client: nil, frequency: 30)
+      resque_collector = new
+      client ||= PrometheusExporter::Client.default
+
+      worker_loop do
+        client.send_json(resque_collector.collect)
+      end
+
+      super
+    end
+
+    def collect
+      metric = {}
+      metric[:type] = "resque"
+      collect_resque_stats(metric)
+      metric
+    end
+
+    def collect_resque_stats(metric)
+      info = ::Resque.info
+
+      metric[:processed_jobs] = info[:processed]
+      metric[:failed_jobs] = info[:failed]
+      metric[:pending_jobs] = info[:pending]
+      metric[:queues] = info[:queues]
+      metric[:worker] = info[:workers]
+      metric[:working] = info[:working]
+    end
+  end
+end
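A sketch of starting the new Resque collector from any process that can reach the Resque Redis instance:

```ruby
require "prometheus_exporter/instrumentation"

# Polls Resque.info every 30s and ships processed/failed/pending job counts
# plus queue and worker totals as a "resque" payload.
PrometheusExporter::Instrumentation::Resque.start(frequency: 30)
```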