prometheus_exporter 0.6.0 → 1.0.0

Files changed (41)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci.yml +17 -4
  3. data/.gitignore +2 -0
  4. data/.rubocop.yml +5 -0
  5. data/Appraisals +10 -0
  6. data/CHANGELOG +33 -1
  7. data/README.md +115 -18
  8. data/bin/prometheus_exporter +17 -4
  9. data/gemfiles/.bundle/config +2 -0
  10. data/gemfiles/ar_60.gemfile +5 -0
  11. data/gemfiles/ar_61.gemfile +7 -0
  12. data/lib/prometheus_exporter/client.rb +16 -6
  13. data/lib/prometheus_exporter/instrumentation/active_record.rb +19 -12
  14. data/lib/prometheus_exporter/instrumentation/delayed_job.rb +3 -2
  15. data/lib/prometheus_exporter/instrumentation/method_profiler.rb +2 -1
  16. data/lib/prometheus_exporter/instrumentation/process.rb +1 -1
  17. data/lib/prometheus_exporter/instrumentation/puma.rb +28 -16
  18. data/lib/prometheus_exporter/instrumentation/resque.rb +40 -0
  19. data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb +58 -0
  20. data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb +27 -13
  21. data/lib/prometheus_exporter/instrumentation/unicorn.rb +4 -4
  22. data/lib/prometheus_exporter/instrumentation.rb +2 -0
  23. data/lib/prometheus_exporter/metric/base.rb +9 -0
  24. data/lib/prometheus_exporter/metric/gauge.rb +4 -0
  25. data/lib/prometheus_exporter/middleware.rb +31 -19
  26. data/lib/prometheus_exporter/server/active_record_collector.rb +2 -1
  27. data/lib/prometheus_exporter/server/collector.rb +2 -0
  28. data/lib/prometheus_exporter/server/delayed_job_collector.rb +17 -17
  29. data/lib/prometheus_exporter/server/puma_collector.rb +16 -8
  30. data/lib/prometheus_exporter/server/resque_collector.rb +54 -0
  31. data/lib/prometheus_exporter/server/runner.rb +11 -2
  32. data/lib/prometheus_exporter/server/sidekiq_collector.rb +1 -1
  33. data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +46 -0
  34. data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb +1 -1
  35. data/lib/prometheus_exporter/server/unicorn_collector.rb +3 -3
  36. data/lib/prometheus_exporter/server/web_collector.rb +6 -9
  37. data/lib/prometheus_exporter/server/web_server.rb +6 -8
  38. data/lib/prometheus_exporter/server.rb +2 -0
  39. data/lib/prometheus_exporter/version.rb +1 -1
  40. data/prometheus_exporter.gemspec +7 -3
  41. metadata +62 -12
@@ -5,8 +5,8 @@ require "json"
 # collects stats from puma
 module PrometheusExporter::Instrumentation
   class Puma
-    def self.start(client: nil, frequency: 30)
-      puma_collector = new
+    def self.start(client: nil, frequency: 30, labels: {})
+      puma_collector = new(labels)
       client ||= PrometheusExporter::Client.default
       Thread.new do
         while true
@@ -14,7 +14,7 @@ module PrometheusExporter::Instrumentation
             metric = puma_collector.collect
             client.send_json metric
           rescue => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Puma Stats #{e}")
+            client.logger.error("Prometheus Exporter Failed To Collect Puma Stats #{e}")
           ensure
             sleep frequency
           end
@@ -22,21 +22,33 @@ module PrometheusExporter::Instrumentation
       end
     end
 
+    def initialize(metric_labels = {})
+      @metric_labels = metric_labels
+    end
+
     def collect
-      metric = {}
-      metric[:type] = "puma"
+      metric = {
+        pid: pid,
+        type: "puma",
+        hostname: ::PrometheusExporter.hostname,
+        metric_labels: @metric_labels
+      }
       collect_puma_stats(metric)
       metric
     end
 
+    def pid
+      @pid = ::Process.pid
+    end
+
     def collect_puma_stats(metric)
       stats = JSON.parse(::Puma.stats)
 
       if stats.key?("workers")
         metric[:phase] = stats["phase"]
-        metric[:workers_total] = stats["workers"]
-        metric[:booted_workers_total] = stats["booted_workers"]
-        metric[:old_workers_total] = stats["old_workers"]
+        metric[:workers] = stats["workers"]
+        metric[:booted_workers] = stats["booted_workers"]
+        metric[:old_workers] = stats["old_workers"]
 
         stats["worker_status"].each do |worker|
           next if worker["last_status"].empty?
@@ -50,15 +62,15 @@ module PrometheusExporter::Instrumentation
     private
 
     def collect_worker_status(metric, status)
-      metric[:request_backlog_total] ||= 0
-      metric[:running_threads_total] ||= 0
-      metric[:thread_pool_capacity_total] ||= 0
-      metric[:max_threads_total] ||= 0
+      metric[:request_backlog] ||= 0
+      metric[:running_threads] ||= 0
+      metric[:thread_pool_capacity] ||= 0
+      metric[:max_threads] ||= 0
 
-      metric[:request_backlog_total] += status["backlog"]
-      metric[:running_threads_total] += status["running"]
-      metric[:thread_pool_capacity_total] += status["pool_capacity"]
-      metric[:max_threads_total] += status["max_threads"]
+      metric[:request_backlog] += status["backlog"]
+      metric[:running_threads] += status["running"]
+      metric[:thread_pool_capacity] += status["pool_capacity"]
+      metric[:max_threads] += status["max_threads"]
     end
   end
 end
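Usage note: the new `labels:` option lets each Puma process tag everything it reports, and the server-side collector folds those into `metric_labels`. A minimal sketch of wiring this up from a Puma config file; the `app_name` value and the config location are illustrative, not something this diff prescribes:

```ruby
# config/puma.rb -- illustrative sketch, assumes a prometheus_exporter
# server is already running with its default host/port settings.
require 'prometheus_exporter/instrumentation'

# Every sample from this process will carry the extra label,
# e.g. puma_request_backlog{phase="0",app_name="storefront"}.
PrometheusExporter::Instrumentation::Puma.start(
  labels: { app_name: "storefront" },  # hypothetical label
  frequency: 30
)
```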
@@ -0,0 +1,40 @@
+# frozen_string_literal: true
+
+# collects stats from resque
+module PrometheusExporter::Instrumentation
+  class Resque
+    def self.start(client: nil, frequency: 30)
+      resque_collector = new
+      client ||= PrometheusExporter::Client.default
+      Thread.new do
+        while true
+          begin
+            client.send_json(resque_collector.collect)
+          rescue => e
+            client.logger.error("Prometheus Exporter Failed To Collect Resque Stats #{e}")
+          ensure
+            sleep frequency
+          end
+        end
+      end
+    end
+
+    def collect
+      metric = {}
+      metric[:type] = "resque"
+      collect_resque_stats(metric)
+      metric
+    end
+
+    def collect_resque_stats(metric)
+      info = ::Resque.info
+
+      metric[:processed_jobs] = info[:processed]
+      metric[:failed_jobs] = info[:failed]
+      metric[:pending_jobs] = info[:pending]
+      metric[:queues] = info[:queues]
+      metric[:worker] = info[:workers]
+      metric[:working] = info[:working]
+    end
+  end
+end
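Usage note: like the other pollers in this gem, the new Resque instrumentation is started inside the process you want to measure. A sketch, assuming a Rails initializer (or any Resque boot file) is a reasonable place for it:

```ruby
# config/initializers/prometheus.rb -- illustrative sketch
require 'prometheus_exporter/instrumentation'

# Polls ::Resque.info every 30 seconds and ships the counts
# (processed, failed, pending, queues, workers, working) to the
# collector as a "resque" typed message.
PrometheusExporter::Instrumentation::Resque.start(frequency: 30)
```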
@@ -0,0 +1,58 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Instrumentation
+  class SidekiqProcess
+    def self.start(client: nil, frequency: 30)
+      client ||= PrometheusExporter::Client.default
+      sidekiq_process_collector = new
+
+      Thread.new do
+        loop do
+          begin
+            client.send_json(sidekiq_process_collector.collect)
+          rescue StandardError => e
+            STDERR.puts("Prometheus Exporter Failed To Collect Sidekiq Processes metrics #{e}")
+          ensure
+            sleep frequency
+          end
+        end
+      end
+    end
+
+    def initialize
+      @pid = ::Process.pid
+      @hostname = Socket.gethostname
+    end
+
+    def collect
+      {
+        type: 'sidekiq_process',
+        process: collect_stats
+      }
+    end
+
+    def collect_stats
+      process = current_process
+      return {} unless process
+
+      {
+        busy: process['busy'],
+        concurrency: process['concurrency'],
+        labels: {
+          labels: process['labels'].sort.join(','),
+          queues: process['queues'].sort.join(','),
+          quiet: process['quiet'],
+          tag: process['tag'],
+          hostname: process['hostname'],
+          identity: process['identity'],
+        }
+      }
+    end
+
+    def current_process
+      ::Sidekiq::ProcessSet.new.find do |sp|
+        sp['hostname'] == @hostname && sp['pid'] == @pid
+      end
+    end
+  end
+end
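Usage note: SidekiqProcess reports per-process busy/concurrency numbers keyed by the process identity. A sketch of starting it from a Sidekiq server initializer; the lifecycle hook is standard Sidekiq, the placement is an assumption:

```ruby
# config/initializers/sidekiq.rb -- illustrative sketch
require 'prometheus_exporter/instrumentation'

Sidekiq.configure_server do |config|
  config.on(:startup) do
    # Ships { type: "sidekiq_process", process: { busy:, concurrency:, labels: {...} } }
    # for the current Sidekiq process every 30 seconds.
    PrometheusExporter::Instrumentation::SidekiqProcess.start(frequency: 30)
  end
end
```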
@@ -2,16 +2,16 @@
 
 module PrometheusExporter::Instrumentation
   class SidekiqQueue
-    def self.start(client: nil, frequency: 30)
+    def self.start(client: nil, frequency: 30, all_queues: false)
       client ||= PrometheusExporter::Client.default
-      sidekiq_queue_collector = new
+      sidekiq_queue_collector = new(all_queues: all_queues)
 
       Thread.new do
         loop do
           begin
             client.send_json(sidekiq_queue_collector.collect)
           rescue StandardError => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Sidekiq Queue metrics #{e}")
+            client.logger.error("Prometheus Exporter Failed To Collect Sidekiq Queue metrics #{e}")
           ensure
             sleep frequency
           end
@@ -19,6 +19,12 @@ module PrometheusExporter::Instrumentation
       end
     end
 
+    def initialize(all_queues: false)
+      @all_queues = all_queues
+      @pid = ::Process.pid
+      @hostname = Socket.gethostname
+    end
+
     def collect
       {
         type: 'sidekiq_queue',
@@ -27,24 +33,32 @@ module PrometheusExporter::Instrumentation
     end
 
     def collect_queue_stats
-      hostname = Socket.gethostname
-      pid = ::Process.pid
-      ps = ::Sidekiq::ProcessSet.new
+      sidekiq_queues = ::Sidekiq::Queue.all
 
-      process = ps.find do |sp|
-        sp['hostname'] == hostname && sp['pid'] == pid
+      unless @all_queues
+        queues = collect_current_process_queues
+        sidekiq_queues.select! { |sidekiq_queue| queues.include?(sidekiq_queue.name) }
       end
 
-      queues = process.nil? ? [] : process['queues']
-
-      ::Sidekiq::Queue.all.map do |queue|
-        next unless queues.include? queue.name
+      sidekiq_queues.map do |queue|
         {
-          backlog_total: queue.size,
+          backlog: queue.size,
           latency_seconds: queue.latency.to_i,
           labels: { queue: queue.name }
         }
       end.compact
     end
+
+    private
+
+    def collect_current_process_queues
+      ps = ::Sidekiq::ProcessSet.new
+
+      process = ps.find do |sp|
+        sp['hostname'] == @hostname && sp['pid'] == @pid
+      end
+
+      process.nil? ? [] : process['queues']
+    end
   end
 end
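Usage note: with the default `all_queues: false`, only the queues consumed by the current process are reported, matching the old behaviour; `all_queues: true` reports backlog and latency for every known queue. A hedged sketch:

```ruby
# Inside a Sidekiq server process -- illustrative sketch
require 'prometheus_exporter/instrumentation'

Sidekiq.configure_server do |config|
  config.on(:startup) do
    # Default: only the queues this process is working.
    PrometheusExporter::Instrumentation::SidekiqQueue.start

    # Alternative: every Sidekiq queue, handy when one process is
    # dedicated to exporting queue metrics for the whole cluster.
    # PrometheusExporter::Instrumentation::SidekiqQueue.start(all_queues: true)
  end
end
```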
@@ -18,7 +18,7 @@ module PrometheusExporter::Instrumentation
           metric = unicorn_collector.collect
           client.send_json metric
         rescue StandardError => e
-          STDERR.puts("Prometheus Exporter Failed To Collect Unicorn Stats #{e}")
+          client.logger.error("Prometheus Exporter Failed To Collect Unicorn Stats #{e}")
         ensure
           sleep frequency
         end
@@ -42,9 +42,9 @@ module PrometheusExporter::Instrumentation
     def collect_unicorn_stats(metric)
       stats = listener_address_stats
 
-      metric[:active_workers_total] = stats.active
-      metric[:request_backlog_total] = stats.queued
-      metric[:workers_total] = worker_process_count
+      metric[:active_workers] = stats.active
+      metric[:request_backlog] = stats.queued
+      metric[:workers] = worker_process_count
     end
 
     private
@@ -5,9 +5,11 @@ require_relative "instrumentation/process"
 require_relative "instrumentation/method_profiler"
 require_relative "instrumentation/sidekiq"
 require_relative "instrumentation/sidekiq_queue"
+require_relative "instrumentation/sidekiq_process"
 require_relative "instrumentation/delayed_job"
 require_relative "instrumentation/puma"
 require_relative "instrumentation/hutch"
 require_relative "instrumentation/unicorn"
 require_relative "instrumentation/active_record"
 require_relative "instrumentation/shoryuken"
+require_relative "instrumentation/resque"
@@ -5,6 +5,7 @@ module PrometheusExporter::Metric
 
     @default_prefix = nil if !defined?(@default_prefix)
     @default_labels = nil if !defined?(@default_labels)
+    @default_aggregation = nil if !defined?(@default_aggregation)
 
     # prefix applied to all metrics
     def self.default_prefix=(name)
@@ -23,6 +24,14 @@ module PrometheusExporter::Metric
       @default_labels || {}
     end
 
+    def self.default_aggregation=(aggregation)
+      @default_aggregation = aggregation
+    end
+
+    def self.default_aggregation
+      @default_aggregation ||= Summary
+    end
+
     attr_accessor :help, :name, :data
 
     def initialize(name, help)
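Usage note: `default_aggregation` is what lets collectors that used to hard-code `Summary` (the Sidekiq and DelayedJob duration metrics further down) be switched to histograms in one place. A sketch of flipping it globally before any collector is built; the require path is an assumption about how the metric classes get loaded:

```ruby
require 'prometheus_exporter/metric'  # assumed entry point for the metric classes

# Collectors call PrometheusExporter::Metric::Base.default_aggregation.new(...),
# so after this line sidekiq_job_duration_seconds and friends are emitted
# as histograms instead of summaries.
PrometheusExporter::Metric::Base.default_aggregation = PrometheusExporter::Metric::Histogram
```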
@@ -5,6 +5,10 @@ module PrometheusExporter::Metric
     attr_reader :data
 
     def initialize(name, help)
+      if name.end_with?("_total")
+        raise ArgumentError, "The metric name of gauge must not have _total suffix. Given: #{name}"
+      end
+
       super
       reset!
     end
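Usage note: gauge names ending in `_total` now fail fast, since Prometheus reserves that suffix for counters. A quick illustration of the new behaviour:

```ruby
require 'prometheus_exporter/metric'  # assumed entry point for the metric classes

PrometheusExporter::Metric::Gauge.new("queue_backlog", "Items waiting in the queue")  # fine

PrometheusExporter::Metric::Gauge.new("jobs_total", "Jobs seen so far")
# => ArgumentError: The metric name of gauge must not have _total suffix. Given: jobs_total
```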
@@ -36,21 +36,12 @@ class PrometheusExporter::Middleware
 
     result
   ensure
-    status = (result && result[0]) || -1
-    params = env["action_dispatch.request.parameters"]
-    action, controller = nil
-    if params
-      action = params["action"]
-      controller = params["controller"]
-    end
 
     obj = {
       type: "web",
       timings: info,
       queue_time: queue_time,
-      action: action,
-      controller: controller,
-      status: status
+      default_labels: default_labels(env, result)
     }
     labels = custom_labels(env)
     if labels
@@ -60,6 +51,22 @@ class PrometheusExporter::Middleware
     @client.send_json(obj)
   end
 
+  def default_labels(env, result)
+    status = (result && result[0]) || -1
+    params = env["action_dispatch.request.parameters"]
+    action = controller = nil
+    if params
+      action = params["action"]
+      controller = params["controller"]
+    end
+
+    {
+      action: action || "other",
+      controller: controller || "other",
+      status: status
+    }
+  end
+
   # allows subclasses to add custom labels based on env
   def custom_labels(env)
     nil
@@ -83,19 +90,24 @@ class PrometheusExporter::Middleware
     Process.clock_gettime(Process::CLOCK_REALTIME)
   end
 
-  # get the content of the x-queue-start or x-request-start header
+  # determine queue start from well-known trace headers
   def queue_start(env)
+
+    # get the content of the x-queue-start or x-request-start header
     value = env['HTTP_X_REQUEST_START'] || env['HTTP_X_QUEUE_START']
     unless value.nil? || value == ''
-      convert_header_to_ms(value.to_s)
+      # nginx returns time as milliseconds with 3 decimal places
+      # apache returns time as microseconds without decimal places
+      # this method takes care to convert both into a proper second + fractions timestamp
+      value = value.to_s.gsub(/t=|\./, '')
+      return "#{value[0, 10]}.#{value[10, 13]}".to_f
     end
-  end
 
-  # nginx returns time as milliseconds with 3 decimal places
-  # apache returns time as microseconds without decimal places
-  # this method takes care to convert both into a proper second + fractions timestamp
-  def convert_header_to_ms(str)
-    str = str.gsub(/t=|\./, '')
-    "#{str[0, 10]}.#{str[10, 13]}".to_f
+    # get the content of the x-amzn-trace-id header
+    # see also: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-request-tracing.html
+    value = env['HTTP_X_AMZN_TRACE_ID']
+    value&.split('Root=')&.last&.split('-')&.fetch(1)&.to_i(16)
+
   end
+
 end
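Usage note: now that status/action/controller travel through `default_labels`, a subclass can override either hook: `default_labels` to replace the built-ins, or `custom_labels` to append extras. An illustrative subclass; the header and label names are made up:

```ruby
require 'prometheus_exporter/middleware'

# Illustrative subclass -- the hook names come from the diff above,
# the api_version label is hypothetical.
class AppPrometheusMiddleware < PrometheusExporter::Middleware
  def custom_labels(env)
    { api_version: env["HTTP_X_API_VERSION"] || "none" }
  end
end

# e.g. in a Rails app:
# Rails.application.middleware.unshift AppPrometheusMiddleware
```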
@@ -47,7 +47,8 @@ module PrometheusExporter::Server
       obj["created_at"] = now
 
       @active_record_metrics.delete_if do |current|
-        (obj["pid"] == current["pid"] && obj["hostname"] == current["hostname"]) ||
+        (obj["pid"] == current["pid"] && obj["hostname"] == current["hostname"] &&
+          obj["metric_labels"]["pool_name"] == current["metric_labels"]["pool_name"]) ||
           (current["created_at"] + MAX_ACTIVERECORD_METRIC_AGE < now)
       end
 
@@ -14,12 +14,14 @@ module PrometheusExporter::Server
       register_collector(ProcessCollector.new)
       register_collector(SidekiqCollector.new)
       register_collector(SidekiqQueueCollector.new)
+      register_collector(SidekiqProcessCollector.new)
       register_collector(DelayedJobCollector.new)
       register_collector(PumaCollector.new)
       register_collector(HutchCollector.new)
       register_collector(UnicornCollector.new)
       register_collector(ActiveRecordCollector.new)
       register_collector(ShoryukenCollector.new)
+      register_collector(ResqueCollector.new)
     end
 
     def register_collector(collector)
@@ -19,21 +19,21 @@ module PrometheusExporter::Server
     end
 
     def collect(obj)
-      default_labels = { job_name: obj['name'] }
-      custom_labels = obj['custom_labels']
-      labels = custom_labels.nil? ? default_labels : default_labels.merge(custom_labels)
+      custom_labels = obj['custom_labels'] || {}
+      gauge_labels = { queue_name: obj['queue_name'] }.merge(custom_labels)
+      counter_labels = gauge_labels.merge(job_name: obj['name'])
 
       ensure_delayed_job_metrics
-      @delayed_job_duration_seconds.observe(obj["duration"], labels)
-      @delayed_jobs_total.observe(1, labels)
-      @delayed_failed_jobs_total.observe(1, labels) if !obj["success"]
-      @delayed_jobs_max_attempts_reached_total.observe(1) if obj["attempts"] >= obj["max_attempts"]
-      @delayed_job_duration_seconds_summary.observe(obj["duration"])
-      @delayed_job_duration_seconds_summary.observe(obj["duration"], status: "success") if obj["success"]
-      @delayed_job_duration_seconds_summary.observe(obj["duration"], status: "failed") if !obj["success"]
-      @delayed_job_attempts_summary.observe(obj["attempts"]) if obj["success"]
-      @delayed_jobs_enqueued.observe(obj["enqueued"])
-      @delayed_jobs_pending.observe(obj["pending"])
+      @delayed_job_duration_seconds.observe(obj["duration"], counter_labels)
+      @delayed_jobs_total.observe(1, counter_labels)
+      @delayed_failed_jobs_total.observe(1, counter_labels) if !obj["success"]
+      @delayed_jobs_max_attempts_reached_total.observe(1, counter_labels) if obj["attempts"] >= obj["max_attempts"]
+      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels)
+      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels.merge(status: "success")) if obj["success"]
+      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels.merge(status: "failed")) if !obj["success"]
+      @delayed_job_attempts_summary.observe(obj["attempts"], counter_labels) if obj["success"]
+      @delayed_jobs_enqueued.observe(obj["enqueued"], gauge_labels)
+      @delayed_jobs_pending.observe(obj["pending"], gauge_labels)
     end
 
     def metrics
@@ -76,12 +76,12 @@ module PrometheusExporter::Server
           "delayed_jobs_max_attempts_reached_total", "Total number of delayed jobs that reached max attempts.")
 
       @delayed_job_duration_seconds_summary =
-        PrometheusExporter::Metric::Summary.new("delayed_job_duration_seconds_summary",
-                                                "Summary of the time it takes jobs to execute.")
+        PrometheusExporter::Metric::Base.default_aggregation.new("delayed_job_duration_seconds_summary",
+                                                                 "Summary of the time it takes jobs to execute.")
 
       @delayed_job_attempts_summary =
-        PrometheusExporter::Metric::Summary.new("delayed_job_attempts_summary",
-                                                "Summary of the amount of attempts it takes delayed jobs to succeed.")
+        PrometheusExporter::Metric::Base.default_aggregation.new("delayed_job_attempts_summary",
+                                                                 "Summary of the amount of attempts it takes delayed jobs to succeed.")
       end
     end
   end
@@ -4,13 +4,13 @@ module PrometheusExporter::Server
   class PumaCollector < TypeCollector
     MAX_PUMA_METRIC_AGE = 30
     PUMA_GAUGES = {
-      workers_total: "Number of puma workers.",
-      booted_workers_total: "Number of puma workers booted.",
-      old_workers_total: "Number of old puma workers.",
-      running_threads_total: "Number of puma threads currently running.",
-      request_backlog_total: "Number of requests waiting to be processed by a puma thread.",
-      thread_pool_capacity_total: "Number of puma threads available at current scale.",
-      max_threads_total: "Number of puma threads at available at max scale.",
+      workers: "Number of puma workers.",
+      booted_workers: "Number of puma workers booted.",
+      old_workers: "Number of old puma workers.",
+      running_threads: "Number of puma threads currently running.",
+      request_backlog: "Number of requests waiting to be processed by a puma thread.",
+      thread_pool_capacity: "Number of puma threads available at current scale.",
+      max_threads: "Number of puma threads at available at max scale.",
     }
 
     def initialize
@@ -34,6 +34,9 @@ module PrometheusExporter::Server
         if m["custom_labels"]
           labels.merge!(m["custom_labels"])
         end
+        if m["metric_labels"]
+          labels.merge!(m["metric_labels"])
+        end
 
         PUMA_GAUGES.map do |k, help|
           k = k.to_s
@@ -51,7 +54,12 @@ module PrometheusExporter::Server
       now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
 
       obj["created_at"] = now
-      @puma_metrics.delete_if { |m| m["created_at"] + MAX_PUMA_METRIC_AGE < now }
+
+      @puma_metrics.delete_if do |current|
+        (obj["pid"] == current["pid"] && obj["hostname"] == current["hostname"]) ||
+          (current["created_at"] + MAX_PUMA_METRIC_AGE < now)
+      end
+
       @puma_metrics << obj
     end
   end
@@ -0,0 +1,54 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Server
+  class ResqueCollector < TypeCollector
+    MAX_RESQUE_METRIC_AGE = 30
+    RESQUE_GAUGES = {
+      processed_jobs: "Total number of processed Resque jobs.",
+      failed_jobs: "Total number of failed Resque jobs.",
+      pending_jobs: "Total number of pending Resque jobs.",
+      queues: "Total number of Resque queues.",
+      workers: "Total number of Resque workers running.",
+      working: "Total number of Resque workers working."
+    }
+
+    def initialize
+      @resque_metrics = []
+      @gauges = {}
+    end
+
+    def type
+      "resque"
+    end
+
+    def metrics
+      return [] if resque_metrics.length == 0
+
+      resque_metrics.map do |metric|
+        labels = metric.fetch("custom_labels", {})
+
+        RESQUE_GAUGES.map do |name, help|
+          name = name.to_s
+          if value = metric[name]
+            gauge = gauges[name] ||= PrometheusExporter::Metric::Gauge.new("resque_#{name}", help)
+            gauge.observe(value, labels)
+          end
+        end
+      end
+
+      gauges.values
+    end
+
+    def collect(object)
+      now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+
+      object["created_at"] = now
+      resque_metrics.delete_if { |metric| metric["created_at"] + MAX_RESQUE_METRIC_AGE < now }
+      resque_metrics << object
+    end
+
+    private
+
+    attr_reader :resque_metrics, :gauges
+  end
+end
@@ -1,6 +1,6 @@
 # frozen_string_literal: true
 
-require 'prometheus_exporter/client'
+require_relative '../client'
 require_relative '../instrumentation/unicorn'
 
 module PrometheusExporter::Server
@@ -17,6 +17,7 @@ module PrometheusExporter::Server
       @prefix = nil
       @auth = nil
       @realm = nil
+      @histogram = nil
 
       options.each do |k, v|
        send("#{k}=", v) if self.class.method_defined?("#{k}=")
@@ -27,6 +28,10 @@ module PrometheusExporter::Server
       PrometheusExporter::Metric::Base.default_prefix = prefix
       PrometheusExporter::Metric::Base.default_labels = label
 
+      if histogram
+        PrometheusExporter::Metric::Base.default_aggregation = PrometheusExporter::Metric::Histogram
+      end
+
       register_type_collectors
 
       unless collector.is_a?(PrometheusExporter::Server::CollectorBase)
@@ -47,7 +52,7 @@ module PrometheusExporter::Server
     end
 
     attr_accessor :unicorn_listen_address, :unicorn_pid_file
-    attr_writer :prefix, :port, :bind, :collector_class, :type_collectors, :timeout, :verbose, :server_class, :label, :auth, :realm
+    attr_writer :prefix, :port, :bind, :collector_class, :type_collectors, :timeout, :verbose, :server_class, :label, :auth, :realm, :histogram
 
     def auth
       @auth || nil
@@ -98,6 +103,10 @@ module PrometheusExporter::Server
       @label ||= PrometheusExporter::DEFAULT_LABEL
     end
 
+    def histogram
+      @histogram || false
+    end
+
     private
 
     def register_type_collectors
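Usage note: the `histogram` option rides through the runner's option hash and switches the default aggregation before the type collectors are registered. When the exporter is embedded rather than launched from the CLI, it might be enabled like this (a sketch based only on the options visible above; the bind and port values are the usual defaults):

```ruby
require 'prometheus_exporter/server'

# histogram: true makes the runner set
# PrometheusExporter::Metric::Base.default_aggregation = Histogram
# before building its collectors.
runner = PrometheusExporter::Server::Runner.new(
  bind: "0.0.0.0",
  port: 9394,
  histogram: true
)
runner.start
```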
@@ -52,7 +52,7 @@ module PrometheusExporter::Server
       if !@sidekiq_jobs_total
 
         @sidekiq_job_duration_seconds =
-          PrometheusExporter::Metric::Summary.new(
+          PrometheusExporter::Metric::Base.default_aggregation.new(
            "sidekiq_job_duration_seconds", "Total time spent in sidekiq jobs.")
 
         @sidekiq_jobs_total =