prometheus_exporter 0.7.0 → 2.1.0

Files changed (50)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci.yml +82 -25
  3. data/Appraisals +7 -3
  4. data/CHANGELOG +104 -24
  5. data/Dockerfile +9 -0
  6. data/README.md +258 -51
  7. data/bin/prometheus_exporter +19 -6
  8. data/examples/custom_collector.rb +1 -1
  9. data/gemfiles/ar_70.gemfile +5 -0
  10. data/lib/prometheus_exporter/client.rb +48 -23
  11. data/lib/prometheus_exporter/instrumentation/active_record.rb +11 -29
  12. data/lib/prometheus_exporter/instrumentation/delayed_job.rb +5 -2
  13. data/lib/prometheus_exporter/instrumentation/good_job.rb +30 -0
  14. data/lib/prometheus_exporter/instrumentation/method_profiler.rb +63 -23
  15. data/lib/prometheus_exporter/instrumentation/periodic_stats.rb +62 -0
  16. data/lib/prometheus_exporter/instrumentation/process.rb +5 -21
  17. data/lib/prometheus_exporter/instrumentation/puma.rb +34 -27
  18. data/lib/prometheus_exporter/instrumentation/resque.rb +35 -0
  19. data/lib/prometheus_exporter/instrumentation/sidekiq.rb +53 -23
  20. data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb +52 -0
  21. data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb +32 -24
  22. data/lib/prometheus_exporter/instrumentation/sidekiq_stats.rb +37 -0
  23. data/lib/prometheus_exporter/instrumentation/unicorn.rb +10 -15
  24. data/lib/prometheus_exporter/instrumentation.rb +5 -0
  25. data/lib/prometheus_exporter/metric/base.rb +12 -10
  26. data/lib/prometheus_exporter/metric/gauge.rb +4 -0
  27. data/lib/prometheus_exporter/metric/histogram.rb +15 -3
  28. data/lib/prometheus_exporter/middleware.rb +45 -19
  29. data/lib/prometheus_exporter/server/active_record_collector.rb +9 -12
  30. data/lib/prometheus_exporter/server/collector.rb +4 -0
  31. data/lib/prometheus_exporter/server/delayed_job_collector.rb +24 -18
  32. data/lib/prometheus_exporter/server/good_job_collector.rb +52 -0
  33. data/lib/prometheus_exporter/server/metrics_container.rb +66 -0
  34. data/lib/prometheus_exporter/server/process_collector.rb +8 -13
  35. data/lib/prometheus_exporter/server/puma_collector.rb +14 -12
  36. data/lib/prometheus_exporter/server/resque_collector.rb +50 -0
  37. data/lib/prometheus_exporter/server/runner.rb +14 -3
  38. data/lib/prometheus_exporter/server/sidekiq_collector.rb +1 -1
  39. data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +43 -0
  40. data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb +6 -7
  41. data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb +48 -0
  42. data/lib/prometheus_exporter/server/type_collector.rb +2 -0
  43. data/lib/prometheus_exporter/server/unicorn_collector.rb +32 -33
  44. data/lib/prometheus_exporter/server/web_collector.rb +17 -17
  45. data/lib/prometheus_exporter/server/web_server.rb +72 -41
  46. data/lib/prometheus_exporter/server.rb +4 -0
  47. data/lib/prometheus_exporter/version.rb +1 -1
  48. data/lib/prometheus_exporter.rb +12 -13
  49. data/prometheus_exporter.gemspec +6 -6
  50. metadata +53 -14

data/lib/prometheus_exporter/server/delayed_job_collector.rb

@@ -5,6 +5,7 @@ module PrometheusExporter::Server
     def initialize
       @delayed_jobs_total = nil
       @delayed_job_duration_seconds = nil
+      @delayed_job_latency_seconds_total = nil
       @delayed_jobs_total = nil
       @delayed_failed_jobs_total = nil
       @delayed_jobs_max_attempts_reached_total = nil
@@ -19,26 +20,27 @@ module PrometheusExporter::Server
     end

     def collect(obj)
-      default_labels = { job_name: obj['name'] }
-      custom_labels = obj['custom_labels']
-      labels = custom_labels.nil? ? default_labels : default_labels.merge(custom_labels)
+      custom_labels = obj['custom_labels'] || {}
+      gauge_labels = { queue_name: obj['queue_name'] }.merge(custom_labels)
+      counter_labels = gauge_labels.merge(job_name: obj['name'])

       ensure_delayed_job_metrics
-      @delayed_job_duration_seconds.observe(obj["duration"], labels)
-      @delayed_jobs_total.observe(1, labels)
-      @delayed_failed_jobs_total.observe(1, labels) if !obj["success"]
-      @delayed_jobs_max_attempts_reached_total.observe(1) if obj["attempts"] >= obj["max_attempts"]
-      @delayed_job_duration_seconds_summary.observe(obj["duration"])
-      @delayed_job_duration_seconds_summary.observe(obj["duration"], status: "success") if obj["success"]
-      @delayed_job_duration_seconds_summary.observe(obj["duration"], status: "failed") if !obj["success"]
-      @delayed_job_attempts_summary.observe(obj["attempts"]) if obj["success"]
-      @delayed_jobs_enqueued.observe(obj["enqueued"])
-      @delayed_jobs_pending.observe(obj["pending"])
+      @delayed_job_duration_seconds.observe(obj["duration"], counter_labels)
+      @delayed_job_latency_seconds_total.observe(obj["latency"], counter_labels)
+      @delayed_jobs_total.observe(1, counter_labels)
+      @delayed_failed_jobs_total.observe(1, counter_labels) if !obj["success"]
+      @delayed_jobs_max_attempts_reached_total.observe(1, counter_labels) if obj["attempts"] >= obj["max_attempts"]
+      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels)
+      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels.merge(status: "success")) if obj["success"]
+      @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels.merge(status: "failed")) if !obj["success"]
+      @delayed_job_attempts_summary.observe(obj["attempts"], counter_labels) if obj["success"]
+      @delayed_jobs_enqueued.observe(obj["enqueued"], gauge_labels)
+      @delayed_jobs_pending.observe(obj["pending"], gauge_labels)
     end

     def metrics
       if @delayed_jobs_total
-        [@delayed_job_duration_seconds, @delayed_jobs_total, @delayed_failed_jobs_total,
+        [@delayed_job_duration_seconds, @delayed_job_latency_seconds_total, @delayed_jobs_total, @delayed_failed_jobs_total,
          @delayed_jobs_max_attempts_reached_total, @delayed_job_duration_seconds_summary, @delayed_job_attempts_summary,
          @delayed_jobs_enqueued, @delayed_jobs_pending]
       else
@@ -55,6 +57,10 @@ module PrometheusExporter::Server
         PrometheusExporter::Metric::Counter.new(
           "delayed_job_duration_seconds", "Total time spent in delayed jobs.")

+      @delayed_job_latency_seconds_total =
+        PrometheusExporter::Metric::Counter.new(
+          "delayed_job_latency_seconds_total", "Total delayed jobs latency.")
+
       @delayed_jobs_total =
         PrometheusExporter::Metric::Counter.new(
           "delayed_jobs_total", "Total number of delayed jobs executed.")
@@ -76,12 +82,12 @@ module PrometheusExporter::Server
           "delayed_jobs_max_attempts_reached_total", "Total number of delayed jobs that reached max attempts.")

       @delayed_job_duration_seconds_summary =
-        PrometheusExporter::Metric::Summary.new("delayed_job_duration_seconds_summary",
-          "Summary of the time it takes jobs to execute.")
+        PrometheusExporter::Metric::Base.default_aggregation.new("delayed_job_duration_seconds_summary",
+          "Summary of the time it takes jobs to execute.")

       @delayed_job_attempts_summary =
-        PrometheusExporter::Metric::Summary.new("delayed_job_attempts_summary",
-          "Summary of the amount of attempts it takes delayed jobs to succeed.")
+        PrometheusExporter::Metric::Base.default_aggregation.new("delayed_job_attempts_summary",
+          "Summary of the amount of attempts it takes delayed jobs to succeed.")
       end
     end
   end
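
The label handling in `collect` is reworked: the gauges (`delayed_jobs_enqueued`, `delayed_jobs_pending`) are now labelled per queue, while the counters and summaries carry both the queue and the job name. A rough sketch of a payload that exercises the new path; the field names come from the hunk above, the values are invented:

```ruby
# Hypothetical payload, shaped like what DelayedJobCollector#collect expects.
payload = {
  "name"          => "Invoice::GenerateJob",
  "queue_name"    => "billing",
  "duration"      => 2.4,
  "latency"       => 0.8,   # new in this release, feeds delayed_job_latency_seconds_total
  "success"       => true,
  "attempts"      => 1,
  "max_attempts"  => 25,
  "enqueued"      => 12,
  "pending"       => 3,
  "custom_labels" => { "service" => "web" }
}

# With this input the collector derives:
#   gauge_labels   #=> { queue_name: "billing", "service" => "web" }
#   counter_labels #=> gauge_labels.merge(job_name: "Invoice::GenerateJob")
```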

data/lib/prometheus_exporter/server/good_job_collector.rb

@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Server
+  class GoodJobCollector < TypeCollector
+    MAX_METRIC_AGE = 30
+    GOOD_JOB_GAUGES = {
+      scheduled: "Total number of scheduled GoodJob jobs.",
+      retried: "Total number of retried GoodJob jobs.",
+      queued: "Total number of queued GoodJob jobs.",
+      running: "Total number of running GoodJob jobs.",
+      finished: "Total number of finished GoodJob jobs.",
+      succeeded: "Total number of succeeded GoodJob jobs.",
+      discarded: "Total number of discarded GoodJob jobs."
+    }
+
+    def initialize
+      @good_job_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
+      @gauges = {}
+    end
+
+    def type
+      "good_job"
+    end
+
+    def metrics
+      return [] if good_job_metrics.length == 0
+
+      good_job_metrics.map do |metric|
+        labels = metric.fetch("custom_labels", {})
+
+        GOOD_JOB_GAUGES.map do |name, help|
+          value = metric[name.to_s]
+
+          if value
+            gauge = gauges[name] ||= PrometheusExporter::Metric::Gauge.new("good_job_#{name}", help)
+            gauge.observe(value, labels)
+          end
+        end
+      end
+
+      gauges.values
+    end
+
+    def collect(object)
+      @good_job_metrics << object
+    end
+
+    private
+
+    attr_reader :good_job_metrics, :gauges
+  end
+end
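
The collector reads one key per gauge plus optional `custom_labels`, so the payload sent by the new GoodJob instrumentation (instrumentation/good_job.rb in the file list) presumably looks like the sketch below; the values are invented and only the keys the collector inspects matter:

```ruby
collector = PrometheusExporter::Server::GoodJobCollector.new

collector.collect(
  "queued"        => 4,
  "running"       => 2,
  "succeeded"     => 310,
  "discarded"     => 1,
  "custom_labels" => { "environment" => "production" }
)

collector.metrics
# => one PrometheusExporter::Metric::Gauge per key present,
#    e.g. good_job_queued{environment="production"} 4
```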

data/lib/prometheus_exporter/server/metrics_container.rb

@@ -0,0 +1,66 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Server
+  class MetricsContainer
+    METRIC_MAX_AGE = 60
+    METRIC_EXPIRE_ATTR = "_expire_at"
+
+    attr_reader :data, :ttl
+    attr_accessor :filter
+
+    def initialize(ttl: METRIC_MAX_AGE, expire_attr: METRIC_EXPIRE_ATTR, filter: nil)
+      @data = []
+      @ttl = ttl
+      @expire_attr = expire_attr
+      @filter = filter
+    end
+
+    def <<(obj)
+      now = get_time
+      obj[@expire_attr] = now + @ttl
+
+      expire(time: now, new_metric: obj)
+
+      @data << obj
+      @data
+    end
+
+    def [](key)
+      @data.tap { expire }[key]
+    end
+
+    def size(&blk)
+      wrap_expire(:size, &blk)
+    end
+    alias_method :length, :size
+
+    def map(&blk)
+      wrap_expire(:map, &blk)
+    end
+
+    def each(&blk)
+      wrap_expire(:each, &blk)
+    end
+
+    def expire(time: nil, new_metric: nil)
+      time ||= get_time
+
+      @data.delete_if do |metric|
+        expired = metric[@expire_attr] < time
+        expired ||= filter.call(new_metric, metric) if @filter && new_metric
+        expired
+      end
+    end
+
+    private
+
+    def get_time
+      ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+    end
+
+    def wrap_expire(method_name, &blk)
+      expire
+      @data.public_send(method_name, &blk)
+    end
+  end
+end
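
MetricsContainer centralizes the ageing logic the collectors used to hand-roll: every stored hash is stamped with `_expire_at`, reads (`size`/`length`, `map`, `each`, `[]`) expire stale entries first, and an optional `filter` lets the newest entry supersede an older one. A small usage sketch (values invented) mirroring the pid/hostname filter the process and puma collectors install in the hunks below:

```ruby
container = PrometheusExporter::Server::MetricsContainer.new(ttl: 30)

# Keep only the latest reading per pid/hostname.
container.filter = ->(new_metric, old_metric) do
  new_metric["pid"] == old_metric["pid"] && new_metric["hostname"] == old_metric["hostname"]
end

container << { "pid" => 42, "hostname" => "web-1", "rss" => 180_000 }
container << { "pid" => 42, "hostname" => "web-1", "rss" => 190_000 } # supersedes the first entry

container.size # => 1; entries older than 30 seconds are likewise dropped on access
```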

data/lib/prometheus_exporter/server/process_collector.rb

@@ -3,7 +3,8 @@
 module PrometheusExporter::Server

   class ProcessCollector < TypeCollector
-    MAX_PROCESS_METRIC_AGE = 60
+    MAX_METRIC_AGE = 60
+
     PROCESS_GAUGES = {
       heap_free_slots: "Free ruby heap slots.",
       heap_live_slots: "Used ruby heap slots.",
@@ -21,7 +22,10 @@ module PrometheusExporter::Server
     }

     def initialize
-      @process_metrics = []
+      @process_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
+      @process_metrics.filter = -> (new_metric, old_metric) do
+        new_metric["pid"] == old_metric["pid"] && new_metric["hostname"] == old_metric["hostname"]
+      end
     end

     def type
@@ -34,8 +38,8 @@ module PrometheusExporter::Server
       metrics = {}

       @process_metrics.map do |m|
-        metric_key = m["metric_labels"].merge("pid" => m["pid"])
-        metric_key.merge!(m["custom_labels"] || {})
+        metric_key = (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"])
+        metric_key.merge!(m["custom_labels"]) if m["custom_labels"]

         PROCESS_GAUGES.map do |k, help|
           k = k.to_s
@@ -58,15 +62,6 @@ module PrometheusExporter::Server
     end

     def collect(obj)
-      now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-
-      obj["created_at"] = now
-
-      @process_metrics.delete_if do |current|
-        (obj["pid"] == current["pid"] && obj["hostname"] == current["hostname"]) ||
-          (current["created_at"] + MAX_PROCESS_METRIC_AGE < now)
-      end
-
       @process_metrics << obj
     end
   end

data/lib/prometheus_exporter/server/puma_collector.rb

@@ -4,17 +4,20 @@ module PrometheusExporter::Server
   class PumaCollector < TypeCollector
     MAX_PUMA_METRIC_AGE = 30
     PUMA_GAUGES = {
-      workers_total: "Number of puma workers.",
-      booted_workers_total: "Number of puma workers booted.",
-      old_workers_total: "Number of old puma workers.",
-      running_threads_total: "Number of puma threads currently running.",
-      request_backlog_total: "Number of requests waiting to be processed by a puma thread.",
-      thread_pool_capacity_total: "Number of puma threads available at current scale.",
-      max_threads_total: "Number of puma threads at available at max scale.",
+      workers: "Number of puma workers.",
+      booted_workers: "Number of puma workers booted.",
+      old_workers: "Number of old puma workers.",
+      running_threads: "Number of puma threads currently running.",
+      request_backlog: "Number of requests waiting to be processed by a puma thread.",
+      thread_pool_capacity: "Number of puma threads available at current scale.",
+      max_threads: "Number of puma threads at available at max scale.",
     }

     def initialize
-      @puma_metrics = []
+      @puma_metrics = MetricsContainer.new(ttl: MAX_PUMA_METRIC_AGE)
+      @puma_metrics.filter = -> (new_metric, old_metric) do
+        new_metric["pid"] == old_metric["pid"] && new_metric["hostname"] == old_metric["hostname"]
+      end
     end

     def type
@@ -34,6 +37,9 @@ module PrometheusExporter::Server
         if m["custom_labels"]
           labels.merge!(m["custom_labels"])
         end
+        if m["metric_labels"]
+          labels.merge!(m["metric_labels"])
+        end

         PUMA_GAUGES.map do |k, help|
           k = k.to_s
@@ -48,10 +54,6 @@ module PrometheusExporter::Server
     end

     def collect(obj)
-      now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-
-      obj["created_at"] = now
-      @puma_metrics.delete_if { |m| m["created_at"] + MAX_PUMA_METRIC_AGE < now }
       @puma_metrics << obj
     end
   end

data/lib/prometheus_exporter/server/resque_collector.rb

@@ -0,0 +1,50 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Server
+  class ResqueCollector < TypeCollector
+    MAX_METRIC_AGE = 30
+    RESQUE_GAUGES = {
+      processed_jobs: "Total number of processed Resque jobs.",
+      failed_jobs: "Total number of failed Resque jobs.",
+      pending_jobs: "Total number of pending Resque jobs.",
+      queues: "Total number of Resque queues.",
+      workers: "Total number of Resque workers running.",
+      working: "Total number of Resque workers working."
+    }
+
+    def initialize
+      @resque_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
+      @gauges = {}
+    end
+
+    def type
+      "resque"
+    end
+
+    def metrics
+      return [] if resque_metrics.length == 0
+
+      resque_metrics.map do |metric|
+        labels = metric.fetch("custom_labels", {})
+
+        RESQUE_GAUGES.map do |name, help|
+          name = name.to_s
+          if value = metric[name]
+            gauge = gauges[name] ||= PrometheusExporter::Metric::Gauge.new("resque_#{name}", help)
+            gauge.observe(value, labels)
+          end
+        end
+      end
+
+      gauges.values
+    end
+
+    def collect(object)
+      @resque_metrics << object
+    end
+
+    private
+
+    attr_reader :resque_metrics, :gauges
+  end
+end

data/lib/prometheus_exporter/server/runner.rb

@@ -1,7 +1,6 @@
 # frozen_string_literal: true

-require 'prometheus_exporter/client'
-require_relative '../instrumentation/unicorn'
+require_relative '../client'

 module PrometheusExporter::Server
   class RunnerException < StandardError; end
@@ -17,6 +16,7 @@ module PrometheusExporter::Server
       @prefix = nil
       @auth = nil
       @realm = nil
+      @histogram = nil

       options.each do |k, v|
         send("#{k}=", v) if self.class.method_defined?("#{k}=")
@@ -27,6 +27,10 @@ module PrometheusExporter::Server
       PrometheusExporter::Metric::Base.default_prefix = prefix
       PrometheusExporter::Metric::Base.default_labels = label

+      if histogram
+        PrometheusExporter::Metric::Base.default_aggregation = PrometheusExporter::Metric::Histogram
+      end
+
       register_type_collectors

       unless collector.is_a?(PrometheusExporter::Server::CollectorBase)
@@ -34,6 +38,9 @@ module PrometheusExporter::Server
       end

       if unicorn_listen_address && unicorn_pid_file
+
+        require_relative '../instrumentation'
+
         local_client = PrometheusExporter::LocalClient.new(collector: collector)
         PrometheusExporter::Instrumentation::Unicorn.start(
           pid_file: unicorn_pid_file,
@@ -47,7 +54,7 @@ module PrometheusExporter::Server
     end

     attr_accessor :unicorn_listen_address, :unicorn_pid_file
-    attr_writer :prefix, :port, :bind, :collector_class, :type_collectors, :timeout, :verbose, :server_class, :label, :auth, :realm
+    attr_writer :prefix, :port, :bind, :collector_class, :type_collectors, :timeout, :verbose, :server_class, :label, :auth, :realm, :histogram

     def auth
       @auth || nil
@@ -98,6 +105,10 @@ module PrometheusExporter::Server
       @label ||= PrometheusExporter::DEFAULT_LABEL
     end

+    def histogram
+      @histogram || false
+    end
+
     private

     def register_type_collectors
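
The new `histogram` option flips `PrometheusExporter::Metric::Base.default_aggregation` from Summary to Histogram before the type collectors are registered, which is what the `default_aggregation.new(...)` call sites in the delayed job and sidekiq collectors pick up. A minimal sketch, assuming the Runner options shown above, of starting the standalone server from Ruby with the option enabled (9394 is the gem's default port):

```ruby
require "prometheus_exporter/server"

runner = PrometheusExporter::Server::Runner.new(
  port: 9394,
  histogram: true  # duration metrics are emitted as histograms instead of summaries
)
runner.start
```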

data/lib/prometheus_exporter/server/sidekiq_collector.rb

@@ -52,7 +52,7 @@ module PrometheusExporter::Server
       if !@sidekiq_jobs_total

         @sidekiq_job_duration_seconds =
-          PrometheusExporter::Metric::Summary.new(
+          PrometheusExporter::Metric::Base.default_aggregation.new(
             "sidekiq_job_duration_seconds", "Total time spent in sidekiq jobs.")

         @sidekiq_jobs_total =

data/lib/prometheus_exporter/server/sidekiq_process_collector.rb

@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Server
+  class SidekiqProcessCollector < PrometheusExporter::Server::TypeCollector
+    MAX_METRIC_AGE = 60
+
+    SIDEKIQ_PROCESS_GAUGES = {
+      'busy' => 'Number of running jobs',
+      'concurrency' => 'Maximum concurrency',
+    }.freeze
+
+    attr_reader :sidekiq_metrics, :gauges
+
+    def initialize
+      @sidekiq_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
+      @gauges = {}
+    end
+
+    def type
+      'sidekiq_process'
+    end
+
+    def metrics
+      SIDEKIQ_PROCESS_GAUGES.each_key { |name| gauges[name]&.reset! }
+
+      sidekiq_metrics.map do |metric|
+        labels = metric.fetch('labels', {})
+        SIDEKIQ_PROCESS_GAUGES.map do |name, help|
+          if (value = metric[name])
+            gauge = gauges[name] ||= PrometheusExporter::Metric::Gauge.new("sidekiq_process_#{name}", help)
+            gauge.observe(value, labels)
+          end
+        end
+      end
+
+      gauges.values
+    end
+
+    def collect(object)
+      @sidekiq_metrics << object["process"]
+    end
+  end
+end
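
`collect` stores only the `process` sub-hash of each message, so a payload from the new sidekiq process instrumentation is expected to look roughly like the sketch below; the label keys are illustrative, and only `busy`, `concurrency`, and `labels` are read by the collector:

```ruby
collector = PrometheusExporter::Server::SidekiqProcessCollector.new

collector.collect(
  "process" => {
    "busy"        => 2,
    "concurrency" => 10,
    "labels"      => { "hostname" => "worker-1", "queues" => "default,low" }  # illustrative labels
  }
)

collector.metrics
# => sidekiq_process_busy and sidekiq_process_concurrency gauges; they are reset!
#    on every scrape, so processes that stop reporting age out with the 60s TTL
#    instead of freezing at their last value.
```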

data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb

@@ -1,17 +1,17 @@
 # frozen_string_literal: true
 module PrometheusExporter::Server
   class SidekiqQueueCollector < TypeCollector
-    MAX_SIDEKIQ_METRIC_AGE = 60
+    MAX_METRIC_AGE = 60

     SIDEKIQ_QUEUE_GAUGES = {
-      'backlog_total' => 'Size of the sidekiq queue.',
+      'backlog' => 'Size of the sidekiq queue.',
       'latency_seconds' => 'Latency of the sidekiq queue.',
     }.freeze

     attr_reader :sidekiq_metrics, :gauges

     def initialize
-      @sidekiq_metrics = []
+      @sidekiq_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
       @gauges = {}
     end

@@ -20,6 +20,8 @@ module PrometheusExporter::Server
     end

     def metrics
+      SIDEKIQ_QUEUE_GAUGES.each_key { |name| gauges[name]&.reset! }
+
       sidekiq_metrics.map do |metric|
         labels = metric.fetch("labels", {})
         SIDEKIQ_QUEUE_GAUGES.map do |name, help|
@@ -34,12 +36,9 @@ module PrometheusExporter::Server
     end

     def collect(object)
-      now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
       object['queues'].each do |queue|
-        queue["created_at"] = now
         queue["labels"].merge!(object['custom_labels']) if object['custom_labels']
-        sidekiq_metrics.delete_if { |metric| metric['created_at'] + MAX_SIDEKIQ_METRIC_AGE < now }
-        sidekiq_metrics << queue
+        @sidekiq_metrics << queue
       end
     end
   end

data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb

@@ -0,0 +1,48 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Server
+  class SidekiqStatsCollector < TypeCollector
+    MAX_METRIC_AGE = 60
+
+    SIDEKIQ_STATS_GAUGES = {
+      'dead_size' => 'Size of dead the queue',
+      'enqueued' => 'Number of enqueued jobs',
+      'failed' => 'Number of failed jobs',
+      'processed' => 'Total number of processed jobs',
+      'processes_size' => 'Number of processes',
+      'retry_size' => 'Size of the retries queue',
+      'scheduled_size' => 'Size of the scheduled queue',
+      'workers_size' => 'Number of jobs actively being processed',
+    }.freeze
+
+    attr_reader :sidekiq_metrics, :gauges
+
+    def initialize
+      @sidekiq_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
+      @gauges = {}
+    end
+
+    def type
+      'sidekiq_stats'
+    end
+
+    def metrics
+      SIDEKIQ_STATS_GAUGES.each_key { |name| gauges[name]&.reset! }
+
+      sidekiq_metrics.map do |metric|
+        SIDEKIQ_STATS_GAUGES.map do |name, help|
+          if (value = metric['stats'][name])
+            gauge = gauges[name] ||= PrometheusExporter::Metric::Gauge.new("sidekiq_stats_#{name}", help)
+            gauge.observe(value)
+          end
+        end
+      end
+
+      gauges.values
+    end
+
+    def collect(object)
+      @sidekiq_metrics << object
+    end
+  end
+end
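
SidekiqStatsCollector reads a nested `stats` hash whose keys line up with what `Sidekiq::Stats` exposes, so a reported payload would look roughly like this (values invented):

```ruby
collector = PrometheusExporter::Server::SidekiqStatsCollector.new

collector.collect(
  "stats" => {
    "processed"      => 124_870,
    "failed"         => 53,
    "enqueued"       => 12,
    "retry_size"     => 4,
    "dead_size"      => 1,
    "scheduled_size" => 9,
    "processes_size" => 2,
    "workers_size"   => 5
  }
)

collector.metrics # => one sidekiq_stats_* gauge per key above, reset on every scrape
```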

data/lib/prometheus_exporter/server/type_collector.rb

@@ -1,5 +1,7 @@
 # frozen_string_literal: true

+require "prometheus_exporter/server/metrics_container"
+
 module PrometheusExporter::Server
   class TypeCollector
     def type

data/lib/prometheus_exporter/server/unicorn_collector.rb

@@ -2,47 +2,46 @@

 # custom type collector for prometheus_exporter for handling the metrics sent from
 # PrometheusExporter::Instrumentation::Unicorn
-class PrometheusExporter::Server::UnicornCollector < PrometheusExporter::Server::TypeCollector
-  MAX_UNICORN_METRIC_AGE = 60
-
-  UNICORN_GAUGES = {
-    workers_total: 'Number of unicorn workers.',
-    active_workers_total: 'Number of active unicorn workers',
-    request_backlog_total: 'Number of requests waiting to be processed by a unicorn worker.'
-  }.freeze
-
-  def initialize
-    @unicorn_metrics = []
-  end
+module PrometheusExporter::Server
+  class UnicornCollector < PrometheusExporter::Server::TypeCollector
+    MAX_METRIC_AGE = 60
+
+    UNICORN_GAUGES = {
+      workers: 'Number of unicorn workers.',
+      active_workers: 'Number of active unicorn workers',
+      request_backlog: 'Number of requests waiting to be processed by a unicorn worker.'
+    }.freeze
+
+    def initialize
+      @unicorn_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
+    end

-  def type
-    'unicorn'
-  end
+    def type
+      'unicorn'
+    end

-  def metrics
-    return [] if @unicorn_metrics.length.zero?
+    def metrics
+      return [] if @unicorn_metrics.length.zero?

-    metrics = {}
+      metrics = {}

-    @unicorn_metrics.map do |m|
-      labels = m["custom_labels"] || {}
+      @unicorn_metrics.map do |m|
+        labels = m["custom_labels"] || {}

-      UNICORN_GAUGES.map do |k, help|
-        k = k.to_s
-        if (v = m[k])
-          g = metrics[k] ||= PrometheusExporter::Metric::Gauge.new("unicorn_#{k}", help)
-          g.observe(v, labels)
+        UNICORN_GAUGES.map do |k, help|
+          k = k.to_s
+          if (v = m[k])
+            g = metrics[k] ||= PrometheusExporter::Metric::Gauge.new("unicorn_#{k}", help)
+            g.observe(v, labels)
+          end
         end
       end
-    end

-    metrics.values
-  end
+      metrics.values
+    end

-  def collect(obj)
-    now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-    obj["created_at"] = now
-    @unicorn_metrics.delete_if { |m| m['created_at'] + MAX_UNICORN_METRIC_AGE < now }
-    @unicorn_metrics << obj
+    def collect(obj)
+      @unicorn_metrics << obj
+    end
   end
 end