prometheus_exporter 2.0.8 → 2.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci.yml +51 -4
  3. data/.rubocop +1 -0
  4. data/.rubocop.yml +12 -1
  5. data/Appraisals +6 -2
  6. data/CHANGELOG +12 -0
  7. data/README.md +45 -8
  8. data/bin/prometheus_exporter +2 -2
  9. data/gemfiles/ar_70.gemfile +7 -0
  10. data/gemfiles/ar_71.gemfile +7 -0
  11. data/lib/prometheus_exporter/client.rb +34 -19
  12. data/lib/prometheus_exporter/instrumentation/delayed_job.rb +8 -5
  13. data/lib/prometheus_exporter/instrumentation/good_job.rb +30 -0
  14. data/lib/prometheus_exporter/instrumentation/method_profiler.rb +4 -4
  15. data/lib/prometheus_exporter/instrumentation/process.rb +2 -0
  16. data/lib/prometheus_exporter/instrumentation/sidekiq.rb +16 -14
  17. data/lib/prometheus_exporter/instrumentation.rb +1 -0
  18. data/lib/prometheus_exporter/metric/histogram.rb +1 -1
  19. data/lib/prometheus_exporter/middleware.rb +1 -1
  20. data/lib/prometheus_exporter/server/active_record_collector.rb +9 -13
  21. data/lib/prometheus_exporter/server/collector.rb +1 -0
  22. data/lib/prometheus_exporter/server/delayed_job_collector.rb +7 -1
  23. data/lib/prometheus_exporter/server/good_job_collector.rb +52 -0
  24. data/lib/prometheus_exporter/server/process_collector.rb +10 -13
  25. data/lib/prometheus_exporter/server/puma_collector.rb +1 -1
  26. data/lib/prometheus_exporter/server/resque_collector.rb +3 -7
  27. data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +2 -2
  28. data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb +2 -2
  29. data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb +2 -2
  30. data/lib/prometheus_exporter/server/unicorn_collector.rb +32 -33
  31. data/lib/prometheus_exporter/server/web_server.rb +65 -39
  32. data/lib/prometheus_exporter/server.rb +1 -0
  33. data/lib/prometheus_exporter/version.rb +1 -1
  34. data/lib/prometheus_exporter.rb +12 -13
  35. data/prometheus_exporter.gemspec +5 -7
  36. metadata +19 -16
  37. data/.github/workflows/docker.yml +0 -47
@@ -2,7 +2,8 @@
2
2
 
3
3
  module PrometheusExporter::Server
4
4
  class ActiveRecordCollector < TypeCollector
5
- MAX_ACTIVERECORD_METRIC_AGE = 60
5
+ MAX_METRIC_AGE = 60
6
+
6
7
  ACTIVE_RECORD_GAUGES = {
7
8
  connections: "Total connections in pool",
8
9
  busy: "Connections in use in pool",
@@ -13,7 +14,12 @@ module PrometheusExporter::Server
13
14
  }
14
15
 
15
16
  def initialize
16
- @active_record_metrics = []
17
+ @active_record_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
18
+ @active_record_metrics.filter = -> (new_metric, old_metric) do
19
+ new_metric["pid"] == old_metric["pid"] &&
20
+ new_metric["hostname"] == old_metric["hostname"] &&
21
+ new_metric["metric_labels"]["pool_name"] == old_metric["metric_labels"]["pool_name"]
22
+ end
17
23
  end
18
24
 
19
25
  def type
@@ -26,7 +32,7 @@ module PrometheusExporter::Server
26
32
  metrics = {}
27
33
 
28
34
  @active_record_metrics.map do |m|
29
- metric_key = (m["metric_labels"] || {}).merge("pid" => m["pid"])
35
+ metric_key = (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"])
30
36
  metric_key.merge!(m["custom_labels"]) if m["custom_labels"]
31
37
 
32
38
  ACTIVE_RECORD_GAUGES.map do |k, help|
@@ -42,16 +48,6 @@ module PrometheusExporter::Server
42
48
  end
43
49
 
44
50
  def collect(obj)
45
- now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
46
-
47
- obj["created_at"] = now
48
-
49
- @active_record_metrics.delete_if do |current|
50
- (obj["pid"] == current["pid"] && obj["hostname"] == current["hostname"] &&
51
- obj["metric_labels"]["pool_name"] == current["metric_labels"]["pool_name"]) ||
52
- (current["created_at"] + MAX_ACTIVERECORD_METRIC_AGE < now)
53
- end
54
-
55
51
  @active_record_metrics << obj
56
52
  end
57
53
  end
@@ -23,6 +23,7 @@ module PrometheusExporter::Server
23
23
  register_collector(ActiveRecordCollector.new)
24
24
  register_collector(ShoryukenCollector.new)
25
25
  register_collector(ResqueCollector.new)
26
+ register_collector(GoodJobCollector.new)
26
27
  end
27
28
 
28
29
  def register_collector(collector)
@@ -5,6 +5,7 @@ module PrometheusExporter::Server
5
5
  def initialize
6
6
  @delayed_jobs_total = nil
7
7
  @delayed_job_duration_seconds = nil
8
+ @delayed_job_latency_seconds_total = nil
8
9
  @delayed_jobs_total = nil
9
10
  @delayed_failed_jobs_total = nil
10
11
  @delayed_jobs_max_attempts_reached_total = nil
@@ -25,6 +26,7 @@ module PrometheusExporter::Server
25
26
 
26
27
  ensure_delayed_job_metrics
27
28
  @delayed_job_duration_seconds.observe(obj["duration"], counter_labels)
29
+ @delayed_job_latency_seconds_total.observe(obj["latency"], counter_labels)
28
30
  @delayed_jobs_total.observe(1, counter_labels)
29
31
  @delayed_failed_jobs_total.observe(1, counter_labels) if !obj["success"]
30
32
  @delayed_jobs_max_attempts_reached_total.observe(1, counter_labels) if obj["attempts"] >= obj["max_attempts"]
@@ -38,7 +40,7 @@ module PrometheusExporter::Server
38
40
 
39
41
  def metrics
40
42
  if @delayed_jobs_total
41
- [@delayed_job_duration_seconds, @delayed_jobs_total, @delayed_failed_jobs_total,
43
+ [@delayed_job_duration_seconds, @delayed_job_latency_seconds_total, @delayed_jobs_total, @delayed_failed_jobs_total,
42
44
  @delayed_jobs_max_attempts_reached_total, @delayed_job_duration_seconds_summary, @delayed_job_attempts_summary,
43
45
  @delayed_jobs_enqueued, @delayed_jobs_pending]
44
46
  else
@@ -55,6 +57,10 @@ module PrometheusExporter::Server
55
57
  PrometheusExporter::Metric::Counter.new(
56
58
  "delayed_job_duration_seconds", "Total time spent in delayed jobs.")
57
59
 
60
+ @delayed_job_latency_seconds_total =
61
+ PrometheusExporter::Metric::Counter.new(
62
+ "delayed_job_latency_seconds_total", "Total delayed jobs latency.")
63
+
58
64
  @delayed_jobs_total =
59
65
  PrometheusExporter::Metric::Counter.new(
60
66
  "delayed_jobs_total", "Total number of delayed jobs executed.")
@@ -0,0 +1,52 @@
1
+ # frozen_string_literal: true
2
+
3
+ module PrometheusExporter::Server
4
+ class GoodJobCollector < TypeCollector
5
+ MAX_METRIC_AGE = 30
6
+ GOOD_JOB_GAUGES = {
7
+ scheduled: "Total number of scheduled GoodJob jobs.",
8
+ retried: "Total number of retried GoodJob jobs.",
9
+ queued: "Total number of queued GoodJob jobs.",
10
+ running: "Total number of running GoodJob jobs.",
11
+ finished: "Total number of finished GoodJob jobs.",
12
+ succeeded: "Total number of succeeded GoodJob jobs.",
13
+ discarded: "Total number of discarded GoodJob jobs."
14
+ }
15
+
16
+ def initialize
17
+ @good_job_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
18
+ @gauges = {}
19
+ end
20
+
21
+ def type
22
+ "good_job"
23
+ end
24
+
25
+ def metrics
26
+ return [] if good_job_metrics.length == 0
27
+
28
+ good_job_metrics.map do |metric|
29
+ labels = metric.fetch("custom_labels", {})
30
+
31
+ GOOD_JOB_GAUGES.map do |name, help|
32
+ value = metric[name.to_s]
33
+
34
+ if value
35
+ gauge = gauges[name] ||= PrometheusExporter::Metric::Gauge.new("good_job_#{name}", help)
36
+ gauge.observe(value, labels)
37
+ end
38
+ end
39
+ end
40
+
41
+ gauges.values
42
+ end
43
+
44
+ def collect(object)
45
+ @good_job_metrics << object
46
+ end
47
+
48
+ private
49
+
50
+ attr_reader :good_job_metrics, :gauges
51
+ end
52
+ end
@@ -3,7 +3,8 @@
3
3
  module PrometheusExporter::Server
4
4
 
5
5
  class ProcessCollector < TypeCollector
6
- MAX_PROCESS_METRIC_AGE = 60
6
+ MAX_METRIC_AGE = 60
7
+
7
8
  PROCESS_GAUGES = {
8
9
  heap_free_slots: "Free ruby heap slots.",
9
10
  heap_live_slots: "Used ruby heap slots.",
@@ -12,6 +13,8 @@ module PrometheusExporter::Server
12
13
  v8_physical_size: "Physical size consumed by V8 heaps.",
13
14
  v8_heap_count: "Number of V8 contexts running.",
14
15
  rss: "Total RSS used by process.",
16
+ malloc_increase_bytes_limit: 'Limit before Ruby triggers a GC against current objects (bytes).',
17
+ oldmalloc_increase_bytes_limit: 'Limit before Ruby triggers a major GC against old objects (bytes).'
15
18
  }
16
19
 
17
20
  PROCESS_COUNTERS = {
@@ -21,7 +24,10 @@ module PrometheusExporter::Server
21
24
  }
22
25
 
23
26
  def initialize
24
- @process_metrics = []
27
+ @process_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
28
+ @process_metrics.filter = -> (new_metric, old_metric) do
29
+ new_metric["pid"] == old_metric["pid"] && new_metric["hostname"] == old_metric["hostname"]
30
+ end
25
31
  end
26
32
 
27
33
  def type
@@ -34,8 +40,8 @@ module PrometheusExporter::Server
34
40
  metrics = {}
35
41
 
36
42
  @process_metrics.map do |m|
37
- metric_key = m["metric_labels"].merge("pid" => m["pid"])
38
- metric_key.merge!(m["custom_labels"] || {})
43
+ metric_key = (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"])
44
+ metric_key.merge!(m["custom_labels"]) if m["custom_labels"]
39
45
 
40
46
  PROCESS_GAUGES.map do |k, help|
41
47
  k = k.to_s
@@ -58,15 +64,6 @@ module PrometheusExporter::Server
58
64
  end
59
65
 
60
66
  def collect(obj)
61
- now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
62
-
63
- obj["created_at"] = now
64
-
65
- @process_metrics.delete_if do |current|
66
- (obj["pid"] == current["pid"] && obj["hostname"] == current["hostname"]) ||
67
- (current["created_at"] + MAX_PROCESS_METRIC_AGE < now)
68
- end
69
-
70
67
  @process_metrics << obj
71
68
  end
72
69
  end
@@ -14,7 +14,7 @@ module PrometheusExporter::Server
14
14
  }
15
15
 
16
16
  def initialize
17
- @puma_metrics = MetricsContainer.new
17
+ @puma_metrics = MetricsContainer.new(ttl: MAX_PUMA_METRIC_AGE)
18
18
  @puma_metrics.filter = -> (new_metric, old_metric) do
19
19
  new_metric["pid"] == old_metric["pid"] && new_metric["hostname"] == old_metric["hostname"]
20
20
  end
@@ -2,7 +2,7 @@
2
2
 
3
3
  module PrometheusExporter::Server
4
4
  class ResqueCollector < TypeCollector
5
- MAX_RESQUE_METRIC_AGE = 30
5
+ MAX_METRIC_AGE = 30
6
6
  RESQUE_GAUGES = {
7
7
  processed_jobs: "Total number of processed Resque jobs.",
8
8
  failed_jobs: "Total number of failed Resque jobs.",
@@ -13,7 +13,7 @@ module PrometheusExporter::Server
13
13
  }
14
14
 
15
15
  def initialize
16
- @resque_metrics = []
16
+ @resque_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
17
17
  @gauges = {}
18
18
  end
19
19
 
@@ -40,11 +40,7 @@ module PrometheusExporter::Server
40
40
  end
41
41
 
42
42
  def collect(object)
43
- now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
44
-
45
- object["created_at"] = now
46
- resque_metrics.delete_if { |metric| metric["created_at"] + MAX_RESQUE_METRIC_AGE < now }
47
- resque_metrics << object
43
+ @resque_metrics << object
48
44
  end
49
45
 
50
46
  private
@@ -2,7 +2,7 @@
2
2
 
3
3
  module PrometheusExporter::Server
4
4
  class SidekiqProcessCollector < PrometheusExporter::Server::TypeCollector
5
- MAX_SIDEKIQ_METRIC_AGE = 60
5
+ MAX_METRIC_AGE = 60
6
6
 
7
7
  SIDEKIQ_PROCESS_GAUGES = {
8
8
  'busy' => 'Number of running jobs',
@@ -12,7 +12,7 @@ module PrometheusExporter::Server
12
12
  attr_reader :sidekiq_metrics, :gauges
13
13
 
14
14
  def initialize
15
- @sidekiq_metrics = MetricsContainer.new(ttl: MAX_SIDEKIQ_METRIC_AGE)
15
+ @sidekiq_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
16
16
  @gauges = {}
17
17
  end
18
18
 
@@ -1,7 +1,7 @@
1
1
  # frozen_string_literal: true
2
2
  module PrometheusExporter::Server
3
3
  class SidekiqQueueCollector < TypeCollector
4
- MAX_SIDEKIQ_METRIC_AGE = 60
4
+ MAX_METRIC_AGE = 60
5
5
 
6
6
  SIDEKIQ_QUEUE_GAUGES = {
7
7
  'backlog' => 'Size of the sidekiq queue.',
@@ -11,7 +11,7 @@ module PrometheusExporter::Server
11
11
  attr_reader :sidekiq_metrics, :gauges
12
12
 
13
13
  def initialize
14
- @sidekiq_metrics = MetricsContainer.new
14
+ @sidekiq_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
15
15
  @gauges = {}
16
16
  end
17
17
 
@@ -2,7 +2,7 @@
2
2
 
3
3
  module PrometheusExporter::Server
4
4
  class SidekiqStatsCollector < TypeCollector
5
- MAX_SIDEKIQ_METRIC_AGE = 60
5
+ MAX_METRIC_AGE = 60
6
6
 
7
7
  SIDEKIQ_STATS_GAUGES = {
8
8
  'dead_size' => 'Size of dead the queue',
@@ -18,7 +18,7 @@ module PrometheusExporter::Server
18
18
  attr_reader :sidekiq_metrics, :gauges
19
19
 
20
20
  def initialize
21
- @sidekiq_metrics = MetricsContainer.new(ttl: MAX_SIDEKIQ_METRIC_AGE)
21
+ @sidekiq_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
22
22
  @gauges = {}
23
23
  end
24
24
 
@@ -2,47 +2,46 @@
2
2
 
3
3
  # custom type collector for prometheus_exporter for handling the metrics sent from
4
4
  # PrometheusExporter::Instrumentation::Unicorn
5
- class PrometheusExporter::Server::UnicornCollector < PrometheusExporter::Server::TypeCollector
6
- MAX_UNICORN_METRIC_AGE = 60
7
-
8
- UNICORN_GAUGES = {
9
- workers: 'Number of unicorn workers.',
10
- active_workers: 'Number of active unicorn workers',
11
- request_backlog: 'Number of requests waiting to be processed by a unicorn worker.'
12
- }.freeze
13
-
14
- def initialize
15
- @unicorn_metrics = []
16
- end
5
+ module PrometheusExporter::Server
6
+ class UnicornCollector < PrometheusExporter::Server::TypeCollector
7
+ MAX_METRIC_AGE = 60
8
+
9
+ UNICORN_GAUGES = {
10
+ workers: 'Number of unicorn workers.',
11
+ active_workers: 'Number of active unicorn workers',
12
+ request_backlog: 'Number of requests waiting to be processed by a unicorn worker.'
13
+ }.freeze
14
+
15
+ def initialize
16
+ @unicorn_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
17
+ end
17
18
 
18
- def type
19
- 'unicorn'
20
- end
19
+ def type
20
+ 'unicorn'
21
+ end
21
22
 
22
- def metrics
23
- return [] if @unicorn_metrics.length.zero?
23
+ def metrics
24
+ return [] if @unicorn_metrics.length.zero?
24
25
 
25
- metrics = {}
26
+ metrics = {}
26
27
 
27
- @unicorn_metrics.map do |m|
28
- labels = m["custom_labels"] || {}
28
+ @unicorn_metrics.map do |m|
29
+ labels = m["custom_labels"] || {}
29
30
 
30
- UNICORN_GAUGES.map do |k, help|
31
- k = k.to_s
32
- if (v = m[k])
33
- g = metrics[k] ||= PrometheusExporter::Metric::Gauge.new("unicorn_#{k}", help)
34
- g.observe(v, labels)
31
+ UNICORN_GAUGES.map do |k, help|
32
+ k = k.to_s
33
+ if (v = m[k])
34
+ g = metrics[k] ||= PrometheusExporter::Metric::Gauge.new("unicorn_#{k}", help)
35
+ g.observe(v, labels)
36
+ end
35
37
  end
36
38
  end
37
- end
38
39
 
39
- metrics.values
40
- end
40
+ metrics.values
41
+ end
41
42
 
42
- def collect(obj)
43
- now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
44
- obj["created_at"] = now
45
- @unicorn_metrics.delete_if { |m| m['created_at'] + MAX_UNICORN_METRIC_AGE < now }
46
- @unicorn_metrics << obj
43
+ def collect(obj)
44
+ @unicorn_metrics << obj
45
+ end
47
46
  end
48
47
  end
@@ -1,9 +1,9 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require 'webrick'
4
- require 'timeout'
5
- require 'zlib'
6
- require 'stringio'
3
+ require "webrick"
4
+ require "timeout"
5
+ require "zlib"
6
+ require "stringio"
7
7
 
8
8
  module PrometheusExporter::Server
9
9
  class WebServer
@@ -18,11 +18,23 @@ module PrometheusExporter::Server
18
18
  @auth = opts[:auth]
19
19
  @realm = opts[:realm] || PrometheusExporter::DEFAULT_REALM
20
20
 
21
- @metrics_total = PrometheusExporter::Metric::Counter.new("collector_metrics_total", "Total metrics processed by exporter web.")
21
+ @metrics_total =
22
+ PrometheusExporter::Metric::Counter.new(
23
+ "collector_metrics_total",
24
+ "Total metrics processed by exporter web."
25
+ )
22
26
 
23
- @sessions_total = PrometheusExporter::Metric::Counter.new("collector_sessions_total", "Total send_metric sessions processed by exporter web.")
27
+ @sessions_total =
28
+ PrometheusExporter::Metric::Counter.new(
29
+ "collector_sessions_total",
30
+ "Total send_metric sessions processed by exporter web."
31
+ )
24
32
 
25
- @bad_metrics_total = PrometheusExporter::Metric::Counter.new("collector_bad_metrics_total", "Total mis-handled metrics by collector.")
33
+ @bad_metrics_total =
34
+ PrometheusExporter::Metric::Counter.new(
35
+ "collector_bad_metrics_total",
36
+ "Total mis-handled metrics by collector."
37
+ )
26
38
 
27
39
  @metrics_total.observe(0)
28
40
  @sessions_total.observe(0)
@@ -34,7 +46,7 @@ module PrometheusExporter::Server
34
46
  if @verbose
35
47
  @access_log = [
36
48
  [$stderr, WEBrick::AccessLog::COMMON_LOG_FORMAT],
37
- [$stderr, WEBrick::AccessLog::REFERER_LOG_FORMAT],
49
+ [$stderr, WEBrick::AccessLog::REFERER_LOG_FORMAT]
38
50
  ]
39
51
  @logger = WEBrick::Log.new(log_target || $stderr)
40
52
  else
@@ -42,23 +54,26 @@ module PrometheusExporter::Server
42
54
  @logger = WEBrick::Log.new(log_target || "/dev/null")
43
55
  end
44
56
 
45
- @logger.info "Using Basic Authentication via #{@auth}" if @verbose && @auth
57
+ if @verbose && @auth
58
+ @logger.info "Using Basic Authentication via #{@auth}"
59
+ end
46
60
 
47
- if %w(ALL ANY).include?(@bind)
61
+ if %w[ALL ANY].include?(@bind)
48
62
  @logger.info "Listening on both 0.0.0.0/:: network interfaces"
49
63
  @bind = nil
50
64
  end
51
65
 
52
- @server = WEBrick::HTTPServer.new(
53
- Port: @port,
54
- BindAddress: @bind,
55
- Logger: @logger,
56
- AccessLog: @access_log,
57
- )
58
-
59
- @server.mount_proc '/' do |req, res|
60
- res['Content-Type'] = 'text/plain; charset=utf-8'
61
- if req.path == '/metrics'
66
+ @server =
67
+ WEBrick::HTTPServer.new(
68
+ Port: @port,
69
+ BindAddress: @bind,
70
+ Logger: @logger,
71
+ AccessLog: @access_log
72
+ )
73
+
74
+ @server.mount_proc "/" do |req, res|
75
+ res["Content-Type"] = "text/plain; charset=utf-8"
76
+ if req.path == "/metrics"
62
77
  authenticate(req, res) if @auth
63
78
 
64
79
  res.status = 200
@@ -76,13 +91,14 @@ module PrometheusExporter::Server
76
91
  else
77
92
  res.body = metrics
78
93
  end
79
- elsif req.path == '/send-metrics'
94
+ elsif req.path == "/send-metrics"
80
95
  handle_metrics(req, res)
81
- elsif req.path == '/ping'
82
- res.body = 'PONG'
96
+ elsif req.path == "/ping"
97
+ res.body = "PONG"
83
98
  else
84
99
  res.status = 404
85
- res.body = "Not Found! The Prometheus Ruby Exporter only listens on /ping, /metrics and /send-metrics"
100
+ res.body =
101
+ "Not Found! The Prometheus Ruby Exporter only listens on /ping, /metrics and /send-metrics"
86
102
  end
87
103
  end
88
104
  end
@@ -94,13 +110,11 @@ module PrometheusExporter::Server
94
110
  @metrics_total.observe
95
111
  @collector.process(block)
96
112
  rescue => e
97
- if @verbose
98
- @logger.error "\n\n#{e.inspect}\n#{e.backtrace}\n\n"
99
- end
113
+ @logger.error "\n\n#{e.inspect}\n#{e.backtrace}\n\n" if @verbose
100
114
  @bad_metrics_total.observe
101
115
  res.body = "Bad Metrics #{e}"
102
116
  res.status = e.respond_to?(:status_code) ? e.status_code : 500
103
- return
117
+ break
104
118
  end
105
119
  end
106
120
 
@@ -109,13 +123,14 @@ module PrometheusExporter::Server
109
123
  end
110
124
 
111
125
  def start
112
- @runner ||= Thread.start do
113
- begin
114
- @server.start
115
- rescue => e
116
- @logger.error "Failed to start prometheus collector web on port #{@port}: #{e}"
126
+ @runner ||=
127
+ Thread.start do
128
+ begin
129
+ @server.start
130
+ rescue => e
131
+ @logger.error "Failed to start prometheus collector web on port #{@port}: #{e}"
132
+ end
117
133
  end
118
- end
119
134
  end
120
135
 
121
136
  def stop
@@ -125,7 +140,7 @@ module PrometheusExporter::Server
125
140
  def metrics
126
141
  metric_text = nil
127
142
  begin
128
- Timeout::timeout(@timeout) do
143
+ Timeout.timeout(@timeout) do
129
144
  metric_text = @collector.prometheus_metrics_text
130
145
  end
131
146
  rescue Timeout::Error
@@ -158,9 +173,18 @@ module PrometheusExporter::Server
158
173
  end
159
174
 
160
175
  def get_rss
161
- @pagesize ||= `getconf PAGESIZE`.to_i rescue 4096
176
+ @pagesize ||=
177
+ begin
178
+ `getconf PAGESIZE`.to_i
179
+ rescue StandardError
180
+ 4096
181
+ end
162
182
  @pid ||= Process.pid
163
- File.read("/proc/#{@pid}/statm").split(' ')[1].to_i * @pagesize rescue 0
183
+ begin
184
+ File.read("/proc/#{@pid}/statm").split(" ")[1].to_i * @pagesize
185
+ rescue StandardError
186
+ 0
187
+ end
164
188
  end
165
189
 
166
190
  def add_gauge(name, help, value)
@@ -171,10 +195,12 @@ module PrometheusExporter::Server
171
195
 
172
196
  def authenticate(req, res)
173
197
  htpasswd = WEBrick::HTTPAuth::Htpasswd.new(@auth)
174
- basic_auth = WEBrick::HTTPAuth::BasicAuth.new({ Realm: @realm, UserDB: htpasswd, Logger: @logger })
198
+ basic_auth =
199
+ WEBrick::HTTPAuth::BasicAuth.new(
200
+ { Realm: @realm, UserDB: htpasswd, Logger: @logger }
201
+ )
175
202
 
176
203
  basic_auth.authenticate(req, res)
177
204
  end
178
-
179
205
  end
180
206
  end
@@ -19,3 +19,4 @@ require_relative "server/unicorn_collector"
19
19
  require_relative "server/active_record_collector"
20
20
  require_relative "server/shoryuken_collector"
21
21
  require_relative "server/resque_collector"
22
+ require_relative "server/good_job_collector"
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module PrometheusExporter
4
- VERSION = "2.0.8"
4
+ VERSION = "2.1.1"
5
5
  end
@@ -2,21 +2,21 @@
2
2
 
3
3
  require_relative "prometheus_exporter/version"
4
4
  require "json"
5
- require "thread"
6
5
 
7
6
  module PrometheusExporter
8
7
  # per: https://github.com/prometheus/prometheus/wiki/Default-port-allocations
9
8
  DEFAULT_PORT = 9394
10
- DEFAULT_BIND_ADDRESS = 'localhost'
11
- DEFAULT_PREFIX = 'ruby_'
9
+ DEFAULT_BIND_ADDRESS = "localhost"
10
+ DEFAULT_PREFIX = "ruby_"
12
11
  DEFAULT_LABEL = {}
13
12
  DEFAULT_TIMEOUT = 2
14
- DEFAULT_REALM = 'Prometheus Exporter'
13
+ DEFAULT_REALM = "Prometheus Exporter"
15
14
 
16
15
  class OjCompat
17
16
  def self.parse(obj)
18
17
  Oj.compat_load(obj)
19
18
  end
19
+
20
20
  def self.dump(obj)
21
21
  Oj.dump(obj, mode: :compat)
22
22
  end
@@ -25,7 +25,7 @@ module PrometheusExporter
25
25
  def self.hostname
26
26
  @hostname ||=
27
27
  begin
28
- require 'socket'
28
+ require "socket"
29
29
  Socket.gethostname
30
30
  rescue => e
31
31
  STDERR.puts "Unable to lookup hostname #{e}"
@@ -45,13 +45,12 @@ module PrometheusExporter
45
45
  def self.has_oj?
46
46
  (
47
47
  @@has_oj ||=
48
- begin
49
- require 'oj'
50
- :true
51
- rescue LoadError
52
- :false
53
- end
54
- ) == :true
48
+ begin
49
+ require "oj"
50
+ :T
51
+ rescue LoadError
52
+ :F
53
+ end
54
+ ) == :T
55
55
  end
56
-
57
56
  end
@@ -15,8 +15,6 @@ Gem::Specification.new do |spec|
15
15
  spec.homepage = "https://github.com/discourse/prometheus_exporter"
16
16
  spec.license = "MIT"
17
17
 
18
- spec.post_install_message = "prometheus_exporter will only bind to localhost by default as of v0.5"
19
-
20
18
  spec.files = `git ls-files -z`.split("\x0").reject do |f|
21
19
  f.match(%r{^(test|spec|features|bin)/})
22
20
  end
@@ -29,14 +27,14 @@ Gem::Specification.new do |spec|
29
27
  spec.add_development_dependency "rubocop", ">= 0.69"
30
28
  spec.add_development_dependency "bundler", ">= 2.1.4"
31
29
  spec.add_development_dependency "rake", "~> 13.0"
32
- spec.add_development_dependency "minitest", "~> 5.15.0" # https://github.com/qrush/m/issues/93
30
+ spec.add_development_dependency "minitest", "~> 5.23.0"
33
31
  spec.add_development_dependency "guard", "~> 2.0"
34
- spec.add_development_dependency "mini_racer", "~> 0.5.0"
32
+ spec.add_development_dependency "mini_racer", "~> 0.12.0"
35
33
  spec.add_development_dependency "guard-minitest", "~> 2.0"
36
34
  spec.add_development_dependency "oj", "~> 3.0"
37
- spec.add_development_dependency "rack-test", "~> 0.8.3"
35
+ spec.add_development_dependency "rack-test", "~> 2.1.0"
38
36
  spec.add_development_dependency "minitest-stub-const", "~> 0.6"
39
- spec.add_development_dependency "rubocop-discourse", ">2"
37
+ spec.add_development_dependency "rubocop-discourse", ">= 3"
40
38
  spec.add_development_dependency "appraisal", "~> 2.3"
41
39
  spec.add_development_dependency "activerecord", "~> 6.0.0"
42
40
  spec.add_development_dependency "redis", "> 5"
@@ -44,5 +42,5 @@ Gem::Specification.new do |spec|
44
42
  if !RUBY_ENGINE == 'jruby'
45
43
  spec.add_development_dependency "raindrops", "~> 0.19"
46
44
  end
47
- spec.required_ruby_version = '>= 2.6.0'
45
+ spec.required_ruby_version = '>= 3.0.0'
48
46
  end