prometheus_exporter 0.7.0 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65):
  1. checksums.yaml +4 -4
  2. data/CHANGELOG +298 -35
  3. data/README.md +276 -53
  4. data/{bin → exe}/prometheus_exporter +20 -7
  5. data/lib/prometheus_exporter/client.rb +41 -32
  6. data/lib/prometheus_exporter/instrumentation/active_record.rb +29 -35
  7. data/lib/prometheus_exporter/instrumentation/delayed_job.rb +28 -13
  8. data/lib/prometheus_exporter/instrumentation/good_job.rb +28 -0
  9. data/lib/prometheus_exporter/instrumentation/hutch.rb +1 -1
  10. data/lib/prometheus_exporter/instrumentation/method_profiler.rb +67 -27
  11. data/lib/prometheus_exporter/instrumentation/periodic_stats.rb +54 -0
  12. data/lib/prometheus_exporter/instrumentation/process.rb +25 -27
  13. data/lib/prometheus_exporter/instrumentation/puma.rb +36 -27
  14. data/lib/prometheus_exporter/instrumentation/resque.rb +33 -0
  15. data/lib/prometheus_exporter/instrumentation/shoryuken.rb +6 -7
  16. data/lib/prometheus_exporter/instrumentation/sidekiq.rb +51 -23
  17. data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb +45 -0
  18. data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb +38 -33
  19. data/lib/prometheus_exporter/instrumentation/sidekiq_stats.rb +32 -0
  20. data/lib/prometheus_exporter/instrumentation/unicorn.rb +12 -17
  21. data/lib/prometheus_exporter/instrumentation.rb +5 -0
  22. data/lib/prometheus_exporter/metric/base.rb +20 -17
  23. data/lib/prometheus_exporter/metric/counter.rb +1 -3
  24. data/lib/prometheus_exporter/metric/gauge.rb +6 -6
  25. data/lib/prometheus_exporter/metric/histogram.rb +15 -5
  26. data/lib/prometheus_exporter/metric/summary.rb +5 -14
  27. data/lib/prometheus_exporter/middleware.rb +72 -38
  28. data/lib/prometheus_exporter/server/active_record_collector.rb +16 -14
  29. data/lib/prometheus_exporter/server/collector.rb +29 -17
  30. data/lib/prometheus_exporter/server/collector_base.rb +0 -2
  31. data/lib/prometheus_exporter/server/delayed_job_collector.rb +76 -33
  32. data/lib/prometheus_exporter/server/good_job_collector.rb +52 -0
  33. data/lib/prometheus_exporter/server/hutch_collector.rb +19 -11
  34. data/lib/prometheus_exporter/server/metrics_container.rb +66 -0
  35. data/lib/prometheus_exporter/server/process_collector.rb +15 -14
  36. data/lib/prometheus_exporter/server/puma_collector.rb +21 -18
  37. data/lib/prometheus_exporter/server/resque_collector.rb +50 -0
  38. data/lib/prometheus_exporter/server/runner.rb +49 -13
  39. data/lib/prometheus_exporter/server/shoryuken_collector.rb +22 -17
  40. data/lib/prometheus_exporter/server/sidekiq_collector.rb +22 -14
  41. data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +47 -0
  42. data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb +12 -12
  43. data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb +49 -0
  44. data/lib/prometheus_exporter/server/type_collector.rb +2 -0
  45. data/lib/prometheus_exporter/server/unicorn_collector.rb +32 -33
  46. data/lib/prometheus_exporter/server/web_collector.rb +48 -31
  47. data/lib/prometheus_exporter/server/web_server.rb +70 -48
  48. data/lib/prometheus_exporter/server.rb +4 -0
  49. data/lib/prometheus_exporter/version.rb +1 -1
  50. data/lib/prometheus_exporter.rb +12 -13
  51. metadata +19 -206
  52. data/.github/workflows/ci.yml +0 -42
  53. data/.gitignore +0 -13
  54. data/.rubocop.yml +0 -7
  55. data/Appraisals +0 -10
  56. data/CODE_OF_CONDUCT.md +0 -74
  57. data/Gemfile +0 -8
  58. data/Guardfile +0 -8
  59. data/Rakefile +0 -12
  60. data/bench/bench.rb +0 -45
  61. data/examples/custom_collector.rb +0 -27
  62. data/gemfiles/.bundle/config +0 -2
  63. data/gemfiles/ar_60.gemfile +0 -5
  64. data/gemfiles/ar_61.gemfile +0 -7
  65. data/prometheus_exporter.gemspec +0 -46
@@ -2,12 +2,23 @@
2
2
 
3
3
  module PrometheusExporter::Metric
4
4
  class Histogram < Base
5
-
6
5
  DEFAULT_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5.0, 10.0].freeze
7
6
 
7
+ @default_buckets = nil if !defined?(@default_buckets)
8
+
9
+ def self.default_buckets
10
+ @default_buckets || DEFAULT_BUCKETS
11
+ end
12
+
13
+ def self.default_buckets=(buckets)
14
+ @default_buckets = buckets
15
+ end
16
+
17
+ attr_reader :buckets
18
+
8
19
  def initialize(name, help, opts = {})
9
20
  super(name, help)
10
- @buckets = (opts[:buckets] || DEFAULT_BUCKETS).sort.reverse
21
+ @buckets = (opts[:buckets] || self.class.default_buckets).sort
11
22
  reset!
12
23
  end
13
24
 
@@ -45,11 +56,11 @@ module PrometheusExporter::Metric
45
56
  first = false
46
57
  count = @counts[labels]
47
58
  sum = @sums[labels]
48
- text << "#{prefix(@name)}_bucket#{labels_text(with_bucket(labels, "+Inf"))} #{count}\n"
49
59
  @buckets.each do |bucket|
50
60
  value = @observations[labels][bucket]
51
61
  text << "#{prefix(@name)}_bucket#{labels_text(with_bucket(labels, bucket.to_s))} #{value}\n"
52
62
  end
63
+ text << "#{prefix(@name)}_bucket#{labels_text(with_bucket(labels, "+Inf"))} #{count}\n"
53
64
  text << "#{prefix(@name)}_count#{labels_text(labels)} #{count}\n"
54
65
  text << "#{prefix(@name)}_sum#{labels_text(labels)} #{sum}"
55
66
  end
@@ -79,7 +90,7 @@ module PrometheusExporter::Metric
79
90
  end
80
91
 
81
92
  def fill_buckets(value, buckets)
82
- @buckets.each do |b|
93
+ @buckets.reverse_each do |b|
83
94
  break if value > b
84
95
  buckets[b] += 1
85
96
  end
@@ -88,6 +99,5 @@ module PrometheusExporter::Metric
88
99
  def with_bucket(labels, bucket)
89
100
  labels.merge("le" => bucket)
90
101
  end
91
-
92
102
  end
93
103
  end
@@ -2,7 +2,6 @@
2
2
 
3
3
  module PrometheusExporter::Metric
4
4
  class Summary < Base
5
-
6
5
  DEFAULT_QUANTILES = [0.99, 0.9, 0.5, 0.1, 0.01]
7
6
  ROTATE_AGE = 120
8
7
 
@@ -49,9 +48,7 @@ module PrometheusExporter::Metric
49
48
  result = {}
50
49
 
51
50
  if length > 0
52
- @quantiles.each do |quantile|
53
- result[quantile] = sorted[(length * quantile).ceil - 1]
54
- end
51
+ @quantiles.each { |quantile| result[quantile] = sorted[(length * quantile).ceil - 1] }
55
52
  end
56
53
 
57
54
  result
@@ -61,12 +58,9 @@ module PrometheusExporter::Metric
61
58
  buffer = @buffers[@current_buffer]
62
59
 
63
60
  result = {}
64
- buffer.each do |labels, raw_data|
65
- result[labels] = calculate_quantiles(raw_data)
66
- end
61
+ buffer.each { |labels, raw_data| result[labels] = calculate_quantiles(raw_data) }
67
62
 
68
63
  result
69
-
70
64
  end
71
65
 
72
66
  def metric_text
@@ -87,8 +81,8 @@ module PrometheusExporter::Metric
87
81
 
88
82
  # makes sure we have storage
89
83
  def ensure_summary(labels)
90
- @buffers[0][labels] ||= []
91
- @buffers[1][labels] ||= []
84
+ @buffers[0][labels] ||= []
85
+ @buffers[1][labels] ||= []
92
86
  @sums[labels] ||= 0.0
93
87
  @counts[labels] ||= 0
94
88
  nil
@@ -97,9 +91,7 @@ module PrometheusExporter::Metric
97
91
  def rotate_if_needed
98
92
  if (now = Process.clock_gettime(Process::CLOCK_MONOTONIC)) > (@last_rotated + ROTATE_AGE)
99
93
  @last_rotated = now
100
- @buffers[@current_buffer].each do |labels, raw|
101
- raw.clear
102
- end
94
+ @buffers[@current_buffer].each { |labels, raw| raw.clear }
103
95
  @current_buffer = @current_buffer == 0 ? 1 : 0
104
96
  end
105
97
  nil
@@ -116,6 +108,5 @@ module PrometheusExporter::Metric
116
108
  @sums[labels] += value
117
109
  @counts[labels] += 1
118
110
  end
119
-
120
111
  end
121
112
  end
@@ -1,28 +1,51 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require 'prometheus_exporter/instrumentation/method_profiler'
4
- require 'prometheus_exporter/client'
3
+ require "prometheus_exporter/instrumentation/method_profiler"
4
+ require "prometheus_exporter/client"
5
5
 
6
6
  class PrometheusExporter::Middleware
7
7
  MethodProfiler = PrometheusExporter::Instrumentation::MethodProfiler
8
8
 
9
- def initialize(app, config = { instrument: true, client: nil })
9
+ def initialize(app, config = { instrument: :alias_method, client: nil })
10
10
  @app = app
11
11
  @client = config[:client] || PrometheusExporter::Client.default
12
12
 
13
13
  if config[:instrument]
14
- if defined? Redis::Client
15
- MethodProfiler.patch(Redis::Client, [:call, :call_pipeline], :redis)
14
+ apply_redis_client_middleware! if defined?(RedisClient)
15
+
16
+ if defined?(Redis::VERSION) && (Gem::Version.new(Redis::VERSION) >= Gem::Version.new("5.0.0"))
17
+ # redis 5 support handled via RedisClient
18
+ elsif defined?(Redis::Client)
19
+ MethodProfiler.patch(
20
+ Redis::Client,
21
+ %i[call call_pipeline],
22
+ :redis,
23
+ instrument: config[:instrument],
24
+ )
16
25
  end
17
- if defined? PG::Connection
18
- MethodProfiler.patch(PG::Connection, [
19
- :exec, :async_exec, :exec_prepared, :send_query_prepared, :query
20
- ], :sql)
26
+
27
+ if defined?(PG::Connection)
28
+ MethodProfiler.patch(
29
+ PG::Connection,
30
+ %i[exec async_exec exec_prepared exec_params send_query_prepared query],
31
+ :sql,
32
+ instrument: config[:instrument],
33
+ )
34
+ end
35
+
36
+ if defined?(Mysql2::Client)
37
+ MethodProfiler.patch(Mysql2::Client, [:query], :sql, instrument: config[:instrument])
38
+ MethodProfiler.patch(Mysql2::Statement, [:execute], :sql, instrument: config[:instrument])
39
+ MethodProfiler.patch(Mysql2::Result, [:each], :sql, instrument: config[:instrument])
21
40
  end
22
- if defined? Mysql2::Client
23
- MethodProfiler.patch(Mysql2::Client, [:query], :sql)
24
- MethodProfiler.patch(Mysql2::Statement, [:execute], :sql)
25
- MethodProfiler.patch(Mysql2::Result, [:each], :sql)
41
+
42
+ if defined?(Dalli::Client)
43
+ MethodProfiler.patch(
44
+ Dalli::Client,
45
+ %i[delete fetch get add set],
46
+ :memcache,
47
+ instrument: config[:instrument],
48
+ )
26
49
  end
27
50
  end
28
51
  end
@@ -36,35 +59,34 @@ class PrometheusExporter::Middleware
36
59
 
37
60
  result
38
61
  ensure
39
-
62
+ status = (result && result[0]) || -1
40
63
  obj = {
41
64
  type: "web",
42
65
  timings: info,
43
66
  queue_time: queue_time,
44
- default_labels: default_labels(env, result)
67
+ status: status,
68
+ default_labels: default_labels(env, result),
45
69
  }
46
70
  labels = custom_labels(env)
47
- if labels
48
- obj = obj.merge(custom_labels: labels)
49
- end
71
+ obj = obj.merge(custom_labels: labels) if labels
50
72
 
51
73
  @client.send_json(obj)
52
74
  end
53
75
 
54
76
  def default_labels(env, result)
55
- status = (result && result[0]) || -1
56
- params = env["action_dispatch.request.parameters"]
77
+ controller_instance = env["action_controller.instance"]
57
78
  action = controller = nil
58
- if params
59
- action = params["action"]
60
- controller = params["controller"]
79
+ if controller_instance
80
+ action = controller_instance.action_name
81
+ controller = controller_instance.controller_name
82
+ elsif (cors = env["rack.cors"]) && cors.respond_to?(:preflight?) && cors.preflight?
83
+ # if the Rack CORS Middleware identifies the request as a preflight request,
84
+ # the stack doesn't get to the point where controllers/actions are defined
85
+ action = "preflight"
86
+ controller = "preflight"
61
87
  end
62
88
 
63
- {
64
- action: action || "other",
65
- controller: controller || "other",
66
- status: status
67
- }
89
+ { action: action || "other", controller: controller || "other" }
68
90
  end
69
91
 
70
92
  # allows subclasses to add custom labels based on env
@@ -90,19 +112,31 @@ class PrometheusExporter::Middleware
90
112
  Process.clock_gettime(Process::CLOCK_REALTIME)
91
113
  end
92
114
 
93
- # get the content of the x-queue-start or x-request-start header
115
+ # determine queue start from well-known trace headers
94
116
  def queue_start(env)
95
- value = env['HTTP_X_REQUEST_START'] || env['HTTP_X_QUEUE_START']
96
- unless value.nil? || value == ''
97
- convert_header_to_ms(value.to_s)
117
+ # get the content of the x-queue-start or x-request-start header
118
+ value = env["HTTP_X_REQUEST_START"] || env["HTTP_X_QUEUE_START"]
119
+ unless value.nil? || value == ""
120
+ # nginx returns time as milliseconds with 3 decimal places
121
+ # apache returns time as microseconds without decimal places
122
+ # this method takes care to convert both into a proper second + fractions timestamp
123
+ value = value.to_s.gsub(/t=|\./, "")
124
+ return "#{value[0, 10]}.#{value[10, 13]}".to_f
98
125
  end
126
+
127
+ # get the content of the x-amzn-trace-id header
128
+ # see also: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-request-tracing.html
129
+ value = env["HTTP_X_AMZN_TRACE_ID"]
130
+ value&.split("Root=")&.last&.split("-")&.fetch(1)&.to_i(16)
131
+ end
132
+
133
+ private
134
+
135
+ module RedisInstrumenter
136
+ MethodProfiler.define_methods_on_module(self, %w[call call_pipelined], "redis")
99
137
  end
100
138
 
101
- # nginx returns time as milliseconds with 3 decimal places
102
- # apache returns time as microseconds without decimal places
103
- # this method takes care to convert both into a proper second + fractions timestamp
104
- def convert_header_to_ms(str)
105
- str = str.gsub(/t=|\./, '')
106
- "#{str[0, 10]}.#{str[10, 13]}".to_f
139
+ def apply_redis_client_middleware!
140
+ RedisClient.register(RedisInstrumenter)
107
141
  end
108
142
  end
@@ -2,18 +2,24 @@
2
2
 
3
3
  module PrometheusExporter::Server
4
4
  class ActiveRecordCollector < TypeCollector
5
- MAX_ACTIVERECORD_METRIC_AGE = 60
5
+ MAX_METRIC_AGE = 60
6
+
6
7
  ACTIVE_RECORD_GAUGES = {
7
8
  connections: "Total connections in pool",
8
9
  busy: "Connections in use in pool",
9
10
  dead: "Dead connections in pool",
10
11
  idle: "Idle connections in pool",
11
12
  waiting: "Connection requests waiting",
12
- size: "Maximum allowed connection pool size"
13
+ size: "Maximum allowed connection pool size",
13
14
  }
14
15
 
15
16
  def initialize
16
- @active_record_metrics = []
17
+ @active_record_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
18
+ @active_record_metrics.filter = ->(new_metric, old_metric) do
19
+ new_metric["pid"] == old_metric["pid"] &&
20
+ new_metric["hostname"] == old_metric["hostname"] &&
21
+ new_metric["metric_labels"]["pool_name"] == old_metric["metric_labels"]["pool_name"]
22
+ end
17
23
  end
18
24
 
19
25
  def type
@@ -26,13 +32,18 @@ module PrometheusExporter::Server
26
32
  metrics = {}
27
33
 
28
34
  @active_record_metrics.map do |m|
29
- metric_key = (m["metric_labels"] || {}).merge("pid" => m["pid"])
35
+ metric_key =
36
+ (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"])
30
37
  metric_key.merge!(m["custom_labels"]) if m["custom_labels"]
31
38
 
32
39
  ACTIVE_RECORD_GAUGES.map do |k, help|
33
40
  k = k.to_s
34
41
  if v = m[k]
35
- g = metrics[k] ||= PrometheusExporter::Metric::Gauge.new("active_record_connection_pool_#{k}", help)
42
+ g =
43
+ metrics[k] ||= PrometheusExporter::Metric::Gauge.new(
44
+ "active_record_connection_pool_#{k}",
45
+ help,
46
+ )
36
47
  g.observe(v, metric_key)
37
48
  end
38
49
  end
@@ -42,15 +53,6 @@ module PrometheusExporter::Server
42
53
  end
43
54
 
44
55
  def collect(obj)
45
- now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
46
-
47
- obj["created_at"] = now
48
-
49
- @active_record_metrics.delete_if do |current|
50
- (obj["pid"] == current["pid"] && obj["hostname"] == current["hostname"]) ||
51
- (current["created_at"] + MAX_ACTIVERECORD_METRIC_AGE < now)
52
- end
53
-
54
56
  @active_record_metrics << obj
55
57
  end
56
58
  end
@@ -1,10 +1,13 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- module PrometheusExporter::Server
3
+ require "logger"
4
4
 
5
+ module PrometheusExporter::Server
5
6
  class Collector < CollectorBase
7
+ attr_reader :logger
6
8
 
7
- def initialize(json_serializer: nil)
9
+ def initialize(json_serializer: nil, logger: Logger.new(STDERR))
10
+ @logger = logger
8
11
  @process_metrics = []
9
12
  @metrics = {}
10
13
  @mutex = Mutex.new
@@ -14,12 +17,16 @@ module PrometheusExporter::Server
14
17
  register_collector(ProcessCollector.new)
15
18
  register_collector(SidekiqCollector.new)
16
19
  register_collector(SidekiqQueueCollector.new)
20
+ register_collector(SidekiqProcessCollector.new)
21
+ register_collector(SidekiqStatsCollector.new)
17
22
  register_collector(DelayedJobCollector.new)
18
23
  register_collector(PumaCollector.new)
19
24
  register_collector(HutchCollector.new)
20
25
  register_collector(UnicornCollector.new)
21
26
  register_collector(ActiveRecordCollector.new)
22
27
  register_collector(ShoryukenCollector.new)
28
+ register_collector(ResqueCollector.new)
29
+ register_collector(GoodJobCollector.new)
23
30
  end
24
31
 
25
32
  def register_collector(collector)
@@ -36,19 +43,17 @@ module PrometheusExporter::Server
36
43
  collector.collect(obj)
37
44
  else
38
45
  metric = @metrics[obj["name"]]
39
- if !metric
40
- metric = register_metric_unsafe(obj)
41
- end
46
+ metric = register_metric_unsafe(obj) if !metric
47
+
48
+ next unless metric
42
49
 
43
50
  keys = obj["keys"] || {}
44
- if obj["custom_labels"]
45
- keys = obj["custom_labels"].merge(keys)
46
- end
51
+ keys = obj["custom_labels"].merge(keys) if obj["custom_labels"]
47
52
 
48
53
  case obj["prometheus_exporter_action"]
49
- when 'increment'
54
+ when "increment"
50
55
  metric.increment(keys, obj["value"])
51
- when 'decrement'
56
+ when "decrement"
52
57
  metric.decrement(keys, obj["value"])
53
58
  else
54
59
  metric.observe(obj["value"], keys)
@@ -59,15 +64,14 @@ module PrometheusExporter::Server
59
64
 
60
65
  def prometheus_metrics_text
61
66
  @mutex.synchronize do
62
- (@metrics.values + @collectors.values.map(&:metrics).flatten)
63
- .map(&:to_prometheus_text).join("\n")
67
+ (@metrics.values + @collectors.values.map(&:metrics).flatten).map(
68
+ &:to_prometheus_text
69
+ ).join("\n")
64
70
  end
65
71
  end
66
72
 
67
73
  def register_metric(metric)
68
- @mutex.synchronize do
69
- @metrics[metric.name] = metric
70
- end
74
+ @mutex.synchronize { @metrics[metric.name] = metric }
71
75
  end
72
76
 
73
77
  protected
@@ -77,6 +81,11 @@ module PrometheusExporter::Server
77
81
  help = obj["help"]
78
82
  opts = symbolize_keys(obj["opts"] || {})
79
83
 
84
+ if !name
85
+ logger.warn "failed to register metric due to empty name #{obj}"
86
+ return
87
+ end
88
+
80
89
  metric =
81
90
  case obj["type"]
82
91
  when "gauge"
@@ -92,12 +101,15 @@ module PrometheusExporter::Server
92
101
  if metric
93
102
  @metrics[name] = metric
94
103
  else
95
- STDERR.puts "failed to register metric #{obj}"
104
+ logger.warn "failed to register metric #{obj}"
96
105
  end
97
106
  end
98
107
 
99
108
  def symbolize_keys(hash)
100
- hash.inject({}) { |memo, k| memo[k.first.to_sym] = k.last; memo }
109
+ hash.inject({}) do |memo, k|
110
+ memo[k.first.to_sym] = k.last
111
+ memo
112
+ end
101
113
  end
102
114
  end
103
115
  end
@@ -1,10 +1,8 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module PrometheusExporter::Server
4
-
5
4
  # minimal interface to implement a customer collector
6
5
  class CollectorBase
7
-
8
6
  # called each time a string is delivered from the web
9
7
  def process(str)
10
8
  end
@@ -5,6 +5,7 @@ module PrometheusExporter::Server
5
5
  def initialize
6
6
  @delayed_jobs_total = nil
7
7
  @delayed_job_duration_seconds = nil
8
+ @delayed_job_latency_seconds_total = nil
8
9
  @delayed_jobs_total = nil
9
10
  @delayed_failed_jobs_total = nil
10
11
  @delayed_jobs_max_attempts_reached_total = nil
@@ -19,28 +20,49 @@ module PrometheusExporter::Server
19
20
  end
20
21
 
21
22
  def collect(obj)
22
- default_labels = { job_name: obj['name'] }
23
- custom_labels = obj['custom_labels']
24
- labels = custom_labels.nil? ? default_labels : default_labels.merge(custom_labels)
23
+ custom_labels = obj["custom_labels"] || {}
24
+ gauge_labels = { queue_name: obj["queue_name"] }.merge(custom_labels)
25
+ counter_labels = gauge_labels.merge(job_name: obj["name"])
25
26
 
26
27
  ensure_delayed_job_metrics
27
- @delayed_job_duration_seconds.observe(obj["duration"], labels)
28
- @delayed_jobs_total.observe(1, labels)
29
- @delayed_failed_jobs_total.observe(1, labels) if !obj["success"]
30
- @delayed_jobs_max_attempts_reached_total.observe(1) if obj["attempts"] >= obj["max_attempts"]
31
- @delayed_job_duration_seconds_summary.observe(obj["duration"])
32
- @delayed_job_duration_seconds_summary.observe(obj["duration"], status: "success") if obj["success"]
33
- @delayed_job_duration_seconds_summary.observe(obj["duration"], status: "failed") if !obj["success"]
34
- @delayed_job_attempts_summary.observe(obj["attempts"]) if obj["success"]
35
- @delayed_jobs_enqueued.observe(obj["enqueued"])
36
- @delayed_jobs_pending.observe(obj["pending"])
28
+ @delayed_job_duration_seconds.observe(obj["duration"], counter_labels)
29
+ @delayed_job_latency_seconds_total.observe(obj["latency"], counter_labels)
30
+ @delayed_jobs_total.observe(1, counter_labels)
31
+ @delayed_failed_jobs_total.observe(1, counter_labels) if !obj["success"]
32
+ if obj["attempts"] >= obj["max_attempts"]
33
+ @delayed_jobs_max_attempts_reached_total.observe(1, counter_labels)
34
+ end
35
+ @delayed_job_duration_seconds_summary.observe(obj["duration"], counter_labels)
36
+ if obj["success"]
37
+ @delayed_job_duration_seconds_summary.observe(
38
+ obj["duration"],
39
+ counter_labels.merge(status: "success"),
40
+ )
41
+ end
42
+ if !obj["success"]
43
+ @delayed_job_duration_seconds_summary.observe(
44
+ obj["duration"],
45
+ counter_labels.merge(status: "failed"),
46
+ )
47
+ end
48
+ @delayed_job_attempts_summary.observe(obj["attempts"], counter_labels) if obj["success"]
49
+ @delayed_jobs_enqueued.observe(obj["enqueued"], gauge_labels)
50
+ @delayed_jobs_pending.observe(obj["pending"], gauge_labels)
37
51
  end
38
52
 
39
53
  def metrics
40
54
  if @delayed_jobs_total
41
- [@delayed_job_duration_seconds, @delayed_jobs_total, @delayed_failed_jobs_total,
42
- @delayed_jobs_max_attempts_reached_total, @delayed_job_duration_seconds_summary, @delayed_job_attempts_summary,
43
- @delayed_jobs_enqueued, @delayed_jobs_pending]
55
+ [
56
+ @delayed_job_duration_seconds,
57
+ @delayed_job_latency_seconds_total,
58
+ @delayed_jobs_total,
59
+ @delayed_failed_jobs_total,
60
+ @delayed_jobs_max_attempts_reached_total,
61
+ @delayed_job_duration_seconds_summary,
62
+ @delayed_job_attempts_summary,
63
+ @delayed_jobs_enqueued,
64
+ @delayed_jobs_pending,
65
+ ]
44
66
  else
45
67
  []
46
68
  end
@@ -50,38 +72,59 @@ module PrometheusExporter::Server
50
72
 
51
73
  def ensure_delayed_job_metrics
52
74
  if !@delayed_jobs_total
53
-
54
75
  @delayed_job_duration_seconds =
55
- PrometheusExporter::Metric::Counter.new(
56
- "delayed_job_duration_seconds", "Total time spent in delayed jobs.")
76
+ PrometheusExporter::Metric::Counter.new(
77
+ "delayed_job_duration_seconds",
78
+ "Total time spent in delayed jobs.",
79
+ )
80
+
81
+ @delayed_job_latency_seconds_total =
82
+ PrometheusExporter::Metric::Counter.new(
83
+ "delayed_job_latency_seconds_total",
84
+ "Total delayed jobs latency.",
85
+ )
57
86
 
58
87
  @delayed_jobs_total =
59
- PrometheusExporter::Metric::Counter.new(
60
- "delayed_jobs_total", "Total number of delayed jobs executed.")
88
+ PrometheusExporter::Metric::Counter.new(
89
+ "delayed_jobs_total",
90
+ "Total number of delayed jobs executed.",
91
+ )
61
92
 
62
93
  @delayed_jobs_enqueued =
63
- PrometheusExporter::Metric::Gauge.new(
64
- "delayed_jobs_enqueued", "Number of enqueued delayed jobs.")
94
+ PrometheusExporter::Metric::Gauge.new(
95
+ "delayed_jobs_enqueued",
96
+ "Number of enqueued delayed jobs.",
97
+ )
65
98
 
66
99
  @delayed_jobs_pending =
67
- PrometheusExporter::Metric::Gauge.new(
68
- "delayed_jobs_pending", "Number of pending delayed jobs.")
100
+ PrometheusExporter::Metric::Gauge.new(
101
+ "delayed_jobs_pending",
102
+ "Number of pending delayed jobs.",
103
+ )
69
104
 
70
105
  @delayed_failed_jobs_total =
71
- PrometheusExporter::Metric::Counter.new(
72
- "delayed_failed_jobs_total", "Total number failed delayed jobs executed.")
106
+ PrometheusExporter::Metric::Counter.new(
107
+ "delayed_failed_jobs_total",
108
+ "Total number failed delayed jobs executed.",
109
+ )
73
110
 
74
111
  @delayed_jobs_max_attempts_reached_total =
75
- PrometheusExporter::Metric::Counter.new(
76
- "delayed_jobs_max_attempts_reached_total", "Total number of delayed jobs that reached max attempts.")
112
+ PrometheusExporter::Metric::Counter.new(
113
+ "delayed_jobs_max_attempts_reached_total",
114
+ "Total number of delayed jobs that reached max attempts.",
115
+ )
77
116
 
78
117
  @delayed_job_duration_seconds_summary =
79
- PrometheusExporter::Metric::Summary.new("delayed_job_duration_seconds_summary",
80
- "Summary of the time it takes jobs to execute.")
118
+ PrometheusExporter::Metric::Base.default_aggregation.new(
119
+ "delayed_job_duration_seconds_summary",
120
+ "Summary of the time it takes jobs to execute.",
121
+ )
81
122
 
82
123
  @delayed_job_attempts_summary =
83
- PrometheusExporter::Metric::Summary.new("delayed_job_attempts_summary",
84
- "Summary of the amount of attempts it takes delayed jobs to succeed.")
124
+ PrometheusExporter::Metric::Base.default_aggregation.new(
125
+ "delayed_job_attempts_summary",
126
+ "Summary of the amount of attempts it takes delayed jobs to succeed.",
127
+ )
85
128
  end
86
129
  end
87
130
  end
@@ -0,0 +1,52 @@
1
+ # frozen_string_literal: true
2
+
3
+ module PrometheusExporter::Server
4
+ class GoodJobCollector < TypeCollector
5
+ MAX_METRIC_AGE = 30
6
+ GOOD_JOB_GAUGES = {
7
+ scheduled: "Total number of scheduled GoodJob jobs.",
8
+ retried: "Total number of retried GoodJob jobs.",
9
+ queued: "Total number of queued GoodJob jobs.",
10
+ running: "Total number of running GoodJob jobs.",
11
+ finished: "Total number of finished GoodJob jobs.",
12
+ succeeded: "Total number of succeeded GoodJob jobs.",
13
+ discarded: "Total number of discarded GoodJob jobs.",
14
+ }
15
+
16
+ def initialize
17
+ @good_job_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
18
+ @gauges = {}
19
+ end
20
+
21
+ def type
22
+ "good_job"
23
+ end
24
+
25
+ def metrics
26
+ return [] if good_job_metrics.length == 0
27
+
28
+ good_job_metrics.map do |metric|
29
+ labels = metric.fetch("custom_labels", {})
30
+
31
+ GOOD_JOB_GAUGES.map do |name, help|
32
+ value = metric[name.to_s]
33
+
34
+ if value
35
+ gauge = gauges[name] ||= PrometheusExporter::Metric::Gauge.new("good_job_#{name}", help)
36
+ gauge.observe(value, labels)
37
+ end
38
+ end
39
+ end
40
+
41
+ gauges.values
42
+ end
43
+
44
+ def collect(object)
45
+ @good_job_metrics << object
46
+ end
47
+
48
+ private
49
+
50
+ attr_reader :good_job_metrics, :gauges
51
+ end
52
+ end