prometheus_exporter 0.7.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci.yml +35 -24
  3. data/Appraisals +1 -1
  4. data/CHANGELOG +54 -24
  5. data/README.md +129 -38
  6. data/bin/prometheus_exporter +19 -6
  7. data/lib/prometheus_exporter/client.rb +15 -5
  8. data/lib/prometheus_exporter/instrumentation/active_record.rb +7 -9
  9. data/lib/prometheus_exporter/instrumentation/delayed_job.rb +3 -2
  10. data/lib/prometheus_exporter/instrumentation/method_profiler.rb +2 -1
  11. data/lib/prometheus_exporter/instrumentation/process.rb +1 -1
  12. data/lib/prometheus_exporter/instrumentation/puma.rb +28 -16
  13. data/lib/prometheus_exporter/instrumentation/resque.rb +40 -0
  14. data/lib/prometheus_exporter/instrumentation/sidekiq.rb +1 -1
  15. data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb +58 -0
  16. data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb +27 -13
  17. data/lib/prometheus_exporter/instrumentation/sidekiq_stats.rb +43 -0
  18. data/lib/prometheus_exporter/instrumentation/unicorn.rb +4 -4
  19. data/lib/prometheus_exporter/instrumentation.rb +3 -0
  20. data/lib/prometheus_exporter/metric/base.rb +9 -0
  21. data/lib/prometheus_exporter/metric/gauge.rb +4 -0
  22. data/lib/prometheus_exporter/middleware.rb +19 -9
  23. data/lib/prometheus_exporter/server/active_record_collector.rb +2 -1
  24. data/lib/prometheus_exporter/server/collector.rb +3 -0
  25. data/lib/prometheus_exporter/server/delayed_job_collector.rb +17 -17
  26. data/lib/prometheus_exporter/server/puma_collector.rb +16 -8
  27. data/lib/prometheus_exporter/server/resque_collector.rb +54 -0
  28. data/lib/prometheus_exporter/server/runner.rb +11 -2
  29. data/lib/prometheus_exporter/server/sidekiq_collector.rb +1 -1
  30. data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +46 -0
  31. data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb +1 -1
  32. data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb +46 -0
  33. data/lib/prometheus_exporter/server/unicorn_collector.rb +3 -3
  34. data/lib/prometheus_exporter/server/web_collector.rb +4 -4
  35. data/lib/prometheus_exporter/server/web_server.rb +6 -8
  36. data/lib/prometheus_exporter/server.rb +3 -0
  37. data/lib/prometheus_exporter/version.rb +1 -1
  38. data/prometheus_exporter.gemspec +3 -3
  39. metadata +16 -10
data/lib/prometheus_exporter/client.rb

@@ -2,6 +2,7 @@

 require 'socket'
 require 'thread'
+require 'logger'

 module PrometheusExporter
   class Client
@@ -53,14 +54,20 @@ module PrometheusExporter
     MAX_SOCKET_AGE = 25
     MAX_QUEUE_SIZE = 10_000

+    attr_reader :logger
+
     def initialize(
       host: ENV.fetch('PROMETHEUS_EXPORTER_HOST', 'localhost'),
       port: ENV.fetch('PROMETHEUS_EXPORTER_PORT', PrometheusExporter::DEFAULT_PORT),
       max_queue_size: nil,
       thread_sleep: 0.5,
       json_serializer: nil,
-      custom_labels: nil
+      custom_labels: nil,
+      logger: Logger.new(STDERR),
+      log_level: Logger::WARN
     )
+      @logger = logger
+      @logger.level = log_level
       @metrics = []

       @queue = Queue.new
@@ -72,7 +79,7 @@ module PrometheusExporter
       max_queue_size ||= MAX_QUEUE_SIZE
       max_queue_size = max_queue_size.to_i

-      if max_queue_size.to_i <= 0
+      if max_queue_size <= 0
         raise ArgumentError, "max_queue_size must be larger than 0"
       end

@@ -125,7 +132,7 @@ module PrometheusExporter
     def send(str)
       @queue << str
       if @queue.length > @max_queue_size
-        STDERR.puts "Prometheus Exporter client is dropping message cause queue is full"
+        logger.warn "Prometheus Exporter client is dropping message cause queue is full"
         @queue.pop
       end

@@ -143,7 +150,7 @@ module PrometheusExporter
       @socket.write(message)
       @socket.write("\r\n")
     rescue => e
-      STDERR.puts "Prometheus Exporter is dropping a message: #{e}"
+      logger.warn "Prometheus Exporter is dropping a message: #{e}"
       @socket = nil
       raise
     end
@@ -168,7 +175,7 @@ module PrometheusExporter
       close_socket_if_old!
       process_queue
     rescue => e
-      STDERR.puts "Prometheus Exporter, failed to send message #{e}"
+      logger.error "Prometheus Exporter, failed to send message #{e}"
     end

     def ensure_worker_thread!
@@ -184,6 +191,9 @@ module PrometheusExporter
           end
         end
       end
+    rescue ThreadError => e
+      raise unless e.message =~ /can't alloc thread/
+      logger.error "Prometheus Exporter, failed to send message ThreadError #{e}"
     end

     def close_socket!
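The new `logger`/`log_level` options make client-side logging configurable instead of hard-wiring STDERR. A minimal sketch of wiring in a custom logger, assuming only the constructor shown above (the log path and level here are illustrative):

    require 'prometheus_exporter'
    require 'prometheus_exporter/client'
    require 'logger'

    # Route client warnings (e.g. dropped-message notices) into an
    # application log file; INFO is an arbitrary choice for the example.
    PrometheusExporter::Client.default = PrometheusExporter::Client.new(
      host: 'localhost',
      port: PrometheusExporter::DEFAULT_PORT,
      logger: Logger.new('log/prometheus_exporter.log'),
      log_level: Logger::INFO
    )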
data/lib/prometheus_exporter/instrumentation/active_record.rb

@@ -7,9 +7,11 @@ module PrometheusExporter::Instrumentation

     def self.start(client: nil, frequency: 30, custom_labels: {}, config_labels: [])

-      # Not all rails versions support coonection pool stats
+      client ||= PrometheusExporter::Client.default
+
+      # Not all rails versions support connection pool stats
       unless ::ActiveRecord::Base.connection_pool.respond_to?(:stat)
-        STDERR.puts("ActiveRecord connection pool stats not supported in your rails version")
+        client.logger.error("ActiveRecord connection pool stats not supported in your rails version")
         return
       end

@@ -18,8 +20,6 @@ module PrometheusExporter::Instrumentation

       active_record_collector = new(custom_labels, config_labels)

-      client ||= PrometheusExporter::Client.default
-
       stop if @thread

       @thread = Thread.new do
@@ -28,7 +28,7 @@ module PrometheusExporter::Instrumentation
           metrics = active_record_collector.collect
           metrics.each { |metric| client.send_json metric }
         rescue => e
-          STDERR.puts("Prometheus Exporter Failed To Collect Process Stats #{e}")
+          client.logger.error("Prometheus Exporter Failed To Collect Process Stats #{e}")
         ensure
           sleep frequency
         end
@@ -81,15 +81,13 @@ module PrometheusExporter::Instrumentation
     private

     def labels(pool)
-      if pool.respond_to?(:spec) # ActiveRecord <= 6.0
+      if ::ActiveRecord.version < Gem::Version.new("6.1.0.rc1")
         @metric_labels.merge(pool_name: pool.spec.name).merge(pool.spec.config
           .select { |k, v| @config_labels.include? k }
           .map { |k, v| [k.to_s.dup.prepend("dbconfig_"), v] }.to_h)
-      elsif pool.respond_to?(:db_config) # ActiveRecord >= 6.1.rc1
+      else
         @metric_labels.merge(pool_name: pool.db_config.name).merge(
           @config_labels.each_with_object({}) { |l, acc| acc["dbconfig_#{l}"] = pool.db_config.public_send(l) })
-      else
-        raise "Unsupported connection pool"
       end
     end
   end
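Pool labels are now branched on the ActiveRecord version (with `db_config` used from 6.1.0.rc1 on) rather than duck-typing the pool, and unsupported pools no longer raise. A sketch of starting the collector with the options in the signature above; the label values are illustrative:

    require 'prometheus_exporter/instrumentation'

    PrometheusExporter::Instrumentation::ActiveRecord.start(
      custom_labels: { type: "puma_worker" },  # hypothetical label
      config_labels: [:database, :host]        # emitted as dbconfig_database, dbconfig_host
    )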
data/lib/prometheus_exporter/instrumentation/delayed_job.rb

@@ -13,8 +13,8 @@ module PrometheusExporter::Instrumentation
       callbacks do |lifecycle|
         lifecycle.around(:invoke_job) do |job, *args, &block|
           max_attempts = Delayed::Worker.max_attempts
-          enqueued_count = Delayed::Job.count
-          pending_count = Delayed::Job.where(attempts: 0, locked_at: nil).count
+          enqueued_count = Delayed::Job.where(queue: job.queue).count
+          pending_count = Delayed::Job.where(attempts: 0, locked_at: nil, queue: job.queue).count
           instrumenter.call(job, max_attempts, enqueued_count, pending_count, *args, &block)
         end
       end
@@ -41,6 +41,7 @@ module PrometheusExporter::Instrumentation
       @client.send_json(
         type: "delayed_job",
         name: job.handler.to_s.match(JOB_CLASS_REGEXP).to_a[1].to_s,
+        queue_name: job.queue,
        success: success,
        duration: duration,
        attempts: attempts,
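Enqueued/pending counts are now scoped to the job's own queue, and a `queue_name` field is sent with each job metric. Registration itself is unchanged; for context, a sketch of enabling the plugin (initializer placement is an assumption):

    # e.g. in a Rails initializer, alongside the DelayedJob workers
    require 'prometheus_exporter/instrumentation'
    PrometheusExporter::Instrumentation::DelayedJob.register_plugin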
data/lib/prometheus_exporter/instrumentation/method_profiler.rb

@@ -5,6 +5,7 @@ module PrometheusExporter::Instrumentation; end

 class PrometheusExporter::Instrumentation::MethodProfiler
   def self.patch(klass, methods, name)
+    patch_source_line = __LINE__ + 3
     patches = methods.map do |method_name|
       <<~RUBY
       unless defined?(#{method_name}__mp_unpatched)
@@ -26,7 +27,7 @@ class PrometheusExporter::Instrumentation::MethodProfiler
       RUBY
     end.join("\n")

-    klass.class_eval patches
+    klass.class_eval patches, __FILE__, patch_source_line
   end

   def self.transfer
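Passing `__FILE__` and a computed line number to `class_eval` means backtraces through patched methods now point at the generating source instead of an anonymous eval. The patch API itself is unchanged; a sketch of a typical call (assumes the pg gem is already loaded, and follows the method list used in the project's README):

    require 'prometheus_exporter/instrumentation'

    # Instrument SQL calls; durations are reported under the "sql" name.
    PrometheusExporter::Instrumentation::MethodProfiler.patch(
      PG::Connection, [:exec, :async_exec, :exec_prepared], "sql"
    )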
data/lib/prometheus_exporter/instrumentation/process.rb

@@ -27,7 +27,7 @@ module PrometheusExporter::Instrumentation
         metric = process_collector.collect
         client.send_json metric
       rescue => e
-        STDERR.puts("Prometheus Exporter Failed To Collect Process Stats #{e}")
+        client.logger.error("Prometheus Exporter Failed To Collect Process Stats #{e}")
       ensure
         sleep frequency
       end
@@ -5,8 +5,8 @@ require "json"
5
5
  # collects stats from puma
6
6
  module PrometheusExporter::Instrumentation
7
7
  class Puma
8
- def self.start(client: nil, frequency: 30)
9
- puma_collector = new
8
+ def self.start(client: nil, frequency: 30, labels: {})
9
+ puma_collector = new(labels)
10
10
  client ||= PrometheusExporter::Client.default
11
11
  Thread.new do
12
12
  while true
@@ -14,7 +14,7 @@ module PrometheusExporter::Instrumentation
14
14
  metric = puma_collector.collect
15
15
  client.send_json metric
16
16
  rescue => e
17
- STDERR.puts("Prometheus Exporter Failed To Collect Puma Stats #{e}")
17
+ client.logger.error("Prometheus Exporter Failed To Collect Puma Stats #{e}")
18
18
  ensure
19
19
  sleep frequency
20
20
  end
@@ -22,21 +22,33 @@ module PrometheusExporter::Instrumentation
22
22
  end
23
23
  end
24
24
 
25
+ def initialize(metric_labels = {})
26
+ @metric_labels = metric_labels
27
+ end
28
+
25
29
  def collect
26
- metric = {}
27
- metric[:type] = "puma"
30
+ metric = {
31
+ pid: pid,
32
+ type: "puma",
33
+ hostname: ::PrometheusExporter.hostname,
34
+ metric_labels: @metric_labels
35
+ }
28
36
  collect_puma_stats(metric)
29
37
  metric
30
38
  end
31
39
 
40
+ def pid
41
+ @pid = ::Process.pid
42
+ end
43
+
32
44
  def collect_puma_stats(metric)
33
45
  stats = JSON.parse(::Puma.stats)
34
46
 
35
47
  if stats.key?("workers")
36
48
  metric[:phase] = stats["phase"]
37
- metric[:workers_total] = stats["workers"]
38
- metric[:booted_workers_total] = stats["booted_workers"]
39
- metric[:old_workers_total] = stats["old_workers"]
49
+ metric[:workers] = stats["workers"]
50
+ metric[:booted_workers] = stats["booted_workers"]
51
+ metric[:old_workers] = stats["old_workers"]
40
52
 
41
53
  stats["worker_status"].each do |worker|
42
54
  next if worker["last_status"].empty?
@@ -50,15 +62,15 @@ module PrometheusExporter::Instrumentation
50
62
  private
51
63
 
52
64
  def collect_worker_status(metric, status)
53
- metric[:request_backlog_total] ||= 0
54
- metric[:running_threads_total] ||= 0
55
- metric[:thread_pool_capacity_total] ||= 0
56
- metric[:max_threads_total] ||= 0
65
+ metric[:request_backlog] ||= 0
66
+ metric[:running_threads] ||= 0
67
+ metric[:thread_pool_capacity] ||= 0
68
+ metric[:max_threads] ||= 0
57
69
 
58
- metric[:request_backlog_total] += status["backlog"]
59
- metric[:running_threads_total] += status["running"]
60
- metric[:thread_pool_capacity_total] += status["pool_capacity"]
61
- metric[:max_threads_total] += status["max_threads"]
70
+ metric[:request_backlog] += status["backlog"]
71
+ metric[:running_threads] += status["running"]
72
+ metric[:thread_pool_capacity] += status["pool_capacity"]
73
+ metric[:max_threads] += status["max_threads"]
62
74
  end
63
75
  end
64
76
  end
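The Puma collector now tags metrics with pid and hostname, accepts custom labels, and drops the misleading `_total` suffix from gauge names. A sketch of passing labels from a clustered Puma config (the hook placement and label value are assumptions):

    # config/puma.rb (sketch)
    after_worker_boot do
      require 'prometheus_exporter/instrumentation'
      PrometheusExporter::Instrumentation::Puma.start(labels: { type: "puma_worker" })
    end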
data/lib/prometheus_exporter/instrumentation/resque.rb (new file)

@@ -0,0 +1,40 @@
+# frozen_string_literal: true
+
+# collects stats from resque
+module PrometheusExporter::Instrumentation
+  class Resque
+    def self.start(client: nil, frequency: 30)
+      resque_collector = new
+      client ||= PrometheusExporter::Client.default
+      Thread.new do
+        while true
+          begin
+            client.send_json(resque_collector.collect)
+          rescue => e
+            client.logger.error("Prometheus Exporter Failed To Collect Resque Stats #{e}")
+          ensure
+            sleep frequency
+          end
+        end
+      end
+    end
+
+    def collect
+      metric = {}
+      metric[:type] = "resque"
+      collect_resque_stats(metric)
+      metric
+    end
+
+    def collect_resque_stats(metric)
+      info = ::Resque.info
+
+      metric[:processed_jobs] = info[:processed]
+      metric[:failed_jobs] = info[:failed]
+      metric[:pending_jobs] = info[:pending]
+      metric[:queues] = info[:queues]
+      metric[:worker] = info[:workers]
+      metric[:working] = info[:working]
+    end
+  end
+end
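Resque instrumentation is new in this release; the collector polls `Resque.info` on a timer. A sketch of starting it (placement in worker boot code is an assumption; the frequency mirrors the default):

    require 'prometheus_exporter/instrumentation'
    PrometheusExporter::Instrumentation::Resque.start(frequency: 30)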
data/lib/prometheus_exporter/instrumentation/sidekiq.rb

@@ -73,7 +73,7 @@ module PrometheusExporter::Instrumentation
     # of the delayed extensions
     # https://github.com/mperham/sidekiq/blob/master/lib/sidekiq/extensions/class_methods.rb
     begin
-      (target, method_name, _args) = YAML.load(msg['args'].first)
+      (target, method_name, _args) = YAML.load(msg['args'].first) # rubocop:disable Security/YAMLLoad
       if target.class == Class
         "#{target.name}##{method_name}"
       else
data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb (new file)

@@ -0,0 +1,58 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Instrumentation
+  class SidekiqProcess
+    def self.start(client: nil, frequency: 30)
+      client ||= PrometheusExporter::Client.default
+      sidekiq_process_collector = new
+
+      Thread.new do
+        loop do
+          begin
+            client.send_json(sidekiq_process_collector.collect)
+          rescue StandardError => e
+            STDERR.puts("Prometheus Exporter Failed To Collect Sidekiq Processes metrics #{e}")
+          ensure
+            sleep frequency
+          end
+        end
+      end
+    end
+
+    def initialize
+      @pid = ::Process.pid
+      @hostname = Socket.gethostname
+    end
+
+    def collect
+      {
+        type: 'sidekiq_process',
+        process: collect_stats
+      }
+    end
+
+    def collect_stats
+      process = current_process
+      return {} unless process
+
+      {
+        busy: process['busy'],
+        concurrency: process['concurrency'],
+        labels: {
+          labels: process['labels'].sort.join(','),
+          queues: process['queues'].sort.join(','),
+          quiet: process['quiet'],
+          tag: process['tag'],
+          hostname: process['hostname'],
+          identity: process['identity'],
+        }
+      }
+    end
+
+    def current_process
+      ::Sidekiq::ProcessSet.new.find do |sp|
+        sp['hostname'] == @hostname && sp['pid'] == @pid
+      end
+    end
+  end
+end
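This new collector reports per-process busy/concurrency stats by looking itself up in `Sidekiq::ProcessSet`. A sketch of starting it from a Sidekiq server process (initializer placement is an assumption; `:startup` fires once the server has registered itself, so the lookup can succeed):

    Sidekiq.configure_server do |config|
      config.on :startup do
        require 'prometheus_exporter/instrumentation'
        PrometheusExporter::Instrumentation::SidekiqProcess.start
      end
    end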
data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb

@@ -2,16 +2,16 @@

 module PrometheusExporter::Instrumentation
   class SidekiqQueue
-    def self.start(client: nil, frequency: 30)
+    def self.start(client: nil, frequency: 30, all_queues: false)
       client ||= PrometheusExporter::Client.default
-      sidekiq_queue_collector = new
+      sidekiq_queue_collector = new(all_queues: all_queues)

       Thread.new do
         loop do
           begin
             client.send_json(sidekiq_queue_collector.collect)
           rescue StandardError => e
-            STDERR.puts("Prometheus Exporter Failed To Collect Sidekiq Queue metrics #{e}")
+            client.logger.error("Prometheus Exporter Failed To Collect Sidekiq Queue metrics #{e}")
           ensure
             sleep frequency
           end
@@ -19,6 +19,12 @@ module PrometheusExporter::Instrumentation
       end
     end

+    def initialize(all_queues: false)
+      @all_queues = all_queues
+      @pid = ::Process.pid
+      @hostname = Socket.gethostname
+    end
+
     def collect
       {
         type: 'sidekiq_queue',
@@ -27,24 +33,32 @@ module PrometheusExporter::Instrumentation
     end

     def collect_queue_stats
-      hostname = Socket.gethostname
-      pid = ::Process.pid
-      ps = ::Sidekiq::ProcessSet.new
+      sidekiq_queues = ::Sidekiq::Queue.all

-      process = ps.find do |sp|
-        sp['hostname'] == hostname && sp['pid'] == pid
+      unless @all_queues
+        queues = collect_current_process_queues
+        sidekiq_queues.select! { |sidekiq_queue| queues.include?(sidekiq_queue.name) }
       end

-      queues = process.nil? ? [] : process['queues']
-
-      ::Sidekiq::Queue.all.map do |queue|
-        next unless queues.include? queue.name
+      sidekiq_queues.map do |queue|
         {
-          backlog_total: queue.size,
+          backlog: queue.size,
           latency_seconds: queue.latency.to_i,
           labels: { queue: queue.name }
         }
       end.compact
     end
+
+    private
+
+    def collect_current_process_queues
+      ps = ::Sidekiq::ProcessSet.new
+
+      process = ps.find do |sp|
+        sp['hostname'] == @hostname && sp['pid'] == @pid
+      end
+
+      process.nil? ? [] : process['queues']
+    end
   end
 end
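The queue collector previously reported only the queues consumed by the current process; `all_queues: true` now reports every Sidekiq queue, and `backlog_total` was renamed to `backlog`. A sketch (same initializer assumption as above):

    Sidekiq.configure_server do |config|
      config.on :startup do
        require 'prometheus_exporter/instrumentation'
        # Report backlog/latency for all queues, not just this process's.
        PrometheusExporter::Instrumentation::SidekiqQueue.start(all_queues: true)
      end
    end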
data/lib/prometheus_exporter/instrumentation/sidekiq_stats.rb (new file)

@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Instrumentation
+  class SidekiqStats
+    def self.start(client: nil, frequency: 30)
+      client ||= PrometheusExporter::Client.default
+      sidekiq_stats_collector = new
+
+      Thread.new do
+        loop do
+          begin
+            client.send_json(sidekiq_stats_collector.collect)
+          rescue StandardError => e
+            STDERR.puts("Prometheus Exporter Failed To Collect Sidekiq Stats metrics #{e}")
+          ensure
+            sleep frequency
+          end
+        end
+      end
+    end
+
+    def collect
+      {
+        type: 'sidekiq_stats',
+        stats: collect_stats
+      }
+    end
+
+    def collect_stats
+      stats = ::Sidekiq::Stats.new
+      {
+        'dead_size' => stats.dead_size,
+        'enqueued' => stats.enqueued,
+        'failed' => stats.failed,
+        'processed' => stats.processed,
+        'processes_size' => stats.processes_size,
+        'retry_size' => stats.retry_size,
+        'scheduled_size' => stats.scheduled_size,
+        'workers_size' => stats.workers_size,
+      }
+    end
+  end
+end
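`Sidekiq::Stats` values are global to the Redis instance, so it is presumably enough to start this collector in a single process; running it in every worker would report the same numbers repeatedly. A sketch:

    require 'prometheus_exporter/instrumentation'
    PrometheusExporter::Instrumentation::SidekiqStats.start(frequency: 30)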
data/lib/prometheus_exporter/instrumentation/unicorn.rb

@@ -18,7 +18,7 @@ module PrometheusExporter::Instrumentation
         metric = unicorn_collector.collect
         client.send_json metric
       rescue StandardError => e
-        STDERR.puts("Prometheus Exporter Failed To Collect Unicorn Stats #{e}")
+        client.logger.error("Prometheus Exporter Failed To Collect Unicorn Stats #{e}")
       ensure
         sleep frequency
       end
@@ -42,9 +42,9 @@ module PrometheusExporter::Instrumentation
     def collect_unicorn_stats(metric)
       stats = listener_address_stats

-      metric[:active_workers_total] = stats.active
-      metric[:request_backlog_total] = stats.queued
-      metric[:workers_total] = worker_process_count
+      metric[:active_workers] = stats.active
+      metric[:request_backlog] = stats.queued
+      metric[:workers] = worker_process_count
     end

     private
@@ -5,9 +5,12 @@ require_relative "instrumentation/process"
5
5
  require_relative "instrumentation/method_profiler"
6
6
  require_relative "instrumentation/sidekiq"
7
7
  require_relative "instrumentation/sidekiq_queue"
8
+ require_relative "instrumentation/sidekiq_process"
9
+ require_relative "instrumentation/sidekiq_stats"
8
10
  require_relative "instrumentation/delayed_job"
9
11
  require_relative "instrumentation/puma"
10
12
  require_relative "instrumentation/hutch"
11
13
  require_relative "instrumentation/unicorn"
12
14
  require_relative "instrumentation/active_record"
13
15
  require_relative "instrumentation/shoryuken"
16
+ require_relative "instrumentation/resque"
data/lib/prometheus_exporter/metric/base.rb

@@ -5,6 +5,7 @@ module PrometheusExporter::Metric

   @default_prefix = nil if !defined?(@default_prefix)
   @default_labels = nil if !defined?(@default_labels)
+  @default_aggregation = nil if !defined?(@default_aggregation)

   # prefix applied to all metrics
   def self.default_prefix=(name)
@@ -23,6 +24,14 @@ module PrometheusExporter::Metric
     @default_labels || {}
   end

+  def self.default_aggregation=(aggregation)
+    @default_aggregation = aggregation
+  end
+
+  def self.default_aggregation
+    @default_aggregation ||= Summary
+  end
+
   attr_accessor :help, :name, :data

   def initialize(name, help)
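`default_aggregation` lets collectors build histograms instead of the summary default for timing metrics. A sketch of opting in before the server starts collecting (Histogram is the other aggregation class shipped in `PrometheusExporter::Metric`):

    require 'prometheus_exporter/metric'

    PrometheusExporter::Metric::Base.default_aggregation =
      PrometheusExporter::Metric::Histogram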
data/lib/prometheus_exporter/metric/gauge.rb

@@ -5,6 +5,10 @@ module PrometheusExporter::Metric
   attr_reader :data

   def initialize(name, help)
+    if name.end_with?("_total")
+      raise ArgumentError, "The metric name of gauge must not have _total suffix. Given: #{name}"
+    end
+
     super
     reset!
   end
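This guard enforces the Prometheus naming convention that the `_total` suffix is reserved for counters (which is also why the Puma and Unicorn gauges above were renamed). The new behavior, sketched:

    require 'prometheus_exporter/metric'

    PrometheusExporter::Metric::Gauge.new("workers_total", "number of workers")
    # => ArgumentError: The metric name of gauge must not have _total suffix. Given: workers_total

    PrometheusExporter::Metric::Gauge.new("workers", "number of workers")  # fine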
data/lib/prometheus_exporter/middleware.rb

@@ -58,6 +58,11 @@ class PrometheusExporter::Middleware
     if params
       action = params["action"]
       controller = params["controller"]
+    elsif (cors = env["rack.cors"]) && cors.respond_to?(:preflight?) && cors.preflight?
+      # if the Rack CORS Middleware identifies the request as a preflight request,
+      # the stack doesn't get to the point where controllers/actions are defined
+      action = "preflight"
+      controller = "preflight"
     end

     {
@@ -90,19 +95,24 @@ class PrometheusExporter::Middleware
     Process.clock_gettime(Process::CLOCK_REALTIME)
   end

-  # get the content of the x-queue-start or x-request-start header
+  # determine queue start from well-known trace headers
   def queue_start(env)
+
+    # get the content of the x-queue-start or x-request-start header
     value = env['HTTP_X_REQUEST_START'] || env['HTTP_X_QUEUE_START']
     unless value.nil? || value == ''
-      convert_header_to_ms(value.to_s)
+      # nginx returns time as milliseconds with 3 decimal places
+      # apache returns time as microseconds without decimal places
+      # this method takes care to convert both into a proper second + fractions timestamp
+      value = value.to_s.gsub(/t=|\./, '')
+      return "#{value[0, 10]}.#{value[10, 13]}".to_f
     end
-  end

-  # nginx returns time as milliseconds with 3 decimal places
-  # apache returns time as microseconds without decimal places
-  # this method takes care to convert both into a proper second + fractions timestamp
-  def convert_header_to_ms(str)
-    str = str.gsub(/t=|\./, '')
-    "#{str[0, 10]}.#{str[10, 13]}".to_f
+    # get the content of the x-amzn-trace-id header
+    # see also: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-request-tracing.html
+    value = env['HTTP_X_AMZN_TRACE_ID']
+    value&.split('Root=')&.last&.split('-')&.fetch(1)&.to_i(16)
+
   end
+
 end
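A worked example of the header conversion above, plus the new X-Amzn-Trace-Id fallback (header values are illustrative):

    # nginx sends milliseconds with 3 decimals; stripping "t=" and "." then
    # re-inserting the point after 10 digits yields epoch seconds:
    value = "t=1577836800.123".gsub(/t=|\./, '')   # => "1577836800123"
    "#{value[0, 10]}.#{value[10, 13]}".to_f        # => 1577836800.123
    # apache sends microseconds ("t=1577836800123456"); the same slicing
    # yields 1577836800.123456.

    # The ALB trace header encodes an epoch timestamp in hex in its Root segment:
    "Root=1-5e04b814-abcdef012345678912345678"
      .split('Root=').last.split('-').fetch(1).to_i(16)   # => 1577367572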
data/lib/prometheus_exporter/server/active_record_collector.rb

@@ -47,7 +47,8 @@ module PrometheusExporter::Server
       obj["created_at"] = now

       @active_record_metrics.delete_if do |current|
-        (obj["pid"] == current["pid"] && obj["hostname"] == current["hostname"]) ||
+        (obj["pid"] == current["pid"] && obj["hostname"] == current["hostname"] &&
+          obj["metric_labels"]["pool_name"] == current["metric_labels"]["pool_name"]) ||
          (current["created_at"] + MAX_ACTIVERECORD_METRIC_AGE < now)
       end

data/lib/prometheus_exporter/server/collector.rb

@@ -14,12 +14,15 @@ module PrometheusExporter::Server
      register_collector(ProcessCollector.new)
      register_collector(SidekiqCollector.new)
      register_collector(SidekiqQueueCollector.new)
+     register_collector(SidekiqProcessCollector.new)
+     register_collector(SidekiqStatsCollector.new)
      register_collector(DelayedJobCollector.new)
      register_collector(PumaCollector.new)
      register_collector(HutchCollector.new)
      register_collector(UnicornCollector.new)
      register_collector(ActiveRecordCollector.new)
      register_collector(ShoryukenCollector.new)
+     register_collector(ResqueCollector.new)
    end

    def register_collector(collector)