prometheus_exporter 0.7.0 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG +298 -35
  3. data/README.md +276 -53
  4. data/{bin → exe}/prometheus_exporter +20 -7
  5. data/lib/prometheus_exporter/client.rb +41 -32
  6. data/lib/prometheus_exporter/instrumentation/active_record.rb +29 -35
  7. data/lib/prometheus_exporter/instrumentation/delayed_job.rb +28 -13
  8. data/lib/prometheus_exporter/instrumentation/good_job.rb +28 -0
  9. data/lib/prometheus_exporter/instrumentation/hutch.rb +1 -1
  10. data/lib/prometheus_exporter/instrumentation/method_profiler.rb +67 -27
  11. data/lib/prometheus_exporter/instrumentation/periodic_stats.rb +54 -0
  12. data/lib/prometheus_exporter/instrumentation/process.rb +25 -27
  13. data/lib/prometheus_exporter/instrumentation/puma.rb +36 -27
  14. data/lib/prometheus_exporter/instrumentation/resque.rb +33 -0
  15. data/lib/prometheus_exporter/instrumentation/shoryuken.rb +6 -7
  16. data/lib/prometheus_exporter/instrumentation/sidekiq.rb +51 -23
  17. data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb +45 -0
  18. data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb +38 -33
  19. data/lib/prometheus_exporter/instrumentation/sidekiq_stats.rb +32 -0
  20. data/lib/prometheus_exporter/instrumentation/unicorn.rb +12 -17
  21. data/lib/prometheus_exporter/instrumentation.rb +5 -0
  22. data/lib/prometheus_exporter/metric/base.rb +20 -17
  23. data/lib/prometheus_exporter/metric/counter.rb +1 -3
  24. data/lib/prometheus_exporter/metric/gauge.rb +6 -6
  25. data/lib/prometheus_exporter/metric/histogram.rb +15 -5
  26. data/lib/prometheus_exporter/metric/summary.rb +5 -14
  27. data/lib/prometheus_exporter/middleware.rb +72 -38
  28. data/lib/prometheus_exporter/server/active_record_collector.rb +16 -14
  29. data/lib/prometheus_exporter/server/collector.rb +29 -17
  30. data/lib/prometheus_exporter/server/collector_base.rb +0 -2
  31. data/lib/prometheus_exporter/server/delayed_job_collector.rb +76 -33
  32. data/lib/prometheus_exporter/server/good_job_collector.rb +52 -0
  33. data/lib/prometheus_exporter/server/hutch_collector.rb +19 -11
  34. data/lib/prometheus_exporter/server/metrics_container.rb +66 -0
  35. data/lib/prometheus_exporter/server/process_collector.rb +15 -14
  36. data/lib/prometheus_exporter/server/puma_collector.rb +21 -18
  37. data/lib/prometheus_exporter/server/resque_collector.rb +50 -0
  38. data/lib/prometheus_exporter/server/runner.rb +49 -13
  39. data/lib/prometheus_exporter/server/shoryuken_collector.rb +22 -17
  40. data/lib/prometheus_exporter/server/sidekiq_collector.rb +22 -14
  41. data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +47 -0
  42. data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb +12 -12
  43. data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb +49 -0
  44. data/lib/prometheus_exporter/server/type_collector.rb +2 -0
  45. data/lib/prometheus_exporter/server/unicorn_collector.rb +32 -33
  46. data/lib/prometheus_exporter/server/web_collector.rb +48 -31
  47. data/lib/prometheus_exporter/server/web_server.rb +70 -48
  48. data/lib/prometheus_exporter/server.rb +4 -0
  49. data/lib/prometheus_exporter/version.rb +1 -1
  50. data/lib/prometheus_exporter.rb +12 -13
  51. metadata +19 -206
  52. data/.github/workflows/ci.yml +0 -42
  53. data/.gitignore +0 -13
  54. data/.rubocop.yml +0 -7
  55. data/Appraisals +0 -10
  56. data/CODE_OF_CONDUCT.md +0 -74
  57. data/Gemfile +0 -8
  58. data/Guardfile +0 -8
  59. data/Rakefile +0 -12
  60. data/bench/bench.rb +0 -45
  61. data/examples/custom_collector.rb +0 -27
  62. data/gemfiles/.bundle/config +0 -2
  63. data/gemfiles/ar_60.gemfile +0 -5
  64. data/gemfiles/ar_61.gemfile +0 -7
  65. data/prometheus_exporter.gemspec +0 -46
data/lib/prometheus_exporter/client.rb

@@ -1,7 +1,7 @@
  # frozen_string_literal: true

- require 'socket'
- require 'thread'
+ require "socket"
+ require "logger"

  module PrometheusExporter
  class Client
@@ -17,14 +17,10 @@ module PrometheusExporter
  end

  def standard_values(value, keys, prometheus_exporter_action = nil)
- values = {
- type: @type,
- help: @help,
- name: @name,
- keys: keys,
- value: value
- }
- values[:prometheus_exporter_action] = prometheus_exporter_action if prometheus_exporter_action
+ values = { type: @type, help: @help, name: @name, keys: keys, value: value }
+ values[
+ :prometheus_exporter_action
+ ] = prometheus_exporter_action if prometheus_exporter_action
  values[:opts] = @opts if @opts
  values
  end
@@ -53,14 +49,21 @@ module PrometheusExporter
  MAX_SOCKET_AGE = 25
  MAX_QUEUE_SIZE = 10_000

+ attr_reader :logger
+
  def initialize(
- host: ENV.fetch('PROMETHEUS_EXPORTER_HOST', 'localhost'),
- port: ENV.fetch('PROMETHEUS_EXPORTER_PORT', PrometheusExporter::DEFAULT_PORT),
+ host: ENV.fetch("PROMETHEUS_EXPORTER_HOST", "localhost"),
+ port: ENV.fetch("PROMETHEUS_EXPORTER_PORT", PrometheusExporter::DEFAULT_PORT),
  max_queue_size: nil,
  thread_sleep: 0.5,
  json_serializer: nil,
- custom_labels: nil
+ custom_labels: nil,
+ logger: Logger.new(STDERR),
+ log_level: Logger::WARN,
+ process_queue_once_and_stop: false
  )
+ @logger = logger
+ @logger.level = log_level
  @metrics = []

  @queue = Queue.new
@@ -72,9 +75,7 @@ module PrometheusExporter
  max_queue_size ||= MAX_QUEUE_SIZE
  max_queue_size = max_queue_size.to_i

- if max_queue_size.to_i <= 0
- raise ArgumentError, "max_queue_size must be larger than 0"
- end
+ raise ArgumentError, "max_queue_size must be larger than 0" if max_queue_size <= 0

  @max_queue_size = max_queue_size
  @host = host
@@ -86,6 +87,7 @@ module PrometheusExporter
  @json_serializer = json_serializer == :oj ? PrometheusExporter::OjCompat : JSON

  @custom_labels = custom_labels
+ @process_queue_once_and_stop = process_queue_once_and_stop
  end

  def custom_labels=(custom_labels)
@@ -125,7 +127,7 @@
  def send(str)
  @queue << str
  if @queue.length > @max_queue_size
- STDERR.puts "Prometheus Exporter client is dropping message cause queue is full"
+ logger.warn "Prometheus Exporter client is dropping message cause queue is full"
  @queue.pop
  end

@@ -143,8 +145,8 @@
  @socket.write(message)
  @socket.write("\r\n")
  rescue => e
- STDERR.puts "Prometheus Exporter is dropping a message: #{e}"
- @socket = nil
+ logger.warn "Prometheus Exporter is dropping a message: #{e}"
+ close_socket!
  raise
  end
  end
@@ -154,9 +156,7 @@
  @mutex.synchronize do
  wait_for_empty_queue_with_timeout(wait_timeout_seconds)
  @worker_thread&.kill
- while @worker_thread&.alive?
- sleep 0.001
- end
+ sleep 0.001 while @worker_thread&.alive?
  @worker_thread = nil
  close_socket!
  end
@@ -168,22 +168,31 @@
  close_socket_if_old!
  process_queue
  rescue => e
- STDERR.puts "Prometheus Exporter, failed to send message #{e}"
+ logger.error "Prometheus Exporter, failed to send message #{e}"
  end

  def ensure_worker_thread!
+ if @process_queue_once_and_stop
+ worker_loop
+ return
+ end
+
  unless @worker_thread&.alive?
  @mutex.synchronize do
  return if @worker_thread&.alive?

- @worker_thread = Thread.new do
- while true
- worker_loop
- sleep @thread_sleep
+ @worker_thread =
+ Thread.new do
+ while true
+ worker_loop
+ sleep @thread_sleep
+ end
  end
- end
  end
  end
+ rescue ThreadError => e
+ raise unless e.message =~ /can't alloc thread/
+ logger.error "Prometheus Exporter, failed to send message ThreadError #{e}"
  end

  def close_socket!
@@ -202,7 +211,8 @@
  end

  def close_socket_if_old!
- if @socket_pid == Process.pid && @socket && @socket_started && ((@socket_started + MAX_SOCKET_AGE) < Time.now.to_f)
+ if @socket_pid == Process.pid && @socket && @socket_started &&
+ ((@socket_started + MAX_SOCKET_AGE) < Time.now.to_f)
  close_socket!
  end
  end
@@ -230,9 +240,8 @@
  end

  nil
- rescue
- @socket = nil
- @socket_started = nil
+ rescue StandardError
+ close_socket!
  @socket_pid = nil
  raise
  end
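The client constructor gains `logger:`, `log_level:`, and `process_queue_once_and_stop:` options, and the former `STDERR.puts` calls now go through that logger. A minimal usage sketch, assuming the defaults shown in the diff (the custom logger destination is illustrative):

```ruby
require "logger"
require "prometheus_exporter/client"

# Route client warnings/errors to a custom logger instead of the default STDERR one.
client =
  PrometheusExporter::Client.new(
    host: "localhost",
    port: PrometheusExporter::DEFAULT_PORT,
    logger: Logger.new($stdout),        # added in this release
    log_level: Logger::INFO,            # added in this release
    process_queue_once_and_stop: false  # added: when true, flushes inline instead of spawning a worker thread
  )

PrometheusExporter::Client.default = client
```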
data/lib/prometheus_exporter/instrumentation/active_record.rb

@@ -2,14 +2,17 @@

  # collects stats from currently running process
  module PrometheusExporter::Instrumentation
- class ActiveRecord
- ALLOWED_CONFIG_LABELS = %i(database username host port)
+ class ActiveRecord < PeriodicStats
+ ALLOWED_CONFIG_LABELS = %i[database username host port]

  def self.start(client: nil, frequency: 30, custom_labels: {}, config_labels: [])
+ client ||= PrometheusExporter::Client.default

- # Not all rails versions support coonection pool stats
+ # Not all rails versions support connection pool stats
  unless ::ActiveRecord::Base.connection_pool.respond_to?(:stat)
- STDERR.puts("ActiveRecord connection pool stats not supported in your rails version")
+ client.logger.error(
+ "ActiveRecord connection pool stats not supported in your rails version",
+ )
  return
  end

@@ -18,33 +21,18 @@ module PrometheusExporter::Instrumentation

  active_record_collector = new(custom_labels, config_labels)

- client ||= PrometheusExporter::Client.default
-
- stop if @thread
-
- @thread = Thread.new do
- while true
- begin
- metrics = active_record_collector.collect
- metrics.each { |metric| client.send_json metric }
- rescue => e
- STDERR.puts("Prometheus Exporter Failed To Collect Process Stats #{e}")
- ensure
- sleep frequency
- end
- end
+ worker_loop do
+ metrics = active_record_collector.collect
+ metrics.each { |metric| client.send_json metric }
  end
+
+ super
  end

  def self.validate_config_labels(config_labels)
  return if config_labels.size == 0
- raise "Invalid Config Labels, available options #{ALLOWED_CONFIG_LABELS}" if (config_labels - ALLOWED_CONFIG_LABELS).size > 0
- end
-
- def self.stop
- if t = @thread
- t.kill
- @thread = nil
+ if (config_labels - ALLOWED_CONFIG_LABELS).size > 0
+ raise "Invalid Config Labels, available options #{ALLOWED_CONFIG_LABELS}"
  end
  end

@@ -71,7 +59,7 @@ module PrometheusExporter::Instrumentation
  pid: pid,
  type: "active_record",
  hostname: ::PrometheusExporter.hostname,
- metric_labels: labels(pool)
+ metric_labels: labels(pool),
  }
  metric.merge!(pool.stat)
  metrics << metric
@@ -81,15 +69,21 @@ module PrometheusExporter::Instrumentation
  private

  def labels(pool)
- if pool.respond_to?(:spec) # ActiveRecord <= 6.0
- @metric_labels.merge(pool_name: pool.spec.name).merge(pool.spec.config
- .select { |k, v| @config_labels.include? k }
- .map { |k, v| [k.to_s.dup.prepend("dbconfig_"), v] }.to_h)
- elsif pool.respond_to?(:db_config) # ActiveRecord >= 6.1.rc1
- @metric_labels.merge(pool_name: pool.db_config.name).merge(
- @config_labels.each_with_object({}) { |l, acc| acc["dbconfig_#{l}"] = pool.db_config.public_send(l) })
+ if ::ActiveRecord.version < Gem::Version.new("6.1.0.rc1")
+ @metric_labels.merge(pool_name: pool.spec.name).merge(
+ pool
+ .spec
+ .config
+ .select { |k, v| @config_labels.include? k }
+ .map { |k, v| [k.to_s.dup.prepend("dbconfig_"), v] }
+ .to_h,
+ )
  else
- raise "Unsupported connection pool"
+ @metric_labels.merge(pool_name: pool.db_config.name).merge(
+ @config_labels.each_with_object({}) do |l, acc|
+ acc["dbconfig_#{l}"] = pool.db_config.public_send(l)
+ end,
+ )
  end
  end
  end
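With the collector now built on `PeriodicStats` and the label code switching on the installed ActiveRecord version, starting it looks the same as before. A sketch, assuming a Rails initializer (label values are illustrative; `config_labels` must stay a subset of `ALLOWED_CONFIG_LABELS`):

```ruby
require "prometheus_exporter/instrumentation"

# e.g. config/initializers/prometheus.rb
PrometheusExporter::Instrumentation::ActiveRecord.start(
  frequency: 30,
  custom_labels: { type: "web" },   # illustrative
  config_labels: %i[database host]  # subset of %i[database username host port]
)
```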
data/lib/prometheus_exporter/instrumentation/delayed_job.rb

@@ -2,23 +2,33 @@

  module PrometheusExporter::Instrumentation
  class DelayedJob
- JOB_CLASS_REGEXP = %r{job_class: (\w+:{0,2})+}.freeze
+ JOB_CLASS_REGEXP = /job_class: ((\w+:{0,2})+)/.freeze

  class << self
- def register_plugin(client: nil)
+ def register_plugin(client: nil, include_module_name: false)
  instrumenter = self.new(client: client)
  return unless defined?(Delayed::Plugin)

- plugin = Class.new(Delayed::Plugin) do
- callbacks do |lifecycle|
- lifecycle.around(:invoke_job) do |job, *args, &block|
- max_attempts = Delayed::Worker.max_attempts
- enqueued_count = Delayed::Job.count
- pending_count = Delayed::Job.where(attempts: 0, locked_at: nil).count
- instrumenter.call(job, max_attempts, enqueued_count, pending_count, *args, &block)
+ plugin =
+ Class.new(Delayed::Plugin) do
+ callbacks do |lifecycle|
+ lifecycle.around(:invoke_job) do |job, *args, &block|
+ max_attempts = Delayed::Worker.max_attempts
+ enqueued_count = Delayed::Job.where(queue: job.queue).count
+ pending_count =
+ Delayed::Job.where(attempts: 0, locked_at: nil, queue: job.queue).count
+ instrumenter.call(
+ job,
+ max_attempts,
+ enqueued_count,
+ pending_count,
+ include_module_name,
+ *args,
+ &block
+ )
+ end
  end
  end
- end

  Delayed::Worker.plugins << plugin
  end
@@ -28,9 +38,12 @@ module PrometheusExporter::Instrumentation
  @client = client || PrometheusExporter::Client.default
  end

- def call(job, max_attempts, enqueued_count, pending_count, *args, &block)
+ def call(job, max_attempts, enqueued_count, pending_count, include_module_name, *args, &block)
  success = false
+ job_name = job.handler.to_s.match(JOB_CLASS_REGEXP).to_a[include_module_name ? 1 : 2].to_s
+ job_name ||= job.try(:name)
  start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+ latency = Time.current - job.run_at
  attempts = job.attempts + 1 # Increment because we're adding the current attempt
  result = block.call(job, *args)
  success = true
@@ -40,13 +53,15 @@ module PrometheusExporter::Instrumentation

  @client.send_json(
  type: "delayed_job",
- name: job.handler.to_s.match(JOB_CLASS_REGEXP).to_a[1].to_s,
+ name: job_name,
+ queue_name: job.queue,
  success: success,
  duration: duration,
+ latency: latency,
  attempts: attempts,
  max_attempts: max_attempts,
  enqueued: enqueued_count,
- pending: pending_count
+ pending: pending_count,
  )
  end
  end
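`register_plugin` now accepts `include_module_name:`, and the reported metrics gain per-queue counts plus `queue_name` and `latency`. A sketch of wiring it up in an initializer (only the new keyword differs from the previous usage):

```ruby
require "prometheus_exporter/instrumentation"

# Keep module namespaces in the reported job name (new option; defaults to false).
PrometheusExporter::Instrumentation::DelayedJob.register_plugin(include_module_name: true)
```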
data/lib/prometheus_exporter/instrumentation/good_job.rb (new file)

@@ -0,0 +1,28 @@
+ # frozen_string_literal: true
+
+ # collects stats from GoodJob
+ module PrometheusExporter::Instrumentation
+ class GoodJob < PeriodicStats
+ def self.start(client: nil, frequency: 30)
+ good_job_collector = new
+ client ||= PrometheusExporter::Client.default
+
+ worker_loop { client.send_json(good_job_collector.collect) }
+
+ super
+ end
+
+ def collect
+ {
+ type: "good_job",
+ scheduled: ::GoodJob::Job.scheduled.size,
+ retried: ::GoodJob::Job.retried.size,
+ queued: ::GoodJob::Job.queued.size,
+ running: ::GoodJob::Job.running.size,
+ finished: ::GoodJob::Job.finished.size,
+ succeeded: ::GoodJob::Job.succeeded.size,
+ discarded: ::GoodJob::Job.discarded.size,
+ }
+ end
+ end
+ end
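A sketch of starting the new GoodJob collector, assuming GoodJob is already loaded in the process (the frequency shown is the default from the diff):

```ruby
require "prometheus_exporter/instrumentation"

# Pushes the good_job counts above to the default client every 30 seconds.
PrometheusExporter::Instrumentation::GoodJob.start(frequency: 30)
```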
data/lib/prometheus_exporter/instrumentation/hutch.rb

@@ -19,7 +19,7 @@ module PrometheusExporter::Instrumentation
  type: "hutch",
  name: @klass.class.to_s,
  success: success,
- duration: duration
+ duration: duration,
  )
  end
  end
data/lib/prometheus_exporter/instrumentation/method_profiler.rb

@@ -1,32 +1,18 @@
  # frozen_string_literal: true

  # see https://samsaffron.com/archive/2017/10/18/fastest-way-to-profile-a-method-in-ruby
- module PrometheusExporter::Instrumentation; end
+ module PrometheusExporter::Instrumentation
+ end

  class PrometheusExporter::Instrumentation::MethodProfiler
- def self.patch(klass, methods, name)
- patches = methods.map do |method_name|
- <<~RUBY
- unless defined?(#{method_name}__mp_unpatched)
- alias_method :#{method_name}__mp_unpatched, :#{method_name}
- def #{method_name}(*args, &blk)
- unless prof = Thread.current[:_method_profiler]
- return #{method_name}__mp_unpatched(*args, &blk)
- end
- begin
- start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
- #{method_name}__mp_unpatched(*args, &blk)
- ensure
- data = (prof[:#{name}] ||= {duration: 0.0, calls: 0})
- data[:duration] += Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
- data[:calls] += 1
- end
- end
- end
- RUBY
- end.join("\n")
-
- klass.class_eval patches
+ def self.patch(klass, methods, name, instrument:)
+ if instrument == :alias_method
+ patch_using_alias_method(klass, methods, name)
+ elsif instrument == :prepend
+ patch_using_prepend(klass, methods, name)
+ else
+ raise ArgumentError, "instrument must be :alias_method or :prepend"
+ end
  end

  def self.transfer
@@ -36,9 +22,8 @@ class PrometheusExporter::Instrumentation::MethodProfiler
  end

  def self.start(transfer = nil)
- Thread.current[:_method_profiler] = transfer || {
- __start: Process.clock_gettime(Process::CLOCK_MONOTONIC)
- }
+ Thread.current[:_method_profiler] = transfer ||
+ { __start: Process.clock_gettime(Process::CLOCK_MONOTONIC) }
  end

  def self.clear
@@ -54,4 +39,59 @@ class PrometheusExporter::Instrumentation::MethodProfiler
  end
  data
  end
+
+ def self.define_methods_on_module(klass, methods, name)
+ patch_source_line = __LINE__ + 3
+
+ patches = methods.map { |method_name| <<~RUBY }.join("\n")
+ def #{method_name}(...)
+ unless prof = Thread.current[:_method_profiler]
+ return super
+ end
+ begin
+ start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+ super
+ ensure
+ data = (prof[:#{name}] ||= {duration: 0.0, calls: 0})
+ data[:duration] += Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
+ data[:calls] += 1
+ end
+ end
+ RUBY
+
+ klass.module_eval(patches, __FILE__, patch_source_line)
+ end
+
+ def self.patch_using_prepend(klass, methods, name)
+ prepend_instrument = Module.new
+ define_methods_on_module(prepend_instrument, methods, name)
+ klass.prepend(prepend_instrument)
+ end
+
+ def self.patch_using_alias_method(klass, methods, name)
+ patch_source_line = __LINE__ + 3
+
+ patches = methods.map { |method_name| <<~RUBY }.join("\n")
+ unless defined?(#{method_name}__mp_unpatched)
+ alias_method :#{method_name}__mp_unpatched, :#{method_name}
+
+ def #{method_name}(...)
+ unless prof = Thread.current[:_method_profiler]
+ return #{method_name}__mp_unpatched(...)
+ end
+
+ begin
+ start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+ #{method_name}__mp_unpatched(...)
+ ensure
+ data = (prof[:#{name}] ||= {duration: 0.0, calls: 0})
+ data[:duration] += Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
+ data[:calls] += 1
+ end
+ end
+ end
+ RUBY
+
+ klass.class_eval(patches, __FILE__, patch_source_line)
+ end
  end
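`patch` now requires an `instrument:` keyword choosing between the original alias-method rewrite and the new prepended-module approach. A sketch, assuming a hypothetical `MyApp::HttpClient#get` method as the profiling target (the class, method, and label are illustrative):

```ruby
require "prometheus_exporter/instrumentation"

profiler = PrometheusExporter::Instrumentation::MethodProfiler

# The instrument: keyword is now required; :prepend wraps the methods via a prepended
# module, :alias_method keeps the pre-2.x alias-based rewrite.
# MyApp::HttpClient is a stand-in for whatever class you want to profile.
profiler.patch(MyApp::HttpClient, [:get], :http, instrument: :prepend)

profiler.start
MyApp::HttpClient.new.get("/health")
timings = profiler.stop # per-name duration and call counts collected on this thread
```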
data/lib/prometheus_exporter/instrumentation/periodic_stats.rb (new file)

@@ -0,0 +1,54 @@
+ # frozen_string_literal: true
+
+ module PrometheusExporter::Instrumentation
+ class PeriodicStats
+ def self.start(*args, frequency:, client: nil, **kwargs)
+ client ||= PrometheusExporter::Client.default
+
+ raise ArgumentError.new("Expected frequency to be a number") if !(Numeric === frequency)
+
+ raise ArgumentError.new("Expected frequency to be a positive number") if frequency < 0
+
+ raise ArgumentError.new("Worker loop was not set") if !@worker_loop
+
+ klass = self
+
+ stop
+
+ @stop_thread = false
+
+ @thread =
+ Thread.new do
+ while !@stop_thread
+ begin
+ @worker_loop.call
+ rescue => e
+ client.logger.error("#{klass} Prometheus Exporter Failed To Collect Stats #{e}")
+ ensure
+ sleep frequency
+ end
+ end
+ end
+ end
+
+ def self.started?
+ !!@thread&.alive?
+ end
+
+ def self.worker_loop(&blk)
+ @worker_loop = blk
+ end
+
+ def self.stop
+ # to avoid a warning
+ @thread = nil if !defined?(@thread)
+
+ if @thread&.alive?
+ @stop_thread = true
+ @thread.wakeup
+ @thread.join
+ end
+ @thread = nil
+ end
+ end
+ end
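`PeriodicStats` is the new base class the per-process collectors above now share: a subclass registers its work with `worker_loop` and then calls `super`, which validates `frequency`, stops any previous thread, and runs the block on a timer. A sketch of a custom collector following the same pattern (the class name and payload are illustrative, and a matching server-side collector would still be needed for the custom type, as with the gem's own collectors):

```ruby
require "prometheus_exporter/instrumentation"

module PrometheusExporter::Instrumentation
  class MyAppStats < PeriodicStats
    def self.start(client: nil, frequency: 30)
      client ||= PrometheusExporter::Client.default

      # The block is stored and invoked by PeriodicStats.start on its own thread.
      worker_loop { client.send_json(type: "my_app_stats", open_tickets: 42) }

      super
    end
  end
end

PrometheusExporter::Instrumentation::MyAppStats.start(frequency: 10)
```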
data/lib/prometheus_exporter/instrumentation/process.rb

@@ -2,11 +2,8 @@

  # collects stats from currently running process
  module PrometheusExporter::Instrumentation
- class Process
- @thread = nil if !defined?(@thread)
-
+ class Process < PeriodicStats
  def self.start(client: nil, type: "ruby", frequency: 30, labels: nil)
-
  metric_labels =
  if labels && type
  labels.merge(type: type)
@@ -19,27 +16,12 @@ module PrometheusExporter::Instrumentation
  process_collector = new(metric_labels)
  client ||= PrometheusExporter::Client.default

- stop if @thread
-
- @thread = Thread.new do
- while true
- begin
- metric = process_collector.collect
- client.send_json metric
- rescue => e
- STDERR.puts("Prometheus Exporter Failed To Collect Process Stats #{e}")
- ensure
- sleep frequency
- end
- end
+ worker_loop do
+ metric = process_collector.collect
+ client.send_json metric
  end
- end

- def self.stop
- if t = @thread
- t.kill
- @thread = nil
- end
+ super
  end

  def initialize(metric_labels)
@@ -62,16 +44,26 @@ module PrometheusExporter::Instrumentation
  end

  def rss
- @pagesize ||= `getconf PAGESIZE`.to_i rescue 4096
- File.read("/proc/#{pid}/statm").split(' ')[1].to_i * @pagesize rescue 0
+ @pagesize ||=
+ begin
+ `getconf PAGESIZE`.to_i
+ rescue StandardError
+ 4096
+ end
+ begin
+ File.read("/proc/#{pid}/statm").split(" ")[1].to_i * @pagesize
+ rescue StandardError
+ 0
+ end
  end

  def collect_process_stats(metric)
  metric[:pid] = pid
  metric[:rss] = rss
-
  end

+ SWEEPING_AND_MARKING = RUBY_VERSION >= "3.3.0"
+
  def collect_gc_stats(metric)
  stat = GC.stat
  metric[:heap_live_slots] = stat[:heap_live_slots]
@@ -79,10 +71,16 @@ module PrometheusExporter::Instrumentation
  metric[:major_gc_ops_total] = stat[:major_gc_count]
  metric[:minor_gc_ops_total] = stat[:minor_gc_count]
  metric[:allocated_objects_total] = stat[:total_allocated_objects]
+ metric[:malloc_increase_bytes_limit] = stat[:malloc_increase_bytes_limit]
+ metric[:oldmalloc_increase_bytes_limit] = stat[:oldmalloc_increase_bytes_limit]
+ if SWEEPING_AND_MARKING
+ metric[:marking_time] = stat[:marking_time]
+ metric[:sweeping_time] = stat[:sweeping_time]
+ end
  end

  def collect_v8_stats(metric)
- return if !defined? MiniRacer
+ return if !defined?(MiniRacer)

  metric[:v8_heap_count] = metric[:v8_heap_size] = 0
  metric[:v8_heap_size] = metric[:v8_physical_size] = 0