prometheus_exporter 0.4.9 → 0.5.0
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- checksums.yaml +4 -4
- data/.travis.yml +6 -1
- data/CHANGELOG +43 -0
- data/Gemfile +2 -0
- data/Guardfile +2 -0
- data/README.md +84 -3
- data/Rakefile +2 -0
- data/bench/bench.rb +2 -0
- data/bin/prometheus_exporter +8 -1
- data/examples/custom_collector.rb +2 -0
- data/lib/prometheus_exporter/client.rb +19 -7
- data/lib/prometheus_exporter/instrumentation/active_record.rb +87 -0
- data/lib/prometheus_exporter/instrumentation/delayed_job.rb +9 -3
- data/lib/prometheus_exporter/instrumentation/method_profiler.rb +2 -0
- data/lib/prometheus_exporter/instrumentation/process.rb +1 -12
- data/lib/prometheus_exporter/instrumentation/puma.rb +4 -2
- data/lib/prometheus_exporter/instrumentation/shoryuken.rb +31 -0
- data/lib/prometheus_exporter/instrumentation/sidekiq.rb +9 -5
- data/lib/prometheus_exporter/instrumentation/unicorn.rb +8 -6
- data/lib/prometheus_exporter/instrumentation.rb +5 -0
- data/lib/prometheus_exporter/metric/base.rb +8 -0
- data/lib/prometheus_exporter/metric/counter.rb +9 -1
- data/lib/prometheus_exporter/metric/gauge.rb +9 -1
- data/lib/prometheus_exporter/metric/histogram.rb +14 -0
- data/lib/prometheus_exporter/metric/summary.rb +15 -1
- data/lib/prometheus_exporter/metric.rb +2 -0
- data/lib/prometheus_exporter/server/active_record_collector.rb +56 -0
- data/lib/prometheus_exporter/server/collector.rb +9 -2
- data/lib/prometheus_exporter/server/collector_base.rb +2 -0
- data/lib/prometheus_exporter/server/delayed_job_collector.rb +14 -1
- data/lib/prometheus_exporter/server/hutch_collector.rb +2 -0
- data/lib/prometheus_exporter/server/process_collector.rb +1 -0
- data/lib/prometheus_exporter/server/puma_collector.rb +11 -1
- data/lib/prometheus_exporter/server/runner.rb +8 -30
- data/lib/prometheus_exporter/server/shoryuken_collector.rb +59 -0
- data/lib/prometheus_exporter/server/sidekiq_collector.rb +2 -0
- data/lib/prometheus_exporter/server/type_collector.rb +2 -0
- data/lib/prometheus_exporter/server/unicorn_collector.rb +8 -1
- data/lib/prometheus_exporter/server/web_server.rb +2 -1
- data/lib/prometheus_exporter/server.rb +4 -0
- data/lib/prometheus_exporter/version.rb +3 -1
- data/lib/prometheus_exporter.rb +14 -0
- data/prometheus_exporter.gemspec +17 -13
- metadata +38 -6
- data/.travis +0 -9
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c77ede3528c6008266620498f698c8a0e9e17bfa0c0691fedf5a3c4c3bdb8ebd
+  data.tar.gz: 62f8b159f68036f05b785fea4eebef48df66606b083136eda5c37e4d3c294132
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2c0699014753102b514f1bd6fa965cd82f6d83c34084daf4a3b07537b9224295a1df02aedce3082588e44f4c001e71ea83178456bd7efd8a8eae836b2fc6e9a7
+  data.tar.gz: df738517c22422ba9e4e2e7cfd6ea6bbcf7542175a827b1632e7ff69af4651d4acf45bcec9693c4dd899b9b305b952fac989999ebea23013f3ddad0d3c396e00
data/.travis.yml
CHANGED
@@ -2,6 +2,11 @@ sudo: required
 language: ruby
 dist: trusty
 rvm:
-- 2.
+- 2.3
+- 2.4
+- 2.5
+- 2.6
 before_install: gem install bundler -v 1.16.1
 before_script: sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'
+
+script: bundle exec rubocop && bundle exec rake
data/CHANGELOG
CHANGED
@@ -1,3 +1,46 @@
+0.5.0 - 14-02-2019
+
+- Breaking change: listen only to localhost by default to prevent unintended insecure configuration
+- FIX: Avoid calling `hostname` aggressively, instead cache it on the exporter instance
+
+0.4.17 - 13-01-2019
+
+- FEATURE: add support for `to_h` on all metrics which can be used to query existing key/values
+
+0.4.16 - 04-11-2019
+
+- FEATURE: Support #reset! on all metric types to reset a metric to default
+
+0.4.15 - 04-11-2019
+
+- FEATURE: Improve delayed job collector, add pending counts
+- FEATURE: New ActiveRecord collector (documented in readme)
+- FEATURE: Allow passing in histogram and summary options
+- FEATURE: Allow custom labels for unicorn collector
+
+0.4.14 - 10-09-2019
+
+- FEATURE: allow finding metrics by name RemoteMetric #find_registered_metric
+- FIX: guard socket closing
+
+0.4.13 - 09-07-2019
+
+- Fix: Memory leak in unicorn and puma collectors
+
+0.4.12 - 30-05-2019
+
+- Fix: unicorn collector reporting incorrect number of unicorn workers
+
+0.4.11 - 15-05-2019
+
+- Fix: Handle stopping nil worker_threads in Client
+- Dev: add frozen string literals
+
+0.4.10 - 29-04-2019
+
+- Fix: Custom label support for puma collector
+- Fix: Raindrops socket collector not working correctly
+
 0.4.9 - 11-04-2019
 
 - Fix: Gem was not working correctly in Ruby 2.4 and below due to a syntax error
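The 0.5.0 breaking change means an exporter that previously accepted scrapes from other hosts now only answers on localhost. A minimal sketch of opting back in via the new `bind` setting, based on the `WebServer.new bind:` option shown in the README diff below (the `0.0.0.0` value is illustrative, not a recommendation):

```ruby
require 'prometheus_exporter/server'

# listen on all interfaces again; prefer a specific private address where possible
server = PrometheusExporter::Server::WebServer.new bind: '0.0.0.0', port: 9394
server.start
```

The same can be done from the command line with the new `-b/--bind` flag added to `bin/prometheus_exporter` in this release.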
data/Gemfile
CHANGED
data/Guardfile
CHANGED
data/README.md
CHANGED
@@ -13,10 +13,12 @@ To learn more see [Instrumenting Rails with Prometheus](https://samsaffron.com/a
 * [Rails integration](#rails-integration)
 * [Per-process stats](#per-process-stats)
 * [Sidekiq metrics](#sidekiq-metrics)
+* [Shoryuken metrics](#shoryuken-metrics)
+* [ActiveRecord Connection Pool Metrics](#activerecord-connection-pool-metrics)
 * [Delayed Job plugin](#delayed-job-plugin)
 * [Hutch metrics](#hutch-message-processing-tracer)
 * [Puma metrics](#puma-metrics)
-* [Unicorn metrics](#unicorn-metrics)
+* [Unicorn metrics](#unicorn-process-metrics)
 * [Custom type collectors](#custom-type-collectors)
 * [Multi process mode with custom collector](#multi-process-mode-with-custom-collector)
 * [GraphQL support](#graphql-support)
@@ -61,8 +63,9 @@ require 'prometheus_exporter/server'
 require 'prometheus_exporter/client'
 require 'prometheus_exporter/instrumentation'
 
+# bind is the address, on which the webserver will listen
 # port is the port that will provide the /metrics route
-server = PrometheusExporter::Server::WebServer.new port: 12345
+server = PrometheusExporter::Server::WebServer.new bind: 'localhost', port: 12345
 server.start
 
 # wire up a default local client
@@ -114,7 +117,7 @@ In some cases (for example, unicorn or puma clusters) you may want to aggregate
 
 Simplest way to achieve this is to use the built-in collector.
 
-First, run an exporter on your desired port (we use the default port of 9394):
+First, run an exporter on your desired port (we use the default bind to localhost and port of 9394):
 
 ```
 $ prometheus_exporter
@@ -148,6 +151,17 @@ awesome 10
 
 ```
 
+Custom quantiles for summaries and buckets for histograms can also be passed in.
+
+```ruby
+require 'prometheus_exporter/client'
+
+client = PrometheusExporter::Client.default
+histogram = client.register(:histogram, "api_time", "time to call api", buckets: [0.1, 0.5, 1])
+
+histogram.observe(0.2, api: 'twitter')
+```
+
 ### Rails integration
 
 You can easily integrate into any Rack application.
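The hunk above only documents the histogram `buckets:` option. The changelog entry "Allow passing in histogram and summary options" suggests a matching summary form; the `quantiles:` keyword in this sketch is an assumption and does not appear anywhere in this diff:

```ruby
require 'prometheus_exporter/client'

client = PrometheusExporter::Client.default

# `quantiles:` is an assumed keyword; only the histogram `buckets:` option is shown in this diff
summary = client.register(:summary, "api_time_summary", "time to call api", quantiles: [0.99, 0.9, 0.5])
summary.observe(0.2, api: 'twitter')
```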
@@ -175,6 +189,60 @@ Ensure you run the exporter in a monitored background process:
 $ bundle exec prometheus_exporter
 ```
 
+#### Activerecord Connection Pool Metrics
+
+This collects activerecord connection pool metrics.
+
+It supports injection of custom labels and the connection config options (`username`, `database`, `host`, `port`) as labels.
+
+For Puma single mode
+```ruby
+#in puma.rb
+require 'prometheus_exporter/instrumentation'
+PrometheusExporter::Instrumentation::ActiveRecord.start(
+  custom_labels: { type: "puma_single_mode" }, #optional params
+  config_labels: [:database, :host] #optional params
+)
+```
+
+For Puma cluster mode
+
+```ruby
+# in puma.rb
+on_worker_boot do
+  require 'prometheus_exporter/instrumentation'
+  PrometheusExporter::Instrumentation::ActiveRecord.start(
+    custom_labels: { type: "puma_worker" }, #optional params
+    config_labels: [:database, :host] #optional params
+  )
+end
+```
+
+For Unicorn / Passenger
+
+```ruby
+after_fork do |_server, _worker|
+  require 'prometheus_exporter/instrumentation'
+  PrometheusExporter::Instrumentation::ActiveRecord.start(
+    custom_labels: { type: "unicorn_worker" }, #optional params
+    config_labels: [:database, :host] #optional params
+  )
+end
+```
+
+For Sidekiq
+```ruby
+Sidekiq.configure_server do |config|
+  config.on :startup do
+    require 'prometheus_exporter/instrumentation'
+    PrometheusExporter::Instrumentation::ActiveRecord.start(
+      custom_labels: { type: "sidekiq" }, #optional params
+      config_labels: [:database, :host] #optional params
+    )
+  end
+end
+```
+
 #### Per-process stats
 
 You may also be interested in per-process stats. This collects memory and GC stats:
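Each of the `start` calls above spins up a background collection thread; the instrumentation class added further down in this diff also exposes a matching `stop`, which can be useful in tests or before re-forking:

```ruby
require 'prometheus_exporter/instrumentation'

# kills the collection thread started by ActiveRecord.start, if one is running
PrometheusExporter::Instrumentation::ActiveRecord.stop
```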
@@ -231,6 +299,19 @@ Sometimes the Sidekiq server shuts down before it can send metrics, that were ge
 end
 ```
 
+#### Shoryuken metrics
+
+For Shoryuken metrics (how many jobs ran? how many failed? how long did they take? how many were restarted?)
+
+```ruby
+Shoryuken.configure_server do |config|
+  config.server_middleware do |chain|
+    require 'prometheus_exporter/instrumentation'
+    chain.add PrometheusExporter::Instrumentation::Shoryuken
+  end
+end
+```
+
 #### Delayed Job plugin
 
 In an initializer:
data/Rakefile
CHANGED
data/bench/bench.rb
CHANGED
data/bin/prometheus_exporter
CHANGED
@@ -1,4 +1,5 @@
 #!/usr/bin/env ruby
+# frozen_string_literal: true
 
 require 'optparse'
 
@@ -18,6 +19,12 @@ def run
          "Port exporter should listen on (default: #{PrometheusExporter::DEFAULT_PORT})") do |o|
     options[:port] = o.to_i
   end
+  opt.on('-b',
+         '--bind STRING',
+         String,
+         "IP address exporter should listen on (default: #{PrometheusExporter::DEFAULT_BIND_ADDRESS})") do |o|
+    options[:bind] = o.to_s
+  end
   opt.on('-t',
          '--timeout INTEGER',
          Integer,
@@ -80,7 +87,7 @@ def run
 
   runner = PrometheusExporter::Server::Runner.new(options)
 
-  puts "#{Time.now} Starting prometheus exporter on
+  puts "#{Time.now} Starting prometheus exporter on #{runner.bind}:#{runner.port}"
   runner.start
   sleep
 end
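The new `-b/--bind` flag only adds a `bind` key to the options hash handed to `Server::Runner`. A rough programmatic equivalent, assuming `Runner.new` accepts the same `bind:`/`port:` keys the CLI collects (the runner changes themselves are not shown in this diff):

```ruby
require 'prometheus_exporter/server'

runner = PrometheusExporter::Server::Runner.new(bind: 'localhost', port: 9394)
puts "#{Time.now} Starting prometheus exporter on #{runner.bind}:#{runner.port}"
runner.start
sleep
```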
data/lib/prometheus_exporter/client.rb
CHANGED
@@ -4,14 +4,16 @@ require 'socket'
 require 'thread'
 
 module PrometheusExporter
-
   class Client
     class RemoteMetric
-
+      attr_reader :name, :type, :help
+
+      def initialize(name:, help:, type:, client:, opts: nil)
        @name = name
        @help = help
        @client = client
        @type = type
+        @opts = opts
      end
 
      def standard_values(value, keys, prometheus_exporter_action = nil)
@@ -23,6 +25,7 @@ module PrometheusExporter
          value: value
        }
        values[:prometheus_exporter_action] = prometheus_exporter_action if prometheus_exporter_action
+        values[:opts] = @opts if @opts
        values
      end
 
@@ -37,7 +40,6 @@ module PrometheusExporter
      def decrement(keys = nil, value = 1)
        @client.send_json(standard_values(value, keys, :decrement))
      end
-
    end
 
    def self.default
@@ -81,12 +83,22 @@ module PrometheusExporter
      @custom_labels = custom_labels
    end
 
-    def register(type, name, help)
-      metric = RemoteMetric.new(type: type, name: name, help: help, client: self)
+    def register(type, name, help, opts = nil)
+      metric = RemoteMetric.new(type: type, name: name, help: help, client: self, opts: opts)
      @metrics << metric
      metric
    end
 
+    def find_registered_metric(name, type: nil, help: nil)
+      @metrics.find do |metric|
+        type_match = type ? metric.type == type : true
+        help_match = help ? metric.help == help : true
+        name_match = metric.name == name
+
+        type_match && help_match && name_match
+      end
+    end
+
    def send_json(obj)
      payload = @custom_labels.nil? ? obj : obj.merge(custom_labels: @custom_labels)
      send(@json_serializer.dump(payload))
@@ -124,12 +136,12 @@ module PrometheusExporter
      @mutex.synchronize do
        wait_for_empty_queue_with_timeout(wait_timeout_seconds)
        @worker_thread&.kill
-        while @worker_thread
+        while @worker_thread&.alive?
          sleep 0.001
        end
        @worker_thread = nil
-      end
        close_socket!
+      end
    end
  end
 
  private
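Taken together, the client changes let options ride along with a registered metric and let callers re-locate a metric instead of holding their own reference. A short sketch against the API shown in this hunk:

```ruby
require 'prometheus_exporter/client'

client = PrometheusExporter::Client.default

# the trailing opts hash is forwarded with every payload for this metric
histogram = client.register(:histogram, "api_time", "time to call api", buckets: [0.1, 0.5, 1])
histogram.observe(0.2, api: 'twitter')

# later, look the metric up again by name (and optionally type/help)
if (metric = client.find_registered_metric("api_time", type: :histogram))
  metric.observe(0.4, api: 'github')
end
```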
data/lib/prometheus_exporter/instrumentation/active_record.rb
CHANGED
@@ -0,0 +1,87 @@
+# frozen_string_literal: true
+
+# collects stats from currently running process
+module PrometheusExporter::Instrumentation
+  class ActiveRecord
+    ALLOWED_CONFIG_LABELS = %i(database username host port)
+
+    def self.start(client: nil, frequency: 30, custom_labels: {}, config_labels: [])
+
+      # Not all rails versions support coonection pool stats
+      unless ::ActiveRecord::Base.connection_pool.respond_to?(:stat)
+        STDERR.puts("ActiveRecord connection pool stats not supported in your rails version")
+        return
+      end
+
+      config_labels.map!(&:to_sym)
+      validate_config_labels(config_labels)
+
+      active_record_collector = new(custom_labels, config_labels)
+
+      client ||= PrometheusExporter::Client.default
+
+      stop if @thread
+
+      @thread = Thread.new do
+        while true
+          begin
+            metrics = active_record_collector.collect
+            metrics.each { |metric| client.send_json metric }
+          rescue => e
+            STDERR.puts("Prometheus Exporter Failed To Collect Process Stats #{e}")
+          ensure
+            sleep frequency
+          end
+        end
+      end
+    end
+
+    def self.validate_config_labels(config_labels)
+      return if config_labels.size == 0
+      raise "Invalid Config Labels, available options #{ALLOWED_CONFIG_LABELS}" if (config_labels - ALLOWED_CONFIG_LABELS).size > 0
+    end
+
+    def self.stop
+      if t = @thread
+        t.kill
+        @thread = nil
+      end
+    end
+
+    def initialize(metric_labels, config_labels)
+      @metric_labels = metric_labels
+      @config_labels = config_labels
+    end
+
+    def collect
+      metrics = []
+      collect_active_record_pool_stats(metrics)
+      metrics
+    end
+
+    def pid
+      @pid = ::Process.pid
+    end
+
+    def collect_active_record_pool_stats(metrics)
+      ObjectSpace.each_object(::ActiveRecord::ConnectionAdapters::ConnectionPool) do |pool|
+        next if pool.connections.nil?
+
+        labels_from_config = pool.spec.config
+          .select { |k, v| @config_labels.include? k }
+          .map { |k, v| [k.to_s.prepend("dbconfig_"), v] }
+
+        labels = @metric_labels.merge(pool_name: pool.spec.name).merge(Hash[labels_from_config])
+
+        metric = {
+          pid: pid,
+          type: "active_record",
+          hostname: ::PrometheusExporter.hostname,
+          metric_labels: labels
+        }
+        metric.merge!(pool.stat)
+        metrics << metric
+      end
+    end
+  end
+end
data/lib/prometheus_exporter/instrumentation/delayed_job.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module PrometheusExporter::Instrumentation
   class DelayedJob
     JOB_CLASS_REGEXP = %r{job_class: (\w+:{0,2})+}.freeze
@@ -11,7 +13,9 @@ module PrometheusExporter::Instrumentation
      callbacks do |lifecycle|
        lifecycle.around(:invoke_job) do |job, *args, &block|
          max_attempts = Delayed::Worker.max_attempts
-
+          enqueued_count = Delayed::Job.count
+          pending_count = Delayed::Job.where(attempts: 0, locked_at: nil).count
+          instrumenter.call(job, max_attempts, enqueued_count, pending_count, *args, &block)
        end
      end
    end
@@ -24,7 +28,7 @@ module PrometheusExporter::Instrumentation
      @client = client || PrometheusExporter::Client.default
    end
 
-    def call(job, max_attempts, *args, &block)
+    def call(job, max_attempts, enqueued_count, pending_count, *args, &block)
      success = false
      start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      attempts = job.attempts + 1 # Increment because we're adding the current attempt
@@ -40,7 +44,9 @@ module PrometheusExporter::Instrumentation
        success: success,
        duration: duration,
        attempts: attempts,
-        max_attempts: max_attempts
+        max_attempts: max_attempts,
+        enqueued: enqueued_count,
+        pending: pending_count
      )
    end
  end
data/lib/prometheus_exporter/instrumentation/process.rb
CHANGED
@@ -42,24 +42,13 @@ module PrometheusExporter::Instrumentation
 
    def initialize(metric_labels)
      @metric_labels = metric_labels
-      @hostname = nil
-    end
-
-    def hostname
-      @hostname ||=
-        begin
-          `hostname`.strip
-        rescue => e
-          STDERR.puts "Unable to lookup hostname #{e}"
-          "unknown-host"
-        end
    end
 
    def collect
      metric = {}
      metric[:type] = "process"
      metric[:metric_labels] = @metric_labels
-      metric[:hostname] = hostname
+      metric[:hostname] = ::PrometheusExporter.hostname
      collect_gc_stats(metric)
      collect_v8_stats(metric)
      collect_process_stats(metric)
data/lib/prometheus_exporter/instrumentation/puma.rb
CHANGED
@@ -1,4 +1,6 @@
-
+# frozen_string_literal: true
+
+require "json"
 
 # collects stats from puma
 module PrometheusExporter::Instrumentation
@@ -30,7 +32,7 @@ module PrometheusExporter::Instrumentation
    def collect_puma_stats(metric)
      stats = JSON.parse(::Puma.stats)
 
-      if stats.key?
+      if stats.key?("workers")
        metric[:phase] = stats["phase"]
        metric[:workers_total] = stats["workers"]
        metric[:booted_workers_total] = stats["booted_workers"]
data/lib/prometheus_exporter/instrumentation/shoryuken.rb
CHANGED
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Instrumentation
+  class Shoryuken
+
+    def initialize(client: nil)
+      @client = client || PrometheusExporter::Client.default
+    end
+
+    def call(worker, queue, msg, body)
+      success = false
+      start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      result = yield
+      success = true
+      result
+    rescue ::Shoryuken::Shutdown => e
+      shutdown = true
+      raise e
+    ensure
+      duration = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start
+      @client.send_json(
+        type: "shoryuken",
+        queue: queue,
+        name: worker.class.name,
+        success: success,
+        shutdown: shutdown,
+        duration: duration
+      )
+    end
+  end
+end
data/lib/prometheus_exporter/instrumentation/sidekiq.rb
CHANGED
@@ -4,11 +4,15 @@ module PrometheusExporter::Instrumentation
   class Sidekiq
     def self.death_handler
       -> (job, ex) do
-
-
-
-
-
+        job_is_fire_and_forget = job["retry"] == false
+
+        unless job_is_fire_and_forget
+          PrometheusExporter::Client.default.send_json(
+            type: "sidekiq",
+            name: job["class"],
+            dead: true,
+          )
+        end
       end
     end
 
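The reworked handler only reports jobs that still had retries left (`retry: false` jobs are skipped). One way to wire it up, assuming Sidekiq's standard `death_handlers` hook (the wiring itself is not part of this diff):

```ruby
require 'prometheus_exporter/instrumentation'

Sidekiq.configure_server do |config|
  # death handlers receive (job, ex), matching the lambda above
  config.death_handlers << PrometheusExporter::Instrumentation::Sidekiq.death_handler
end
```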
data/lib/prometheus_exporter/instrumentation/unicorn.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 begin
   require 'raindrops'
 rescue LoadError
@@ -7,7 +9,7 @@ end
 module PrometheusExporter::Instrumentation
   # collects stats from unicorn
   class Unicorn
-    def self.start(pid_file:, listener_address:, client
+    def self.start(pid_file:, listener_address:, client: nil, frequency: 30)
      unicorn_collector = new(pid_file: pid_file, listener_address: listener_address)
      client ||= PrometheusExporter::Client.default
      Thread.new do
@@ -49,21 +51,21 @@ module PrometheusExporter::Instrumentation
 
    def worker_process_count
      return nil unless File.exist?(@pid_file)
-      pid = File.read(@pid_file)
+      pid = File.read(@pid_file).to_i
 
-      return nil
+      return nil if pid < 1
 
      # find all processes whose parent is the unicorn master
      # but we're actually only interested in the number of processes (= lines of output)
-      result = `
+      result = `pgrep -P #{pid} -f unicorn -a`
      result.lines.count
    end
 
    def listener_address_stats
      if @tcp
-        Raindrops::Linux.tcp_listener_stats(@listener_address)[@listener_address]
+        Raindrops::Linux.tcp_listener_stats([@listener_address])[@listener_address]
      else
-        Raindrops::Linux.unix_listener_stats(@listener_address)[@listener_address]
+        Raindrops::Linux.unix_listener_stats([@listener_address])[@listener_address]
      end
    end
  end
data/lib/prometheus_exporter/instrumentation.rb
CHANGED
@@ -1,3 +1,6 @@
+# frozen_string_literal: true
+
+require_relative "client"
 require_relative "instrumentation/process"
 require_relative "instrumentation/method_profiler"
 require_relative "instrumentation/sidekiq"
@@ -5,3 +8,5 @@ require_relative "instrumentation/delayed_job"
 require_relative "instrumentation/puma"
 require_relative "instrumentation/hutch"
 require_relative "instrumentation/unicorn"
+require_relative "instrumentation/active_record"
+require_relative "instrumentation/shoryuken"
data/lib/prometheus_exporter/metric/base.rb
CHANGED
@@ -34,6 +34,14 @@ module PrometheusExporter::Metric
      raise "Not implemented"
    end
 
+    def reset!
+      raise "Not implemented"
+    end
+
+    def to_h
+      raise "Not implemented"
+    end
+
    def from_json(json)
      json = JSON.parse(json) if String === json
      @name = json["name"]
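`reset!` and `to_h` are declared here on the base class and implemented per metric type; the counter hunk that follows is one concrete implementation. A minimal usage sketch, assuming direct use of the server-side metric classes and that `prometheus_exporter/metric` loads them:

```ruby
require 'prometheus_exporter/metric'

counter = PrometheusExporter::Metric::Counter.new("jobs_processed", "jobs processed by this worker")
counter.observe(1, queue: "default")
counter.observe(2, queue: "default")

counter.to_h   # => { { queue: "default" } => 3 }
counter.reset! # clears the collected data
counter.to_h   # => {}
```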
data/lib/prometheus_exporter/metric/counter.rb
CHANGED
@@ -6,19 +6,27 @@ module PrometheusExporter::Metric
 
    def initialize(name, help)
      super
-
+      reset!
    end
 
    def type
      "counter"
    end
 
+    def reset!
+      @data = {}
+    end
+
    def metric_text
      @data.map do |labels, value|
        "#{prefix(@name)}#{labels_text(labels)} #{value}"
      end.join("\n")
    end
 
+    def to_h
+      @data.dup
+    end
+
    def observe(increment = 1, labels = {})
      @data[labels] ||= 0
      @data[labels] += increment