prometheus_exporter 2.0.7 → 2.1.0
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +48 -2
- data/Appraisals +6 -2
- data/CHANGELOG +10 -0
- data/Dockerfile +9 -0
- data/README.md +55 -6
- data/gemfiles/ar_70.gemfile +5 -0
- data/lib/prometheus_exporter/client.rb +34 -19
- data/lib/prometheus_exporter/instrumentation/delayed_job.rb +2 -0
- data/lib/prometheus_exporter/instrumentation/good_job.rb +30 -0
- data/lib/prometheus_exporter/instrumentation/sidekiq.rb +16 -14
- data/lib/prometheus_exporter/instrumentation.rb +1 -0
- data/lib/prometheus_exporter/metric/histogram.rb +1 -1
- data/lib/prometheus_exporter/server/active_record_collector.rb +9 -13
- data/lib/prometheus_exporter/server/collector.rb +1 -0
- data/lib/prometheus_exporter/server/delayed_job_collector.rb +7 -1
- data/lib/prometheus_exporter/server/good_job_collector.rb +52 -0
- data/lib/prometheus_exporter/server/process_collector.rb +8 -13
- data/lib/prometheus_exporter/server/puma_collector.rb +1 -1
- data/lib/prometheus_exporter/server/resque_collector.rb +3 -7
- data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +2 -2
- data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb +2 -2
- data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb +2 -2
- data/lib/prometheus_exporter/server/unicorn_collector.rb +32 -33
- data/lib/prometheus_exporter/server/web_server.rb +65 -39
- data/lib/prometheus_exporter/server.rb +1 -0
- data/lib/prometheus_exporter/version.rb +1 -1
- data/lib/prometheus_exporter.rb +12 -13
- data/prometheus_exporter.gemspec +0 -2
- metadata +7 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: '042907307e8e1555111f34fb69d6cf66127e7742a76130952e3708a742ada24a'
+  data.tar.gz: 1e474618e9ddfa870a47e3ba7d7e55935677f3770d73968bb60457474b49502d
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: '0137930945e0b254dbf0d7245d696d782044bfee9dd255ff022ffadaddafb47cdefc1aa6f83927bc84866d87fee761027590c2f08fbe730496e7099e68a883b7'
+  data.tar.gz: 562691a91a7f4a0e380b92236f3295177481763393db7b0db9d8763f6ac631d317e6bd21bd3368471029742b8e649a1c2a3884fa2aafe93d280aa353909d9ecd
data/.github/workflows/ci.yml
CHANGED
@@ -8,6 +8,13 @@ on:
   schedule:
     - cron: "0 0 * * 0" # weekly
 
+permissions:
+  contents: write
+  packages: write
+
+env:
+  DOCKER_REPO: ghcr.io/discourse/prometheus_exporter
+
 jobs:
   build:
     runs-on: ubuntu-latest
@@ -20,8 +27,8 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        ruby: ['
-        activerecord: [60, 61]
+        ruby: ['3.0', '3.1', '3.2', '3.3']
+        activerecord: [60, 61, 70]
 
     steps:
       - uses: actions/checkout@v2
@@ -42,12 +49,51 @@ jobs:
     needs: build
     runs-on: ubuntu-latest
 
+    outputs:
+      new_version_published: ${{ steps.release.outputs.new_version_published }}
+
     steps:
       - uses: actions/checkout@v2
 
      - name: Release gem
+       id: release
       uses: discourse/publish-rubygems-action@v2
       env:
         RUBYGEMS_API_KEY: ${{ secrets.RUBYGEMS_API_KEY }}
         GIT_EMAIL: team@discourse.org
         GIT_NAME: discoursebot
+
+  publish_docker:
+    needs: publish
+    if: needs.publish.outputs.new_version_published == 'true'
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+
+    steps:
+      - uses: actions/checkout@v3
+      - uses: docker/setup-qemu-action@v2
+      - uses: docker/setup-buildx-action@v2
+
+      - name: Set vars
+        id: vars
+        run: |
+          ruby -r ./lib/prometheus_exporter/version.rb -e 'print "version=#{PrometheusExporter::VERSION}"' >> $GITHUB_OUTPUT
+
+      - name: Login to Github Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build and push images
+        uses: docker/build-push-action@v3
+        with:
+          context: .
+          push: true
+          platforms: linux/amd64,linux/arm64
+          build-args: |
+            GEM_VERSION=${{ steps.vars.outputs.version }}
+          tags: |
+            ${{ env.DOCKER_REPO }}:${{ steps.vars.outputs.version }}
+            ${{ env.DOCKER_REPO }}:latest
data/Appraisals
CHANGED
@@ -1,10 +1,14 @@
 # frozen_string_literal: true
 
 appraise "ar-60" do
-
-  # gem "activerecord", "~> 6.0.0"
+  gem "activerecord", "~> 6.0.0"
 end
 
 appraise "ar-61" do
   gem "activerecord", "~> 6.1.1"
 end
+
+appraise "ar-70" do
+  # latest version
+  gem "activerecord", "~> 7.1.2"
+end
data/CHANGELOG
CHANGED
@@ -1,3 +1,13 @@
+2.1.0 - 2024-08-01
+
+- FEATURE: good_job instrumentation
+- PERF: improve performance of histogram
+- DEV: use new metric collector pattern so we reuse code between collectors
+
+2.0.8 - 2023-01-20
+
+- FEATURE: attempting to make our first docker release
+
 2.0.7 - 2023-01-13
 
 - FEATURE: allow binding server to both ipv4 and v6
data/Dockerfile
ADDED
data/README.md
CHANGED
@@ -21,6 +21,7 @@ To learn more see [Instrumenting Rails with Prometheus](https://samsaffron.com/a
 * [Puma metrics](#puma-metrics)
 * [Unicorn metrics](#unicorn-process-metrics)
 * [Resque metrics](#resque-metrics)
+* [GoodJob metrics](#goodjob-metrics)
 * [Custom type collectors](#custom-type-collectors)
 * [Multi process mode with custom collector](#multi-process-mode-with-custom-collector)
 * [GraphQL support](#graphql-support)
@@ -28,10 +29,11 @@ To learn more see [Instrumenting Rails with Prometheus](https://samsaffron.com/a
 * [Client default labels](#client-default-labels)
 * [Client default host](#client-default-host)
 * [Histogram mode](#histogram-mode)
-* [Histogram - custom buckets](#histogram
+* [Histogram - custom buckets](#histogram---custom-buckets)
 * [Transport concerns](#transport-concerns)
 * [JSON generation and parsing](#json-generation-and-parsing)
 * [Logging](#logging)
+* [Docker Usage](#docker-usage)
 * [Contributing](#contributing)
 * [License](#license)
 * [Code of Conduct](#code-of-conduct)
@@ -187,7 +189,7 @@ gem 'prometheus_exporter'
 In an initializer:
 
 ```ruby
-unless Rails.env
+unless Rails.env.test?
   require 'prometheus_exporter/middleware'
 
   # This reports stats per request like HTTP status and timings
@@ -340,7 +342,7 @@ You may also be interested in per-process stats. This collects memory and GC sta
 
 ```ruby
 # in an initializer
-unless Rails.env
+unless Rails.env.test?
   require 'prometheus_exporter/instrumentation'
 
   # this reports basic process stats like RSS and GC info
@@ -521,7 +523,7 @@ All metrics have labels for `job_name` and `queue_name`.
 In an initializer:
 
 ```ruby
-unless Rails.env
+unless Rails.env.test?
   require 'prometheus_exporter/instrumentation'
   PrometheusExporter::Instrumentation::DelayedJob.register_plugin
 end
@@ -532,6 +534,7 @@ end
 | Type | Name | Description | Labels |
 | --- | --- | --- | --- |
 | Counter | `delayed_job_duration_seconds` | Total time spent in delayed jobs | `job_name` |
+| Counter | `delayed_job_latency_seconds_total` | Total delayed jobs latency | `job_name` |
 | Counter | `delayed_jobs_total` | Total number of delayed jobs executed | `job_name` |
 | Gauge | `delayed_jobs_enqueued` | Number of enqueued delayed jobs | - |
 | Gauge | `delayed_jobs_pending` | Number of pending delayed jobs | - |
@@ -541,13 +544,14 @@ end
 | Summary | `delayed_job_attempts_summary` | Summary of the amount of attempts it takes delayed jobs to succeed | - |
 
 All metrics have labels for `job_name` and `queue_name`.
+`delayed_job_latency_seconds_total` is considering delayed job's [sleep_delay](https://github.com/collectiveidea/delayed_job#:~:text=If%20no%20jobs%20are%20found%2C%20the%20worker%20sleeps%20for%20the%20amount%20of%20time%20specified%20by%20the%20sleep%20delay%20option.%20Set%20Delayed%3A%3AWorker.sleep_delay%20%3D%2060%20for%20a%2060%20second%20sleep%20time.) parameter, so please be aware of this in case you are looking for high latency precision.
 
 #### Hutch Message Processing Tracer
 
 Capture [Hutch](https://github.com/gocardless/hutch) metrics (how many jobs ran? how many failed? how long did they take?)
 
 ```ruby
-unless Rails.env
+unless Rails.env.test?
   require 'prometheus_exporter/instrumentation'
   Hutch::Config.set(:tracer, PrometheusExporter::Instrumentation::Hutch)
 end
@@ -569,7 +573,7 @@ Request Queueing is defined as the time it takes for a request to reach your app
 
 As this metric starts before `prometheus_exporter` can handle the request, you must add a specific HTTP header as early in your infrastructure as possible (we recommend your load balancer or reverse proxy).
 
-The Amazon Application Load Balancer [request tracing header](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-request-tracing.html) is natively supported. If you are using another upstream entrypoint, you may configure your HTTP server / load balancer to add a header `X-Request-Start: t=<MSEC>` when passing the request upstream. For more information, please consult your software manual.
+The Amazon Application Load Balancer [request tracing header](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-request-tracing.html) is natively supported. If you are using another upstream entrypoint, you may configure your HTTP server / load balancer to add a header `X-Request-Start: t=<MSEC>` when passing the request upstream. Please keep in mind request time start is reported as epoch time (in seconds) and lacks precision, which may introduce additional latency in reported metrics. For more information, please consult your software manual.
 
 Hint: we aim to be API-compatible with the big APM solutions, so if you've got requests queueing time configured for them, it should be expected to also work with `prometheus_exporter`.
 
@@ -626,6 +630,29 @@ PrometheusExporter::Instrumentation::Resque.start
 | Gauge | `resque_workers` | Total number of Resque workers running |
 | Gauge | `resque_working` | Total number of Resque workers working |
 
+### GoodJob metrics
+
+The metrics are generated from the database using the relevant scopes. To start monitoring your GoodJob
+installation, you'll need to start the instrumentation:
+
+```ruby
+# e.g. config/initializers/good_job.rb
+require 'prometheus_exporter/instrumentation'
+PrometheusExporter::Instrumentation::GoodJob.start
+```
+
+#### Metrics collected by GoodJob Instrumentation
+
+| Type | Name | Description |
+| --- |----------------------|-----------------------------------------|
+| Gauge | `good_job_scheduled` | Total number of scheduled GoodJob jobs. |
+| Gauge | `good_job_retried` | Total number of retried GoodJob jobs. |
+| Gauge | `good_job_queued` | Total number of queued GoodJob jobs. |
+| Gauge | `good_job_running` | Total number of running GoodJob jobs. |
+| Gauge | `good_job_finished` | Total number of finished GoodJob jobs. |
+| Gauge | `good_job_succeeded` | Total number of succeeded GoodJob jobs. |
+| Gauge | `good_job_discarded` | Total number of discarded GoodJob jobs |
+
 ### Unicorn process metrics
 
 In order to gather metrics from unicorn processes, we use `rainbows`, which exposes `Rainbows::Linux.tcp_listener_stats` to gather information about active workers and queued requests. To start monitoring your unicorn processes, you'll need to know both the path to unicorn PID file and the listen address (`pid_file` and `listen` in your unicorn config file)
@@ -963,6 +990,28 @@ You can also pass a log level (default is [`Logger::WARN`](https://ruby-doc.org/
 PrometheusExporter::Client.new(log_level: Logger::DEBUG)
 ```
 
+## Docker Usage
+
+You can run `prometheus_exporter` project using an official Docker image:
+
+```bash
+docker pull discourse/prometheus_exporter:latest
+# or use specific version
+docker pull discourse/prometheus_exporter:x.x.x
+```
+
+The start the container:
+
+```bash
+docker run -p 9394:9394 discourse/prometheus_exporter
+```
+
+Additional flags could be included:
+
+```
+docker run -p 9394:9394 discourse/prometheus_exporter --verbose --prefix=myapp
+```
+
 ## Docker/Kubernetes Healthcheck
 
 A `/ping` endpoint which only returns `PONG` is available so you can run container healthchecks :
data/lib/prometheus_exporter/client.rb
CHANGED
@@ -1,8 +1,7 @@
 # frozen_string_literal: true
 
-require
-require
-require 'logger'
+require "socket"
+require "logger"
 
 module PrometheusExporter
   class Client
@@ -25,7 +24,9 @@ module PrometheusExporter
         keys: keys,
         value: value
       }
-      values[
+      values[
+        :prometheus_exporter_action
+      ] = prometheus_exporter_action if prometheus_exporter_action
       values[:opts] = @opts if @opts
       values
     end
@@ -57,8 +58,11 @@ module PrometheusExporter
     attr_reader :logger
 
     def initialize(
-      host: ENV.fetch(
-      port: ENV.fetch(
+      host: ENV.fetch("PROMETHEUS_EXPORTER_HOST", "localhost"),
+      port: ENV.fetch(
+        "PROMETHEUS_EXPORTER_PORT",
+        PrometheusExporter::DEFAULT_PORT
+      ),
       max_queue_size: nil,
       thread_sleep: 0.5,
       json_serializer: nil,
@@ -90,7 +94,8 @@ module PrometheusExporter
       @mutex = Mutex.new
       @thread_sleep = thread_sleep
 
-      @json_serializer =
+      @json_serializer =
+        json_serializer == :oj ? PrometheusExporter::OjCompat : JSON
 
       @custom_labels = custom_labels
     end
@@ -100,7 +105,14 @@ module PrometheusExporter
     end
 
     def register(type, name, help, opts = nil)
-      metric =
+      metric =
+        RemoteMetric.new(
+          type: type,
+          name: name,
+          help: help,
+          client: self,
+          opts: opts
+        )
       @metrics << metric
       metric
     end
@@ -161,9 +173,7 @@ module PrometheusExporter
       @mutex.synchronize do
         wait_for_empty_queue_with_timeout(wait_timeout_seconds)
         @worker_thread&.kill
-        while @worker_thread&.alive?
-          sleep 0.001
-        end
+        sleep 0.001 while @worker_thread&.alive?
         @worker_thread = nil
         close_socket!
       end
@@ -183,12 +193,13 @@ module PrometheusExporter
       @mutex.synchronize do
         return if @worker_thread&.alive?
 
-        @worker_thread =
-
-
-
+        @worker_thread =
+          Thread.new do
+            while true
+              worker_loop
+              sleep @thread_sleep
+            end
           end
-        end
       end
     end
   rescue ThreadError => e
@@ -212,7 +223,8 @@ module PrometheusExporter
     end
 
     def close_socket_if_old!
-      if @socket_pid == Process.pid && @socket && @socket_started &&
+      if @socket_pid == Process.pid && @socket && @socket_started &&
+           ((@socket_started + MAX_SOCKET_AGE) < Time.now.to_f)
         close_socket!
       end
     end
@@ -240,7 +252,7 @@ module PrometheusExporter
       end
 
       nil
-    rescue
+    rescue StandardError
       @socket = nil
       @socket_started = nil
       @socket_pid = nil
@@ -250,7 +262,10 @@ module PrometheusExporter
     def wait_for_empty_queue_with_timeout(timeout_seconds)
       start_time = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
       while @queue.length > 0
-
+        if start_time + timeout_seconds <
+             ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+          break
+        end
         sleep(0.05)
       end
     end
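Editor's note: with the reworked defaults above, the client resolves its target from `PROMETHEUS_EXPORTER_HOST` / `PROMETHEUS_EXPORTER_PORT` before falling back to `localhost` and the default port. A minimal sketch; the explicit host name below is purely illustrative:

```ruby
require "prometheus_exporter/client"

# Uses ENV["PROMETHEUS_EXPORTER_HOST"] / ENV["PROMETHEUS_EXPORTER_PORT"] if set,
# otherwise "localhost" and PrometheusExporter::DEFAULT_PORT (9394).
default_client = PrometheusExporter::Client.new

# Or point a client at an explicit collector (hypothetical host name).
remote_client = PrometheusExporter::Client.new(host: "collector.internal", port: 9394)
```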
data/lib/prometheus_exporter/instrumentation/delayed_job.rb
CHANGED
@@ -31,6 +31,7 @@ module PrometheusExporter::Instrumentation
     def call(job, max_attempts, enqueued_count, pending_count, *args, &block)
       success = false
       start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      latency = Time.current - job.run_at
       attempts = job.attempts + 1 # Increment because we're adding the current attempt
       result = block.call(job, *args)
       success = true
@@ -44,6 +45,7 @@ module PrometheusExporter::Instrumentation
         queue_name: job.queue,
         success: success,
         duration: duration,
+        latency: latency,
         attempts: attempts,
         max_attempts: max_attempts,
         enqueued: enqueued_count,
data/lib/prometheus_exporter/instrumentation/good_job.rb
ADDED
@@ -0,0 +1,30 @@
+# frozen_string_literal: true
+
+# collects stats from GoodJob
+module PrometheusExporter::Instrumentation
+  class GoodJob < PeriodicStats
+    def self.start(client: nil, frequency: 30)
+      good_job_collector = new
+      client ||= PrometheusExporter::Client.default
+
+      worker_loop do
+        client.send_json(good_job_collector.collect)
+      end
+
+      super
+    end
+
+    def collect
+      {
+        type: "good_job",
+        scheduled: ::GoodJob::Job.scheduled.size,
+        retried: ::GoodJob::Job.retried.size,
+        queued: ::GoodJob::Job.queued.size,
+        running: ::GoodJob::Job.running.size,
+        finished: ::GoodJob::Job.finished.size,
+        succeeded: ::GoodJob::Job.succeeded.size,
+        discarded: ::GoodJob::Job.discarded.size
+      }
+    end
+  end
+end
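Editor's note: a minimal usage sketch for the new instrumentation, assuming GoodJob is loaded and a collector is running. The `client:` and `frequency:` keywords come from the `start` signature above; the initializer path follows the README example earlier in this diff.

```ruby
# e.g. config/initializers/good_job.rb
unless Rails.env.test?
  require "prometheus_exporter/instrumentation"

  # Push GoodJob queue stats to the collector every 30 seconds (the default).
  # A non-default client could be supplied via the `client:` keyword.
  PrometheusExporter::Instrumentation::GoodJob.start(frequency: 30)
end
```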
data/lib/prometheus_exporter/instrumentation/sidekiq.rb
CHANGED
@@ -1,18 +1,19 @@
 # frozen_string_literal: true
 
-require
+require "yaml"
 
 module PrometheusExporter::Instrumentation
-  JOB_WRAPPER_CLASS_NAME =
-
-
-
-
+  JOB_WRAPPER_CLASS_NAME =
+    "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
+  DELAYED_CLASS_NAMES = %w[
+    Sidekiq::Extensions::DelayedClass
+    Sidekiq::Extensions::DelayedModel
+    Sidekiq::Extensions::DelayedMailer
   ]
 
   class Sidekiq
     def self.death_handler
-      ->
+      ->(job, ex) do
         job_is_fire_and_forget = job["retry"] == false
 
         worker_class = Object.const_get(job["class"])
@@ -43,7 +44,8 @@ module PrometheusExporter::Instrumentation
     end
 
     def initialize(options = { client: nil })
-      @client =
+      @client =
+        options.fetch(:client, nil) || PrometheusExporter::Client.default
     end
 
     def call(worker, msg, queue)
@@ -82,7 +84,7 @@ module PrometheusExporter::Instrumentation
     end
 
     def self.get_job_wrapper_name(msg)
-      msg[
+      msg["wrapped"]
     end
 
     def self.get_delayed_name(msg, class_name)
@@ -90,17 +92,17 @@ module PrometheusExporter::Instrumentation
       # fallback to class_name since we're relying on the internal implementation
       # of the delayed extensions
       # https://github.com/mperham/sidekiq/blob/master/lib/sidekiq/extensions/class_methods.rb
-
+      target, method_name, _args = YAML.load(msg["args"].first)
       if target.class == Class
         "#{target.name}##{method_name}"
       else
         "#{target.class.name}##{method_name}"
       end
     rescue Psych::DisallowedClass, ArgumentError
-      parsed = Psych.parse(msg[
+      parsed = Psych.parse(msg["args"].first)
       children = parsed.root.children
-      target = (children[0].value || children[0].tag).sub(
-      method_name = (children[1].value || children[1].tag).sub(
+      target = (children[0].value || children[0].tag).sub("!", "")
+      method_name = (children[1].value || children[1].tag).sub(":", "")
 
       if target && method_name
         "#{target}##{method_name}"
@@ -108,7 +110,7 @@ module PrometheusExporter::Instrumentation
         class_name
       end
     end
-  rescue
+  rescue StandardError
     class_name
   end
 end
data/lib/prometheus_exporter/server/active_record_collector.rb
CHANGED
@@ -2,7 +2,8 @@
 
 module PrometheusExporter::Server
   class ActiveRecordCollector < TypeCollector
-
+    MAX_METRIC_AGE = 60
+
     ACTIVE_RECORD_GAUGES = {
       connections: "Total connections in pool",
       busy: "Connections in use in pool",
@@ -13,7 +14,12 @@ module PrometheusExporter::Server
     }
 
     def initialize
-      @active_record_metrics =
+      @active_record_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
+      @active_record_metrics.filter = -> (new_metric, old_metric) do
+        new_metric["pid"] == old_metric["pid"] &&
+        new_metric["hostname"] == old_metric["hostname"] &&
+        new_metric["metric_labels"]["pool_name"] == old_metric["metric_labels"]["pool_name"]
+      end
     end
 
     def type
@@ -26,7 +32,7 @@ module PrometheusExporter::Server
       metrics = {}
 
       @active_record_metrics.map do |m|
-        metric_key = (m["metric_labels"] || {}).merge("pid" => m["pid"])
+        metric_key = (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"])
         metric_key.merge!(m["custom_labels"]) if m["custom_labels"]
 
         ACTIVE_RECORD_GAUGES.map do |k, help|
@@ -42,16 +48,6 @@ module PrometheusExporter::Server
     end
 
     def collect(obj)
-      now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-
-      obj["created_at"] = now
-
-      @active_record_metrics.delete_if do |current|
-        (obj["pid"] == current["pid"] && obj["hostname"] == current["hostname"] &&
-          obj["metric_labels"]["pool_name"] == current["metric_labels"]["pool_name"]) ||
-          (current["created_at"] + MAX_ACTIVERECORD_METRIC_AGE < now)
-      end
-
       @active_record_metrics << obj
     end
   end
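Editor's note: this collector (and the ones that follow) now delegates expiry and de-duplication to `MetricsContainer` with a `ttl:` and a `filter` lambda instead of a hand-rolled `delete_if` in `collect`. A simplified, self-contained sketch of that pattern, not the gem's actual `MetricsContainer` implementation:

```ruby
# Simplified illustration of the ttl + filter pattern the collectors now rely on.
class TtlMetricsContainer
  attr_accessor :filter

  def initialize(ttl:)
    @ttl = ttl
    @entries = [] # pairs of [expires_at, metric]
  end

  def <<(metric)
    now = Process.clock_gettime(Process::CLOCK_MONOTONIC)
    # Drop expired entries and, when a filter is set, older entries that
    # describe the same source (e.g. same pid/hostname/pool_name).
    @entries.reject! do |expires_at, old|
      expires_at < now || (filter && filter.call(metric, old))
    end
    @entries << [now + @ttl, metric]
  end

  def map(&blk)
    @entries.map { |_, metric| metric }.map(&blk)
  end

  def length
    @entries.length
  end
end
```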
data/lib/prometheus_exporter/server/collector.rb
CHANGED
@@ -23,6 +23,7 @@ module PrometheusExporter::Server
       register_collector(ActiveRecordCollector.new)
       register_collector(ShoryukenCollector.new)
       register_collector(ResqueCollector.new)
+      register_collector(GoodJobCollector.new)
     end
 
     def register_collector(collector)
data/lib/prometheus_exporter/server/delayed_job_collector.rb
CHANGED
@@ -5,6 +5,7 @@ module PrometheusExporter::Server
     def initialize
       @delayed_jobs_total = nil
       @delayed_job_duration_seconds = nil
+      @delayed_job_latency_seconds_total = nil
       @delayed_jobs_total = nil
       @delayed_failed_jobs_total = nil
       @delayed_jobs_max_attempts_reached_total = nil
@@ -25,6 +26,7 @@ module PrometheusExporter::Server
 
       ensure_delayed_job_metrics
       @delayed_job_duration_seconds.observe(obj["duration"], counter_labels)
+      @delayed_job_latency_seconds_total.observe(obj["latency"], counter_labels)
       @delayed_jobs_total.observe(1, counter_labels)
       @delayed_failed_jobs_total.observe(1, counter_labels) if !obj["success"]
       @delayed_jobs_max_attempts_reached_total.observe(1, counter_labels) if obj["attempts"] >= obj["max_attempts"]
@@ -38,7 +40,7 @@ module PrometheusExporter::Server
 
     def metrics
       if @delayed_jobs_total
-        [@delayed_job_duration_seconds, @delayed_jobs_total, @delayed_failed_jobs_total,
+        [@delayed_job_duration_seconds, @delayed_job_latency_seconds_total, @delayed_jobs_total, @delayed_failed_jobs_total,
         @delayed_jobs_max_attempts_reached_total, @delayed_job_duration_seconds_summary, @delayed_job_attempts_summary,
         @delayed_jobs_enqueued, @delayed_jobs_pending]
       else
@@ -55,6 +57,10 @@ module PrometheusExporter::Server
         PrometheusExporter::Metric::Counter.new(
           "delayed_job_duration_seconds", "Total time spent in delayed jobs.")
 
+      @delayed_job_latency_seconds_total =
+        PrometheusExporter::Metric::Counter.new(
+          "delayed_job_latency_seconds_total", "Total delayed jobs latency.")
+
       @delayed_jobs_total =
         PrometheusExporter::Metric::Counter.new(
           "delayed_jobs_total", "Total number of delayed jobs executed.")
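Editor's note: the new counter accumulates the per-job latency computed in the instrumentation hunk earlier in this diff (`Time.current - job.run_at`). A tiny sketch of the metric on its own, with illustrative values and labels:

```ruby
require "prometheus_exporter/metric"

counter = PrometheusExporter::Metric::Counter.new(
  "delayed_job_latency_seconds_total", "Total delayed jobs latency."
)

# As in the handle hunk above: each reported latency is added to the counter,
# labelled with the job and queue names (values here are made up).
counter.observe(2.5, job_name: "InvoiceJob", queue_name: "default")
```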
data/lib/prometheus_exporter/server/good_job_collector.rb
ADDED
@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Server
+  class GoodJobCollector < TypeCollector
+    MAX_METRIC_AGE = 30
+    GOOD_JOB_GAUGES = {
+      scheduled: "Total number of scheduled GoodJob jobs.",
+      retried: "Total number of retried GoodJob jobs.",
+      queued: "Total number of queued GoodJob jobs.",
+      running: "Total number of running GoodJob jobs.",
+      finished: "Total number of finished GoodJob jobs.",
+      succeeded: "Total number of succeeded GoodJob jobs.",
+      discarded: "Total number of discarded GoodJob jobs."
+    }
+
+    def initialize
+      @good_job_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
+      @gauges = {}
+    end
+
+    def type
+      "good_job"
+    end
+
+    def metrics
+      return [] if good_job_metrics.length == 0
+
+      good_job_metrics.map do |metric|
+        labels = metric.fetch("custom_labels", {})
+
+        GOOD_JOB_GAUGES.map do |name, help|
+          value = metric[name.to_s]
+
+          if value
+            gauge = gauges[name] ||= PrometheusExporter::Metric::Gauge.new("good_job_#{name}", help)
+            gauge.observe(value, labels)
+          end
+        end
+      end
+
+      gauges.values
+    end
+
+    def collect(object)
+      @good_job_metrics << object
+    end
+
+    private
+
+    attr_reader :good_job_metrics, :gauges
+  end
+end
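Editor's note: the collector reads string keys and optional `custom_labels`, matching what the GoodJob instrumentation earlier in this diff sends. A sketch of the payload shape it consumes; the numeric values and label contents are illustrative:

```ruby
# Shape of a payload the GoodJobCollector consumes (values are illustrative).
payload = {
  "type" => "good_job",
  "scheduled" => 3,
  "retried" => 0,
  "queued" => 12,
  "running" => 4,
  "finished" => 1500,
  "succeeded" => 1480,
  "discarded" => 20,
  "custom_labels" => { "hostname" => "worker-1" }
}
```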
data/lib/prometheus_exporter/server/process_collector.rb
CHANGED
@@ -3,7 +3,8 @@
 module PrometheusExporter::Server
 
   class ProcessCollector < TypeCollector
-
+    MAX_METRIC_AGE = 60
+
     PROCESS_GAUGES = {
       heap_free_slots: "Free ruby heap slots.",
       heap_live_slots: "Used ruby heap slots.",
@@ -21,7 +22,10 @@ module PrometheusExporter::Server
     }
 
     def initialize
-      @process_metrics =
+      @process_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
+      @process_metrics.filter = -> (new_metric, old_metric) do
+        new_metric["pid"] == old_metric["pid"] && new_metric["hostname"] == old_metric["hostname"]
+      end
     end
 
     def type
@@ -34,8 +38,8 @@ module PrometheusExporter::Server
       metrics = {}
 
       @process_metrics.map do |m|
-        metric_key = m["metric_labels"].merge("pid" => m["pid"])
-        metric_key.merge!(m["custom_labels"]
+        metric_key = (m["metric_labels"] || {}).merge("pid" => m["pid"], "hostname" => m["hostname"])
+        metric_key.merge!(m["custom_labels"]) if m["custom_labels"]
 
         PROCESS_GAUGES.map do |k, help|
           k = k.to_s
@@ -58,15 +62,6 @@ module PrometheusExporter::Server
     end
 
     def collect(obj)
-      now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-
-      obj["created_at"] = now
-
-      @process_metrics.delete_if do |current|
-        (obj["pid"] == current["pid"] && obj["hostname"] == current["hostname"]) ||
-          (current["created_at"] + MAX_PROCESS_METRIC_AGE < now)
-      end
-
       @process_metrics << obj
     end
   end
data/lib/prometheus_exporter/server/puma_collector.rb
CHANGED
@@ -14,7 +14,7 @@ module PrometheusExporter::Server
     }
 
     def initialize
-      @puma_metrics = MetricsContainer.new
+      @puma_metrics = MetricsContainer.new(ttl: MAX_PUMA_METRIC_AGE)
       @puma_metrics.filter = -> (new_metric, old_metric) do
         new_metric["pid"] == old_metric["pid"] && new_metric["hostname"] == old_metric["hostname"]
       end
data/lib/prometheus_exporter/server/resque_collector.rb
CHANGED
@@ -2,7 +2,7 @@
 
 module PrometheusExporter::Server
   class ResqueCollector < TypeCollector
-
+    MAX_METRIC_AGE = 30
     RESQUE_GAUGES = {
       processed_jobs: "Total number of processed Resque jobs.",
       failed_jobs: "Total number of failed Resque jobs.",
@@ -13,7 +13,7 @@ module PrometheusExporter::Server
     }
 
     def initialize
-      @resque_metrics =
+      @resque_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
       @gauges = {}
     end
 
@@ -40,11 +40,7 @@ module PrometheusExporter::Server
     end
 
     def collect(object)
-
-
-      object["created_at"] = now
-      resque_metrics.delete_if { |metric| metric["created_at"] + MAX_RESQUE_METRIC_AGE < now }
-      resque_metrics << object
+      @resque_metrics << object
     end
 
     private
data/lib/prometheus_exporter/server/sidekiq_process_collector.rb
CHANGED
@@ -2,7 +2,7 @@
 
 module PrometheusExporter::Server
   class SidekiqProcessCollector < PrometheusExporter::Server::TypeCollector
-
+    MAX_METRIC_AGE = 60
 
     SIDEKIQ_PROCESS_GAUGES = {
       'busy' => 'Number of running jobs',
@@ -12,7 +12,7 @@ module PrometheusExporter::Server
     attr_reader :sidekiq_metrics, :gauges
 
     def initialize
-      @sidekiq_metrics = MetricsContainer.new(ttl:
+      @sidekiq_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
       @gauges = {}
     end
 
data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb
CHANGED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 module PrometheusExporter::Server
   class SidekiqQueueCollector < TypeCollector
-
+    MAX_METRIC_AGE = 60
 
     SIDEKIQ_QUEUE_GAUGES = {
       'backlog' => 'Size of the sidekiq queue.',
@@ -11,7 +11,7 @@ module PrometheusExporter::Server
     attr_reader :sidekiq_metrics, :gauges
 
     def initialize
-      @sidekiq_metrics = MetricsContainer.new
+      @sidekiq_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
       @gauges = {}
     end
 
data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb
CHANGED
@@ -2,7 +2,7 @@
 
 module PrometheusExporter::Server
   class SidekiqStatsCollector < TypeCollector
-
+    MAX_METRIC_AGE = 60
 
     SIDEKIQ_STATS_GAUGES = {
       'dead_size' => 'Size of dead the queue',
@@ -18,7 +18,7 @@ module PrometheusExporter::Server
     attr_reader :sidekiq_metrics, :gauges
 
     def initialize
-      @sidekiq_metrics = MetricsContainer.new(ttl:
+      @sidekiq_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
       @gauges = {}
     end
 
data/lib/prometheus_exporter/server/unicorn_collector.rb
CHANGED
@@ -2,47 +2,46 @@
 
 # custom type collector for prometheus_exporter for handling the metrics sent from
 # PrometheusExporter::Instrumentation::Unicorn
-
-
-
-
-
-
-
-
-
-
-
-
+module PrometheusExporter::Server
+  class UnicornCollector < PrometheusExporter::Server::TypeCollector
+    MAX_METRIC_AGE = 60
+
+    UNICORN_GAUGES = {
+      workers: 'Number of unicorn workers.',
+      active_workers: 'Number of active unicorn workers',
+      request_backlog: 'Number of requests waiting to be processed by a unicorn worker.'
+    }.freeze
+
+    def initialize
+      @unicorn_metrics = MetricsContainer.new(ttl: MAX_METRIC_AGE)
+    end
 
-
-
-
+    def type
+      'unicorn'
+    end
 
-
-
+    def metrics
+      return [] if @unicorn_metrics.length.zero?
 
-
+      metrics = {}
 
-
-
+      @unicorn_metrics.map do |m|
+        labels = m["custom_labels"] || {}
 
-
-
-
-
-
+        UNICORN_GAUGES.map do |k, help|
+          k = k.to_s
+          if (v = m[k])
+            g = metrics[k] ||= PrometheusExporter::Metric::Gauge.new("unicorn_#{k}", help)
+            g.observe(v, labels)
+          end
         end
       end
-    end
 
-
-
+      metrics.values
+    end
 
-
-
-
-      @unicorn_metrics.delete_if { |m| m['created_at'] + MAX_UNICORN_METRIC_AGE < now }
-      @unicorn_metrics << obj
+    def collect(obj)
+      @unicorn_metrics << obj
+    end
   end
 end
data/lib/prometheus_exporter/server/web_server.rb
CHANGED
@@ -1,9 +1,9 @@
 # frozen_string_literal: true
 
-require
-require
-require
-require
+require "webrick"
+require "timeout"
+require "zlib"
+require "stringio"
 
 module PrometheusExporter::Server
   class WebServer
@@ -18,11 +18,23 @@ module PrometheusExporter::Server
       @auth = opts[:auth]
       @realm = opts[:realm] || PrometheusExporter::DEFAULT_REALM
 
-      @metrics_total =
+      @metrics_total =
+        PrometheusExporter::Metric::Counter.new(
+          "collector_metrics_total",
+          "Total metrics processed by exporter web."
+        )
 
-      @sessions_total =
+      @sessions_total =
+        PrometheusExporter::Metric::Counter.new(
+          "collector_sessions_total",
+          "Total send_metric sessions processed by exporter web."
+        )
 
-      @bad_metrics_total =
+      @bad_metrics_total =
+        PrometheusExporter::Metric::Counter.new(
+          "collector_bad_metrics_total",
+          "Total mis-handled metrics by collector."
+        )
 
       @metrics_total.observe(0)
       @sessions_total.observe(0)
@@ -34,7 +46,7 @@ module PrometheusExporter::Server
       if @verbose
         @access_log = [
           [$stderr, WEBrick::AccessLog::COMMON_LOG_FORMAT],
-          [$stderr, WEBrick::AccessLog::REFERER_LOG_FORMAT]
+          [$stderr, WEBrick::AccessLog::REFERER_LOG_FORMAT]
         ]
         @logger = WEBrick::Log.new(log_target || $stderr)
       else
@@ -42,23 +54,26 @@ module PrometheusExporter::Server
         @logger = WEBrick::Log.new(log_target || "/dev/null")
       end
 
-
+      if @verbose && @auth
+        @logger.info "Using Basic Authentication via #{@auth}"
+      end
 
-      if %w
+      if %w[ALL ANY].include?(@bind)
        @logger.info "Listening on both 0.0.0.0/:: network interfaces"
        @bind = nil
       end
 
-      @server =
-
-
-
-
-
-
-
-
+      @server =
+        WEBrick::HTTPServer.new(
+          Port: @port,
+          BindAddress: @bind,
+          Logger: @logger,
+          AccessLog: @access_log
+        )
+
+      @server.mount_proc "/" do |req, res|
+        res["Content-Type"] = "text/plain; charset=utf-8"
+        if req.path == "/metrics"
          authenticate(req, res) if @auth
 
          res.status = 200
@@ -76,13 +91,14 @@ module PrometheusExporter::Server
          else
            res.body = metrics
          end
-        elsif req.path ==
+        elsif req.path == "/send-metrics"
          handle_metrics(req, res)
-        elsif req.path ==
-          res.body =
+        elsif req.path == "/ping"
+          res.body = "PONG"
        else
          res.status = 404
-          res.body =
+          res.body =
+            "Not Found! The Prometheus Ruby Exporter only listens on /ping, /metrics and /send-metrics"
        end
       end
     end
@@ -94,13 +110,11 @@ module PrometheusExporter::Server
         @metrics_total.observe
         @collector.process(block)
       rescue => e
-        if @verbose
-          @logger.error "\n\n#{e.inspect}\n#{e.backtrace}\n\n"
-        end
+        @logger.error "\n\n#{e.inspect}\n#{e.backtrace}\n\n" if @verbose
         @bad_metrics_total.observe
         res.body = "Bad Metrics #{e}"
         res.status = e.respond_to?(:status_code) ? e.status_code : 500
-
+        break
       end
     end
 
@@ -109,13 +123,14 @@ module PrometheusExporter::Server
     end
 
     def start
-      @runner ||=
-
-
-
+      @runner ||=
+        Thread.start do
+          begin
+            @server.start
+          rescue => e
+            @logger.error "Failed to start prometheus collector web on port #{@port}: #{e}"
+          end
         end
-      end
     end
 
     def stop
@@ -125,7 +140,7 @@ module PrometheusExporter::Server
     def metrics
       metric_text = nil
       begin
-        Timeout
+        Timeout.timeout(@timeout) do
          metric_text = @collector.prometheus_metrics_text
        end
      rescue Timeout::Error
@@ -158,9 +173,18 @@ module PrometheusExporter::Server
     end
 
     def get_rss
-      @pagesize ||=
+      @pagesize ||=
+        begin
+          `getconf PAGESIZE`.to_i
+        rescue StandardError
+          4096
+        end
       @pid ||= Process.pid
-
+      begin
+        File.read("/proc/#{@pid}/statm").split(" ")[1].to_i * @pagesize
+      rescue StandardError
+        0
+      end
     end
 
     def add_gauge(name, help, value)
@@ -171,10 +195,12 @@ module PrometheusExporter::Server
 
     def authenticate(req, res)
       htpasswd = WEBrick::HTTPAuth::Htpasswd.new(@auth)
-      basic_auth =
+      basic_auth =
+        WEBrick::HTTPAuth::BasicAuth.new(
+          { Realm: @realm, UserDB: htpasswd, Logger: @logger }
+        )
 
       basic_auth.authenticate(req, res)
     end
-
   end
 end
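Editor's note: a quick way to exercise the routes wired up above (`/ping`, `/metrics`, `/send-metrics`), assuming a collector is running on the default port 9394. A minimal sketch using Net::HTTP:

```ruby
require "net/http"
require "uri"

base = URI("http://localhost:9394")

# Healthcheck endpoint used by Docker/Kubernetes probes; responds with "PONG".
puts Net::HTTP.get(base + "/ping")

# Prometheus scrape endpoint; responds with the text exposition format.
puts Net::HTTP.get(base + "/metrics")
```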
data/lib/prometheus_exporter.rb
CHANGED
@@ -2,21 +2,21 @@
 
 require_relative "prometheus_exporter/version"
 require "json"
-require "thread"
 
 module PrometheusExporter
   # per: https://github.com/prometheus/prometheus/wiki/Default-port-allocations
   DEFAULT_PORT = 9394
-  DEFAULT_BIND_ADDRESS =
-  DEFAULT_PREFIX =
+  DEFAULT_BIND_ADDRESS = "localhost"
+  DEFAULT_PREFIX = "ruby_"
   DEFAULT_LABEL = {}
   DEFAULT_TIMEOUT = 2
-  DEFAULT_REALM =
+  DEFAULT_REALM = "Prometheus Exporter"
 
   class OjCompat
     def self.parse(obj)
       Oj.compat_load(obj)
     end
+
     def self.dump(obj)
       Oj.dump(obj, mode: :compat)
     end
@@ -25,7 +25,7 @@ module PrometheusExporter
   def self.hostname
     @hostname ||=
       begin
-        require
+        require "socket"
         Socket.gethostname
       rescue => e
         STDERR.puts "Unable to lookup hostname #{e}"
@@ -45,13 +45,12 @@ module PrometheusExporter
   def self.has_oj?
     (
       @@has_oj ||=
-
-
-
-
-
-
-    ) == :
+        begin
+          require "oj"
+          :T
+        rescue LoadError
+          :F
+        end
+    ) == :T
   end
-
 end
data/prometheus_exporter.gemspec
CHANGED
@@ -15,8 +15,6 @@ Gem::Specification.new do |spec|
   spec.homepage = "https://github.com/discourse/prometheus_exporter"
   spec.license = "MIT"
 
-  spec.post_install_message = "prometheus_exporter will only bind to localhost by default as of v0.5"
-
   spec.files = `git ls-files -z`.split("\x0").reject do |f|
     f.match(%r{^(test|spec|features|bin)/})
   end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: prometheus_exporter
 version: !ruby/object:Gem::Version
-  version: 2.0
+  version: 2.1.0
 platform: ruby
 authors:
 - Sam Saffron
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2024-01-07 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: webrick
@@ -248,6 +248,7 @@ files:
 - Appraisals
 - CHANGELOG
 - CODE_OF_CONDUCT.md
+- Dockerfile
 - Gemfile
 - Guardfile
 - LICENSE.txt
@@ -259,11 +260,13 @@ files:
 - gemfiles/.bundle/config
 - gemfiles/ar_60.gemfile
 - gemfiles/ar_61.gemfile
+- gemfiles/ar_70.gemfile
 - lib/prometheus_exporter.rb
 - lib/prometheus_exporter/client.rb
 - lib/prometheus_exporter/instrumentation.rb
 - lib/prometheus_exporter/instrumentation/active_record.rb
 - lib/prometheus_exporter/instrumentation/delayed_job.rb
+- lib/prometheus_exporter/instrumentation/good_job.rb
 - lib/prometheus_exporter/instrumentation/hutch.rb
 - lib/prometheus_exporter/instrumentation/method_profiler.rb
 - lib/prometheus_exporter/instrumentation/periodic_stats.rb
@@ -288,6 +291,7 @@ files:
 - lib/prometheus_exporter/server/collector.rb
 - lib/prometheus_exporter/server/collector_base.rb
 - lib/prometheus_exporter/server/delayed_job_collector.rb
+- lib/prometheus_exporter/server/good_job_collector.rb
 - lib/prometheus_exporter/server/hutch_collector.rb
 - lib/prometheus_exporter/server/metrics_container.rb
 - lib/prometheus_exporter/server/process_collector.rb
@@ -309,8 +313,7 @@ homepage: https://github.com/discourse/prometheus_exporter
 licenses:
 - MIT
 metadata: {}
-post_install_message:
-  of v0.5
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib