prometheus_exporter 1.0.0 → 2.0.1
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +36 -32
- data/CHANGELOG +44 -27
- data/README.md +88 -32
- data/bin/prometheus_exporter +2 -2
- data/lib/prometheus_exporter/instrumentation/active_record.rb +6 -22
- data/lib/prometheus_exporter/instrumentation/periodic_stats.rb +62 -0
- data/lib/prometheus_exporter/instrumentation/process.rb +5 -21
- data/lib/prometheus_exporter/instrumentation/puma.rb +7 -12
- data/lib/prometheus_exporter/instrumentation/resque.rb +6 -11
- data/lib/prometheus_exporter/instrumentation/sidekiq.rb +27 -7
- data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb +5 -11
- data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb +5 -11
- data/lib/prometheus_exporter/instrumentation/sidekiq_stats.rb +37 -0
- data/lib/prometheus_exporter/instrumentation/unicorn.rb +7 -12
- data/lib/prometheus_exporter/instrumentation.rb +2 -0
- data/lib/prometheus_exporter/metric/base.rb +2 -9
- data/lib/prometheus_exporter/metric/histogram.rb +13 -1
- data/lib/prometheus_exporter/middleware.rb +8 -4
- data/lib/prometheus_exporter/server/collector.rb +1 -0
- data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +1 -1
- data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb +46 -0
- data/lib/prometheus_exporter/server/web_collector.rb +17 -17
- data/lib/prometheus_exporter/server.rb +1 -0
- data/lib/prometheus_exporter/version.rb +1 -1
- data/prometheus_exporter.gemspec +1 -1
- metadata +7 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 0a8fb6585d155b56625dcd0d89c41301659e5b32cfc8272a340c7b067dbf686a
+  data.tar.gz: 6e5715300786de15f2906b4f8a5603bd19150cb1c511cb83e5c3a3bdf53a5f5b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 52cce25c1403f889f65fc25b054ec22703f32dad1c32d14e35af74b65308c2bf7f0a10e8343bd906c98f884167243484f43114700c3175113e8e75f002421823
+  data.tar.gz: de8bc77ce936ad4f045c0f34907febb881518b537d5828bfeba5476924355157902f6618a0acc37aa1202ff604853f7bb3908ba78478a6cd3cf57bb1b0f5ff66
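The new checksums can be cross-checked against a locally downloaded copy of the release. A minimal Ruby sketch (not part of the gem; the local filename is an assumption) that hashes the `metadata.gz` and `data.tar.gz` entries inside the `.gem` archive:

```ruby
# Minimal verification sketch (not part of the gem). Assumes
# prometheus_exporter-2.0.1.gem has been downloaded to the current directory.
require "digest"
require "rubygems/package"

expected_sha256 = {
  "metadata.gz" => "0a8fb6585d155b56625dcd0d89c41301659e5b32cfc8272a340c7b067dbf686a",
  "data.tar.gz" => "6e5715300786de15f2906b4f8a5603bd19150cb1c511cb83e5c3a3bdf53a5f5b",
}

File.open("prometheus_exporter-2.0.1.gem", "rb") do |io|
  # a .gem file is a tar archive whose top-level entries include metadata.gz and data.tar.gz
  reader = Gem::Package::TarReader.new(io)
  reader.each do |entry|
    next unless expected_sha256.key?(entry.full_name)
    actual = Digest::SHA256.hexdigest(entry.read)
    puts "#{entry.full_name}: #{actual == expected_sha256[entry.full_name] ? 'OK' : 'MISMATCH'}"
  end
  reader.close
end
```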
data/.github/workflows/ci.yml
CHANGED
@@ -1,49 +1,53 @@
-name:
+name: CI
 
 on:
   push:
+    branches:
+      - main
   pull_request:
   schedule:
-    - cron:
+    - cron: "0 0 * * 0" # weekly
 
 jobs:
   build:
     runs-on: ubuntu-latest
-    name: Ruby ${{ matrix.ruby }}
+    name: Ruby ${{ matrix.ruby }} AR ${{ matrix.activerecord }}
+    timeout-minutes: 10
+
+    env:
+      BUNDLE_GEMFILE: ${{ github.workspace }}/gemfiles/ar_${{ matrix.activerecord }}.gemfile
+
     strategy:
+      fail-fast: false
       matrix:
-        ruby: [
+        ruby: ['2.6', '2.7', '3.0', '3.1']
+        activerecord: [60, 61]
+
     steps:
-      - uses: actions/checkout@
-
-
-      - uses: actions/setup-ruby@v1
+      - uses: actions/checkout@v2
+
+      - uses: ruby/setup-ruby@v1
        with:
          ruby-version: ${{ matrix.ruby }}
-
-
-          path: vendor/bundle
-          key: ${{ runner.os }}-${{ matrix.ruby }}-gems-v2-${{ hashFiles('**/Gemfile.lock') }}
-          restore-keys: |
-            ${{ runner.os }}-${{ matrix.ruby }}-gems-v2-
-
-      - name: Downgrade rubygems
-        run: |
-          # for Ruby <= 2.6 , details https://github.com/rubygems/rubygems/issues/3284
-          gem update --system 3.0.8
-        if: ${{ matrix.ruby == '2.6' || matrix.ruby == '2.7' }}
-      - name: Upgrade rubygems
-        run: |
-          gem update --system
-      - name: Setup gems
-        run: |
-          gem install bundler
-          bundle config path vendor/bundle
-          bundle install --jobs 4
-          bundle exec appraisal install
+          bundler-cache: true
+
       - name: Rubocop
        run: bundle exec rubocop
-
-        run: bundle exec appraisal bundle
+
       - name: Run tests
-        run: bundle exec
+        run: bundle exec rake
+
+  publish:
+    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+    needs: build
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Release gem
+        uses: discourse/publish-rubygems-action@v2
+        env:
+          RUBYGEMS_API_KEY: ${{ secrets.RUBYGEMS_API_KEY }}
+          GIT_EMAIL: team@discourse.org
+          GIT_NAME: discoursebot
data/CHANGELOG
CHANGED
@@ -1,4 +1,21 @@
-
+2.0.1 - 2022-02-24
+
+- FIX: ensure threads do not leak when calling #start repeatedly on instrumentation classes, this is an urgent patch for Puma integration
+
+2.0.0 - 2022-02-18
+
+- FEATURE: Add per worker custom labels
+- FEATURE: support custom histogram buckets
+- FIX: all metrics are exposing status label, and not only `http_requests_total`
+- BREAKING: rename all `http_duration` metrics to `http_request_duration` to match prometheus official naming conventions (See https://prometheus.io/docs/practices/naming/#metric-names).
+
+1.0.1 - 2021-12-22
+
+- FEATURE: add labels to preflight requests
+- FEATURE: SidekiqStats metrics
+- FIX: minor refactors to Sidekiq metrics
+
+1.0.0 - 2021-11-23
 
 - BREAKING: rename metrics to match prometheus official naming conventions (See https://prometheus.io/docs/practices/naming/#metric-names)
 - FEATURE: Sidekiq process metrics
@@ -6,11 +23,11 @@
 - FIX: logger improved for web server
 - FIX: Remove job labels from DelayedJob queues
 
-0.8.1 -
+0.8.1 - 2021-08-04
 
 - FEATURE: swap from hardcoded STDERR to logger pattern (see README for details)
 
-0.8.0 -
+0.8.0 - 2021-07-05
 
 - FIX: handle ThreadError more gracefully in cases where process shuts down
 - FEATURE: add job_name and queue_name labels to delayed job metrics
@@ -22,7 +39,7 @@
 - FEATURE: Improve Active Record instrumentation
 - FEATURE: Support HTTP_X_AMZN_TRACE_ID when supplied
 
-0.7.0 -
+0.7.0 - 2020-12-29
 
 - Dev: Removed support from EOL rubies, only 2.5, 2.6, 2.7 and 3.0 are supported now.
 - Dev: Better support for Ruby 3.0, explicitly depending on webrick
@@ -30,111 +47,111 @@
 - FEATURE: clean pattern for overriding middleware labels was introduced (in README)
 - Fix: Better support for forking
 
-0.6.0 -
+0.6.0 - 2020-11-17
 
 - FEATURE: add support for basic-auth in the prometheus_exporter web server
 
-0.5.3 -
+0.5.3 - 2020-07-29
 
 - FEATURE: added #remove to all metric types so users can remove specific labels if needed
 
-0.5.2 -
+0.5.2 - 2020-07-01
 
 - FEATURE: expanded instrumentation for sidekiq
 - FEATURE: configurable default labels
 
-0.5.1 -
+0.5.1 - 2020-02-25
 
 - FEATURE: Allow configuring the default client's host and port via environment variables
 
-0.5.0 -
+0.5.0 - 2020-02-14
 
 - Breaking change: listen only to localhost by default to prevent unintended insecure configuration
 - FIX: Avoid calling `hostname` aggressively, instead cache it on the exporter instance
 
-0.4.17 -
+0.4.17 - 2020-01-13
 
 - FEATURE: add support for `to_h` on all metrics which can be used to query existing key/values
 
-0.4.16 -
+0.4.16 - 2019-11-04
 
 - FEATURE: Support #reset! on all metric types to reset a metric to default
 
-0.4.15 -
+0.4.15 - 2019-11-04
 
 - FEATURE: Improve delayed job collector, add pending counts
 - FEATURE: New ActiveRecord collector (documented in readme)
 - FEATURE: Allow passing in histogram and summary options
 - FEATURE: Allow custom labels for unicorn collector
 
-0.4.14 -
+0.4.14 - 2019-09-10
 
 - FEATURE: allow finding metrics by name RemoteMetric #find_registered_metric
 - FIX: guard socket closing
 
-0.4.13 -
+0.4.13 - 2019-07-09
 
 - Fix: Memory leak in unicorn and puma collectors
 
-0.4.12 -
+0.4.12 - 2019-05-30
 
 - Fix: unicorn collector reporting incorrect number of unicorn workers
 
-0.4.11 -
+0.4.11 - 2019-05-15
 
 - Fix: Handle stopping nil worker_threads in Client
 - Dev: add frozen string literals
 
-0.4.10 -
+0.4.10 - 2019-04-29
 
 - Fix: Custom label support for puma collector
 - Fix: Raindrops socket collector not working correctly
 
-0.4.9 -
+0.4.9 - 2019-04-11
 
 - Fix: Gem was not working correctly in Ruby 2.4 and below due to a syntax error
 
-0.4.8 -
+0.4.8 - 2019-04-10
 
 - Feature: added helpers for instrumenting unicorn using raindrops
 
-0.4.7 -
+0.4.7 - 2019-04-08
 
 - Fix: collector was not escaping " \ and \n correctly. This could lead
   to a corrupt payload in some cases.
 
-0.4.6 -
+0.4.6 - 2019-04-02
 
 - Feature: Allow resetting a counter
 - Feature: Add sidekiq metrics: restarted, dead jobs counters
 - Fix: Client shutting down before sending metrics to collector
 
-0.4.5 -
+0.4.5 - 2019-02-14
 
 - Feature: Allow process collector to ship custom labels for all process metrics
 - Fix: Always scope process metrics on hostname in collector
 
-0.4.4 -
+0.4.4 - 2019-02-13
 
 - Feature: add support for local metric collection without using HTTP
 
-0.4.3 -
+0.4.3 - 2019-02-11
 
 - Feature: Add alias for Gauge #observe called #set, this makes it a bit easier to migrate from prom
 - Feature: Add increment and decrement to Counter
 
-0.4.2 -
+0.4.2 - 2018-11-30
 
 - Fix/Feature: setting a Gauge to nil will remove Gauge (setting to non numeric will raise)
 
-0.4.0 -
+0.4.0 - 2018-10-23
 
 - Feature: histogram support
 - Feature: custom quantile support for summary
 - Feature: Puma metrics
 - Fix: delayed job metrics
 
-0.3.4 -
+0.3.4 - 2018-10-02
 
 - Fix: custom collector via CLI was not working correctly
 
data/README.md
CHANGED
@@ -28,6 +28,7 @@ To learn more see [Instrumenting Rails with Prometheus](https://samsaffron.com/a
 * [Client default labels](#client-default-labels)
 * [Client default host](#client-default-host)
 * [Histogram mode](#histogram-mode)
+* [Histogram - custom buckets](#histogram-custom-buckets)
 * [Transport concerns](#transport-concerns)
 * [JSON generation and parsing](#json-generation-and-parsing)
 * [Logging](#logging)
@@ -202,13 +203,13 @@ $ bundle exec prometheus_exporter
 
 #### Metrics collected by Rails integration middleware
 
-| Type | Name
-| --- | ---
-| Counter | `http_requests_total`
-| Summary | `
-| Summary | `
-| Summary | `
-| Summary | `
+| Type | Name | Description |
+| --- | --- | --- |
+| Counter | `http_requests_total` | Total HTTP requests from web app |
+| Summary | `http_request_duration_seconds` | Time spent in HTTP reqs in seconds |
+| Summary | `http_request_redis_duration_seconds`¹ | Time spent in HTTP reqs in Redis, in seconds |
+| Summary | `http_request_sql_duration_seconds`² | Time spent in HTTP reqs in SQL in seconds |
+| Summary | `http_request_queue_duration_seconds`³ | Time spent queueing the request in load balancer in seconds |
 
 All metrics have a `controller` and an `action` label.
 `http_requests_total` additionally has a (HTTP response) `status` label.
@@ -251,7 +252,7 @@ end
 ```
 That way you won't have all metrics labeled with `controller=other` and `action=other`, but have labels such as
 ```
-
+ruby_http_request_duration_seconds{path="/api/v1/teams/:id",method="GET",status="200",quantile="0.99"} 0.009880661998977303
 ```
 
 ¹) Only available when Redis is used.
@@ -367,43 +368,49 @@ Metrics collected by Process instrumentation include labels `type` (as given wit
 
 #### Sidekiq metrics
 
-
-
-```ruby
-Sidekiq.configure_server do |config|
-  config.server_middleware do |chain|
-    require 'prometheus_exporter/instrumentation'
-    chain.add PrometheusExporter::Instrumentation::Sidekiq
-  end
-  config.death_handlers << PrometheusExporter::Instrumentation::Sidekiq.death_handler
-end
-```
-
-To monitor Queue size and latency:
+There are different kinds of Sidekiq metrics that can be collected. A recommended setup looks like this:
 
 ```ruby
 Sidekiq.configure_server do |config|
+  require 'prometheus_exporter/instrumentation'
+  config.server_middleware do |chain|
+    chain.add PrometheusExporter::Instrumentation::Sidekiq
+  end
+  config.death_handlers << PrometheusExporter::Instrumentation::Sidekiq.death_handler
   config.on :startup do
-
+    PrometheusExporter::Instrumentation::Process.start type: 'sidekiq'
+    PrometheusExporter::Instrumentation::SidekiqProcess.start
    PrometheusExporter::Instrumentation::SidekiqQueue.start
+    PrometheusExporter::Instrumentation::SidekiqStats.start
  end
 end
 ```
 
-
+* The middleware and death handler will generate job specific metrics (how many jobs ran? how many failed? how long did they take? how many are dead? how many were restarted?).
+* The [`Process`](#per-process-stats) metrics provide basic ruby metrics.
+* The `SidekiqProcess` metrics provide the concurrency and busy metrics for this process.
+* The `SidekiqQueue` metrics provide size and latency for the queues run by this process.
+* The `SidekiqStats` metrics provide general, global Sidekiq stats (size of Scheduled, Retries, Dead queues, total number of jobs, etc).
+
+For `SidekiqQueue`, if you run more than one process for the same queues, note that the same metrics will be exposed by all the processes, just like the `SidekiqStats` will if you run more than one process of any kind. You might want to use `avg` or `max` when consuming their metrics.
 
-
+An alternative would be to expose these metrics in a lone, long-lived process. Using a rake task, for example:
 
 ```ruby
-
-
-
-
-
-
+task :sidekiq_metrics do
+  server = PrometheusExporter::Server::WebServer.new
+  server.start
+
+  PrometheusExporter::Client.default = PrometheusExporter::LocalClient.new(collector: server.collector)
+
+  PrometheusExporter::Instrumentation::SidekiqQueue.start(all_queues: true)
+  PrometheusExporter::Instrumentation::SidekiqStats.start
+  sleep
 end
 ```
 
+The `all_queues` parameter for `SidekiqQueue` will expose metrics for all queues.
+
 Sometimes the Sidekiq server shuts down before it can send metrics, that were generated right before the shutdown, to the collector. Especially if you care about the `sidekiq_restarted_jobs_total` metric, it is a good idea to explicitly stop the client:
 
 ```ruby
@@ -414,6 +421,18 @@ Sometimes the Sidekiq server shuts down before it can send metrics, that were ge
 end
 ```
 
+Custom labels can be added for individual jobs by defining a class method on the job class. These labels will be added to all Sidekiq metrics written by the job:
+
+```ruby
+class WorkerWithCustomLabels
+  def self.custom_labels
+    { my_label: 'value-here', other_label: 'second-val' }
+  end
+
+  def perform; end
+end
+```
+
 ##### Metrics collected by Sidekiq Instrumentation
 
 **PrometheusExporter::Instrumentation::Sidekiq**
@@ -447,7 +466,21 @@ Both metrics will have a `queue` label with the name of the queue.
 | Gauge | `sidekiq_process_busy` | Number of busy workers for this process |
 | Gauge | `sidekiq_process_concurrency` | Concurrency for this process |
 
-Both metrics will include the labels `labels`, `queues`, `quiet`, `tag`, `hostname` and `identity`, as returned by the [Sidekiq API](https://github.com/mperham/sidekiq/wiki/API#processes).
+Both metrics will include the labels `labels`, `queues`, `quiet`, `tag`, `hostname` and `identity`, as returned by the [Sidekiq Processes API](https://github.com/mperham/sidekiq/wiki/API#processes).
+
+**PrometheusExporter::Instrumentation::SidekiqStats**
+| Type | Name | Description |
+| --- | --- | --- |
+| Gauge | `sidekiq_stats_dead_size` | Size of the dead queue |
+| Gauge | `sidekiq_stats_enqueued` | Number of enqueued jobs |
+| Gauge | `sidekiq_stats_failed` | Number of failed jobs |
+| Gauge | `sidekiq_stats_processed` | Total number of processed jobs |
+| Gauge | `sidekiq_stats_processes_size` | Number of processes |
+| Gauge | `sidekiq_stats_retry_size` | Size of the retries queue |
+| Gauge | `sidekiq_stats_scheduled_size` | Size of the scheduled queue |
+| Gauge | `sidekiq_stats_workers_size` | Number of jobs actively being processed |
+
+Based on the [Sidekiq Stats API](https://github.com/mperham/sidekiq/wiki/API#stats).
 
 _See [Metrics collected by Process Instrumentation](#metrics-collected-by-process-instrumentation) for a list of metrics the Process instrumentation will produce._
 
@@ -542,7 +575,10 @@ The easiest way to gather this metrics is to put the following in your `puma.rb`
 # puma.rb config
 after_worker_boot do
   require 'prometheus_exporter/instrumentation'
-
+  # optional check, avoids spinning up and down threads per worker
+  if !PrometheusExporter::Instrumentation::Puma.started?
+    PrometheusExporter::Instrumentation::Puma.start
+  end
 end
 ```
 
@@ -871,6 +907,26 @@ In histogram mode, the same metrics will be collected but will be reported as hi
 
 [`histogram_quantile`]: https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile
 
+### Histogram - custom buckets
+
+By default these buckets will be used:
+```
+[0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5.0, 10.0].freeze
+```
+If this is not enough you can specify `default_buckets` like this:
+```
+Histogram.default_buckets = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2, 2.5, 3, 4, 5.0, 10.0, 12, 14, 15, 20, 25].freeze
+```
+
+Specified buckets on the instance take precedence over the default:
+
+```
+Histogram.default_buckets = [0.005, 0.01, 0.5].freeze
+buckets = [0.1, 0.2, 0.3]
+histogram = Histogram.new('test_buckets', 'I have specified buckets', buckets: buckets)
+histogram.buckets => [0.1, 0.2, 0.3]
+```
+
 ## Transport concerns
 
 Prometheus Exporter handles transport using a simple HTTP protocol. In multi process mode we avoid needing a large number of HTTP request by using chunked encoding to send metrics. This means that a single HTTP channel can deliver 100s or even 1000s of metrics over a single HTTP session to the `/send-metrics` endpoint. All calls to `send` and `send_json` on the `PrometheusExporter::Client` class are **non-blocking** and batched.
data/bin/prometheus_exporter
CHANGED
@@ -88,7 +88,7 @@ def run
   end
 
   if custom_collector_filename
-
+    require File.expand_path(custom_collector_filename)
     found = false
 
     base_klass = PrometheusExporter::Server::CollectorBase
@@ -108,7 +108,7 @@ def run
 
   if custom_type_collectors_filenames.length > 0
     custom_type_collectors_filenames.each do |t|
-
+      require File.expand_path(t)
    end
 
    ObjectSpace.each_object(Class) do |klass|
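The two changed lines above make the CLI `require` user-supplied collector files via `File.expand_path`, so relative paths resolve the same way regardless of how the file path was given. For context, a hypothetical custom type-collector file of the kind these options load might look like the sketch below; the class, metric and payload names are illustrative only and not from the gem:

```ruby
# my_type_collector.rb -- hypothetical file passed to the prometheus_exporter CLI.
# The requires are usually already satisfied when the CLI loads this file.
require "prometheus_exporter/metric"
require "prometheus_exporter/server"

class BackupTypeCollector < PrometheusExporter::Server::TypeCollector
  def initialize
    @age = PrometheusExporter::Metric::Gauge.new("backup_age_seconds", "Seconds since the last backup")
  end

  # payloads sent by clients with type: "backup" are routed to this collector
  def type
    "backup"
  end

  def collect(object)
    @age.observe(object["age_seconds"])
  end

  def metrics
    [@age]
  end
end
```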
data/lib/prometheus_exporter/instrumentation/active_record.rb
CHANGED
@@ -2,11 +2,10 @@
 
 # collects stats from currently running process
 module PrometheusExporter::Instrumentation
-  class ActiveRecord
+  class ActiveRecord < PeriodicStats
     ALLOWED_CONFIG_LABELS = %i(database username host port)
 
     def self.start(client: nil, frequency: 30, custom_labels: {}, config_labels: [])
-
       client ||= PrometheusExporter::Client.default
 
       # Not all rails versions support connection pool stats
@@ -20,20 +19,12 @@ module PrometheusExporter::Instrumentation
 
       active_record_collector = new(custom_labels, config_labels)
 
-
-
-
-      while true
-        begin
-          metrics = active_record_collector.collect
-          metrics.each { |metric| client.send_json metric }
-        rescue => e
-          client.logger.error("Prometheus Exporter Failed To Collect Process Stats #{e}")
-        ensure
-          sleep frequency
-        end
-      end
+      worker_loop do
+        metrics = active_record_collector.collect
+        metrics.each { |metric| client.send_json metric }
       end
+
+      super
     end
 
     def self.validate_config_labels(config_labels)
@@ -41,13 +32,6 @@ module PrometheusExporter::Instrumentation
       raise "Invalid Config Labels, available options #{ALLOWED_CONFIG_LABELS}" if (config_labels - ALLOWED_CONFIG_LABELS).size > 0
     end
 
-    def self.stop
-      if t = @thread
-        t.kill
-        @thread = nil
-      end
-    end
-
     def initialize(metric_labels, config_labels)
       @metric_labels = metric_labels
       @config_labels = config_labels
data/lib/prometheus_exporter/instrumentation/periodic_stats.rb
ADDED
@@ -0,0 +1,62 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Instrumentation
+  class PeriodicStats
+
+    def self.start(*args, frequency:, client: nil, **kwargs)
+      client ||= PrometheusExporter::Client.default
+
+      if !(Numeric === frequency)
+        raise ArgumentError.new("Expected frequency to be a number")
+      end
+
+      if frequency < 0
+        raise ArgumentError.new("Expected frequency to be a positive number")
+      end
+
+      if !@worker_loop
+        raise ArgumentError.new("Worker loop was not set")
+      end
+
+      klass = self
+
+      stop
+
+      @stop_thread = false
+
+      @thread = Thread.new do
+        while !@stop_thread
+          begin
+            @worker_loop.call
+          rescue => e
+            client.logger.error("#{klass} Prometheus Exporter Failed To Collect Stats #{e}")
+          ensure
+            sleep frequency
+          end
+        end
+      end
+
+    end
+
+    def self.started?
+      !!@thread&.alive?
+    end
+
+    def self.worker_loop(&blk)
+      @worker_loop = blk
+    end
+
+    def self.stop
+      # to avoid a warning
+      @thread = nil if !defined?(@thread)
+
+      if @thread&.alive?
+        @stop_thread = true
+        @thread.wakeup
+        @thread.join
+      end
+      @thread = nil
+    end
+
+  end
+end
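The new `PeriodicStats` base class above centralizes the collection thread that each instrumentation class previously spun up by hand: a subclass registers its work with `worker_loop` and then calls `super`, while `stop` and `started?` make repeated `#start` calls safe (the thread-leak fix in 2.0.1). A hypothetical application-side subclass, to illustrate the pattern; the `my_app` payload and `MyApp.queue_depth` call are made up and would need a matching type collector on the server side:

```ruby
require "prometheus_exporter/instrumentation"

# Hypothetical periodic instrumentation built on the new base class (not part of the gem).
class MyAppStats < PrometheusExporter::Instrumentation::PeriodicStats
  def self.start(client: nil, frequency: 30)
    client ||= PrometheusExporter::Client.default

    worker_loop do
      # MyApp.queue_depth is a placeholder for whatever the application wants to report
      client.send_json(type: "my_app", queue_depth: MyApp.queue_depth)
    end

    super
  end
end

# MyAppStats.start(frequency: 15)  # calling #start again restarts the worker thread cleanly
# MyAppStats.started?              # => true while the thread is alive
# MyAppStats.stop
```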
data/lib/prometheus_exporter/instrumentation/process.rb
CHANGED
@@ -2,8 +2,7 @@
 
 # collects stats from currently running process
 module PrometheusExporter::Instrumentation
-  class Process
-    @thread = nil if !defined?(@thread)
+  class Process < PeriodicStats
 
     def self.start(client: nil, type: "ruby", frequency: 30, labels: nil)
 
@@ -19,27 +18,12 @@ module PrometheusExporter::Instrumentation
       process_collector = new(metric_labels)
       client ||= PrometheusExporter::Client.default
 
-
-
-
-      while true
-        begin
-          metric = process_collector.collect
-          client.send_json metric
-        rescue => e
-          client.logger.error("Prometheus Exporter Failed To Collect Process Stats #{e}")
-        ensure
-          sleep frequency
-        end
-      end
+      worker_loop do
+        metric = process_collector.collect
+        client.send_json metric
       end
-    end
 
-
-      if t = @thread
-        t.kill
-        @thread = nil
-      end
+      super
     end
 
     def initialize(metric_labels)
data/lib/prometheus_exporter/instrumentation/puma.rb
CHANGED
@@ -4,22 +4,17 @@ require "json"
 
 # collects stats from puma
 module PrometheusExporter::Instrumentation
-  class Puma
+  class Puma < PeriodicStats
     def self.start(client: nil, frequency: 30, labels: {})
       puma_collector = new(labels)
       client ||= PrometheusExporter::Client.default
-
-
-
-
-          client.send_json metric
-        rescue => e
-          client.logger.error("Prometheus Exporter Failed To Collect Puma Stats #{e}")
-        ensure
-          sleep frequency
-        end
-      end
+
+      worker_loop do
+        metric = puma_collector.collect
+        client.send_json metric
       end
+
+      super
     end
 
     def initialize(metric_labels = {})
data/lib/prometheus_exporter/instrumentation/resque.rb
CHANGED
@@ -2,21 +2,16 @@
 
 # collects stats from resque
 module PrometheusExporter::Instrumentation
-  class Resque
+  class Resque < PeriodicStats
     def self.start(client: nil, frequency: 30)
       resque_collector = new
       client ||= PrometheusExporter::Client.default
-
-
-
-          client.send_json(resque_collector.collect)
-        rescue => e
-          client.logger.error("Prometheus Exporter Failed To Collect Resque Stats #{e}")
-        ensure
-          sleep frequency
-        end
-      end
+
+      worker_loop do
+        client.send_json(resque_collector.collect)
       end
+
+      super
     end
 
     def collect
data/lib/prometheus_exporter/instrumentation/sidekiq.rb
CHANGED
@@ -15,16 +15,24 @@ module PrometheusExporter::Instrumentation
       -> (job, ex) do
         job_is_fire_and_forget = job["retry"] == false
 
+        worker_class = Object.const_get(job["class"])
+        worker_custom_labels = self.get_worker_custom_labels(worker_class)
+
         unless job_is_fire_and_forget
           PrometheusExporter::Client.default.send_json(
             type: "sidekiq",
             name: job["class"],
             dead: true,
+            custom_labels: worker_custom_labels
           )
         end
       end
     end
 
+    def self.get_worker_custom_labels(worker_class)
+      worker_class.respond_to?(:custom_labels) ? worker_class.custom_labels : {}
+    end
+
     def initialize(client: nil)
       @client = client || PrometheusExporter::Client.default
     end
@@ -47,7 +55,8 @@ module PrometheusExporter::Instrumentation
         queue: queue,
         success: success,
         shutdown: shutdown,
-        duration: duration
+        duration: duration,
+        custom_labels: self.class.get_worker_custom_labels(worker.class)
       )
     end
 
@@ -69,19 +78,30 @@ module PrometheusExporter::Instrumentation
     end
 
     def get_delayed_name(msg, class_name)
-      # fallback to class_name since we're relying on the internal implementation
-      # of the delayed extensions
-      # https://github.com/mperham/sidekiq/blob/master/lib/sidekiq/extensions/class_methods.rb
       begin
-
+        # fallback to class_name since we're relying on the internal implementation
+        # of the delayed extensions
+        # https://github.com/mperham/sidekiq/blob/master/lib/sidekiq/extensions/class_methods.rb
+        (target, method_name, _args) = YAML.load(msg['args'].first) # rubocop:disable Security/YAMLLoad
         if target.class == Class
           "#{target.name}##{method_name}"
         else
           "#{target.class.name}##{method_name}"
         end
-      rescue
-
+      rescue Psych::DisallowedClass, ArgumentError
+        parsed = Psych.parse(msg['args'].first)
+        children = parsed.root.children
+        target = (children[0].value || children[0].tag).sub('!', '')
+        method_name = (children[1].value || children[1].tag).sub(':', '')
+
+        if target && method_name
+          "#{target}##{method_name}"
+        else
+          class_name
+        end
       end
+    rescue
+      class_name
     end
   end
 end
data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb
CHANGED
@@ -1,22 +1,16 @@
 # frozen_string_literal: true
 
 module PrometheusExporter::Instrumentation
-  class SidekiqProcess
+  class SidekiqProcess < PeriodicStats
     def self.start(client: nil, frequency: 30)
       client ||= PrometheusExporter::Client.default
       sidekiq_process_collector = new
 
-
-
-        begin
-          client.send_json(sidekiq_process_collector.collect)
-        rescue StandardError => e
-          STDERR.puts("Prometheus Exporter Failed To Collect Sidekiq Processes metrics #{e}")
-        ensure
-          sleep frequency
-        end
-      end
+      worker_loop do
+        client.send_json(sidekiq_process_collector.collect)
       end
+
+      super
     end
 
     def initialize
data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb
CHANGED
@@ -1,22 +1,16 @@
 # frozen_string_literal: true
 
 module PrometheusExporter::Instrumentation
-  class SidekiqQueue
+  class SidekiqQueue < PeriodicStats
     def self.start(client: nil, frequency: 30, all_queues: false)
       client ||= PrometheusExporter::Client.default
       sidekiq_queue_collector = new(all_queues: all_queues)
 
-
-
-        begin
-          client.send_json(sidekiq_queue_collector.collect)
-        rescue StandardError => e
-          client.logger.error("Prometheus Exporter Failed To Collect Sidekiq Queue metrics #{e}")
-        ensure
-          sleep frequency
-        end
-      end
+      worker_loop do
+        client.send_json(sidekiq_queue_collector.collect)
       end
+
+      super
     end
 
     def initialize(all_queues: false)
data/lib/prometheus_exporter/instrumentation/sidekiq_stats.rb
ADDED
@@ -0,0 +1,37 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Instrumentation
+  class SidekiqStats < PeriodicStats
+    def self.start(client: nil, frequency: 30)
+      client ||= PrometheusExporter::Client.default
+      sidekiq_stats_collector = new
+
+      worker_loop do
+        client.send_json(sidekiq_stats_collector.collect)
+      end
+
+      super
+    end
+
+    def collect
+      {
+        type: 'sidekiq_stats',
+        stats: collect_stats
+      }
+    end
+
+    def collect_stats
+      stats = ::Sidekiq::Stats.new
+      {
+        'dead_size' => stats.dead_size,
+        'enqueued' => stats.enqueued,
+        'failed' => stats.failed,
+        'processed' => stats.processed,
+        'processes_size' => stats.processes_size,
+        'retry_size' => stats.retry_size,
+        'scheduled_size' => stats.scheduled_size,
+        'workers_size' => stats.workers_size,
+      }
+    end
+  end
+end
data/lib/prometheus_exporter/instrumentation/unicorn.rb
CHANGED
@@ -8,22 +8,17 @@ end
 
 module PrometheusExporter::Instrumentation
   # collects stats from unicorn
-  class Unicorn
+  class Unicorn < PeriodicStats
     def self.start(pid_file:, listener_address:, client: nil, frequency: 30)
       unicorn_collector = new(pid_file: pid_file, listener_address: listener_address)
       client ||= PrometheusExporter::Client.default
-
-
-
-
-        client.send_json metric
-      rescue StandardError => e
-        client.logger.error("Prometheus Exporter Failed To Collect Unicorn Stats #{e}")
-      ensure
-        sleep frequency
-      end
-      end
+
+      worker_loop do
+        metric = unicorn_collector.collect
+        client.send_json metric
       end
+
+      super
     end
 
     def initialize(pid_file:, listener_address:)
data/lib/prometheus_exporter/instrumentation.rb
CHANGED
@@ -1,11 +1,13 @@
 # frozen_string_literal: true
 
 require_relative "client"
+require_relative "instrumentation/periodic_stats"
 require_relative "instrumentation/process"
 require_relative "instrumentation/method_profiler"
 require_relative "instrumentation/sidekiq"
 require_relative "instrumentation/sidekiq_queue"
 require_relative "instrumentation/sidekiq_process"
+require_relative "instrumentation/sidekiq_stats"
 require_relative "instrumentation/delayed_job"
 require_relative "instrumentation/puma"
 require_relative "instrumentation/hutch"
data/lib/prometheus_exporter/metric/base.rb
CHANGED
@@ -106,15 +106,8 @@ module PrometheusExporter::Metric
       end
     end
 
-
-
-      def needs_escape?(str)
-        str.match?(/[\n"\\]/m)
-      end
-    else
-      def needs_escape?(str)
-        !!str.match(/[\n"\\]/m)
-      end
+    def needs_escape?(str)
+      str.match?(/[\n"\\]/m)
     end
 
   end
data/lib/prometheus_exporter/metric/histogram.rb
CHANGED
@@ -5,9 +5,21 @@ module PrometheusExporter::Metric
 
     DEFAULT_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5.0, 10.0].freeze
 
+    @default_buckets = nil if !defined?(@default_buckets)
+
+    def self.default_buckets
+      @default_buckets || DEFAULT_BUCKETS
+    end
+
+    def self.default_buckets=(buckets)
+      @default_buckets = buckets
+    end
+
+    attr_reader :buckets
+
     def initialize(name, help, opts = {})
       super(name, help)
-      @buckets = (opts[:buckets] ||
+      @buckets = (opts[:buckets] || self.class.default_buckets).sort.reverse
       reset!
     end
 
data/lib/prometheus_exporter/middleware.rb
CHANGED
@@ -36,11 +36,12 @@ class PrometheusExporter::Middleware
 
     result
   ensure
-
+    status = (result && result[0]) || -1
    obj = {
      type: "web",
      timings: info,
      queue_time: queue_time,
+      status: status,
      default_labels: default_labels(env, result)
    }
    labels = custom_labels(env)
@@ -52,18 +53,21 @@ class PrometheusExporter::Middleware
   end
 
   def default_labels(env, result)
-    status = (result && result[0]) || -1
     params = env["action_dispatch.request.parameters"]
     action = controller = nil
     if params
       action = params["action"]
       controller = params["controller"]
+    elsif (cors = env["rack.cors"]) && cors.respond_to?(:preflight?) && cors.preflight?
+      # if the Rack CORS Middleware identifies the request as a preflight request,
+      # the stack doesn't get to the point where controllers/actions are defined
+      action = "preflight"
+      controller = "preflight"
     end
 
     {
       action: action || "other",
-      controller: controller || "other",
-      status: status
+      controller: controller || "other"
     }
   end
 
data/lib/prometheus_exporter/server/collector.rb
CHANGED
@@ -15,6 +15,7 @@ module PrometheusExporter::Server
       register_collector(SidekiqCollector.new)
       register_collector(SidekiqQueueCollector.new)
       register_collector(SidekiqProcessCollector.new)
+      register_collector(SidekiqStatsCollector.new)
       register_collector(DelayedJobCollector.new)
       register_collector(PumaCollector.new)
       register_collector(HutchCollector.new)
data/lib/prometheus_exporter/server/sidekiq_process_collector.rb
CHANGED
@@ -26,7 +26,7 @@ module PrometheusExporter::Server
       SIDEKIQ_PROCESS_GAUGES.map do |name, help|
         if (value = metric[name])
           gauge = gauges[name] ||= PrometheusExporter::Metric::Gauge.new("sidekiq_process_#{name}", help)
-
+          gauge.observe(value, labels)
         end
       end
     end
data/lib/prometheus_exporter/server/sidekiq_stats_collector.rb
ADDED
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+module PrometheusExporter::Server
+  class SidekiqStatsCollector < TypeCollector
+    MAX_SIDEKIQ_METRIC_AGE = 60
+
+    SIDEKIQ_STATS_GAUGES = {
+      'dead_size' => 'Size of dead the queue',
+      'enqueued' => 'Number of enqueued jobs',
+      'failed' => 'Number of failed jobs',
+      'processed' => 'Total number of processed jobs',
+      'processes_size' => 'Number of processes',
+      'retry_size' => 'Size of the retries queue',
+      'scheduled_size' => 'Size of the scheduled queue',
+      'workers_size' => 'Number of jobs actively being processed',
+    }.freeze
+
+    attr_reader :sidekiq_metrics, :gauges
+
+    def initialize
+      @sidekiq_metrics = []
+      @gauges = {}
+    end
+
+    def type
+      'sidekiq_stats'
+    end
+
+    def metrics
+      sidekiq_metrics.map do |metric|
+        SIDEKIQ_STATS_GAUGES.map do |name, help|
+          if (value = metric['stats'][name])
+            gauge = gauges[name] ||= PrometheusExporter::Metric::Gauge.new("sidekiq_stats_#{name}", help)
+            gauge.observe(value)
+          end
+        end
+      end
+
+      gauges.values
+    end
+
+    def collect(object)
+      sidekiq_metrics << object
+    end
+  end
+end
data/lib/prometheus_exporter/server/web_collector.rb
CHANGED
@@ -5,10 +5,10 @@ module PrometheusExporter::Server
     def initialize
       @metrics = {}
       @http_requests_total = nil
-      @
-      @
-      @
-      @
+      @http_request_duration_seconds = nil
+      @http_request_redis_duration_seconds = nil
+      @http_request_sql_duration_seconds = nil
+      @http_request_queue_duration_seconds = nil
     end
 
     def type
@@ -33,23 +33,23 @@ module PrometheusExporter::Server
         "Total HTTP requests from web app."
       )
 
-      @metrics["
-        "
+      @metrics["http_request_duration_seconds"] = @http_request_duration_seconds = PrometheusExporter::Metric::Base.default_aggregation.new(
+        "http_request_duration_seconds",
         "Time spent in HTTP reqs in seconds."
       )
 
-      @metrics["
-        "
+      @metrics["http_request_redis_duration_seconds"] = @http_request_redis_duration_seconds = PrometheusExporter::Metric::Base.default_aggregation.new(
+        "http_request_redis_duration_seconds",
         "Time spent in HTTP reqs in Redis, in seconds."
       )
 
-      @metrics["
-        "
+      @metrics["http_request_sql_duration_seconds"] = @http_request_sql_duration_seconds = PrometheusExporter::Metric::Base.default_aggregation.new(
+        "http_request_sql_duration_seconds",
         "Time spent in HTTP reqs in SQL in seconds."
       )
 
-      @metrics["
-        "
+      @metrics["http_request_queue_duration_seconds"] = @http_request_queue_duration_seconds = PrometheusExporter::Metric::Base.default_aggregation.new(
+        "http_request_queue_duration_seconds",
         "Time spent queueing the request in load balancer in seconds."
       )
     end
@@ -60,19 +60,19 @@ module PrometheusExporter::Server
       custom_labels = obj['custom_labels']
       labels = custom_labels.nil? ? default_labels : default_labels.merge(custom_labels)
 
-      @http_requests_total.observe(1, labels)
+      @http_requests_total.observe(1, labels.merge(status: obj["status"]))
 
       if timings = obj["timings"]
-        @
+        @http_request_duration_seconds.observe(timings["total_duration"], labels)
         if redis = timings["redis"]
-          @
+          @http_request_redis_duration_seconds.observe(redis["duration"], labels)
         end
         if sql = timings["sql"]
-          @
+          @http_request_sql_duration_seconds.observe(sql["duration"], labels)
         end
       end
       if queue_time = obj["queue_time"]
-        @
+        @http_request_queue_duration_seconds.observe(queue_time, labels)
       end
     end
   end
data/lib/prometheus_exporter/server.rb
CHANGED
@@ -7,6 +7,7 @@ require_relative "server/process_collector"
 require_relative "server/sidekiq_collector"
 require_relative "server/sidekiq_queue_collector"
 require_relative "server/sidekiq_process_collector"
+require_relative "server/sidekiq_stats_collector"
 require_relative "server/delayed_job_collector"
 require_relative "server/collector_base"
 require_relative "server/collector"
data/prometheus_exporter.gemspec
CHANGED
@@ -31,7 +31,7 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency "rake", "~> 13.0"
   spec.add_development_dependency "minitest", "~> 5.0"
   spec.add_development_dependency "guard", "~> 2.0"
-  spec.add_development_dependency "mini_racer", "~> 0.
+  spec.add_development_dependency "mini_racer", "~> 0.5.0"
   spec.add_development_dependency "guard-minitest", "~> 2.0"
   spec.add_development_dependency "oj", "~> 3.0"
   spec.add_development_dependency "rack-test", "~> 0.8.3"
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: prometheus_exporter
 version: !ruby/object:Gem::Version
-  version:
+  version: 2.0.1
 platform: ruby
 authors:
 - Sam Saffron
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2022-02-24 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: webrick
@@ -100,14 +100,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.5.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.5.0
 - !ruby/object:Gem::Dependency
   name: guard-minitest
   requirement: !ruby/object:Gem::Requirement
@@ -238,6 +238,7 @@ files:
 - lib/prometheus_exporter/instrumentation/delayed_job.rb
 - lib/prometheus_exporter/instrumentation/hutch.rb
 - lib/prometheus_exporter/instrumentation/method_profiler.rb
+- lib/prometheus_exporter/instrumentation/periodic_stats.rb
 - lib/prometheus_exporter/instrumentation/process.rb
 - lib/prometheus_exporter/instrumentation/puma.rb
 - lib/prometheus_exporter/instrumentation/resque.rb
@@ -245,6 +246,7 @@ files:
 - lib/prometheus_exporter/instrumentation/sidekiq.rb
 - lib/prometheus_exporter/instrumentation/sidekiq_process.rb
 - lib/prometheus_exporter/instrumentation/sidekiq_queue.rb
+- lib/prometheus_exporter/instrumentation/sidekiq_stats.rb
 - lib/prometheus_exporter/instrumentation/unicorn.rb
 - lib/prometheus_exporter/metric.rb
 - lib/prometheus_exporter/metric/base.rb
@@ -267,6 +269,7 @@ files:
 - lib/prometheus_exporter/server/sidekiq_collector.rb
 - lib/prometheus_exporter/server/sidekiq_process_collector.rb
 - lib/prometheus_exporter/server/sidekiq_queue_collector.rb
+- lib/prometheus_exporter/server/sidekiq_stats_collector.rb
 - lib/prometheus_exporter/server/type_collector.rb
 - lib/prometheus_exporter/server/unicorn_collector.rb
 - lib/prometheus_exporter/server/web_collector.rb