prometheus_exporter 0.6.0 → 1.0.0

Files changed (41)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/ci.yml +17 -4
  3. data/.gitignore +2 -0
  4. data/.rubocop.yml +5 -0
  5. data/Appraisals +10 -0
  6. data/CHANGELOG +33 -1
  7. data/README.md +115 -18
  8. data/bin/prometheus_exporter +17 -4
  9. data/gemfiles/.bundle/config +2 -0
  10. data/gemfiles/ar_60.gemfile +5 -0
  11. data/gemfiles/ar_61.gemfile +7 -0
  12. data/lib/prometheus_exporter/client.rb +16 -6
  13. data/lib/prometheus_exporter/instrumentation/active_record.rb +19 -12
  14. data/lib/prometheus_exporter/instrumentation/delayed_job.rb +3 -2
  15. data/lib/prometheus_exporter/instrumentation/method_profiler.rb +2 -1
  16. data/lib/prometheus_exporter/instrumentation/process.rb +1 -1
  17. data/lib/prometheus_exporter/instrumentation/puma.rb +28 -16
  18. data/lib/prometheus_exporter/instrumentation/resque.rb +40 -0
  19. data/lib/prometheus_exporter/instrumentation/sidekiq_process.rb +58 -0
  20. data/lib/prometheus_exporter/instrumentation/sidekiq_queue.rb +27 -13
  21. data/lib/prometheus_exporter/instrumentation/unicorn.rb +4 -4
  22. data/lib/prometheus_exporter/instrumentation.rb +2 -0
  23. data/lib/prometheus_exporter/metric/base.rb +9 -0
  24. data/lib/prometheus_exporter/metric/gauge.rb +4 -0
  25. data/lib/prometheus_exporter/middleware.rb +31 -19
  26. data/lib/prometheus_exporter/server/active_record_collector.rb +2 -1
  27. data/lib/prometheus_exporter/server/collector.rb +2 -0
  28. data/lib/prometheus_exporter/server/delayed_job_collector.rb +17 -17
  29. data/lib/prometheus_exporter/server/puma_collector.rb +16 -8
  30. data/lib/prometheus_exporter/server/resque_collector.rb +54 -0
  31. data/lib/prometheus_exporter/server/runner.rb +11 -2
  32. data/lib/prometheus_exporter/server/sidekiq_collector.rb +1 -1
  33. data/lib/prometheus_exporter/server/sidekiq_process_collector.rb +46 -0
  34. data/lib/prometheus_exporter/server/sidekiq_queue_collector.rb +1 -1
  35. data/lib/prometheus_exporter/server/unicorn_collector.rb +3 -3
  36. data/lib/prometheus_exporter/server/web_collector.rb +6 -9
  37. data/lib/prometheus_exporter/server/web_server.rb +6 -8
  38. data/lib/prometheus_exporter/server.rb +2 -0
  39. data/lib/prometheus_exporter/version.rb +1 -1
  40. data/prometheus_exporter.gemspec +7 -3
  41. metadata +62 -12
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: f516c39448418a2216851149f0479f3f9e0f5feadae5a5468e7953fc0551d318
-  data.tar.gz: dcc937b79e05d4cd74a64ab2135425c75c4bba1be053bec99e8db0695a3ac998
+  metadata.gz: 4b9fae4f661a2154e78754266e14a491a3d4271a7f2bf308586d1d5c16975452
+  data.tar.gz: 3c12e51359809b26963be8d148ebf0775f83ad588ca6b7f352b855e73f553598
 SHA512:
-  metadata.gz: 5664f24c1c4a1520bafe789e5df8a9f1c40b118933cab9b73baa953f633ec6e81c3a11bbc9fc63097a2a2acdd2390daeade9449e6451cc83207fc2162575b359
-  data.tar.gz: ca4aeedbc6e211818569257e8a3e9fde93be363a2da246abae45749bc9cc232052ac52f0850cd54d0f600904bed193a10db1f76e4fc9567b837b20a78264158b
+  metadata.gz: 866d3593b9d575451efec2a87dac5d0e4b03b9ed265b776c9ae3f65ce417e7cb04c8ee29769ba38d3fc5ad1abb5824c56c10f5ee7b8461d62629c95145f8c58d
+  data.tar.gz: 6dd42a1d78443807ca0b026617e55e23089e772620d0dfd88ccc3c2871a6096fb081ca0065060e943680158f7b78c88035bb8d64327f95e4756f3902add36fe3
data/.github/workflows/ci.yml CHANGED
@@ -12,7 +12,7 @@ jobs:
     name: Ruby ${{ matrix.ruby }}
     strategy:
       matrix:
-        ruby: ["2.7", "2.6", "2.5", "2.4"]
+        ruby: ["3.0", "2.7", "2.6"]
     steps:
       - uses: actions/checkout@master
         with:
@@ -23,14 +23,27 @@ jobs:
       - uses: actions/cache@v2
         with:
          path: vendor/bundle
-          key: ${{ runner.os }}-${{ matrix.ruby }}-gems-${{ hashFiles('**/Gemfile.lock') }}
+          key: ${{ runner.os }}-${{ matrix.ruby }}-gems-v2-${{ hashFiles('**/Gemfile.lock') }}
          restore-keys: |
-            ${{ runner.os }}-${{ matrix.ruby }}-gems-
+            ${{ runner.os }}-${{ matrix.ruby }}-gems-v2-
+
+      - name: Downgrade rubygems
+        run: |
+          # for Ruby <= 2.6, details https://github.com/rubygems/rubygems/issues/3284
+          gem update --system 3.0.8
+        if: ${{ matrix.ruby == '2.6' || matrix.ruby == '2.7' }}
+      - name: Upgrade rubygems
+        run: |
+          gem update --system
       - name: Setup gems
         run: |
+          gem install bundler
           bundle config path vendor/bundle
           bundle install --jobs 4
+          bundle exec appraisal install
       - name: Rubocop
         run: bundle exec rubocop
+      - name: install gems
+        run: bundle exec appraisal bundle
       - name: Run tests
-        run: bundle exec rake
+        run: bundle exec appraisal rake
data/.gitignore CHANGED
@@ -7,5 +7,7 @@
 /spec/reports/
 /tmp/
 Gemfile.lock
+/gemfiles/*.gemfile.lock
+
 
 .rubocop-https---raw-githubusercontent-com-discourse-discourse-master--rubocop-yml
data/.rubocop.yml CHANGED
@@ -1,2 +1,7 @@
 inherit_gem:
   rubocop-discourse: default.yml
+
+AllCops:
+  Exclude:
+    - 'gemfiles/**/*'
+    - 'vendor/**/*'
data/Appraisals ADDED
@@ -0,0 +1,10 @@
+# frozen_string_literal: true
+
+appraise "ar-60" do
+  # we are using this version as default in gemspec
+  # gem "activerecord", "~> 6.0.0"
+end
+
+appraise "ar-61" do
+  gem "activerecord", "~> 6.1.1"
+end
data/CHANGELOG CHANGED
@@ -1,4 +1,36 @@
-0.6.0 - 10-11-2020
+1.0.0 - 23-11-2021
+
+- BREAKING: rename metrics to match prometheus official naming conventions (see https://prometheus.io/docs/practices/naming/#metric-names)
+- FEATURE: Sidekiq process metrics
+- FEATURE: Allow collecting web metrics as histograms
+- FIX: logger improved for web server
+- FIX: Remove job labels from DelayedJob queues
+
+0.8.1 - 04-08-2021
+
+- FEATURE: swap from hardcoded STDERR to logger pattern (see README for details)
+
+0.8.0 - 05-07-2021
+
+- FIX: handle ThreadError more gracefully in cases where process shuts down
+- FEATURE: add job_name and queue_name labels to delayed job metrics
+- FEATURE: always scope puma metrics on hostname in collector
+- FEATURE: add customizable labels option to puma collector
+- FEATURE: support for Resque
+- DEV: Remove support for EOL ruby 2.5
+- FIX: Add source location to MethodProfiler patches
+- FEATURE: Improve Active Record instrumentation
+- FEATURE: Support HTTP_X_AMZN_TRACE_ID when supplied
+
+0.7.0 - 29-12-2020
+
+- DEV: Removed support for EOL rubies; only 2.5, 2.6, 2.7 and 3.0 are supported now.
+- DEV: Better support for Ruby 3.0, explicitly depending on webrick
+- DEV: Rails 6.1 instrumentation support
+- FEATURE: clean pattern for overriding middleware labels was introduced (in README)
+- FIX: Better support for forking
+
+0.6.0 - 17-11-2020
 
 - FEATURE: add support for basic-auth in the prometheus_exporter web server
 
data/README.md CHANGED
@@ -5,6 +5,7 @@ Prometheus Exporter allows you to aggregate custom metrics from multiple process
 To learn more see [Instrumenting Rails with Prometheus](https://samsaffron.com/archive/2018/02/02/instrumenting-rails-with-prometheus) (it has pretty pictures!)
 
 * [Requirements](#requirements)
+* [Migrating from v0.x](#migrating-from-v0.x)
 * [Installation](#installation)
 * [Usage](#usage)
   * [Single process mode](#single-process-mode)
@@ -19,21 +20,30 @@ To learn more see [Instrumenting Rails with Prometheus](https://samsaffron.com/a
   * [Hutch metrics](#hutch-message-processing-tracer)
   * [Puma metrics](#puma-metrics)
   * [Unicorn metrics](#unicorn-process-metrics)
+  * [Resque metrics](#resque-metrics)
 * [Custom type collectors](#custom-type-collectors)
 * [Multi process mode with custom collector](#multi-process-mode-with-custom-collector)
 * [GraphQL support](#graphql-support)
 * [Metrics default prefix / labels](#metrics-default-prefix--labels)
 * [Client default labels](#client-default-labels)
 * [Client default host](#client-default-host)
+* [Histogram mode](#histogram-mode)
 * [Transport concerns](#transport-concerns)
 * [JSON generation and parsing](#json-generation-and-parsing)
+* [Logging](#logging)
 * [Contributing](#contributing)
 * [License](#license)
 * [Code of Conduct](#code-of-conduct)
 
 ## Requirements
 
-Minimum Ruby of version 2.3.0 is required, Ruby 2.2.0 is EOL as of 2018-03-31
+A minimum Ruby version of 2.6.0 is required; Ruby 2.5.0 is EOL as of March 31st 2021.
+
+## Migrating from v0.x
+
+There are some major changes in v1.x from v0.x.
+
+- Some metrics are renamed to match the [prometheus official guide for metric names](https://prometheus.io/docs/practices/naming/#metric-names). (#184)
 
 ## Installation
 
@@ -218,6 +228,32 @@ class MyMiddleware < PrometheusExporter::Middleware
 end
 ```
 
+If you are not using a Rails-like framework, you can extend `PrometheusExporter::Middleware#default_labels` to add more relevant labels.
+For example, you can mimic [prometheus-client](https://github.com/prometheus/client_ruby) labels with code like this:
+```ruby
+class MyMiddleware < PrometheusExporter::Middleware
+  def default_labels(env, result)
+    status = (result && result[0]) || -1
+    path = [env["SCRIPT_NAME"], env["PATH_INFO"]].join
+    {
+      path: strip_ids_from_path(path),
+      method: env["REQUEST_METHOD"],
+      status: status
+    }
+  end
+
+  def strip_ids_from_path(path)
+    path
+      .gsub(%r{/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}(/|$)}, '/:uuid\\1')
+      .gsub(%r{/\d+(/|$)}, '/:id\\1')
+  end
+end
+```
+That way you won't have all metrics labeled with `controller=other` and `action=other`, but will have labels such as
+```
+ruby_http_duration_seconds{path="/api/v1/teams/:id",method="GET",status="200",quantile="0.99"} 0.009880661998977303
+```
+
 ¹) Only available when Redis is used.
 ²) Only available when Mysql or PostgreSQL are used.
 ³) Only available when [Instrumenting Request Queueing Time](#instrumenting-request-queueing-time) is set up.
@@ -354,6 +390,8 @@ Sidekiq.configure_server do |config|
 end
 ```
 
+This will only monitor the queues that are consumed by the sidekiq process you are on. You can pass an `all_queues` parameter to monitor metrics on all queues.
+
 To monitor Sidekiq process info:
 
 ```ruby
@@ -361,6 +399,7 @@ Sidekiq.configure_server do |config|
   config.on :startup do
     require 'prometheus_exporter/instrumentation'
     PrometheusExporter::Instrumentation::Process.start type: 'sidekiq'
+    PrometheusExporter::Instrumentation::SidekiqProcess.start
   end
 end
 ```
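
For illustration, a minimal sketch of enabling the `all_queues` option mentioned in the hunk above, assuming `SidekiqQueue.start` accepts it as a keyword (check the shipped README for the exact signature):

```ruby
# Hypothetical initializer (not part of the diff), assuming the
# `all_queues` keyword described above.
Sidekiq.configure_server do |config|
  config.on :startup do
    require 'prometheus_exporter/instrumentation'
    # Report backlog and latency for every queue known to Sidekiq,
    # not just the queues this process consumes.
    PrometheusExporter::Instrumentation::SidekiqQueue.start(all_queues: true)
  end
end
```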
@@ -397,11 +436,19 @@ This metric has a `job_name` label and a `queue` label.
 **PrometheusExporter::Instrumentation::SidekiqQueue**
 | Type | Name | Description |
 | --- | --- | --- |
-| Gauge | `sidekiq_queue_backlog_total` | Size of the sidekiq queue |
+| Gauge | `sidekiq_queue_backlog` | Size of the sidekiq queue |
 | Gauge | `sidekiq_queue_latency_seconds` | Latency of the sidekiq queue |
 
 Both metrics will have a `queue` label with the name of the queue.
 
+**PrometheusExporter::Instrumentation::SidekiqProcess**
+| Type | Name | Description |
+| --- | --- | --- |
+| Gauge | `sidekiq_process_busy` | Number of busy workers for this process |
+| Gauge | `sidekiq_process_concurrency` | Concurrency for this process |
+
+Both metrics will include the labels `labels`, `queues`, `quiet`, `tag`, `hostname` and `identity`, as returned by the [Sidekiq API](https://github.com/mperham/sidekiq/wiki/API#processes).
+
 _See [Metrics collected by Process Instrumentation](#metrics-collected-by-process-instrumentation) for a list of metrics the Process instrumentation will produce._
 
 #### Shoryuken metrics
@@ -452,6 +499,8 @@ end
 | Summary | `delayed_job_duration_seconds_summary` | Summary of the time it takes jobs to execute | `status` |
 | Summary | `delayed_job_attempts_summary` | Summary of the amount of attempts it takes delayed jobs to succeed | - |
 
+All metrics have labels for `job_name` and `queue_name`.
+
 #### Hutch Message Processing Tracer
 
 Capture [Hutch](https://github.com/gocardless/hutch) metrics (how many jobs ran? how many failed? how long did they take?)
@@ -479,7 +528,7 @@ Request Queueing is defined as the time it takes for a request to reach your app
 
 As this metric starts before `prometheus_exporter` can handle the request, you must add a specific HTTP header as early in your infrastructure as possible (we recommend your load balancer or reverse proxy).
 
-Configure your HTTP server / load balancer to add a header `X-Request-Start: t=<MSEC>` when passing the request upstream. For more information, please consult your software manual.
+The Amazon Application Load Balancer [request tracing header](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-request-tracing.html) is natively supported. If you are using another upstream entrypoint, you may configure your HTTP server / load balancer to add a header `X-Request-Start: t=<MSEC>` when passing the request upstream. For more information, please consult your software manual.
 
 Hint: we aim to be API-compatible with the big APM solutions, so if you've got request queueing time configured for them, it should be expected to also work with `prometheus_exporter`.
 
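For illustration, a hypothetical nginx snippet for the `X-Request-Start` approach above (not from the diff or README; nginx's `$msec` is epoch seconds with millisecond resolution, so verify the unit the middleware expects before relying on the resulting metric):

```
# Hypothetical nginx config -- stamp requests as early as possible.
location / {
  proxy_set_header X-Request-Start "t=${msec}";
  proxy_pass http://app_upstream;
}
```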
@@ -499,17 +548,39 @@ end
 
 #### Metrics collected by Puma Instrumentation
 
-| Type | Name | Description |
-| --- | --- | --- |
-| Gauge | `puma_workers_total` | Number of puma workers |
-| Gauge | `puma_booted_workers_total` | Number of puma workers booted |
-| Gauge | `puma_old_workers_total` | Number of old puma workers |
-| Gauge | `puma_running_threads_total` | Number of puma threads currently running |
-| Gauge | `puma_request_backlog_total` | Number of requests waiting to be processed by a puma thread |
-| Gauge | `puma_thread_pool_capacity_total` | Number of puma threads available at current scale |
-| Gauge | `puma_max_threads_total` | Number of puma threads available at max scale |
+| Type | Name | Description |
+| --- | --- | --- |
+| Gauge | `puma_workers` | Number of puma workers |
+| Gauge | `puma_booted_workers` | Number of puma workers booted |
+| Gauge | `puma_old_workers` | Number of old puma workers |
+| Gauge | `puma_running_threads` | Number of puma threads currently running |
+| Gauge | `puma_request_backlog` | Number of requests waiting to be processed by a puma thread |
+| Gauge | `puma_thread_pool_capacity` | Number of puma threads available at current scale |
+| Gauge | `puma_max_threads` | Number of puma threads available at max scale |
+
+All metrics may have a `phase` label and all custom labels provided with the `labels` option.
+
+### Resque metrics
 
-All metrics may have a `phase` label.
+The resque metrics use the `Resque.info` method, which queries Redis internally. To start monitoring your resque
+installation, you'll need to start the instrumentation:
+
+```ruby
+# e.g. config/initializers/resque.rb
+require 'prometheus_exporter/instrumentation'
+PrometheusExporter::Instrumentation::Resque.start
+```
+
+#### Metrics collected by Resque Instrumentation
+
+| Type | Name | Description |
+| --- | --- | --- |
+| Gauge | `resque_processed_jobs` | Total number of processed Resque jobs |
+| Gauge | `resque_failed_jobs` | Total number of failed Resque jobs |
+| Gauge | `resque_pending_jobs` | Total number of pending Resque jobs |
+| Gauge | `resque_queues` | Total number of Resque queues |
+| Gauge | `resque_workers` | Total number of Resque workers running |
+| Gauge | `resque_working` | Total number of Resque workers working |
 
 ### Unicorn process metrics
 
@@ -528,11 +599,11 @@ Note: You must install the `raindrops` gem in your `Gemfile` or locally.
 
 #### Metrics collected by Unicorn Instrumentation
 
-| Type | Name | Description |
-| --- | --- | --- |
-| Gauge | `unicorn_workers_total` | Number of unicorn workers |
-| Gauge | `unicorn_active_workers_total` | Number of active unicorn workers |
-| Gauge | `unicorn_request_backlog_total` | Number of requests waiting to be processed by a unicorn worker |
+| Type | Name | Description |
+| --- | --- | --- |
+| Gauge | `unicorn_workers` | Number of unicorn workers |
+| Gauge | `unicorn_active_workers` | Number of active unicorn workers |
+| Gauge | `unicorn_request_backlog` | Number of requests waiting to be processed by a unicorn worker |
 
 ### Custom type collectors
 
@@ -717,6 +788,7 @@ Usage: prometheus_exporter [options]
     -c, --collector FILE         (optional) Custom collector to run
     -a, --type-collector FILE    (optional) Custom type collectors to run in main collector
     -v, --verbose
+    -g, --histogram              Use histogram instead of summary for aggregations
     --auth FILE                  (optional) enable basic authentication using a htpasswd FILE
     --realm REALM                (optional) Use REALM for basic authentication (default: "Prometheus Exporter")
     --unicorn-listen-address ADDRESS
@@ -787,6 +859,18 @@ http_requests_total{service="app-server-01",app_name="app-01"} 1
 
 By default, `PrometheusExporter::Client.default` connects to `localhost:9394`. If your setup requires this (e.g. when using `docker-compose`), you can change the default host and port by setting the environment variables `PROMETHEUS_EXPORTER_HOST` and `PROMETHEUS_EXPORTER_PORT`.
 
+### Histogram mode
+
+By default, the built-in collectors will report aggregations as summaries. If you need to aggregate metrics across labels, you can switch from summaries to histograms:
+
+```
+$ prometheus_exporter --histogram
+```
+
+In histogram mode, the same metrics will be collected but will be reported as histograms rather than summaries. This sacrifices some precision but allows aggregating metrics across actions and nodes using [`histogram_quantile`].
+
+[`histogram_quantile`]: https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile
+
 ## Transport concerns
 
 Prometheus Exporter handles transport using a simple HTTP protocol. In multi process mode we avoid needing a large number of HTTP requests by using chunked encoding to send metrics. This means that a single HTTP channel can deliver 100s or even 1000s of metrics over a single HTTP session to the `/send-metrics` endpoint. All calls to `send` and `send_json` on the `PrometheusExporter::Client` class are **non-blocking** and batched.
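
For illustration, a hypothetical PromQL query over histogram-mode output from the section above, assuming a histogram named `ruby_http_duration_seconds` as in the earlier middleware example:

```
# Hypothetical PromQL (not from the diff): p99 request duration per path,
# aggregated across all processes and nodes over a 5 minute window.
histogram_quantile(0.99, sum by (le, path) (rate(ruby_http_duration_seconds_bucket[5m])))
```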
@@ -799,6 +883,19 @@ The `PrometheusExporter::Client` class has the method `#send_json`. This method,
 
 When `PrometheusExporter::Server::Collector` parses your JSON, by default it will use the faster Oj deserializer if available. This happens because it only expects a simple Hash out of the box. You can opt in for the default JSON deserializer with `json_serializer: :json`.
 
+## Logging
+
+`PrometheusExporter::Client.default` will export to `STDERR`. To change this, you can pass your own logger:
+```ruby
+PrometheusExporter::Client.new(logger: Rails.logger)
+PrometheusExporter::Client.new(logger: Logger.new(STDOUT))
+```
+
+You can also pass a log level (default is [`Logger::WARN`](https://ruby-doc.org/stdlib-3.0.1/libdoc/logger/rdoc/Logger.html)):
+```ruby
+PrometheusExporter::Client.new(log_level: Logger::DEBUG)
+```
+
 ## Contributing
 
 Bug reports and pull requests are welcome on GitHub at https://github.com/discourse/prometheus_exporter. This project is intended to be a safe, welcoming space for collaboration, and contributors are expected to adhere to the [Contributor Covenant](http://contributor-covenant.org) code of conduct.
data/bin/prometheus_exporter CHANGED
@@ -3,12 +3,15 @@
 
 require 'optparse'
 require 'json'
+require 'logger'
 
 require_relative "./../lib/prometheus_exporter"
 require_relative "./../lib/prometheus_exporter/server"
 
 def run
-  options = {}
+  options = {
+    logger_path: STDERR
+  }
   custom_collector_filename = nil
   custom_type_collectors_filenames = []
 
@@ -47,6 +50,9 @@ def run
     opt.on('-v', '--verbose') do |o|
       options[:verbose] = true
     end
+    opt.on('-g', '--histogram', "Use histogram instead of summary for aggregations") do |o|
+      options[:histogram] = true
+    end
     opt.on('--auth FILE', String, "(optional) enable basic authentication using a htpasswd FILE") do |o|
       options[:auth] = o
     end
@@ -61,15 +67,22 @@ def run
     opt.on('--unicorn-master PID_FILE', String, '(optional) PID file of unicorn master process to monitor unicorn') do |o|
       options[:unicorn_pid_file] = o
     end
+
+    opt.on('--logger-path PATH', String, '(optional) Path to file for logger output. Defaults to STDERR') do |o|
+      options[:logger_path] = o
+    end
   end.parse!
 
+  logger = Logger.new(options[:logger_path])
+  logger.level = Logger::WARN
+
   if options.has_key?(:realm) && !options.has_key?(:auth)
-    STDERR.puts "[Warn] Providing REALM without AUTH has no effect"
+    logger.warn "Providing REALM without AUTH has no effect"
   end
 
   if options.has_key?(:auth)
     unless File.exist?(options[:auth]) && File.readable?(options[:auth])
-      STDERR.puts "[Error] The AUTH file either doesn't exist or we don't have access to it"
+      logger.fatal "The AUTH file either doesn't exist or we don't have access to it"
       exit 1
     end
  end
@@ -88,7 +101,7 @@ def run
   end
 
   if !found
-    STDERR.puts "[Error] Can not find a class inheriting off PrometheusExporter::Server::CollectorBase"
+    logger.fatal "Can not find a class inheriting off PrometheusExporter::Server::CollectorBase"
     exit 1
   end
 end
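
For illustration, a hypothetical invocation combining the two flags added above (the log path is illustrative):

```
$ prometheus_exporter --histogram --logger-path /var/log/prometheus_exporter.log
```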
data/gemfiles/.bundle/config ADDED
@@ -0,0 +1,2 @@
+---
+BUNDLE_RETRY: "1"
data/gemfiles/ar_60.gemfile ADDED
@@ -0,0 +1,5 @@
+# This file was generated by Appraisal
+
+source "https://rubygems.org"
+
+gemspec path: "../"
data/gemfiles/ar_61.gemfile ADDED
@@ -0,0 +1,7 @@
+# This file was generated by Appraisal
+
+source "https://rubygems.org"
+
+gem "activerecord", "~> 6.1.0"
+
+gemspec path: "../"
data/lib/prometheus_exporter/client.rb CHANGED
@@ -2,6 +2,7 @@
 
 require 'socket'
 require 'thread'
+require 'logger'
 
 module PrometheusExporter
   class Client
@@ -53,14 +54,20 @@ module PrometheusExporter
     MAX_SOCKET_AGE = 25
     MAX_QUEUE_SIZE = 10_000
 
+    attr_reader :logger
+
     def initialize(
       host: ENV.fetch('PROMETHEUS_EXPORTER_HOST', 'localhost'),
       port: ENV.fetch('PROMETHEUS_EXPORTER_PORT', PrometheusExporter::DEFAULT_PORT),
       max_queue_size: nil,
       thread_sleep: 0.5,
       json_serializer: nil,
-      custom_labels: nil
+      custom_labels: nil,
+      logger: Logger.new(STDERR),
+      log_level: Logger::WARN
     )
+      @logger = logger
+      @logger.level = log_level
       @metrics = []
 
       @queue = Queue.new
@@ -72,7 +79,7 @@ module PrometheusExporter
       max_queue_size ||= MAX_QUEUE_SIZE
       max_queue_size = max_queue_size.to_i
 
-      if max_queue_size.to_i <= 0
+      if max_queue_size <= 0
         raise ArgumentError, "max_queue_size must be larger than 0"
       end
 
@@ -125,7 +132,7 @@ module PrometheusExporter
     def send(str)
       @queue << str
       if @queue.length > @max_queue_size
-        STDERR.puts "Prometheus Exporter client is dropping message cause queue is full"
+        logger.warn "Prometheus Exporter client is dropping message cause queue is full"
         @queue.pop
       end
 
@@ -143,7 +150,7 @@ module PrometheusExporter
       @socket.write(message)
       @socket.write("\r\n")
     rescue => e
-      STDERR.puts "Prometheus Exporter is dropping a message: #{e}"
+      logger.warn "Prometheus Exporter is dropping a message: #{e}"
      @socket = nil
       raise
     end
@@ -168,7 +175,7 @@ module PrometheusExporter
       close_socket_if_old!
       process_queue
     rescue => e
-      STDERR.puts "Prometheus Exporter, failed to send message #{e}"
+      logger.error "Prometheus Exporter, failed to send message #{e}"
     end
 
     def ensure_worker_thread!
@@ -184,11 +191,14 @@ module PrometheusExporter
         end
       end
     end
+  rescue ThreadError => e
+    raise unless e.message =~ /can't alloc thread/
+    logger.error "Prometheus Exporter, failed to send message ThreadError #{e}"
   end
 
   def close_socket!
     begin
-      if @socket
+      if @socket && !@socket.closed?
         @socket.write("0\r\n")
         @socket.write("\r\n")
         @socket.flush
data/lib/prometheus_exporter/instrumentation/active_record.rb CHANGED
@@ -7,9 +7,11 @@ module PrometheusExporter::Instrumentation
 
   def self.start(client: nil, frequency: 30, custom_labels: {}, config_labels: [])
 
-    # Not all rails versions support coonection pool stats
+    client ||= PrometheusExporter::Client.default
+
+    # Not all rails versions support connection pool stats
     unless ::ActiveRecord::Base.connection_pool.respond_to?(:stat)
-      STDERR.puts("ActiveRecord connection pool stats not supported in your rails version")
+      client.logger.error("ActiveRecord connection pool stats not supported in your rails version")
       return
     end
 
@@ -18,8 +20,6 @@ module PrometheusExporter::Instrumentation
 
     active_record_collector = new(custom_labels, config_labels)
 
-    client ||= PrometheusExporter::Client.default
-
     stop if @thread
 
     @thread = Thread.new do
@@ -28,7 +28,7 @@ module PrometheusExporter::Instrumentation
         metrics = active_record_collector.collect
         metrics.each { |metric| client.send_json metric }
       rescue => e
-        STDERR.puts("Prometheus Exporter Failed To Collect Process Stats #{e}")
+        client.logger.error("Prometheus Exporter Failed To Collect Process Stats #{e}")
       ensure
         sleep frequency
       end
@@ -67,21 +67,28 @@ module PrometheusExporter::Instrumentation
     ObjectSpace.each_object(::ActiveRecord::ConnectionAdapters::ConnectionPool) do |pool|
       next if pool.connections.nil?
 
-      labels_from_config = pool.spec.config
-        .select { |k, v| @config_labels.include? k }
-        .map { |k, v| [k.to_s.dup.prepend("dbconfig_"), v] }
-
-      labels = @metric_labels.merge(pool_name: pool.spec.name).merge(Hash[labels_from_config])
-
       metric = {
         pid: pid,
         type: "active_record",
         hostname: ::PrometheusExporter.hostname,
-        metric_labels: labels
+        metric_labels: labels(pool)
       }
       metric.merge!(pool.stat)
       metrics << metric
     end
   end
+
+  private
+
+  def labels(pool)
+    if ::ActiveRecord.version < Gem::Version.new("6.1.0.rc1")
+      @metric_labels.merge(pool_name: pool.spec.name).merge(pool.spec.config
+        .select { |k, v| @config_labels.include? k }
+        .map { |k, v| [k.to_s.dup.prepend("dbconfig_"), v] }.to_h)
+    else
+      @metric_labels.merge(pool_name: pool.db_config.name).merge(
+        @config_labels.each_with_object({}) { |l, acc| acc["dbconfig_#{l}"] = pool.db_config.public_send(l) })
+    end
+  end
 end
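
For illustration, a hypothetical call site for the instrumentation above; the new `labels(pool)` helper resolves each `config_labels` entry via `pool.db_config` on Rails 6.1+ and via `pool.spec.config` on older versions, exporting it as a `dbconfig_*` label. The keys shown are assumptions; pick ones your database config actually defines:

```ruby
# Hypothetical initializer (not part of the diff).
require 'prometheus_exporter/instrumentation'
PrometheusExporter::Instrumentation::ActiveRecord.start(
  custom_labels: { type: "web" },      # added to every active_record metric
  config_labels: [:database, :host]    # exported as dbconfig_database, dbconfig_host
)
```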
data/lib/prometheus_exporter/instrumentation/delayed_job.rb CHANGED
@@ -13,8 +13,8 @@ module PrometheusExporter::Instrumentation
     callbacks do |lifecycle|
       lifecycle.around(:invoke_job) do |job, *args, &block|
         max_attempts = Delayed::Worker.max_attempts
-        enqueued_count = Delayed::Job.count
-        pending_count = Delayed::Job.where(attempts: 0, locked_at: nil).count
+        enqueued_count = Delayed::Job.where(queue: job.queue).count
+        pending_count = Delayed::Job.where(attempts: 0, locked_at: nil, queue: job.queue).count
         instrumenter.call(job, max_attempts, enqueued_count, pending_count, *args, &block)
       end
     end
@@ -41,6 +41,7 @@ module PrometheusExporter::Instrumentation
     @client.send_json(
       type: "delayed_job",
       name: job.handler.to_s.match(JOB_CLASS_REGEXP).to_a[1].to_s,
+      queue_name: job.queue,
       success: success,
       duration: duration,
       attempts: attempts,
data/lib/prometheus_exporter/instrumentation/method_profiler.rb CHANGED
@@ -5,6 +5,7 @@ module PrometheusExporter::Instrumentation; end
 
 class PrometheusExporter::Instrumentation::MethodProfiler
   def self.patch(klass, methods, name)
+    patch_source_line = __LINE__ + 3
     patches = methods.map do |method_name|
       <<~RUBY
       unless defined?(#{method_name}__mp_unpatched)
@@ -26,7 +27,7 @@ class PrometheusExporter::Instrumentation::MethodProfiler
       RUBY
     end.join("\n")
 
-    klass.class_eval patches
+    klass.class_eval patches, __FILE__, patch_source_line
  end
 
   def self.transfer
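
For illustration, a self-contained sketch (names are illustrative, not from the gem) of why the change above threads `__FILE__` and a line number into `class_eval`: generated methods then report a usable `source_location` instead of `(eval)`:

```ruby
# Demonstrates class_eval with an explicit file/line pair.
code = <<~RUBY
  def slow_op
    sleep 0.01
  end
RUBY
first_body_line = __LINE__ - 4   # line of "def slow_op" in the heredoc above
String.class_eval(code, __FILE__, first_body_line)
# Prints a real [file, line] pair rather than ["(eval)", 1],
# which is what MethodProfiler's patched methods gain here.
puts String.instance_method(:slow_op).source_location.inspect
```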
data/lib/prometheus_exporter/instrumentation/process.rb CHANGED
@@ -27,7 +27,7 @@ module PrometheusExporter::Instrumentation
       metric = process_collector.collect
       client.send_json metric
     rescue => e
-      STDERR.puts("Prometheus Exporter Failed To Collect Process Stats #{e}")
+      client.logger.error("Prometheus Exporter Failed To Collect Process Stats #{e}")
     ensure
       sleep frequency
     end