promenade 0.1.10 → 0.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 6d222c254996b462ac7314436b4cc4c59806a3beedb90d77e761f0f771604fc0
- data.tar.gz: 23177e7fe90c5b80c8425a1cd86bc2b85ac4a40756957a4a2788ccc97cfeecb3
+ metadata.gz: 336c06ecdc7505702a9adfbb8b826b3ea3bcfe386f1f8e907bc5532dfeff1d62
+ data.tar.gz: 3af09e7c9f83d39b638086d7d1e285bded62e92086412fdc575f47dac9554794
  SHA512:
- metadata.gz: 6588b702827ced64b962fd8ecd88febcf6daa74a9cf11c73ad9cbb939595899bc28d516bb3e5584d7b8f1681fc762e148b3d62f15bd8c3618b12b32e01dd7a39
- data.tar.gz: 839d7696d71185bb2ee883fff46aab962e7e0e4458fd13705091838a93c3e98fdf42df9783436729fd5354165f73045e039e7f81ad447c3241ceea96b664fbcd
+ metadata.gz: 23e3dbdbfebc0661786c8643fc8e1b85965268a8b7287ec385d9114275c7aff6548e457d539fa9efc87d19d146a654c5f5f3db045eaf78146be177ba62814a8a
+ data.tar.gz: 63e70c2788ff665529e4bc9dcd610a48962f7a9f7722c6558d224bd028850b8ff672e10a17081c8459c73371a98fea8ff43dc2b58f2660c1427076869775a45d
@@ -1,5 +1,6 @@
  sudo: false
  language: ruby
  rvm:
- - 2.5.1
- before_install: gem install bundler -v 1.16.2
+ - 2.5.5
+ - 2.6.3
+ before_install: gem install bundler -v 2.0.2
@@ -1,7 +1,7 @@
  PATH
  remote: .
  specs:
- promenade (0.1.10)
+ promenade (0.2.0)
  activesupport
  prometheus-client-mmap (~> 0.9.3)
  rack
@@ -9,13 +9,14 @@ PATH
  GEM
  remote: https://rubygems.org/
  specs:
- activesupport (5.2.2)
+ activesupport (6.0.0)
  concurrent-ruby (~> 1.0, >= 1.0.2)
  i18n (>= 0.7, < 2)
  minitest (~> 5.1)
  tzinfo (~> 1.1)
+ zeitwerk (~> 2.1, >= 2.1.8)
  ast (2.4.0)
- backports (3.11.4)
+ backports (3.15.0)
  binding_of_caller (0.8.0)
  debug_inspector (>= 0.0.1)
  climate_control (0.2.0)
@@ -24,87 +25,90 @@ GEM
  simplecov
  url
  coderay (1.1.2)
- concurrent-ruby (1.1.3)
+ concurrent-ruby (1.1.5)
  debug_inspector (0.0.3)
- deep-cover (0.7.0)
- bundler
- deep-cover-core (= 0.7.0)
+ deep-cover (0.7.5)
+ deep-cover-core (= 0.7.5)
  highline
  thor (>= 0.20.3)
  with_progress
- deep-cover-core (0.7.0)
+ deep-cover-core (0.7.5)
  backports (>= 3.11.0)
  binding_of_caller
- parser (~> 2.5.0)
+ parser (>= 2.5, < 2.7)
  pry
  term-ansicolor
  terminal-table
  diff-lcs (1.3)
- docile (1.3.1)
- highline (2.0.0)
- i18n (1.1.1)
+ docile (1.3.2)
+ highline (2.0.2)
+ i18n (1.6.0)
  concurrent-ruby (~> 1.0)
- jaro_winkler (1.5.1)
- json (2.1.0)
+ jaro_winkler (1.5.3)
+ json (2.2.0)
  method_source (0.9.2)
  minitest (5.11.3)
- parallel (1.12.1)
- parser (2.5.3.0)
+ parallel (1.17.0)
+ parser (2.6.3.0)
  ast (~> 2.4.0)
- powerpack (0.1.2)
- prometheus-client-mmap (0.9.5)
+ prometheus-client-mmap (0.9.9)
  pry (0.12.2)
  coderay (~> 1.1.0)
  method_source (~> 0.9.0)
- rack (2.0.6)
+ rack (2.0.7)
  rainbow (3.0.0)
  rake (10.5.0)
  rspec (3.8.0)
  rspec-core (~> 3.8.0)
  rspec-expectations (~> 3.8.0)
  rspec-mocks (~> 3.8.0)
- rspec-core (3.8.0)
+ rspec-core (3.8.2)
  rspec-support (~> 3.8.0)
- rspec-expectations (3.8.2)
+ rspec-expectations (3.8.4)
  diff-lcs (>= 1.2.0, < 2.0)
  rspec-support (~> 3.8.0)
- rspec-mocks (3.8.0)
+ rspec-mocks (3.8.1)
  diff-lcs (>= 1.2.0, < 2.0)
  rspec-support (~> 3.8.0)
- rspec-support (3.8.0)
- rubocop (0.61.1)
+ rspec-support (3.8.2)
+ rubocop (0.74.0)
  jaro_winkler (~> 1.5.1)
  parallel (~> 1.10)
- parser (>= 2.5, != 2.5.1.1)
- powerpack (~> 0.1)
+ parser (>= 2.6)
  rainbow (>= 2.2.2, < 4.0)
  ruby-progressbar (~> 1.7)
- unicode-display_width (~> 1.4.0)
- ruby-progressbar (1.10.0)
- simplecov (0.16.1)
+ unicode-display_width (>= 1.4.0, < 1.7)
+ rubocop-performance (1.4.1)
+ rubocop (>= 0.71.0)
+ rubocop-rails (2.3.0)
+ rack (>= 1.1)
+ rubocop (>= 0.72.0)
+ ruby-progressbar (1.10.1)
+ simplecov (0.17.0)
  docile (~> 1.1)
  json (>= 1.8, < 3)
  simplecov-html (~> 0.10.0)
  simplecov-html (0.10.2)
- term-ansicolor (1.7.0)
+ term-ansicolor (1.7.1)
  tins (~> 1.0)
  terminal-table (1.8.0)
  unicode-display_width (~> 1.1, >= 1.1.1)
  thor (0.20.3)
  thread_safe (0.3.6)
- tins (1.20.2)
+ tins (1.21.1)
  tzinfo (1.2.5)
  thread_safe (~> 0.1)
- unicode-display_width (1.4.0)
+ unicode-display_width (1.6.0)
  url (0.3.2)
  with_progress (1.0.1)
  ruby-progressbar (~> 1.4)
+ zeitwerk (2.1.10)

  PLATFORMS
  ruby

  DEPENDENCIES
- bundler (~> 1.16)
+ bundler (~> 2.0)
  climate_control
  codecov
  deep-cover
@@ -112,7 +116,9 @@ DEPENDENCIES
  rake (~> 10.0)
  rspec (~> 3.0)
  rubocop
+ rubocop-performance
+ rubocop-rails
  simplecov

  BUNDLED WITH
- 1.16.6
+ 2.0.2
data/README.md CHANGED
@@ -4,13 +4,13 @@
  [![Gem Version](https://badge.fury.io/rb/promenade.svg)](https://badge.fury.io/rb/promenade)
  [![codecov](https://codecov.io/gh/errm/promenade/branch/master/graph/badge.svg)](https://codecov.io/gh/errm/promenade)

- Promenade is a libary to simplify instrumenting Ruby applications with prometheus.
+ Promenade is a library to simplify instrumenting Ruby applications with Prometheus.

  It is currently under development.

  ## Usage

- Add promenade to your Gemfile:
+ Add promenade to your Gemfle:

  ```
  gem "promenade"
@@ -20,7 +20,7 @@ gem "promenade"

  Promenade includes some built in instrumentation that can be used by requiring it (for example in an initializer).

- Currently there is just support for [ruby-kafka](https://github.com/zendesk/ruby-kafka), but I plan to support other things soon.
+ Currently there is support for [ruby-kafka](https://github.com/zendesk/ruby-kafka), but I plan to support other things soon.

  ```
  # Instrument the ruby-kafka libary
@@ -29,35 +29,31 @@ require "promenade/kafka"

  ### Instrumentation DSL

- Promenade makes recording prometheus metrics from your own code a little simpler with a DSL of sorts.
+ Promenade makes recording Prometheus metrics from your own code a little simpler with a DSL of sorts.

- `Promenade::Helper` includes some class macros for defining your own metrics, and a metric method you can use to record metrics.
+ `Promenade` includes some methods for defining your own metrics, and a metric method you can use to record your metrics.

  #### Counter

  A counter is a metric that exposes a sum or tally of things.

  ```ruby
- require "promenade/helper"
-
  class WidgetService
- include ::Promenade::Helper
-
- counter :widgets_created do
+ Promenade.counter :widgets_created do
  doc "Records how many widgets are created"
  end

  def create
  # Widget creation code :)
- metric(:widgets_created).increment
+ Promenade.metric(:widgets_created).increment

  # You can also add extra labels as you set increment counters
- metric(:widgets_created).increment({ type: "guinness" })
+ Promenade.metric(:widgets_created).increment({ type: "guinness" })
  end

  def batch_create
  You can increment by more than 1 at a time if you need
- metric(:widgets_created).increment({ type: "guinness" }, 100)
+ Promenade.metric(:widgets_created).increment({ type: "guinness" }, 100)
  end
  end
  ```
@@ -67,19 +63,15 @@ end
  A gauge is a metric that exposes an instantaneous value or some snapshot of a changing value.

  ```ruby
- require "promenade/helper"
-
  class Thermometer
- include ::Promenade::Helper
-
- gauge :room_temperature_celsius do
+ Promenade.gauge :room_temperature_celsius do
  doc "Records room temprature"
  end

  def take_mesurements
- metric(:room_temperature_celsius).set({ room: "lounge" }, 22.3)
- metric(:room_temperature_celsius).set({ room: "kitchen" }, 25.45)
- metric(:room_temperature_celsius).set({ room: "broom_cupboard" }, 15.37)
+ Promenade.metric(:room_temperature_celsius).set({ room: "lounge" }, 22.3)
+ Promenade.metric(:room_temperature_celsius).set({ room: "kitchen" }, 25.45)
+ Promenade.metric(:room_temperature_celsius).set({ room: "broom_cupboard" }, 15.37)
  end
  end
  ```
@@ -91,12 +83,8 @@ response sizes) and counts them in configurable buckets. It also provides a sum
  of all observed values.

  ```ruby
- require "promenade/helper"
-
  class Calculator
- include ::Promenade::Helper
-
- histogram :calculator_time_taken do
+ Promenade.histogram :calculator_time_taken do
  doc "Records how long it takes to do the adding"
  # promenade also has some bucket presets like :network and :memory for common usecases
  buckets [0.25, 0.5, 1, 2, 4]
@@ -107,7 +95,7 @@ class Calculator
  # Some time consuming addition
  end

- metric(:calculator_time_taken).observe({ operation: "addition"}, timing)
+ Promenade.metric(:calculator_time_taken).observe({ operation: "addition"}, timing)
  end
  end
  ```
@@ -117,12 +105,8 @@ end
  Summary is similar to a histogram, but for when you just care about percentile values. Often useful for timings.

  ```ruby
- require "promenade/helper"
-
  class ApiClient
- include ::Promenade::Helper
-
- summary :api_client_http_timing do
+ Promenade.summary :api_client_http_timing do
  doc "record how long requests to the api are taking"
  end

@@ -131,7 +115,7 @@ class ApiClient
  # Makes a network call
  end

- metric(:api_client_http_timing).observe({ method: "get", path: "/api/v1/users" }, timing)
+ Promenade.metric(:api_client_http_timing).observe({ method: "get", path: "/api/v1/users" }, timing)
  end
  end
  ```
@@ -142,9 +126,9 @@ Because promenade is based on prometheus-client you can add the `Prometheus::Cli

  There is also a stand alone exporter that can be run with the `promenade` command.

- This is ideal if you are worried about acidently exposing your metrics, are concerned about the performance impact prometheus scrapes might have on your application, or for applications without a webserver (like background processing jobs). It does mean that you have another process to manage on your server though 🤷.
+ This is ideal if you are worried about accidentally exposing your metrics, are concerned about the performance impact prometheus scrapes might have on your application, or for applications without a web server (like background processing jobs). It does mean that you have another process to manage on your server though 🤷.

- The exporter runs by default on port `9394` and the metrics are avaible at the standard path of `/metrics`, the standalone exporter is congfigured to use gzip.
+ The exporter runs by default on port `9394` and the metrics are available at the standard path of `/metrics`, the stand-alone exporter is configured to use gzip.

  ### Configuration

@@ -156,7 +140,7 @@ In a typical development environment there should be nothing for you to do. Prom

  In a production environment you should try to store the state files on tmpfs for performance, you can configure the path that promenade will write to by setting the `PROMETHEUS_MULTIPROC_DIR` environment variable.

- If you are running the standalone exporter, you may also set the `PORT` environment variable to bind to a port other than the default (`9394`).
+ If you are running the stand-alone exporter, you may also set the `PORT` environment variable to bind to a port other than the default (`9394`).

  ## Development

data/Rakefile CHANGED
@@ -12,3 +12,5 @@ task :clean do
  end

  task spec: :clean
+
+ task release: :spec
@@ -9,7 +9,12 @@ Promenade.setup
  app = Rack::Builder.app do
  use Rack::Deflater
  use ::Prometheus::Client::Rack::Exporter
+ map "/health" do
+ run lambda { |_env| [200, { "Content-Type" => "text/plain" }, ["OK"]] }
+ end
  run ->(_env) { [404, {}, ["visit /metrics for metrics"]] }
  end

- Rack::Handler::WEBrick.run app, Port: ENV.fetch("PORT", "9394")
+ Signal.trap("TERM") { Rack::Handler::WEBrick.shutdown }
+
+ Rack::Handler::WEBrick.run app, Port: ENV.fetch("PORT", "9394"), Host: "0.0.0.0"
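The exporter change above adds a plain-text `/health` route, binds WEBrick to `0.0.0.0`, and shuts down cleanly on `TERM`, which makes the stand-alone exporter friendlier to container orchestration. A minimal sketch of how a readiness probe might exercise the new route, assuming the exporter is running locally on the default port:

```ruby
require "net/http"

# Hypothetical readiness check against the stand-alone exporter started by
# the `promenade` command; assumes it is listening on the default port 9394.
uri = URI("http://127.0.0.1:9394/health")
response = Net::HTTP.get_response(uri)

# The new /health route responds 200 with a plain-text "OK" body.
abort "exporter not healthy" unless response.is_a?(Net::HTTPOK) && response.body == "OK"
puts "exporter healthy"
```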
@@ -1,3 +1,18 @@
  require "promenade/version"
  require "promenade/setup"
  require "promenade/railtie" if defined? ::Rails::Railtie
+ require "promenade/prometheus"
+
+ module Promenade
+ class << self
+ %i(gauge counter summary histogram).each do |type|
+ define_method type do |*args, &block|
+ Promenade::Prometheus.define_metric(type, *args, &block)
+ end
+ end
+
+ def metric(name)
+ Promenade::Prometheus.metric(name)
+ end
+ end
+ end
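With the new top-level module above, defining and recording metrics no longer requires mixing a helper into every class: `Promenade.gauge`, `Promenade.counter`, `Promenade.summary`, and `Promenade.histogram` delegate to `Promenade::Prometheus.define_metric`, and `Promenade.metric` looks a metric back up by name. A rough usage sketch based on this diff (the metric name and label here are made up for illustration):

```ruby
require "promenade"

# Define a counter once, at load time.
Promenade.counter :jobs_processed do
  doc "Number of background jobs processed"
end

# Record against it anywhere, without including a helper module.
Promenade.metric(:jobs_processed).increment({ queue: "default" })
```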
@@ -5,23 +5,23 @@ module Promenade
  class AsyncProducerSubscriber < Subscriber
  attach_to "async_producer.kafka"

- gauge :kafka_async_producer_queue_size do
+ Promenade.gauge :kafka_async_producer_queue_size do
  doc "Size of Kafka async producer queue"
  end

- gauge :kafka_async_producer_max_queue_size do
+ Promenade.gauge :kafka_async_producer_max_queue_size do
  doc "Max size of Kafka async producer queue"
  end

- gauge :kafka_async_producer_queue_fill_ratio do
+ Promenade.gauge :kafka_async_producer_queue_fill_ratio do
  doc "Size of Kafka async producer queue"
  end

- counter :kafka_async_producer_buffer_overflows do
+ Promenade.counter :kafka_async_producer_buffer_overflows do
  doc "Count of buffer overflows"
  end

- counter :kafka_async_producer_dropped_messages do
+ Promenade.counter :kafka_async_producer_dropped_messages do
  doc "Count of dropped messages"
  end

@@ -29,22 +29,22 @@ module Promenade
  labels = get_labels(event)
  queue_size = event.payload.fetch(:queue_size)
  max_queue_size = event.payload.fetch(:max_queue_size)
- queue_fill_ratio = queue_size.to_f / max_queue_size.to_f
+ queue_fill_ratio = queue_size.to_f / max_queue_size

- metric(:kafka_async_producer_queue_size).set(labels, queue_size)
- metric(:kafka_async_producer_max_queue_size).set(labels, max_queue_size)
- metric(:kafka_async_producer_queue_fill_ratio).set(labels, queue_fill_ratio)
+ Promenade.metric(:kafka_async_producer_queue_size).set(labels, queue_size)
+ Promenade.metric(:kafka_async_producer_max_queue_size).set(labels, max_queue_size)
+ Promenade.metric(:kafka_async_producer_queue_fill_ratio).set(labels, queue_fill_ratio)
  end

  def buffer_overflow(event)
- metric(:kafka_async_producer_buffer_overflows).increment(get_labels(event))
+ Promenade.metric(:kafka_async_producer_buffer_overflows).increment(get_labels(event))
  end

  def drop_messages(event)
  client = event.payload.fetch(:client_id)
  message_count = event.payload.fetch(:message_count)

- metric(:kafka_async_producer_dropped_messages).increment({ client: client }, message_count)
+ Promenade.metric(:kafka_async_producer_dropped_messages).increment({ client: client }, message_count)
  end
  end
  end
@@ -5,24 +5,24 @@ module Promenade
  class ConnectionSubscriber < Subscriber
  attach_to "connection.kafka"

- histogram :kafka_connection_latency do
+ Promenade.histogram :kafka_connection_latency do
  doc "Request latency"
  buckets :network
  end

- counter :kafka_connection_calls do
+ Promenade.counter :kafka_connection_calls do
  doc "Count of calls made to Kafka broker"
  end

- summary :kafka_connection_request_size do
+ Promenade.summary :kafka_connection_request_size do
  doc "Average size of requests made to kafka"
  end

- summary :kafka_connection_response_size do
+ Promenade.summary :kafka_connection_response_size do
  doc "Average size of responses made by kafka"
  end

- counter :kafka_connection_errors do
+ Promenade.counter :kafka_connection_errors do
  doc "Count of Kafka connection errors"
  end

@@ -33,13 +33,13 @@ module Promenade
  broker: event.payload.fetch(:broker_host),
  }

- metric(:kafka_connection_calls).increment(labels)
- metric(:kafka_connection_latency).observe(labels, event.duration)
+ Promenade.metric(:kafka_connection_calls).increment(labels)
+ Promenade.metric(:kafka_connection_latency).observe(labels, event.duration)

- metric(:kafka_connection_request_size).observe(labels, event.payload.fetch(:request_size, 0))
- metric(:kafka_connection_response_size).observe(labels, event.payload.fetch(:response_size, 0))
+ Promenade.metric(:kafka_connection_request_size).observe(labels, event.payload.fetch(:request_size, 0))
+ Promenade.metric(:kafka_connection_response_size).observe(labels, event.payload.fetch(:response_size, 0))

- metric(:kafka_connection_errors).increment if event.payload.key?(:exception)
+ Promenade.metric(:kafka_connection_errors).increment(labels) if event.payload.key?(:exception)
  end
  end
  end
@@ -5,64 +5,68 @@ module Promenade
  class ConsumerSubscriber < Subscriber
  attach_to "consumer.kafka"

- gauge :kafka_consumer_time_lag do
+ Promenade.gauge :kafka_consumer_time_lag do
  doc "Lag between message create and consume time"
  end

- gauge :kafka_consumer_ofset_lag do
+ Promenade.gauge :kafka_consumer_ofset_lag do
  doc "Lag between message create and consume time"
  end

- histogram :kafka_consumer_message_processing_latency do
+ Promenade.histogram :kafka_consumer_message_processing_latency do
  doc "Consumer message processing latency"
  buckets :network
  end

- counter :kafka_consumer_messages_processed do
+ Promenade.counter :kafka_consumer_messages_processed do
  doc "Messages processed by this consumer"
  end

- counter :kafka_consumer_message_processing_errors do
+ Promenade.counter :kafka_consumer_messages_fetched do
+ doc "Messages fetched by this consumer"
+ end
+
+ Promenade.counter :kafka_consumer_message_processing_errors do
  doc "Consumer errors while processing a message"
  end

- histogram :kafka_consumer_batch_processing_latency do
+ Promenade.histogram :kafka_consumer_batch_processing_latency do
  doc "Consumer batch processing latency"
  buckets :network
  end

- counter :kafka_consumer_batch_processing_errors do
+ Promenade.counter :kafka_consumer_batch_processing_errors do
  doc "Consumer errors while processing a batch"
  end

- histogram :kafka_consumer_join_group do
+ Promenade.histogram :kafka_consumer_join_group do
  doc "Time taken to join"
  buckets :network
  end

- counter :kafka_consumer_join_group_errors do
+ Promenade.counter :kafka_consumer_join_group_errors do
  doc "Errors joining the group"
  end

- histogram :kafka_consumer_sync_group do
+ Promenade.histogram :kafka_consumer_sync_group do
  doc "Time taken to sync"
  buckets :network
  end

- counter :kafka_consumer_sync_group_errors do
+ Promenade.counter :kafka_consumer_sync_group_errors do
  doc "Errors syncing the group"
  end

- histogram :kafka_consumer_leave_group do
+ Promenade.histogram :kafka_consumer_leave_group do
  doc "Time taken to leave group"
  buckets :network
  end

- counter :kafka_consumer_leave_group_errors do
+ Promenade.counter :kafka_consumer_leave_group_errors do
  doc "Errors leaving the group"
  end

- histogram :kafka_consumer_pause_duration do
+ Promenade.histogram :kafka_consumer_pause_duration do
  doc "Time taken to leave group"
  buckets :network
  end
@@ -74,53 +78,62 @@ module Promenade
  time_lag = create_time && ((Time.now.utc - create_time) * 1000).to_i

  if event.payload.key?(:exception)
- metric(:kafka_consumer_message_processing_errors).increment(labels)
+ Promenade.metric(:kafka_consumer_message_processing_errors).increment(labels)
  else
- metric(:kafka_consumer_messages_processed).increment(labels)
- metric(:kafka_consumer_message_processing_latency).observe(labels, event.duration)
+ Promenade.metric(:kafka_consumer_messages_processed).increment(labels)
+ Promenade.metric(:kafka_consumer_message_processing_latency).observe(labels, event.duration)
  end

- metric(:kafka_consumer_ofset_lag).set(labels, offset_lag)
+ Promenade.metric(:kafka_consumer_ofset_lag).set(labels, offset_lag)

  # Not all messages have timestamps.
- metric(:kafka_consumer_time_lag).set(labels, time_lag) if time_lag
+ Promenade.metric(:kafka_consumer_time_lag).set(labels, time_lag) if time_lag
  end

  def process_batch(event) # rubocop:disable Metrics/AbcSize
  labels = get_labels(event)
- lag = event.payload.fetch(:offset_lag)
+ offset_lag = event.payload.fetch(:offset_lag)
  messages = event.payload.fetch(:message_count)

  if event.payload.key?(:exception)
- metric(:kafka_consumer_batch_processing_errors).increment(labels)
+ Promenade.metric(:kafka_consumer_batch_processing_errors).increment(labels)
  else
- metric(:kafka_consumer_messages_processed).increment(labels, messages)
- metric(:kafka_consumer_batch_processing_latency).observe(labels, event.duration)
+ Promenade.metric(:kafka_consumer_messages_processed).increment(labels, messages)
+ Promenade.metric(:kafka_consumer_batch_processing_latency).observe(labels, event.duration)
  end

- metric(:kafka_consumer_ofset_lag).set(labels, lag)
+ Promenade.metric(:kafka_consumer_ofset_lag).set(labels, offset_lag)
+ end
+
+ def fetch_batch(event)
+ labels = get_labels(event)
+ offset_lag = event.payload.fetch(:offset_lag)
+ messages = event.payload.fetch(:message_count)
+
+ Promenade.metric(:kafka_consumer_messages_fetched).increment(labels, messages)
+ Promenade.metric(:kafka_consumer_ofset_lag).set(labels, offset_lag)
  end

  def join_group(event)
  labels = group_labels(event)
- metric(:kafka_consumer_join_group).observe(labels, event.duration)
- metric(:kafka_consumer_join_group_errors).increment(labels) if event.payload.key?(:exception)
+ Promenade.metric(:kafka_consumer_join_group).observe(labels, event.duration)
+ Promenade.metric(:kafka_consumer_join_group_errors).increment(labels) if event.payload.key?(:exception)
  end

  def sync_group(event)
  labels = group_labels(event)
- metric(:kafka_consumer_sync_group).observe(labels, event.duration)
- metric(:kafka_consumer_sync_group_errors).increment(labels) if event.payload.key?(:exception)
+ Promenade.metric(:kafka_consumer_sync_group).observe(labels, event.duration)
+ Promenade.metric(:kafka_consumer_sync_group_errors).increment(labels) if event.payload.key?(:exception)
  end

  def leave_group(event)
  labels = group_labels(event)
- metric(:kafka_consumer_leave_group).observe(labels, event.duration)
- metric(:kafka_consumer_leave_group_errors).increment(labels) if event.payload.key?(:exception)
+ Promenade.metric(:kafka_consumer_leave_group).observe(labels, event.duration)
+ Promenade.metric(:kafka_consumer_leave_group_errors).increment(labels) if event.payload.key?(:exception)
  end

  def pause_status(event)
- metric(:kafka_consumer_pause_duration).observe(get_labels(event), event.payload.fetch(:duration))
+ Promenade.metric(:kafka_consumer_pause_duration).observe(get_labels(event), event.payload.fetch(:duration))
  end

  private
@@ -5,7 +5,7 @@ module Promenade
  class FetcherSubscriber < Subscriber
  attach_to "fetcher.kafka"

- gauge :kafka_fetcher_queue_size do
+ Promenade.gauge :kafka_fetcher_queue_size do
  doc "Fetcher queue size"
  end

@@ -14,7 +14,7 @@ module Promenade
  client = event.payload.fetch(:client_id)
  group_id = event.payload.fetch(:group_id)

- metric(:kafka_fetcher_queue_size).set({ client: client, group: group_id }, queue_size)
+ Promenade.metric(:kafka_fetcher_queue_size).set({ client: client, group: group_id }, queue_size)
  end
  end
  end
@@ -5,59 +5,59 @@ module Promenade
  class ProducerSubscriber < Subscriber
  attach_to "producer.kafka"

- counter :kafka_producer_messages do
+ Promenade.counter :kafka_producer_messages do
  doc "Number of messages written to Kafka producer"
  end

- histogram :kafka_producer_message_size do
+ Promenade.histogram :kafka_producer_message_size do
  doc "Historgram of message sizes written to Kafka producer"
  buckets :memory
  end

- gauge :kafka_producer_buffer_size do
+ Promenade.gauge :kafka_producer_buffer_size do
  doc "The current size of the Kafka producer buffer, in messages"
  end

- gauge :kafka_producer_max_buffer_size do
+ Promenade.gauge :kafka_producer_max_buffer_size do
  doc "The max size of the Kafka producer buffer"
  end

- gauge :kafka_producer_buffer_fill_ratio do
+ Promenade.gauge :kafka_producer_buffer_fill_ratio do
  doc "The current ratio of Kafka producer buffer in use"
  end

- counter :kafka_producer_buffer_overflows do
+ Promenade.counter :kafka_producer_buffer_overflows do
  doc "A count of kafka producer buffer overflow errors"
  end

- counter :kafka_producer_delivery_errors do
+ Promenade.counter :kafka_producer_delivery_errors do
  doc "A count of kafka producer delivery errors"
  end

- histogram :kafka_producer_delivery_latency do
+ Promenade.histogram :kafka_producer_delivery_latency do
  doc "Kafka producer delivery latency histogram"
  buckets :network
  end

- counter :kafka_producer_delivered_messages do
+ Promenade.counter :kafka_producer_delivered_messages do
  doc "A count of the total messages delivered to Kafka"
  end

- histogram :kafka_producer_delivery_attempts do
+ Promenade.histogram :kafka_producer_delivery_attempts do
  doc "A count of the total message deliveries attempted"
  buckets [0, 6, 12, 18, 24, 30]
  end

- counter :kafka_producer_ack_messages do
+ Promenade.counter :kafka_producer_ack_messages do
  doc "Count of the number of messages Acked by Kafka"
  end

- histogram :kafka_producer_ack_latency do
+ Promenade.histogram :kafka_producer_ack_latency do
  doc "Delay between message being produced and Acked"
  buckets :network
  end

- counter :kafka_producer_ack_errors do
+ Promenade.counter :kafka_producer_ack_errors do
  doc "Count of the number of Kafka Ack errors"
  end

@@ -66,17 +66,17 @@ module Promenade
  message_size = event.payload.fetch(:message_size)
  buffer_size = event.payload.fetch(:buffer_size)
  max_buffer_size = event.payload.fetch(:max_buffer_size)
- buffer_fill_ratio = buffer_size.to_f / max_buffer_size.to_f
+ buffer_fill_ratio = buffer_size.to_f / max_buffer_size

- metric(:kafka_producer_messages).increment(labels)
- metric(:kafka_producer_message_size).observe(labels, message_size)
- metric(:kafka_producer_buffer_size).set(labels.slice(:client), buffer_size)
- metric(:kafka_producer_max_buffer_size).set(labels.slice(:client), max_buffer_size)
- metric(:kafka_producer_buffer_fill_ratio).set(labels.slice(:client), buffer_fill_ratio)
+ Promenade.metric(:kafka_producer_messages).increment(labels)
+ Promenade.metric(:kafka_producer_message_size).observe(labels, message_size)
+ Promenade.metric(:kafka_producer_buffer_size).set(labels.slice(:client), buffer_size)
+ Promenade.metric(:kafka_producer_max_buffer_size).set(labels.slice(:client), max_buffer_size)
+ Promenade.metric(:kafka_producer_buffer_fill_ratio).set(labels.slice(:client), buffer_fill_ratio)
  end

  def buffer_overflow(event)
- metric(:kafka_producer_buffer_overflows).increment(get_labels(event))
+ Promenade.metric(:kafka_producer_buffer_overflows).increment(get_labels(event))
  end

  def deliver_messages(event) # rubocop:disable Metrics/AbcSize
@@ -84,25 +84,25 @@ module Promenade
  message_count = event.payload.fetch(:delivered_message_count)
  attempts = event.payload.fetch(:attempts)

- metric(:kafka_producer_delivery_errors).increment(labels) if event.payload.key?(:exception)
- metric(:kafka_producer_delivery_latency).observe(labels, event.duration)
- metric(:kafka_producer_delivered_messages).increment(labels, message_count)
- metric(:kafka_producer_delivery_attempts).observe(labels, attempts)
+ Promenade.metric(:kafka_producer_delivery_errors).increment(labels) if event.payload.key?(:exception)
+ Promenade.metric(:kafka_producer_delivery_latency).observe(labels, event.duration)
+ Promenade.metric(:kafka_producer_delivered_messages).increment(labels, message_count)
+ Promenade.metric(:kafka_producer_delivery_attempts).observe(labels, attempts)
  end

  def ack_message(event)
  labels = get_labels(event)
  delay = event.payload.fetch(:delay)

- metric(:kafka_producer_ack_messages).increment(labels)
- metric(:kafka_producer_ack_latency).observe(labels, delay)
+ Promenade.metric(:kafka_producer_ack_messages).increment(labels)
+ Promenade.metric(:kafka_producer_ack_latency).observe(labels, delay)
  end

  def topic_error(event)
  client = event.payload.fetch(:client_id)
  topic = event.payload.fetch(:topic)

- metric(:kafka_producer_ack_errors).increment(client: client, topic: topic)
+ Promenade.metric(:kafka_producer_ack_errors).increment(client: client, topic: topic)
  end

  private
@@ -1,11 +1,9 @@
- require "promenade/helper"
  require "active_support/subscriber"
+ require "concurrent/utility/monotonic_time"

  module Promenade
  module Kafka
  class Subscriber < ActiveSupport::Subscriber
- include ::Promenade::Helper
-
  private

  def get_labels(event)
@@ -1,3 +1,3 @@
  module Promenade
- VERSION = "0.1.10".freeze
+ VERSION = "0.2.0".freeze
  end
@@ -26,11 +26,13 @@ Gem::Specification.new do |spec|
  spec.add_dependency "prometheus-client-mmap", "~> 0.9.3"
  spec.add_dependency "rack"

- spec.add_development_dependency "bundler", "~> 1.16"
+ spec.add_development_dependency "bundler", "~> 2.0"
  spec.add_development_dependency "climate_control"
  spec.add_development_dependency "deep-cover"
  spec.add_development_dependency "rake", "~> 10.0"
  spec.add_development_dependency "rspec", "~> 3.0"
  spec.add_development_dependency "rubocop"
+ spec.add_development_dependency "rubocop-performance"
+ spec.add_development_dependency "rubocop-rails"
  spec.add_development_dependency "simplecov"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: promenade
  version: !ruby/object:Gem::Version
- version: 0.1.10
+ version: 0.2.0
  platform: ruby
  authors:
  - Ed Robinson
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2018-12-10 00:00:00.000000000 Z
+ date: 2019-09-06 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: activesupport
@@ -58,14 +58,14 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '1.16'
+ version: '2.0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '1.16'
+ version: '2.0'
  - !ruby/object:Gem::Dependency
  name: climate_control
  requirement: !ruby/object:Gem::Requirement
@@ -136,6 +136,34 @@ dependencies:
  - - ">="
  - !ruby/object:Gem::Version
  version: '0'
+ - !ruby/object:Gem::Dependency
+ name: rubocop-performance
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0'
+ type: :development
+ prerelease: false
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0'
+ - !ruby/object:Gem::Dependency
+ name: rubocop-rails
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0'
+ type: :development
+ prerelease: false
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0'
  - !ruby/object:Gem::Dependency
  name: simplecov
  requirement: !ruby/object:Gem::Requirement
@@ -173,7 +201,6 @@ files:
  - bin/setup
  - exe/promenade
  - lib/promenade.rb
- - lib/promenade/helper.rb
  - lib/promenade/kafka.rb
  - lib/promenade/kafka/async_producer_subscriber.rb
  - lib/promenade/kafka/connection_subscriber.rb
@@ -205,8 +232,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubyforge_project:
- rubygems_version: 2.7.6
+ rubygems_version: 3.0.1
  signing_key:
  specification_version: 4
  summary: Promenade makes it simple to instrument Ruby apps for prometheus scraping
@@ -1,23 +0,0 @@
- require "promenade/prometheus"
-
- module Promenade
- module Helper
- extend ActiveSupport::Concern
-
- class_methods do
- %i(gauge counter summary histogram).each do |type|
- define_method type do |*args, &block|
- Promenade::Prometheus.define_metric(type, *args, &block)
- end
- end
-
- def metric(name)
- Promenade::Prometheus.metric(name)
- end
- end
-
- def metric(name)
- Promenade::Prometheus.metric(name)
- end
- end
- end
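The deleted file above removes `Promenade::Helper` entirely, so code that mixed it in needs updating when moving to 0.2.0. Based on the diffs in this release, a likely migration looks like the sketch below; the class and metric names are illustrative, not taken from the gem.

```ruby
# Before (0.1.10): the helper mixin provided class-level macros and #metric.
#
#   require "promenade/helper"
#
#   class Checkout
#     include ::Promenade::Helper
#
#     counter :orders_placed do
#       doc "Orders placed"
#     end
#
#     def call
#       metric(:orders_placed).increment
#     end
#   end

# After (0.2.0): define and record metrics through the Promenade module directly.
require "promenade"

class Checkout
  Promenade.counter :orders_placed do
    doc "Orders placed"
  end

  def call
    Promenade.metric(:orders_placed).increment
  end
end
```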