karafka 2.0.33 → 2.0.34

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 511cc39ce92e82706fcd6ac9c54b3416c7908cf5e9773ee15b972219046778ca
-   data.tar.gz: 737059dc4f9de33577f4883590f607da1ee4ee330ff9f5c18bd42dfe261c567d
+   metadata.gz: 36d890d825aaeaee5349dcc653d888da3a023c01a837864544a905db977569c4
+   data.tar.gz: be442485812a05a030bab33da31a8e2fda684add8c4d59a0af78f517bb2519bd
  SHA512:
-   metadata.gz: d91e5de5bddd7b9ff2e423c986eda7f8e795528dc5ffb7a3694c82a2a096f8b3ee31679ac221bb0da4cd6efc7efa55ab195f081fbd2cf393df7d21549d950cd9
-   data.tar.gz: dbea8026eb3385b6ee14a749b3bc0187f4e91e9e66b60f2e8ac74fcdd409ad238b8e6f7dd5e5d5fbb5f0cc36299795ed63b794f1dd4a2a2b53e26776a7523876
+   metadata.gz: d92be137485c436c1ed02435669785422e6e4da194ab19b97dca31f70b530fe7e0ae4e6b0c7c54895dbceca2d8fe4ef1bdcabdb287affe06e377942062777979
+   data.tar.gz: 0525b652373088a7a692134a6a4c89487e495e01934504d256a60ae7805c92f65a308d38c8d75ad806f39f5128d2d00865e10b1b4ae326c7dd60e7594c68558e
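
For reference, the digests above can be reproduced locally: a `.gem` file is a tar archive wrapping `metadata.gz` and `data.tar.gz`, so after fetching and unpacking the gem (e.g. `gem fetch karafka -v 2.0.34` followed by `tar -xf karafka-2.0.34.gem`), a short Ruby sketch can recompute them:

```ruby
require 'digest'

# Recompute the digests listed in checksums.yaml for comparison
%w[metadata.gz data.tar.gz].each do |file|
  puts "#{file} SHA256: #{Digest::SHA256.file(file).hexdigest}"
  puts "#{file} SHA512: #{Digest::SHA512.file(file).hexdigest}"
end
```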
checksums.yaml.gz.sig CHANGED
Binary file
data/CHANGELOG.md CHANGED
@@ -1,5 +1,13 @@
  # Karafka framework changelog
 
+ ## 2.0.34 (2023-03-04)
+ - [Improvement] Attach an `embedded` tag to Karafka processes started using the embedded API.
+ - [Change] Renamed `Datadog::Listener` to `Datadog::MetricsListener` for consistency. (#1124)
+
+ ### Upgrade notes
+
+ 1. Replace `Datadog::Listener` references with `Datadog::MetricsListener`.
+
  ## 2.0.33 (2023-02-24)
  - **[Feature]** Support `perform_all_later` in ActiveJob adapter for Rails `7.1+`
  - **[Feature]** Introduce ability to assign and re-assign tags in consumer instances. This can be used for extra instrumentation that is context aware.
@@ -99,7 +107,7 @@ class KarafkaApp < Karafka::App
  - [Improvement] Expand `LoggerListener` with `client.resume` notification.
  - [Improvement] Replace random anonymous subscription groups ids with stable ones.
  - [Improvement] Add `consumer.consume`, `consumer.revoke` and `consumer.shutting_down` notification events and move the revocation logic calling to strategies.
- - [Change] Rename job queue statistics `processing` key to `busy`. No changes needed because naming in the DataDog listener stays the same.
+ - [Change] Rename job queue statistics `processing` key to `busy`. No changes needed because naming in the DataDog listener stays the same.
  - [Fix] Fix proctitle listener state changes reporting on new states.
  - [Fix] Make sure all file descriptors are closed in the integration specs.
  - [Fix] Fix a case where empty subscription groups could leak into the execution flow.
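
A before/after sketch of the upgrade note above; the subscription call is the standard Karafka monitor API, and only the constant changes:

```ruby
# 2.0.33 and earlier
Karafka.monitor.subscribe(
  ::Karafka::Instrumentation::Vendors::Datadog::Listener.new
)

# 2.0.34 onwards (the old constant remains available as an alias)
Karafka.monitor.subscribe(
  ::Karafka::Instrumentation::Vendors::Datadog::MetricsListener.new
)
```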
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
  PATH
    remote: .
    specs:
-     karafka (2.0.33)
+     karafka (2.0.34)
        karafka-core (>= 2.0.12, < 3.0.0)
        thor (>= 0.20)
        waterdrop (>= 2.4.10, < 3.0.0)
@@ -19,7 +19,7 @@ GEM
        minitest (>= 5.1)
        tzinfo (~> 2.0)
      byebug (11.1.3)
-     concurrent-ruby (1.2.0)
+     concurrent-ruby (1.2.2)
      diff-lcs (1.5.0)
      docile (1.4.0)
      factory_bot (6.2.1)
@@ -61,8 +61,8 @@ GEM
      thor (1.2.1)
      tzinfo (2.0.6)
        concurrent-ruby (~> 1.0)
-     waterdrop (2.4.10)
-       karafka-core (>= 2.0.9, < 3.0.0)
+     waterdrop (2.4.11)
+       karafka-core (>= 2.0.12, < 3.0.0)
        zeitwerk (~> 2.3)
      zeitwerk (2.6.7)
 
data/lib/karafka/embedded.rb CHANGED
@@ -7,7 +7,10 @@ module Karafka
        # Starts Karafka without supervision and without ownership of signals in a background thread
        # so it won't interrupt other things running
        def start
-         Thread.new { Karafka::Server.start }
+         Thread.new do
+           Karafka::Process.tags.add(:execution_mode, 'embedded')
+           Karafka::Server.start
+         end
        end
 
        # Stops Karafka upon any event
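
For context, the embedded API starts a Karafka server inside an existing process rather than via the CLI. A minimal usage sketch, assuming a Puma-served Rails app (the Puma hooks are illustrative, not part of this diff):

```ruby
# config/puma.rb
on_worker_boot do
  # Boots the consumer in a background thread; as of 2.0.34 this thread
  # tags the process with execution_mode=embedded for instrumentation
  Karafka::Embedded.start
end

on_worker_shutdown do
  Karafka::Embedded.stop
end
```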
data/lib/karafka/instrumentation/vendors/datadog/listener.rb CHANGED
@@ -1,258 +1,15 @@
  # frozen_string_literal: true
 
+ require_relative 'metrics_listener'
+
  module Karafka
    module Instrumentation
      # Namespace for vendor specific instrumentation
      module Vendors
        # Datadog specific instrumentation
        module Datadog
-         # Listener that can be used to subscribe to Karafka to receive stats via StatsD
-         # and/or Datadog
-         #
-         # @note You need to set up the `dogstatsd-ruby` client and assign it
-         class Listener
-           include ::Karafka::Core::Configurable
-           extend Forwardable
-
-           def_delegators :config, :client, :rd_kafka_metrics, :namespace, :default_tags
-
-           # Value object for storing a single rdkafka metric publishing details
-           RdKafkaMetric = Struct.new(:type, :scope, :name, :key_location)
-
-           # Namespace under which the DD metrics should be published
-           setting :namespace, default: 'karafka'
-
-           # Datadog client that we should use to publish the metrics
-           setting :client
-
-           # Default tags we want to publish (for example hostname)
-           # Format as follows (example for hostname): `["host:#{Socket.gethostname}"]`
-           setting :default_tags, default: []
-
-           # All the rdkafka metrics we want to publish
-           #
-           # By default we publish quite a lot so this can be tuned
-           # Note that the ones with `_d` come from Karafka, not rdkafka or Kafka
-           setting :rd_kafka_metrics, default: [
-             # Client metrics
-             RdKafkaMetric.new(:count, :root, 'messages.consumed', 'rxmsgs_d'),
-             RdKafkaMetric.new(:count, :root, 'messages.consumed.bytes', 'rxmsg_bytes'),
-
-             # Broker metrics
-             RdKafkaMetric.new(:count, :brokers, 'consume.attempts', 'txretries_d'),
-             RdKafkaMetric.new(:count, :brokers, 'consume.errors', 'txerrs_d'),
-             RdKafkaMetric.new(:count, :brokers, 'receive.errors', 'rxerrs_d'),
-             RdKafkaMetric.new(:count, :brokers, 'connection.connects', 'connects_d'),
-             RdKafkaMetric.new(:count, :brokers, 'connection.disconnects', 'disconnects_d'),
-             RdKafkaMetric.new(:gauge, :brokers, 'network.latency.avg', %w[rtt avg]),
-             RdKafkaMetric.new(:gauge, :brokers, 'network.latency.p95', %w[rtt p95]),
-             RdKafkaMetric.new(:gauge, :brokers, 'network.latency.p99', %w[rtt p99]),
-
-             # Topics metrics
-             RdKafkaMetric.new(:gauge, :topics, 'consumer.lags', 'consumer_lag_stored'),
-             RdKafkaMetric.new(:gauge, :topics, 'consumer.lags_delta', 'consumer_lag_stored_d')
-           ].freeze
-
-           configure
-
-           # @param block [Proc] configuration block
-           def initialize(&block)
-             configure
-             setup(&block) if block
-           end
-
-           # @param block [Proc] configuration block
-           # @note We define this alias to be consistent with `WaterDrop#setup`
-           def setup(&block)
-             configure(&block)
-           end
-
-           # Hooks up to WaterDrop instrumentation for emitted statistics
-           #
-           # @param event [Karafka::Core::Monitoring::Event]
-           def on_statistics_emitted(event)
-             statistics = event[:statistics]
-             consumer_group_id = event[:consumer_group_id]
-
-             base_tags = default_tags + ["consumer_group:#{consumer_group_id}"]
-
-             rd_kafka_metrics.each do |metric|
-               report_metric(metric, statistics, base_tags)
-             end
-           end
-
-           # Increases the errors count by 1
-           #
-           # @param event [Karafka::Core::Monitoring::Event]
-           def on_error_occurred(event)
-             extra_tags = ["type:#{event[:type]}"]
-
-             if event.payload[:caller].respond_to?(:messages)
-               extra_tags += consumer_tags(event.payload[:caller])
-             end
-
-             count('error_occurred', 1, tags: default_tags + extra_tags)
-           end
-
-           # Reports how many messages we've polled and how much time we spent on it
-           #
-           # @param event [Karafka::Core::Monitoring::Event]
-           def on_connection_listener_fetch_loop_received(event)
-             time_taken = event[:time]
-             messages_count = event[:messages_buffer].size
-
-             consumer_group_id = event[:subscription_group].consumer_group_id
-
-             extra_tags = ["consumer_group:#{consumer_group_id}"]
-
-             histogram('listener.polling.time_taken', time_taken, tags: default_tags + extra_tags)
-             histogram('listener.polling.messages', messages_count, tags: default_tags + extra_tags)
-           end
-
-           # Here we report the majority of things related to processing as we have access to the
-           # consumer
-           # @param event [Karafka::Core::Monitoring::Event]
-           def on_consumer_consumed(event)
-             consumer = event.payload[:caller]
-             messages = consumer.messages
-             metadata = messages.metadata
-
-             tags = default_tags + consumer_tags(consumer)
-
-             count('consumer.messages', messages.count, tags: tags)
-             count('consumer.batches', 1, tags: tags)
-             gauge('consumer.offset', metadata.last_offset, tags: tags)
-             histogram('consumer.consumed.time_taken', event[:time], tags: tags)
-             histogram('consumer.batch_size', messages.count, tags: tags)
-             histogram('consumer.processing_lag', metadata.processing_lag, tags: tags)
-             histogram('consumer.consumption_lag', metadata.consumption_lag, tags: tags)
-           end
-
-           # @param event [Karafka::Core::Monitoring::Event]
-           def on_consumer_revoked(event)
-             tags = default_tags + consumer_tags(event.payload[:caller])
-
-             count('consumer.revoked', 1, tags: tags)
-           end
-
-           # @param event [Karafka::Core::Monitoring::Event]
-           def on_consumer_shutdown(event)
-             tags = default_tags + consumer_tags(event.payload[:caller])
-
-             count('consumer.shutdown', 1, tags: tags)
-           end
-
-           # Worker related metrics
-           # @param event [Karafka::Core::Monitoring::Event]
-           def on_worker_process(event)
-             jq_stats = event[:jobs_queue].statistics
-
-             gauge('worker.total_threads', Karafka::App.config.concurrency, tags: default_tags)
-             histogram('worker.processing', jq_stats[:busy], tags: default_tags)
-             histogram('worker.enqueued_jobs', jq_stats[:enqueued], tags: default_tags)
-           end
-
-           # We report this metric before and after processing for higher accuracy
-           # Without this, the utilization would not be fully reflected
-           # @param event [Karafka::Core::Monitoring::Event]
-           def on_worker_processed(event)
-             jq_stats = event[:jobs_queue].statistics
-
-             histogram('worker.processing', jq_stats[:busy], tags: default_tags)
-           end
-
-           private
-
-           %i[
-             count
-             gauge
-             histogram
-             increment
-             decrement
-           ].each do |metric_type|
-             class_eval <<~METHODS, __FILE__, __LINE__ + 1
-               def #{metric_type}(key, *args)
-                 client.#{metric_type}(
-                   namespaced_metric(key),
-                   *args
-                 )
-               end
-             METHODS
-           end
-
-           # Wraps metric name in listener's namespace
-           # @param metric_name [String] RdKafkaMetric name
-           # @return [String]
-           def namespaced_metric(metric_name)
-             "#{namespace}.#{metric_name}"
-           end
-
-           # Reports a given metric statistics to Datadog
-           # @param metric [RdKafkaMetric] metric value object
-           # @param statistics [Hash] hash with all the statistics emitted
-           # @param base_tags [Array<String>] base tags we want to start with
-           def report_metric(metric, statistics, base_tags)
-             case metric.scope
-             when :root
-               public_send(
-                 metric.type,
-                 metric.name,
-                 statistics.fetch(*metric.key_location),
-                 tags: base_tags
-               )
-             when :brokers
-               statistics.fetch('brokers').each_value do |broker_statistics|
-                 # Skip bootstrap nodes
-                 # Bootstrap nodes have nodeid -1, other nodes have positive
-                 # node ids
-                 next if broker_statistics['nodeid'] == -1
-
-                 public_send(
-                   metric.type,
-                   metric.name,
-                   broker_statistics.dig(*metric.key_location),
-                   tags: base_tags + ["broker:#{broker_statistics['nodename']}"]
-                 )
-               end
-             when :topics
-               statistics.fetch('topics').each do |topic_name, topic_values|
-                 topic_values['partitions'].each do |partition_name, partition_statistics|
-                   next if partition_name == '-1'
-                   # Skip until lag info is available
-                   next if partition_statistics['consumer_lag'] == -1
-
-                   public_send(
-                     metric.type,
-                     metric.name,
-                     partition_statistics.dig(*metric.key_location),
-                     tags: base_tags + [
-                       "topic:#{topic_name}",
-                       "partition:#{partition_name}"
-                     ]
-                   )
-                 end
-               end
-             else
-               raise ArgumentError, metric.scope
-             end
-           end
-
-           # Builds basic per consumer tags for publication
-           #
-           # @param consumer [Karafka::BaseConsumer]
-           # @return [Array<String>]
-           def consumer_tags(consumer)
-             messages = consumer.messages
-             metadata = messages.metadata
-             consumer_group_id = consumer.topic.consumer_group.id
-
-             [
-               "topic:#{metadata.topic}",
-               "partition:#{metadata.partition}",
-               "consumer_group:#{consumer_group_id}"
-             ]
-           end
-         end
+         # Alias to keep backwards compatibility
+         Listener = MetricsListener
        end
      end
    end
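
Because `Listener = MetricsListener` is a plain constant assignment, old references resolve to the new class, e.g. in a console:

```ruby
Karafka::Instrumentation::Vendors::Datadog::Listener
# => Karafka::Instrumentation::Vendors::Datadog::MetricsListener

Karafka::Instrumentation::Vendors::Datadog::Listener.new.class.name
# => "Karafka::Instrumentation::Vendors::Datadog::MetricsListener"
```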
data/lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb ADDED
@@ -0,0 +1,259 @@
+ # frozen_string_literal: true
+
+ module Karafka
+   module Instrumentation
+     # Namespace for vendor specific instrumentation
+     module Vendors
+       # Datadog specific instrumentation
+       module Datadog
+         # Listener that can be used to subscribe to Karafka to receive stats via StatsD
+         # and/or Datadog
+         #
+         # @note You need to set up the `dogstatsd-ruby` client and assign it
+         class MetricsListener
+           include ::Karafka::Core::Configurable
+           extend Forwardable
+
+           def_delegators :config, :client, :rd_kafka_metrics, :namespace, :default_tags
+
+           # Value object for storing a single rdkafka metric publishing details
+           RdKafkaMetric = Struct.new(:type, :scope, :name, :key_location)
+
+           # Namespace under which the DD metrics should be published
+           setting :namespace, default: 'karafka'
+
+           # Datadog client that we should use to publish the metrics
+           setting :client
+
+           # Default tags we want to publish (for example hostname)
+           # Format as follows (example for hostname): `["host:#{Socket.gethostname}"]`
+           setting :default_tags, default: []
+
+           # All the rdkafka metrics we want to publish
+           #
+           # By default we publish quite a lot so this can be tuned
+           # Note that the ones with `_d` come from Karafka, not rdkafka or Kafka
+           setting :rd_kafka_metrics, default: [
+             # Client metrics
+             RdKafkaMetric.new(:count, :root, 'messages.consumed', 'rxmsgs_d'),
+             RdKafkaMetric.new(:count, :root, 'messages.consumed.bytes', 'rxmsg_bytes'),
+
+             # Broker metrics
+             RdKafkaMetric.new(:count, :brokers, 'consume.attempts', 'txretries_d'),
+             RdKafkaMetric.new(:count, :brokers, 'consume.errors', 'txerrs_d'),
+             RdKafkaMetric.new(:count, :brokers, 'receive.errors', 'rxerrs_d'),
+             RdKafkaMetric.new(:count, :brokers, 'connection.connects', 'connects_d'),
+             RdKafkaMetric.new(:count, :brokers, 'connection.disconnects', 'disconnects_d'),
+             RdKafkaMetric.new(:gauge, :brokers, 'network.latency.avg', %w[rtt avg]),
+             RdKafkaMetric.new(:gauge, :brokers, 'network.latency.p95', %w[rtt p95]),
+             RdKafkaMetric.new(:gauge, :brokers, 'network.latency.p99', %w[rtt p99]),
+
+             # Topics metrics
+             RdKafkaMetric.new(:gauge, :topics, 'consumer.lags', 'consumer_lag_stored'),
+             RdKafkaMetric.new(:gauge, :topics, 'consumer.lags_delta', 'consumer_lag_stored_d')
+           ].freeze
+
+           configure
+
+           # @param block [Proc] configuration block
+           def initialize(&block)
+             configure
+             setup(&block) if block
+           end
+
+           # @param block [Proc] configuration block
+           # @note We define this alias to be consistent with `WaterDrop#setup`
+           def setup(&block)
+             configure(&block)
+           end
+
+           # Hooks up to WaterDrop instrumentation for emitted statistics
+           #
+           # @param event [Karafka::Core::Monitoring::Event]
+           def on_statistics_emitted(event)
+             statistics = event[:statistics]
+             consumer_group_id = event[:consumer_group_id]
+
+             base_tags = default_tags + ["consumer_group:#{consumer_group_id}"]
+
+             rd_kafka_metrics.each do |metric|
+               report_metric(metric, statistics, base_tags)
+             end
+           end
+
+           # Increases the errors count by 1
+           #
+           # @param event [Karafka::Core::Monitoring::Event]
+           def on_error_occurred(event)
+             extra_tags = ["type:#{event[:type]}"]
+
+             if event.payload[:caller].respond_to?(:messages)
+               extra_tags += consumer_tags(event.payload[:caller])
+             end
+
+             count('error_occurred', 1, tags: default_tags + extra_tags)
+           end
+
+           # Reports how many messages we've polled and how much time we spent on it
+           #
+           # @param event [Karafka::Core::Monitoring::Event]
+           def on_connection_listener_fetch_loop_received(event)
+             time_taken = event[:time]
+             messages_count = event[:messages_buffer].size
+
+             consumer_group_id = event[:subscription_group].consumer_group_id
+
+             extra_tags = ["consumer_group:#{consumer_group_id}"]
+
+             histogram('listener.polling.time_taken', time_taken, tags: default_tags + extra_tags)
+             histogram('listener.polling.messages', messages_count, tags: default_tags + extra_tags)
+           end
+
+           # Here we report the majority of things related to processing as we have access to the
+           # consumer
+           # @param event [Karafka::Core::Monitoring::Event]
+           def on_consumer_consumed(event)
+             consumer = event.payload[:caller]
+             messages = consumer.messages
+             metadata = messages.metadata
+
+             tags = default_tags + consumer_tags(consumer)
+
+             count('consumer.messages', messages.count, tags: tags)
+             count('consumer.batches', 1, tags: tags)
+             gauge('consumer.offset', metadata.last_offset, tags: tags)
+             histogram('consumer.consumed.time_taken', event[:time], tags: tags)
+             histogram('consumer.batch_size', messages.count, tags: tags)
+             histogram('consumer.processing_lag', metadata.processing_lag, tags: tags)
+             histogram('consumer.consumption_lag', metadata.consumption_lag, tags: tags)
+           end
+
+           # @param event [Karafka::Core::Monitoring::Event]
+           def on_consumer_revoked(event)
+             tags = default_tags + consumer_tags(event.payload[:caller])
+
+             count('consumer.revoked', 1, tags: tags)
+           end
+
+           # @param event [Karafka::Core::Monitoring::Event]
+           def on_consumer_shutdown(event)
+             tags = default_tags + consumer_tags(event.payload[:caller])
+
+             count('consumer.shutdown', 1, tags: tags)
+           end
+
+           # Worker related metrics
+           # @param event [Karafka::Core::Monitoring::Event]
+           def on_worker_process(event)
+             jq_stats = event[:jobs_queue].statistics
+
+             gauge('worker.total_threads', Karafka::App.config.concurrency, tags: default_tags)
+             histogram('worker.processing', jq_stats[:busy], tags: default_tags)
+             histogram('worker.enqueued_jobs', jq_stats[:enqueued], tags: default_tags)
+           end
+
+           # We report this metric before and after processing for higher accuracy
+           # Without this, the utilization would not be fully reflected
+           # @param event [Karafka::Core::Monitoring::Event]
+           def on_worker_processed(event)
+             jq_stats = event[:jobs_queue].statistics
+
+             histogram('worker.processing', jq_stats[:busy], tags: default_tags)
+           end
+
+           private
+
+           %i[
+             count
+             gauge
+             histogram
+             increment
+             decrement
+           ].each do |metric_type|
+             class_eval <<~METHODS, __FILE__, __LINE__ + 1
+               def #{metric_type}(key, *args)
+                 client.#{metric_type}(
+                   namespaced_metric(key),
+                   *args
+                 )
+               end
+             METHODS
+           end
+
+           # Wraps metric name in listener's namespace
+           # @param metric_name [String] RdKafkaMetric name
+           # @return [String]
+           def namespaced_metric(metric_name)
+             "#{namespace}.#{metric_name}"
+           end
+
+           # Reports a given metric statistics to Datadog
+           # @param metric [RdKafkaMetric] metric value object
+           # @param statistics [Hash] hash with all the statistics emitted
+           # @param base_tags [Array<String>] base tags we want to start with
+           def report_metric(metric, statistics, base_tags)
+             case metric.scope
+             when :root
+               public_send(
+                 metric.type,
+                 metric.name,
+                 statistics.fetch(*metric.key_location),
+                 tags: base_tags
+               )
+             when :brokers
+               statistics.fetch('brokers').each_value do |broker_statistics|
+                 # Skip bootstrap nodes
+                 # Bootstrap nodes have nodeid -1, other nodes have positive
+                 # node ids
+                 next if broker_statistics['nodeid'] == -1
+
+                 public_send(
+                   metric.type,
+                   metric.name,
+                   broker_statistics.dig(*metric.key_location),
+                   tags: base_tags + ["broker:#{broker_statistics['nodename']}"]
+                 )
+               end
+             when :topics
+               statistics.fetch('topics').each do |topic_name, topic_values|
+                 topic_values['partitions'].each do |partition_name, partition_statistics|
+                   next if partition_name == '-1'
+                   # Skip until lag info is available
+                   next if partition_statistics['consumer_lag'] == -1
+
+                   public_send(
+                     metric.type,
+                     metric.name,
+                     partition_statistics.dig(*metric.key_location),
+                     tags: base_tags + [
+                       "topic:#{topic_name}",
+                       "partition:#{partition_name}"
+                     ]
+                   )
+                 end
+               end
+             else
+               raise ArgumentError, metric.scope
+             end
+           end
+
+           # Builds basic per consumer tags for publication
+           #
+           # @param consumer [Karafka::BaseConsumer]
+           # @return [Array<String>]
+           def consumer_tags(consumer)
+             messages = consumer.messages
+             metadata = messages.metadata
+             consumer_group_id = consumer.topic.consumer_group.id
+
+             [
+               "topic:#{metadata.topic}",
+               "partition:#{metadata.partition}",
+               "consumer_group:#{consumer_group_id}"
+             ]
+           end
+         end
+       end
+     end
+   end
+ end
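
Since the listener is built on `Karafka::Core::Configurable`, each `setting` above can be overridden via the constructor block. A minimal configuration sketch (the StatsD host, port, and tag values are placeholders):

```ruby
require 'datadog/statsd' # dogstatsd-ruby
require 'socket'
require 'karafka/instrumentation/vendors/datadog/metrics_listener'

listener = ::Karafka::Instrumentation::Vendors::Datadog::MetricsListener.new do |config|
  # Client used to publish the metrics (see `setting :client`)
  config.client = Datadog::Statsd.new('localhost', 8125)
  # Tags attached to every published metric (see `setting :default_tags`)
  config.default_tags = ["host:#{Socket.gethostname}"]
  # Prefix for all published metric names (see `setting :namespace`)
  config.namespace = 'karafka'
end

# Hook the listener into Karafka's instrumentation bus
Karafka.monitor.subscribe(listener)
```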
data/lib/karafka/version.rb CHANGED
@@ -3,5 +3,5 @@
  # Main module namespace
  module Karafka
    # Current Karafka version
-   VERSION = '2.0.33'
+   VERSION = '2.0.34'
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: karafka
  version: !ruby/object:Gem::Version
-   version: 2.0.33
+   version: 2.0.34
  platform: ruby
  authors:
  - Maciej Mensfeld
@@ -35,7 +35,7 @@ cert_chain:
    Qf04B9ceLUaC4fPVEz10FyobjaFoY4i32xRto3XnrzeAgfEe4swLq8bQsR3w/EF3
    MGU0FeSV2Yj7Xc2x/7BzLK8xQn5l7Yy75iPF+KP3vVmDHnNl
    -----END CERTIFICATE-----
- date: 2023-02-24 00:00:00.000000000 Z
+ date: 2023-03-04 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: karafka-core
@@ -198,6 +198,7 @@ files:
  - lib/karafka/instrumentation/vendors/datadog/dashboard.json
  - lib/karafka/instrumentation/vendors/datadog/listener.rb
  - lib/karafka/instrumentation/vendors/datadog/logger_listener.rb
+ - lib/karafka/instrumentation/vendors/datadog/metrics_listener.rb
  - lib/karafka/licenser.rb
  - lib/karafka/messages/batch_metadata.rb
  - lib/karafka/messages/builders/batch_metadata.rb
metadata.gz.sig CHANGED
Binary file