ruby-kafka 0.7.6 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +135 -3
  3. data/.github/workflows/stale.yml +19 -0
  4. data/CHANGELOG.md +34 -0
  5. data/README.md +27 -0
  6. data/lib/kafka/async_producer.rb +3 -0
  7. data/lib/kafka/broker.rb +12 -0
  8. data/lib/kafka/client.rb +53 -4
  9. data/lib/kafka/cluster.rb +52 -0
  10. data/lib/kafka/compression.rb +13 -11
  11. data/lib/kafka/compressor.rb +1 -0
  12. data/lib/kafka/connection.rb +3 -0
  13. data/lib/kafka/consumer.rb +56 -11
  14. data/lib/kafka/consumer_group.rb +10 -1
  15. data/lib/kafka/datadog.rb +18 -11
  16. data/lib/kafka/fetched_batch.rb +5 -1
  17. data/lib/kafka/fetched_batch_generator.rb +4 -1
  18. data/lib/kafka/fetched_message.rb +1 -0
  19. data/lib/kafka/fetcher.rb +5 -2
  20. data/lib/kafka/gzip_codec.rb +4 -0
  21. data/lib/kafka/lz4_codec.rb +4 -0
  22. data/lib/kafka/offset_manager.rb +12 -1
  23. data/lib/kafka/producer.rb +20 -1
  24. data/lib/kafka/prometheus.rb +316 -0
  25. data/lib/kafka/protocol.rb +8 -0
  26. data/lib/kafka/protocol/add_offsets_to_txn_request.rb +29 -0
  27. data/lib/kafka/protocol/add_offsets_to_txn_response.rb +19 -0
  28. data/lib/kafka/protocol/join_group_request.rb +8 -2
  29. data/lib/kafka/protocol/metadata_response.rb +1 -1
  30. data/lib/kafka/protocol/offset_fetch_request.rb +3 -1
  31. data/lib/kafka/protocol/produce_request.rb +3 -1
  32. data/lib/kafka/protocol/record_batch.rb +5 -4
  33. data/lib/kafka/protocol/txn_offset_commit_request.rb +46 -0
  34. data/lib/kafka/protocol/txn_offset_commit_response.rb +18 -0
  35. data/lib/kafka/sasl/scram.rb +15 -12
  36. data/lib/kafka/snappy_codec.rb +4 -0
  37. data/lib/kafka/ssl_context.rb +5 -1
  38. data/lib/kafka/ssl_socket_with_timeout.rb +1 -0
  39. data/lib/kafka/tagged_logger.rb +25 -20
  40. data/lib/kafka/transaction_manager.rb +25 -0
  41. data/lib/kafka/version.rb +1 -1
  42. data/lib/kafka/zstd_codec.rb +27 -0
  43. data/ruby-kafka.gemspec +5 -3
  44. metadata +48 -7
@@ -43,5 +43,6 @@ module Kafka
     def is_control_record
       @message.is_control_record
     end
+
   end
 end
@@ -4,7 +4,7 @@ require "kafka/fetch_operation"
 
 module Kafka
   class Fetcher
-    attr_reader :queue
+    attr_reader :queue, :max_wait_time
 
     def initialize(cluster:, logger:, instrumenter:, max_queue_size:, group:)
       @cluster = cluster
@@ -17,6 +17,9 @@ module Kafka
       @commands = Queue.new
       @next_offsets = Hash.new { |h, k| h[k] = {} }
 
+      # We are only running when someone calls start.
+      @running = false
+
       # Long poll until at least this many bytes can be fetched.
       @min_bytes = 1
 
@@ -110,7 +113,7 @@ module Kafka
       elsif @queue.size < @max_queue_size
         step
       else
-        @logger.warn "Reached max fetcher queue size (#{@max_queue_size}), sleeping 1s"
+        @logger.info "Reached max fetcher queue size (#{@max_queue_size}), sleeping 1s"
         sleep 1
       end
     ensure
@@ -6,6 +6,10 @@ module Kafka
       1
     end
 
+    def produce_api_min_version
+      0
+    end
+
     def load
       require "zlib"
     end
@@ -6,6 +6,10 @@ module Kafka
       3
     end
 
+    def produce_api_min_version
+      0
+    end
+
     def load
       require "extlz4"
     rescue LoadError
@@ -50,9 +50,20 @@ module Kafka
     # @param offset [Integer] the offset of the message that should be marked as processed.
     # @return [nil]
     def mark_as_processed(topic, partition, offset)
-      @uncommitted_offsets += 1
+      unless @group.assigned_to?(topic, partition)
+        @logger.debug "Not marking #{topic}/#{partition}:#{offset} as processed for partition not assigned to this consumer."
+        return
+      end
       @processed_offsets[topic] ||= {}
 
+      last_processed_offset = @processed_offsets[topic][partition] || -1
+      if last_processed_offset > offset + 1
+        @logger.debug "Not overwriting newer offset #{topic}/#{partition}:#{last_processed_offset - 1} with older #{offset}"
+        return
+      end
+
+      @uncommitted_offsets += 1
+
       # The committed offset should always be the offset of the next message that the
       # application will read, thus adding one to the last message processed.
       @processed_offsets[topic][partition] = offset + 1
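
For consumers that mark offsets manually, these new guards turn marks for unassigned partitions or already-superseded offsets into debug-logged no-ops rather than commits. A minimal sketch of manual marking, assuming `consumer` is a `Kafka::Consumer` in a consumer group (`handle` is a hypothetical application method):

    consumer.each_message(automatically_mark_as_processed: false) do |message|
      handle(message)  # hypothetical application code
      # With the change above, this becomes a no-op if the partition has been
      # revoked in a rebalance or a newer offset was already marked.
      consumer.mark_message_as_processed(message)
    end
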
@@ -68,6 +68,8 @@ module Kafka
   #
   # * `:snappy` for [Snappy](http://google.github.io/snappy/) compression.
   # * `:gzip` for [gzip](https://en.wikipedia.org/wiki/Gzip) compression.
+  # * `:lz4` for [LZ4](https://en.wikipedia.org/wiki/LZ4_(compression_algorithm)) compression.
+  # * `:zstd` for [zstd](https://facebook.github.io/zstd/) compression.
   #
   # By default, all message sets will be compressed if you specify a compression
   # codec. To increase the compression threshold, set `compression_threshold` to
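
The new `:lz4` and `:zstd` options are selected the same way as the existing codecs. A minimal sketch, assuming `kafka` is an already-configured `Kafka::Client` and the gem backing the chosen codec is installed (`extlz4` for LZ4, a separate zstd binding for `:zstd`):

    producer = kafka.producer(
      compression_codec: :zstd,    # or :lz4, :snappy, :gzip
      compression_threshold: 10    # only compress message sets of at least 10 messages
    )
    producer.produce("hello", topic: "greetings")
    producer.deliver_messages
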
@@ -186,11 +188,14 @@ module Kafka
     # @raise [BufferOverflow] if the maximum buffer size has been reached.
     # @return [nil]
     def produce(value, key: nil, headers: {}, topic:, partition: nil, partition_key: nil, create_time: Time.now)
+      # We want to fail fast if `topic` isn't a String
+      topic = topic.to_str
+
       message = PendingMessage.new(
         value: value && value.to_s,
         key: key && key.to_s,
         headers: headers,
-        topic: topic.to_s,
+        topic: topic,
         partition: partition && Integer(partition),
         partition_key: partition_key && partition_key.to_s,
         create_time: create_time
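
The switch from `to_s` to `to_str` means a non-String topic now fails fast instead of being silently coerced. An illustrative comparison (topic names are placeholders):

    producer.produce("payload", topic: "events")  # OK: String topic
    producer.produce("payload", topic: :events)   # 0.7.x coerced this to "events"; now raises NoMethodError
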
@@ -328,6 +333,20 @@ module Kafka
       @transaction_manager.abort_transaction
     end
 
+    # Sends batch last offset to the consumer group coordinator, and also marks
+    # this offset as part of the current transaction. This offset will be considered
+    # committed only if the transaction is committed successfully.
+    #
+    # This method should be used when you need to batch consumed and produced messages
+    # together, typically in a consume-transform-produce pattern. Thus, the specified
+    # group_id should be the same as config parameter group_id of the used
+    # consumer.
+    #
+    # @return [nil]
+    def send_offsets_to_transaction(batch:, group_id:)
+      @transaction_manager.send_offsets_to_txn(offsets: { batch.topic => { batch.partition => { offset: batch.last_offset + 1, leader_epoch: batch.leader_epoch } } }, group_id: group_id)
+    end
+
     # Syntactic sugar to enable easier transaction usage. Do the following steps
     #
     # - Start the transaction (with Producer#begin_transaction)
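
A sketch of the consume-transform-produce flow this enables, assuming `kafka` is a `Kafka::Client`, the producer is transactional, and the `group_id` passed here matches the consumer's group (all topic and id names below are placeholders):

    consumer = kafka.consumer(group_id: "my-group")
    consumer.subscribe("input-topic")

    producer = kafka.producer(transactional: true, transactional_id: "my-transactional-id")

    consumer.each_batch(automatically_mark_as_processed: false) do |batch|
      producer.transaction do
        batch.messages.each do |message|
          producer.produce(message.value.upcase, topic: "output-topic")
        end
        producer.deliver_messages
        # Commit the batch's offsets as part of the same transaction.
        producer.send_offsets_to_transaction(batch: batch, group_id: "my-group")
      end
    end
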
@@ -0,0 +1,316 @@
+# frozen_string_literal: true
+
+#
+# Subscriber to ruby_kafka to report metrics to prometheus
+#
+# Usage:
+#   require "kafka/prometheus"
+#
+# Once the file has been required, no further configuration is needed, all operational
+# metrics are automatically emitted (Unless PROMETHEUS_NO_AUTO_START is set).
+#
+# By Peter Mustel, T2 Data AB
+#
+begin
+  require 'prometheus/client'
+rescue LoadError
+  warn 'In order to report Kafka client metrics to Prometheus you need to install the `prometheus-client` gem.'
+  raise
+end
+
+require 'active_support/subscriber'
+
+module Kafka
+  module Prometheus
+    SIZE_BUCKETS = [1, 10, 100, 1000, 10_000, 100_000, 1_000_000].freeze
+    LATENCY_BUCKETS = [0.0001, 0.001, 0.01, 0.1, 1.0, 10, 100, 1000].freeze
+    DELAY_BUCKETS = [1, 3, 10, 30, 100, 300, 1000, 3000, 10_000, 30_000].freeze
+
+    class << self
+      attr_accessor :registry
+
+      def start(registry = ::Prometheus::Client.registry)
+        @registry = registry
+        ConnectionSubscriber.attach_to 'connection.kafka'
+        ConsumerSubscriber.attach_to 'consumer.kafka'
+        ProducerSubscriber.attach_to 'producer.kafka'
+        AsyncProducerSubscriber.attach_to 'async_producer.kafka'
+        FetcherSubscriber.attach_to 'fetcher.kafka'
+      end
+    end
+
+    class ConnectionSubscriber < ActiveSupport::Subscriber
+      def initialize
+        super
+        @api_calls = Prometheus.registry.counter(:api_calls, docstring: 'Total calls', labels: [:client, :api, :broker])
+        @api_latency = Prometheus.registry.histogram(:api_latency, docstring: 'Latency', buckets: LATENCY_BUCKETS, labels: [:client, :api, :broker])
+        @api_request_size = Prometheus.registry.histogram(:api_request_size, docstring: 'Request size', buckets: SIZE_BUCKETS, labels: [:client, :api, :broker])
+        @api_response_size = Prometheus.registry.histogram(:api_response_size, docstring: 'Response size', buckets: SIZE_BUCKETS, labels: [:client, :api, :broker])
+        @api_errors = Prometheus.registry.counter(:api_errors, docstring: 'Errors', labels: [:client, :api, :broker])
+      end
+
+      def request(event)
+        key = {
+          client: event.payload.fetch(:client_id),
+          api: event.payload.fetch(:api, 'unknown'),
+          broker: event.payload.fetch(:broker_host)
+        }
+        request_size = event.payload.fetch(:request_size, 0)
+        response_size = event.payload.fetch(:response_size, 0)
+
+        @api_calls.increment(labels: key)
+        @api_latency.observe(event.duration, labels: key)
+        @api_request_size.observe(request_size, labels: key)
+        @api_response_size.observe(response_size, labels: key)
+        @api_errors.increment(labels: key) if event.payload.key?(:exception)
+      end
+    end
+
+    class ConsumerSubscriber < ActiveSupport::Subscriber
+      def initialize
+        super
+        @process_messages = Prometheus.registry.counter(:consumer_process_messages, docstring: 'Total messages', labels: [:client, :group_id, :topic, :partition])
+        @process_message_errors = Prometheus.registry.counter(:consumer_process_message_errors, docstring: 'Total errors', labels: [:client, :group_id, :topic, :partition])
+        @process_message_latency =
+          Prometheus.registry.histogram(:consumer_process_message_latency, docstring: 'Latency', buckets: LATENCY_BUCKETS, labels: [:client, :group_id, :topic, :partition])
+        @offset_lag = Prometheus.registry.gauge(:consumer_offset_lag, docstring: 'Offset lag', labels: [:client, :group_id, :topic, :partition])
+        @time_lag = Prometheus.registry.gauge(:consumer_time_lag, docstring: 'Time lag of message', labels: [:client, :group_id, :topic, :partition])
+        @process_batch_errors = Prometheus.registry.counter(:consumer_process_batch_errors, docstring: 'Total errors in batch', labels: [:client, :group_id, :topic, :partition])
+        @process_batch_latency =
+          Prometheus.registry.histogram(:consumer_process_batch_latency, docstring: 'Latency in batch', buckets: LATENCY_BUCKETS, labels: [:client, :group_id, :topic, :partition])
+        @batch_size = Prometheus.registry.histogram(:consumer_batch_size, docstring: 'Size of batch', buckets: SIZE_BUCKETS, labels: [:client, :group_id, :topic, :partition])
+        @join_group = Prometheus.registry.histogram(:consumer_join_group, docstring: 'Time to join group', buckets: DELAY_BUCKETS, labels: [:client, :group_id])
+        @join_group_errors = Prometheus.registry.counter(:consumer_join_group_errors, docstring: 'Total error in joining group', labels: [:client, :group_id])
+        @sync_group = Prometheus.registry.histogram(:consumer_sync_group, docstring: 'Time to sync group', buckets: DELAY_BUCKETS, labels: [:client, :group_id])
+        @sync_group_errors = Prometheus.registry.counter(:consumer_sync_group_errors, docstring: 'Total error in syncing group', labels: [:client, :group_id])
+        @leave_group = Prometheus.registry.histogram(:consumer_leave_group, docstring: 'Time to leave group', buckets: DELAY_BUCKETS, labels: [:client, :group_id])
+        @leave_group_errors = Prometheus.registry.counter(:consumer_leave_group_errors, docstring: 'Total error in leaving group', labels: [:client, :group_id])
+        @pause_duration = Prometheus.registry.gauge(:consumer_pause_duration, docstring: 'Pause duration', labels: [:client, :group_id, :topic, :partition])
+      end
+
+      def process_message(event)
+        key = {
+          client: event.payload.fetch(:client_id),
+          group_id: event.payload.fetch(:group_id),
+          topic: event.payload.fetch(:topic),
+          partition: event.payload.fetch(:partition)
+        }
+
+        offset_lag = event.payload.fetch(:offset_lag)
+        create_time = event.payload.fetch(:create_time)
+
+        time_lag = create_time && ((Time.now - create_time) * 1000).to_i
+
+        if event.payload.key?(:exception)
+          @process_message_errors.increment(labels: key)
+        else
+          @process_message_latency.observe(event.duration, labels: key)
+          @process_messages.increment(labels: key)
+        end
+
+        @offset_lag.set(offset_lag, labels: key)
+
+        # Not all messages have timestamps.
+        return unless time_lag
+
+        @time_lag.set(time_lag, labels: key)
+      end
+
+      def process_batch(event)
+        key = {
+          client: event.payload.fetch(:client_id),
+          group_id: event.payload.fetch(:group_id),
+          topic: event.payload.fetch(:topic),
+          partition: event.payload.fetch(:partition)
+        }
+        message_count = event.payload.fetch(:message_count)
+
+        if event.payload.key?(:exception)
+          @process_batch_errors.increment(labels: key)
+        else
+          @process_batch_latency.observe(event.duration, labels: key)
+          @process_messages.increment(by: message_count, labels: key)
+        end
+      end
+
+      def fetch_batch(event)
+        key = {
+          client: event.payload.fetch(:client_id),
+          group_id: event.payload.fetch(:group_id),
+          topic: event.payload.fetch(:topic),
+          partition: event.payload.fetch(:partition)
+        }
+        offset_lag = event.payload.fetch(:offset_lag)
+        batch_size = event.payload.fetch(:message_count)
+
+        @batch_size.observe(batch_size, labels: key)
+        @offset_lag.set(offset_lag, labels: key)
+      end
+
+      def join_group(event)
+        key = { client: event.payload.fetch(:client_id), group_id: event.payload.fetch(:group_id) }
+        @join_group.observe(event.duration, labels: key)
+
+        @join_group_errors.increment(labels: key) if event.payload.key?(:exception)
+      end
+
+      def sync_group(event)
+        key = { client: event.payload.fetch(:client_id), group_id: event.payload.fetch(:group_id) }
+        @sync_group.observe(event.duration, labels: key)
+
+        @sync_group_errors.increment(labels: key) if event.payload.key?(:exception)
+      end
+
+      def leave_group(event)
+        key = { client: event.payload.fetch(:client_id), group_id: event.payload.fetch(:group_id) }
+        @leave_group.observe(event.duration, labels: key)
+
+        @leave_group_errors.increment(labels: key) if event.payload.key?(:exception)
+      end
+
+      def pause_status(event)
+        key = {
+          client: event.payload.fetch(:client_id),
+          group_id: event.payload.fetch(:group_id),
+          topic: event.payload.fetch(:topic),
+          partition: event.payload.fetch(:partition)
+        }
+
+        duration = event.payload.fetch(:duration)
+        @pause_duration.set(duration, labels: key)
+      end
+    end
+
+    class ProducerSubscriber < ActiveSupport::Subscriber
+      def initialize
+        super
+        @produce_messages = Prometheus.registry.counter(:producer_produced_messages, docstring: 'Produced messages total', labels: [:client, :topic])
+        @produce_message_size =
+          Prometheus.registry.histogram(:producer_message_size, docstring: 'Message size', buckets: SIZE_BUCKETS, labels: [:client, :topic])
+        @buffer_size = Prometheus.registry.histogram(:producer_buffer_size, docstring: 'Buffer size', buckets: SIZE_BUCKETS, labels: [:client])
+        @buffer_fill_ratio = Prometheus.registry.histogram(:producer_buffer_fill_ratio, docstring: 'Buffer fill ratio', labels: [:client])
+        @buffer_fill_percentage = Prometheus.registry.histogram(:producer_buffer_fill_percentage, docstring: 'Buffer fill percentage', labels: [:client])
+        @produce_errors = Prometheus.registry.counter(:producer_produce_errors, docstring: 'Produce errors', labels: [:client, :topic])
+        @deliver_errors = Prometheus.registry.counter(:producer_deliver_errors, docstring: 'Deliver error', labels: [:client])
+        @deliver_latency =
+          Prometheus.registry.histogram(:producer_deliver_latency, docstring: 'Delivery latency', buckets: LATENCY_BUCKETS, labels: [:client])
+        @deliver_messages = Prometheus.registry.counter(:producer_deliver_messages, docstring: 'Total count of delivered messages', labels: [:client])
+        @deliver_attempts = Prometheus.registry.histogram(:producer_deliver_attempts, docstring: 'Delivery attempts', labels: [:client])
+        @ack_messages = Prometheus.registry.counter(:producer_ack_messages, docstring: 'Ack', labels: [:client, :topic])
+        @ack_delay = Prometheus.registry.histogram(:producer_ack_delay, docstring: 'Ack delay', buckets: LATENCY_BUCKETS, labels: [:client, :topic])
+        @ack_errors = Prometheus.registry.counter(:producer_ack_errors, docstring: 'Ack errors', labels: [:client, :topic])
+      end
+
+      def produce_message(event)
+        client = event.payload.fetch(:client_id)
+        key = { client: client, topic: event.payload.fetch(:topic) }
+
+        message_size = event.payload.fetch(:message_size)
+        buffer_size = event.payload.fetch(:buffer_size)
+        max_buffer_size = event.payload.fetch(:max_buffer_size)
+        buffer_fill_ratio = buffer_size.to_f / max_buffer_size.to_f
+        buffer_fill_percentage = buffer_fill_ratio * 100.0
+
+        # This gets us the write rate.
+        @produce_messages.increment(labels: key)
+        @produce_message_size.observe(message_size, labels: key)
+
+        # This gets us the avg/max buffer size per producer.
+        @buffer_size.observe(buffer_size, labels: { client: client })
+
+        # This gets us the avg/max buffer fill ratio per producer.
+        @buffer_fill_ratio.observe(buffer_fill_ratio, labels: { client: client })
+        @buffer_fill_percentage.observe(buffer_fill_percentage, labels: { client: client })
+      end
+
+      def buffer_overflow(event)
+        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
+        @produce_errors.increment(labels: key)
+      end
+
+      def deliver_messages(event)
+        key = { client: event.payload.fetch(:client_id) }
+        message_count = event.payload.fetch(:delivered_message_count)
+        attempts = event.payload.fetch(:attempts)
+
+        @deliver_errors.increment(labels: key) if event.payload.key?(:exception)
+        @deliver_latency.observe(event.duration, labels: key)
+
+        # Messages delivered to Kafka:
+        @deliver_messages.increment(by: message_count, labels: key)
+
+        # Number of attempts to deliver messages:
+        @deliver_attempts.observe(attempts, labels: key)
+      end
+
+      def ack_message(event)
+        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
+
+        # Number of messages ACK'd for the topic.
+        @ack_messages.increment(labels: key)
+
+        # Histogram of delay between a message being produced and it being ACK'd.
+        @ack_delay.observe(event.payload.fetch(:delay), labels: key)
+      end
+
+      def topic_error(event)
+        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
+
+        @ack_errors.increment(labels: key)
+      end
+    end
+
+    class AsyncProducerSubscriber < ActiveSupport::Subscriber
+      def initialize
+        super
+        @queue_size = Prometheus.registry.histogram(:async_producer_queue_size, docstring: 'Queue size', buckets: SIZE_BUCKETS, labels: [:client, :topic])
+        @queue_fill_ratio = Prometheus.registry.histogram(:async_producer_queue_fill_ratio, docstring: 'Queue fill ratio', labels: [:client, :topic])
+        @produce_errors = Prometheus.registry.counter(:async_producer_produce_errors, docstring: 'Producer errors', labels: [:client, :topic])
+        @dropped_messages = Prometheus.registry.counter(:async_producer_dropped_messages, docstring: 'Dropped messages', labels: [:client])
+      end
+
+      def enqueue_message(event)
+        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
+
+        queue_size = event.payload.fetch(:queue_size)
+        max_queue_size = event.payload.fetch(:max_queue_size)
+        queue_fill_ratio = queue_size.to_f / max_queue_size.to_f
+
+        # This gets us the avg/max queue size per producer.
+        @queue_size.observe(queue_size, labels: key)
+
+        # This gets us the avg/max queue fill ratio per producer.
+        @queue_fill_ratio.observe(queue_fill_ratio, labels: key)
+      end
+
+      def buffer_overflow(event)
+        key = { client: event.payload.fetch(:client_id), topic: event.payload.fetch(:topic) }
+        @produce_errors.increment(labels: key)
+      end
+
+      def drop_messages(event)
+        key = { client: event.payload.fetch(:client_id) }
+        message_count = event.payload.fetch(:message_count)
+        @dropped_messages.increment(by: message_count, labels: key)
+      end
+    end
+
+    class FetcherSubscriber < ActiveSupport::Subscriber
+      def initialize
+        super
+        @queue_size = Prometheus.registry.gauge(:fetcher_queue_size, docstring: 'Queue size', labels: [:client, :group_id])
+      end
+
+      def loop(event)
+        queue_size = event.payload.fetch(:queue_size)
+        client = event.payload.fetch(:client_id)
+        group_id = event.payload.fetch(:group_id)
+
+        @queue_size.set(queue_size, labels: { client: client, group_id: group_id })
+      end
+    end
+  end
+end
+
+# To enable testability, it is possible to skip the start until test time
+Kafka::Prometheus.start unless defined?(PROMETHEUS_NO_AUTO_START)
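
A usage sketch for the new subscriber, assuming the `prometheus-client` gem is installed and a Rack endpoint is used to expose the default registry (the app below is illustrative):

    require "kafka"
    require "kafka/prometheus"  # attaches the subscribers and starts automatically

    # config.ru: serve the default Prometheus registry on /metrics
    require "prometheus/middleware/exporter"
    use Prometheus::Middleware::Exporter
    run ->(_env) { [200, { "Content-Type" => "text/plain" }, ["OK"]] }

To defer startup (for tests, or to use a registry of your choice), define `PROMETHEUS_NO_AUTO_START` before requiring the file and call `Kafka::Prometheus.start` yourself.
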
@@ -33,7 +33,9 @@ module Kafka
     DELETE_TOPICS_API = 20
     INIT_PRODUCER_ID_API = 22
     ADD_PARTITIONS_TO_TXN_API = 24
+    ADD_OFFSETS_TO_TXN_API = 25
     END_TXN_API = 26
+    TXN_OFFSET_COMMIT_API = 28
     DESCRIBE_CONFIGS_API = 32
     ALTER_CONFIGS_API = 33
     CREATE_PARTITIONS_API = 37
@@ -57,7 +59,9 @@ module Kafka
       DELETE_TOPICS_API => :delete_topics,
       INIT_PRODUCER_ID_API => :init_producer_id_api,
       ADD_PARTITIONS_TO_TXN_API => :add_partitions_to_txn_api,
+      ADD_OFFSETS_TO_TXN_API => :add_offsets_to_txn_api,
       END_TXN_API => :end_txn_api,
+      TXN_OFFSET_COMMIT_API => :txn_offset_commit_api,
       DESCRIBE_CONFIGS_API => :describe_configs_api,
       CREATE_PARTITIONS_API => :create_partitions
     }
@@ -177,6 +181,10 @@ require "kafka/protocol/fetch_request"
 require "kafka/protocol/fetch_response"
 require "kafka/protocol/list_offset_request"
 require "kafka/protocol/list_offset_response"
+require "kafka/protocol/add_offsets_to_txn_request"
+require "kafka/protocol/add_offsets_to_txn_response"
+require "kafka/protocol/txn_offset_commit_request"
+require "kafka/protocol/txn_offset_commit_response"
 require "kafka/protocol/find_coordinator_request"
 require "kafka/protocol/find_coordinator_response"
 require "kafka/protocol/join_group_request"