ruby-kafka-temp-fork 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (144)
  1. checksums.yaml +7 -0
  2. data/.circleci/config.yml +393 -0
  3. data/.github/workflows/stale.yml +19 -0
  4. data/.gitignore +13 -0
  5. data/.readygo +1 -0
  6. data/.rspec +3 -0
  7. data/.rubocop.yml +44 -0
  8. data/.ruby-version +1 -0
  9. data/.yardopts +3 -0
  10. data/CHANGELOG.md +310 -0
  11. data/Gemfile +5 -0
  12. data/ISSUE_TEMPLATE.md +23 -0
  13. data/LICENSE.txt +176 -0
  14. data/Procfile +2 -0
  15. data/README.md +1342 -0
  16. data/Rakefile +8 -0
  17. data/benchmarks/message_encoding.rb +23 -0
  18. data/bin/console +8 -0
  19. data/bin/setup +5 -0
  20. data/docker-compose.yml +39 -0
  21. data/examples/consumer-group.rb +35 -0
  22. data/examples/firehose-consumer.rb +64 -0
  23. data/examples/firehose-producer.rb +54 -0
  24. data/examples/simple-consumer.rb +34 -0
  25. data/examples/simple-producer.rb +42 -0
  26. data/examples/ssl-producer.rb +44 -0
  27. data/lib/kafka.rb +373 -0
  28. data/lib/kafka/async_producer.rb +291 -0
  29. data/lib/kafka/broker.rb +217 -0
  30. data/lib/kafka/broker_info.rb +16 -0
  31. data/lib/kafka/broker_pool.rb +41 -0
  32. data/lib/kafka/broker_uri.rb +43 -0
  33. data/lib/kafka/client.rb +833 -0
  34. data/lib/kafka/cluster.rb +513 -0
  35. data/lib/kafka/compression.rb +45 -0
  36. data/lib/kafka/compressor.rb +86 -0
  37. data/lib/kafka/connection.rb +223 -0
  38. data/lib/kafka/connection_builder.rb +33 -0
  39. data/lib/kafka/consumer.rb +642 -0
  40. data/lib/kafka/consumer_group.rb +231 -0
  41. data/lib/kafka/consumer_group/assignor.rb +63 -0
  42. data/lib/kafka/crc32_hash.rb +15 -0
  43. data/lib/kafka/datadog.rb +420 -0
  44. data/lib/kafka/digest.rb +22 -0
  45. data/lib/kafka/fetch_operation.rb +115 -0
  46. data/lib/kafka/fetched_batch.rb +58 -0
  47. data/lib/kafka/fetched_batch_generator.rb +120 -0
  48. data/lib/kafka/fetched_message.rb +48 -0
  49. data/lib/kafka/fetched_offset_resolver.rb +48 -0
  50. data/lib/kafka/fetcher.rb +224 -0
  51. data/lib/kafka/gzip_codec.rb +34 -0
  52. data/lib/kafka/heartbeat.rb +25 -0
  53. data/lib/kafka/instrumenter.rb +38 -0
  54. data/lib/kafka/interceptors.rb +33 -0
  55. data/lib/kafka/lz4_codec.rb +27 -0
  56. data/lib/kafka/message_buffer.rb +87 -0
  57. data/lib/kafka/murmur2_hash.rb +17 -0
  58. data/lib/kafka/offset_manager.rb +259 -0
  59. data/lib/kafka/partitioner.rb +40 -0
  60. data/lib/kafka/pause.rb +92 -0
  61. data/lib/kafka/pending_message.rb +29 -0
  62. data/lib/kafka/pending_message_queue.rb +41 -0
  63. data/lib/kafka/produce_operation.rb +205 -0
  64. data/lib/kafka/producer.rb +528 -0
  65. data/lib/kafka/prometheus.rb +316 -0
  66. data/lib/kafka/protocol.rb +225 -0
  67. data/lib/kafka/protocol/add_offsets_to_txn_request.rb +29 -0
  68. data/lib/kafka/protocol/add_offsets_to_txn_response.rb +21 -0
  69. data/lib/kafka/protocol/add_partitions_to_txn_request.rb +34 -0
  70. data/lib/kafka/protocol/add_partitions_to_txn_response.rb +47 -0
  71. data/lib/kafka/protocol/alter_configs_request.rb +44 -0
  72. data/lib/kafka/protocol/alter_configs_response.rb +49 -0
  73. data/lib/kafka/protocol/api_versions_request.rb +21 -0
  74. data/lib/kafka/protocol/api_versions_response.rb +53 -0
  75. data/lib/kafka/protocol/consumer_group_protocol.rb +19 -0
  76. data/lib/kafka/protocol/create_partitions_request.rb +42 -0
  77. data/lib/kafka/protocol/create_partitions_response.rb +28 -0
  78. data/lib/kafka/protocol/create_topics_request.rb +45 -0
  79. data/lib/kafka/protocol/create_topics_response.rb +26 -0
  80. data/lib/kafka/protocol/decoder.rb +175 -0
  81. data/lib/kafka/protocol/delete_topics_request.rb +33 -0
  82. data/lib/kafka/protocol/delete_topics_response.rb +26 -0
  83. data/lib/kafka/protocol/describe_configs_request.rb +35 -0
  84. data/lib/kafka/protocol/describe_configs_response.rb +73 -0
  85. data/lib/kafka/protocol/describe_groups_request.rb +27 -0
  86. data/lib/kafka/protocol/describe_groups_response.rb +73 -0
  87. data/lib/kafka/protocol/encoder.rb +184 -0
  88. data/lib/kafka/protocol/end_txn_request.rb +29 -0
  89. data/lib/kafka/protocol/end_txn_response.rb +19 -0
  90. data/lib/kafka/protocol/fetch_request.rb +70 -0
  91. data/lib/kafka/protocol/fetch_response.rb +136 -0
  92. data/lib/kafka/protocol/find_coordinator_request.rb +29 -0
  93. data/lib/kafka/protocol/find_coordinator_response.rb +29 -0
  94. data/lib/kafka/protocol/heartbeat_request.rb +27 -0
  95. data/lib/kafka/protocol/heartbeat_response.rb +17 -0
  96. data/lib/kafka/protocol/init_producer_id_request.rb +26 -0
  97. data/lib/kafka/protocol/init_producer_id_response.rb +27 -0
  98. data/lib/kafka/protocol/join_group_request.rb +47 -0
  99. data/lib/kafka/protocol/join_group_response.rb +41 -0
  100. data/lib/kafka/protocol/leave_group_request.rb +25 -0
  101. data/lib/kafka/protocol/leave_group_response.rb +17 -0
  102. data/lib/kafka/protocol/list_groups_request.rb +23 -0
  103. data/lib/kafka/protocol/list_groups_response.rb +35 -0
  104. data/lib/kafka/protocol/list_offset_request.rb +53 -0
  105. data/lib/kafka/protocol/list_offset_response.rb +89 -0
  106. data/lib/kafka/protocol/member_assignment.rb +42 -0
  107. data/lib/kafka/protocol/message.rb +172 -0
  108. data/lib/kafka/protocol/message_set.rb +55 -0
  109. data/lib/kafka/protocol/metadata_request.rb +31 -0
  110. data/lib/kafka/protocol/metadata_response.rb +185 -0
  111. data/lib/kafka/protocol/offset_commit_request.rb +47 -0
  112. data/lib/kafka/protocol/offset_commit_response.rb +29 -0
  113. data/lib/kafka/protocol/offset_fetch_request.rb +38 -0
  114. data/lib/kafka/protocol/offset_fetch_response.rb +56 -0
  115. data/lib/kafka/protocol/produce_request.rb +94 -0
  116. data/lib/kafka/protocol/produce_response.rb +63 -0
  117. data/lib/kafka/protocol/record.rb +88 -0
  118. data/lib/kafka/protocol/record_batch.rb +223 -0
  119. data/lib/kafka/protocol/request_message.rb +26 -0
  120. data/lib/kafka/protocol/sasl_handshake_request.rb +33 -0
  121. data/lib/kafka/protocol/sasl_handshake_response.rb +28 -0
  122. data/lib/kafka/protocol/sync_group_request.rb +33 -0
  123. data/lib/kafka/protocol/sync_group_response.rb +26 -0
  124. data/lib/kafka/protocol/txn_offset_commit_request.rb +46 -0
  125. data/lib/kafka/protocol/txn_offset_commit_response.rb +47 -0
  126. data/lib/kafka/round_robin_assignment_strategy.rb +52 -0
  127. data/lib/kafka/sasl/gssapi.rb +76 -0
  128. data/lib/kafka/sasl/oauth.rb +64 -0
  129. data/lib/kafka/sasl/plain.rb +39 -0
  130. data/lib/kafka/sasl/scram.rb +180 -0
  131. data/lib/kafka/sasl_authenticator.rb +61 -0
  132. data/lib/kafka/snappy_codec.rb +29 -0
  133. data/lib/kafka/socket_with_timeout.rb +96 -0
  134. data/lib/kafka/ssl_context.rb +66 -0
  135. data/lib/kafka/ssl_socket_with_timeout.rb +188 -0
  136. data/lib/kafka/statsd.rb +296 -0
  137. data/lib/kafka/tagged_logger.rb +77 -0
  138. data/lib/kafka/transaction_manager.rb +306 -0
  139. data/lib/kafka/transaction_state_machine.rb +72 -0
  140. data/lib/kafka/version.rb +5 -0
  141. data/lib/kafka/zstd_codec.rb +27 -0
  142. data/lib/ruby-kafka-temp-fork.rb +5 -0
  143. data/ruby-kafka-temp-fork.gemspec +54 -0
  144. metadata +520 -0

data/lib/kafka/async_producer.rb
@@ -0,0 +1,291 @@
+# frozen_string_literal: true
+
+require "thread"
+
+module Kafka
+
+  # A Kafka producer that does all its work in the background so as to not block
+  # the calling thread. Calls to {#deliver_messages} are asynchronous and return
+  # immediately.
+  #
+  # In addition to this property it's possible to define automatic delivery
+  # policies. These allow placing an upper bound on the number of buffered
+  # messages and the time between message deliveries.
+  #
+  # * If `delivery_threshold` is set to a value _n_ higher than zero, the producer
+  #   will automatically deliver its messages once its buffer size reaches _n_.
+  # * If `delivery_interval` is set to a value _n_ higher than zero, the producer
+  #   will automatically deliver its messages every _n_ seconds.
+  #
+  # By default, automatic delivery is disabled and you'll have to call
+  # {#deliver_messages} manually.
+  #
+  # ## Buffer Overflow and Backpressure
+  #
+  # The calling thread communicates with the background thread doing the actual
+  # work using a thread safe queue. While the background thread is busy delivering
+  # messages, new messages will be buffered in the queue. In order to avoid
+  # the queue growing uncontrollably in cases where the background thread gets
+  # stuck or can't follow the pace of the calling thread, there's a maximum
+  # number of messages that is allowed to be buffered. You can configure this
+  # value by setting `max_queue_size`.
+  #
+  # If you produce messages faster than the background producer thread can
+  # deliver them to Kafka you will eventually fill the producer's buffer. Once
+  # this happens, the background thread will stop popping messages off the
+  # queue until it can successfully deliver the buffered messages. The queue
+  # will therefore grow in size, potentially hitting the `max_queue_size` limit.
+  # Once this happens, calls to {#produce} will raise a {BufferOverflow} error.
+  #
+  # Depending on your use case you may want to slow down the rate of messages
+  # being produced or perhaps halt your application completely until the
+  # producer can deliver the buffered messages and clear the message queue.
+  #
+  # ## Example
+  #
+  #     producer = kafka.async_producer(
+  #       # Keep at most 1.000 messages in the buffer before delivering:
+  #       delivery_threshold: 1000,
+  #
+  #       # Deliver messages every 30 seconds:
+  #       delivery_interval: 30,
+  #     )
+  #
+  #     # There's no need to manually call #deliver_messages, it will happen
+  #     # automatically in the background.
+  #     producer.produce("hello", topic: "greetings")
+  #
+  #     # Remember to shut down the producer when you're done with it.
+  #     producer.shutdown
+  #
+  class AsyncProducer
+    # Initializes a new AsyncProducer.
+    #
+    # @param sync_producer [Kafka::Producer] the synchronous producer that should
+    #   be used in the background.
+    # @param max_queue_size [Integer] the maximum number of messages allowed in
+    #   the queue.
+    # @param delivery_threshold [Integer] if greater than zero, the number of
+    #   buffered messages that will automatically trigger a delivery.
+    # @param delivery_interval [Integer] if greater than zero, the number of
+    #   seconds between automatic message deliveries.
+    #
+    def initialize(sync_producer:, max_queue_size: 1000, delivery_threshold: 0, delivery_interval: 0, max_retries: -1, retry_backoff: 0, instrumenter:, logger:)
+      raise ArgumentError unless max_queue_size > 0
+      raise ArgumentError unless delivery_threshold >= 0
+      raise ArgumentError unless delivery_interval >= 0
+
+      @queue = Queue.new
+      @max_queue_size = max_queue_size
+      @instrumenter = instrumenter
+      @logger = TaggedLogger.new(logger)
+
+      @worker = Worker.new(
+        queue: @queue,
+        producer: sync_producer,
+        delivery_threshold: delivery_threshold,
+        max_retries: max_retries,
+        retry_backoff: retry_backoff,
+        instrumenter: instrumenter,
+        logger: logger
+      )
+
+      # The timer will no-op if the delivery interval is zero.
+      @timer = Timer.new(queue: @queue, interval: delivery_interval)
+
+      @thread_mutex = Mutex.new
+    end
+
+    # Produces a message to the specified topic.
+    #
+    # @see Kafka::Producer#produce
+    # @param (see Kafka::Producer#produce)
+    # @raise [BufferOverflow] if the message queue is full.
+    # @return [nil]
+    def produce(value, topic:, **options)
+      # We want to fail fast if `topic` isn't a String
+      topic = topic.to_str
+
+      ensure_threads_running!
+
+      if @queue.size >= @max_queue_size
+        buffer_overflow topic,
+          "Cannot produce to #{topic}, max queue size (#{@max_queue_size} messages) reached"
+      end
+
+      args = [value, **options.merge(topic: topic)]
+      @queue << [:produce, args]
+
+      @instrumenter.instrument("enqueue_message.async_producer", {
+        topic: topic,
+        queue_size: @queue.size,
+        max_queue_size: @max_queue_size,
+      })
+
+      nil
+    end
+
+    # Asynchronously delivers the buffered messages. This method will return
+    # immediately and the actual work will be done in the background.
+    #
+    # @see Kafka::Producer#deliver_messages
+    # @return [nil]
+    def deliver_messages
+      ensure_threads_running!
+
+      @queue << [:deliver_messages, nil]
+
+      nil
+    end
+
+    # Shuts down the producer, releasing the network resources used. This
+    # method will block until the buffered messages have been delivered.
+    #
+    # @see Kafka::Producer#shutdown
+    # @return [nil]
+    def shutdown
+      ensure_threads_running!
+
+      @timer_thread && @timer_thread.exit
+      @queue << [:shutdown, nil]
+      @worker_thread && @worker_thread.join
+
+      nil
+    end
+
+    private
+
+    def ensure_threads_running!
+      return if worker_thread_alive? && timer_thread_alive?
+
+      @thread_mutex.synchronize do
+        @worker_thread = Thread.new { @worker.run } unless worker_thread_alive?
+        @timer_thread = Thread.new { @timer.run } unless timer_thread_alive?
+      end
+    end
+
+    def worker_thread_alive?
+      !!@worker_thread && @worker_thread.alive?
+    end
+
+    def timer_thread_alive?
+      !!@timer_thread && @timer_thread.alive?
+    end
+
+    def buffer_overflow(topic, message)
+      @instrumenter.instrument("buffer_overflow.async_producer", {
+        topic: topic,
+      })
+
+      raise BufferOverflow, message
+    end
+
+    class Timer
+      def initialize(interval:, queue:)
+        @queue = queue
+        @interval = interval
+      end
+
+      def run
+        # Permanently sleep if the timer interval is zero.
+        Thread.stop if @interval.zero?
+
+        loop do
+          sleep(@interval)
+          @queue << [:deliver_messages, nil]
+        end
+      end
+    end
+
+    class Worker
+      def initialize(queue:, producer:, delivery_threshold:, max_retries: -1, retry_backoff: 0, instrumenter:, logger:)
+        @queue = queue
+        @producer = producer
+        @delivery_threshold = delivery_threshold
+        @max_retries = max_retries
+        @retry_backoff = retry_backoff
+        @instrumenter = instrumenter
+        @logger = TaggedLogger.new(logger)
+      end
+
+      def run
+        @logger.push_tags(@producer.to_s)
+        @logger.info "Starting async producer in the background..."
+
+        loop do
+          operation, payload = @queue.pop
+
+          case operation
+          when :produce
+            produce(payload[0], **payload[1])
+            deliver_messages if threshold_reached?
+          when :deliver_messages
+            deliver_messages
+          when :shutdown
+            begin
+              # Deliver any pending messages first.
+              @producer.deliver_messages
+            rescue Error => e
+              @logger.error("Failed to deliver messages during shutdown: #{e.message}")
+
+              @instrumenter.instrument("drop_messages.async_producer", {
+                message_count: @producer.buffer_size + @queue.size,
+              })
+            end
+
+            # Stop the run loop.
+            break
+          else
+            raise "Unknown operation #{operation.inspect}"
+          end
+        end
+      rescue Kafka::Error => e
+        @logger.error "Unexpected Kafka error #{e.class}: #{e.message}\n#{e.backtrace.join("\n")}"
+        @logger.info "Restarting in 10 seconds..."
+
+        sleep 10
+        retry
+      rescue Exception => e
+        @logger.error "Unexpected Kafka error #{e.class}: #{e.message}\n#{e.backtrace.join("\n")}"
+        @logger.error "Async producer crashed!"
+      ensure
+        @producer.shutdown
+        @logger.pop_tags
+      end
+
+      private
+
+      def produce(value, **kwargs)
+        retries = 0
+        begin
+          @producer.produce(value, **kwargs)
+        rescue BufferOverflow => e
+          deliver_messages
+          if @max_retries == -1
+            retry
+          elsif retries < @max_retries
+            retries += 1
+            sleep @retry_backoff**retries
+            retry
+          else
+            @logger.error("Failed to asynchronously produce messages due to BufferOverflow")
+            @instrumenter.instrument("error.async_producer", { error: e })
+          end
+        end
+      end
+
+      def deliver_messages
+        @producer.deliver_messages
+      rescue DeliveryFailed, ConnectionError => e
+        # Failed to deliver messages -- nothing to do but log and try again later.
+        @logger.error("Failed to asynchronously deliver messages: #{e.message}")
+        @instrumenter.instrument("error.async_producer", { error: e })
+      end
+
+      def threshold_reached?
+        @delivery_threshold > 0 &&
+          @producer.buffer_size >= @delivery_threshold
+      end
+    end
+  end
+end
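
The delivery policies and backpressure behaviour documented in the hunk above translate into roughly the following usage pattern. This is a minimal sketch, not part of the package: the seed broker address, client id, topic name and queue sizes are placeholders, and it assumes the public client API this gem documents (`Kafka.new`, `Client#async_producer`).

    require "kafka"

    # Placeholder connection details -- adjust for your cluster.
    kafka = Kafka.new(["kafka1:9092"], client_id: "my-app")

    # Flush automatically once 1000 messages are buffered or every 30 seconds,
    # whichever comes first; beyond 10_000 queued messages, #produce raises
    # Kafka::BufferOverflow instead of buffering further.
    producer = kafka.async_producer(
      delivery_threshold: 1000,
      delivery_interval: 30,
      max_queue_size: 10_000
    )

    begin
      producer.produce("hello", topic: "greetings")
    rescue Kafka::BufferOverflow
      # Backpressure: the background worker can't keep up -- back off and retry.
      sleep 1
      retry
    end

    # Blocks until buffered messages have been delivered, then frees resources.
    producer.shutdown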

data/lib/kafka/broker.rb
@@ -0,0 +1,217 @@
+# frozen_string_literal: true
+
+require "logger"
+require "kafka/connection"
+require "kafka/protocol"
+
+module Kafka
+  class Broker
+    def initialize(connection_builder:, host:, port:, node_id: nil, logger:)
+      @connection_builder = connection_builder
+      @connection = nil
+      @host = host
+      @port = port
+      @node_id = node_id
+      @logger = TaggedLogger.new(logger)
+    end
+
+    def address_match?(host, port)
+      host == @host && port == @port
+    end
+
+    # @return [String]
+    def to_s
+      "#{@host}:#{@port} (node_id=#{@node_id.inspect})"
+    end
+
+    # @return [nil]
+    def disconnect
+      connection.close if connected?
+    end
+
+    # @return [Boolean]
+    def connected?
+      !@connection.nil?
+    end
+
+    # Fetches cluster metadata from the broker.
+    #
+    # @param (see Kafka::Protocol::MetadataRequest#initialize)
+    # @return [Kafka::Protocol::MetadataResponse]
+    def fetch_metadata(**options)
+      request = Protocol::MetadataRequest.new(**options)
+
+      send_request(request)
+    end
+
+    # Fetches messages from a specified topic and partition.
+    #
+    # @param (see Kafka::Protocol::FetchRequest#initialize)
+    # @return [Kafka::Protocol::FetchResponse]
+    def fetch_messages(**options)
+      request = Protocol::FetchRequest.new(**options)
+
+      send_request(request)
+    end
+
+    # Lists the offset of the specified topics and partitions.
+    #
+    # @param (see Kafka::Protocol::ListOffsetRequest#initialize)
+    # @return [Kafka::Protocol::ListOffsetResponse]
+    def list_offsets(**options)
+      request = Protocol::ListOffsetRequest.new(**options)
+
+      send_request(request)
+    end
+
+    # Produces a set of messages to the broker.
+    #
+    # @param (see Kafka::Protocol::ProduceRequest#initialize)
+    # @return [Kafka::Protocol::ProduceResponse]
+    def produce(**options)
+      request = Protocol::ProduceRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def fetch_offsets(**options)
+      request = Protocol::OffsetFetchRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def commit_offsets(**options)
+      request = Protocol::OffsetCommitRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def join_group(**options)
+      request = Protocol::JoinGroupRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def sync_group(**options)
+      request = Protocol::SyncGroupRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def leave_group(**options)
+      request = Protocol::LeaveGroupRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def find_coordinator(**options)
+      request = Protocol::FindCoordinatorRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def heartbeat(**options)
+      request = Protocol::HeartbeatRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def create_topics(**options)
+      request = Protocol::CreateTopicsRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def delete_topics(**options)
+      request = Protocol::DeleteTopicsRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def describe_configs(**options)
+      request = Protocol::DescribeConfigsRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def alter_configs(**options)
+      request = Protocol::AlterConfigsRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def create_partitions(**options)
+      request = Protocol::CreatePartitionsRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def list_groups
+      request = Protocol::ListGroupsRequest.new
+
+      send_request(request)
+    end
+
+    def api_versions
+      request = Protocol::ApiVersionsRequest.new
+
+      send_request(request)
+    end
+
+    def describe_groups(**options)
+      request = Protocol::DescribeGroupsRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def init_producer_id(**options)
+      request = Protocol::InitProducerIDRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def add_partitions_to_txn(**options)
+      request = Protocol::AddPartitionsToTxnRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def end_txn(**options)
+      request = Protocol::EndTxnRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def add_offsets_to_txn(**options)
+      request = Protocol::AddOffsetsToTxnRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def txn_offset_commit(**options)
+      request = Protocol::TxnOffsetCommitRequest.new(**options)
+
+      send_request(request)
+    end
+
+    private
+
+    def send_request(request)
+      connection.send_request(request)
+    rescue IdleConnection
+      @logger.warn "Connection has been unused for too long, re-connecting..."
+      @connection.close rescue nil
+      @connection = nil
+      retry
+    rescue ConnectionError
+      @connection.close rescue nil
+      @connection = nil
+
+      raise
+    end
+
+    def connection
+      @connection ||= @connection_builder.build_connection(@host, @port)
+    end
+  end
+end
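
One detail of the `Broker` hunk worth calling out is `send_request`: the connection is built lazily on first use, an `IdleConnection` error tears it down and resends the request transparently (Ruby's `retry` re-runs the method body, so `connection` rebuilds the socket), while any other `ConnectionError` closes the socket and re-raises. A standalone sketch of that pattern follows; the `RetryingSender` and error class names are invented for illustration and are not part of the gem.

    # Illustrative only: mirrors the error handling in Broker#send_request.
    class IdleConnection < StandardError; end
    class ConnectionError < StandardError; end

    class RetryingSender
      def initialize(builder)
        @builder = builder    # anything responding to #build_connection
        @connection = nil
      end

      def send_request(request)
        connection.send_request(request)
      rescue IdleConnection
        # The connection sat unused too long: drop it, rebuild, and resend.
        @connection.close rescue nil
        @connection = nil
        retry
      rescue ConnectionError
        # Hard failure: drop the connection and let the caller decide.
        @connection.close rescue nil
        @connection = nil
        raise
      end

      private

      def connection
        @connection ||= @builder.build_connection
      end
    end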