ruby-kafka 0.7.6 → 1.1.0

Files changed (44)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +135 -3
  3. data/.github/workflows/stale.yml +19 -0
  4. data/CHANGELOG.md +34 -0
  5. data/README.md +27 -0
  6. data/lib/kafka/async_producer.rb +3 -0
  7. data/lib/kafka/broker.rb +12 -0
  8. data/lib/kafka/client.rb +53 -4
  9. data/lib/kafka/cluster.rb +52 -0
  10. data/lib/kafka/compression.rb +13 -11
  11. data/lib/kafka/compressor.rb +1 -0
  12. data/lib/kafka/connection.rb +3 -0
  13. data/lib/kafka/consumer.rb +56 -11
  14. data/lib/kafka/consumer_group.rb +10 -1
  15. data/lib/kafka/datadog.rb +18 -11
  16. data/lib/kafka/fetched_batch.rb +5 -1
  17. data/lib/kafka/fetched_batch_generator.rb +4 -1
  18. data/lib/kafka/fetched_message.rb +1 -0
  19. data/lib/kafka/fetcher.rb +5 -2
  20. data/lib/kafka/gzip_codec.rb +4 -0
  21. data/lib/kafka/lz4_codec.rb +4 -0
  22. data/lib/kafka/offset_manager.rb +12 -1
  23. data/lib/kafka/producer.rb +20 -1
  24. data/lib/kafka/prometheus.rb +316 -0
  25. data/lib/kafka/protocol.rb +8 -0
  26. data/lib/kafka/protocol/add_offsets_to_txn_request.rb +29 -0
  27. data/lib/kafka/protocol/add_offsets_to_txn_response.rb +19 -0
  28. data/lib/kafka/protocol/join_group_request.rb +8 -2
  29. data/lib/kafka/protocol/metadata_response.rb +1 -1
  30. data/lib/kafka/protocol/offset_fetch_request.rb +3 -1
  31. data/lib/kafka/protocol/produce_request.rb +3 -1
  32. data/lib/kafka/protocol/record_batch.rb +5 -4
  33. data/lib/kafka/protocol/txn_offset_commit_request.rb +46 -0
  34. data/lib/kafka/protocol/txn_offset_commit_response.rb +18 -0
  35. data/lib/kafka/sasl/scram.rb +15 -12
  36. data/lib/kafka/snappy_codec.rb +4 -0
  37. data/lib/kafka/ssl_context.rb +5 -1
  38. data/lib/kafka/ssl_socket_with_timeout.rb +1 -0
  39. data/lib/kafka/tagged_logger.rb +25 -20
  40. data/lib/kafka/transaction_manager.rb +25 -0
  41. data/lib/kafka/version.rb +1 -1
  42. data/lib/kafka/zstd_codec.rb +27 -0
  43. data/ruby-kafka.gemspec +5 -3
  44. metadata +48 -7
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: '08731d84b1c82d5bf46625993d552e80face5fa5dbf71af68ff60cb454f740bf'
-  data.tar.gz: 64aaa4a1b1dcd4200e058bfa1157d07749044752362ff296b5eadd6108c365da
+  metadata.gz: 22b958f3d452f35c3c6084a5b8b790f370b96069e87ef5974838e5ea4b6945e1
+  data.tar.gz: eb6d704dbaace9c13a99bc93924d8a6b84ee5eb686c9c863a81d085ffeb7e92d
 SHA512:
-  metadata.gz: d154e9ecbbea7014d33ef93b6e21b1897df7628abb51a04094c2209518b43097414b54f03175fa0d59f89368ca2821559b9739c054503046ea74ef6cd875a4d6
-  data.tar.gz: 23680e5dadc11ae5a810708a8d2d7de8e5dbeb0f964164b2d29288737bcd5fa4c9885d10b9660a538a6b9faa27d1accf83af44dabce4ef6d2c56f6b105bb9c91
+  metadata.gz: 22c1d59bcdd42849849122b559f0d161653a5cfa492ffb36f28a875b109444d670a76bc62ba0856ae2ebce95fd10abce9e16834928fc7fe6eb2ee006827d307c
+  data.tar.gz: 3a51df6b1d40e1edbd96c06f3473319f0ac1b072b040b676a8e1d980fbaad114ad0248a7670cb7ae0a51a2c79ac390f2f4cfe8f65eedb5dd55be020c6011bc18
data/.circleci/config.yml CHANGED
@@ -145,21 +145,149 @@ jobs:
         environment:
           LOG_LEVEL: DEBUG
       - image: wurstmeister/zookeeper
-      - image: wurstmeister/kafka:2.12-2.1.0
+      - image: wurstmeister/kafka:2.12-2.1.1
         environment:
           KAFKA_ADVERTISED_HOST_NAME: localhost
           KAFKA_ADVERTISED_PORT: 9092
           KAFKA_PORT: 9092
           KAFKA_ZOOKEEPER_CONNECT: localhost:2181
           KAFKA_DELETE_TOPIC_ENABLE: true
-      - image: wurstmeister/kafka:2.12-2.1.0
+      - image: wurstmeister/kafka:2.12-2.1.1
         environment:
           KAFKA_ADVERTISED_HOST_NAME: localhost
           KAFKA_ADVERTISED_PORT: 9093
           KAFKA_PORT: 9093
           KAFKA_ZOOKEEPER_CONNECT: localhost:2181
           KAFKA_DELETE_TOPIC_ENABLE: true
-      - image: wurstmeister/kafka:2.12-2.1.0
+      - image: wurstmeister/kafka:2.12-2.1.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.2:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.12-2.2.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.2.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.2.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.3:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.12-2.3.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.3.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.3.1
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.4:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.12-2.4.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.4.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.4.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.5:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.12-2.5.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.5.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.12-2.5.0
         environment:
           KAFKA_ADVERTISED_HOST_NAME: localhost
           KAFKA_ADVERTISED_PORT: 9094
@@ -181,3 +309,7 @@ workflows:
       - kafka-1.1
       - kafka-2.0
       - kafka-2.1
+      - kafka-2.2
+      - kafka-2.3
+      - kafka-2.4
+      - kafka-2.5
data/.github/workflows/stale.yml ADDED
@@ -0,0 +1,19 @@
+name: Mark stale issues and pull requests
+
+on:
+  schedule:
+  - cron: "0 0 * * *"
+
+jobs:
+  stale:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/stale@v1
+      with:
+        repo-token: ${{ secrets.GITHUB_TOKEN }}
+        stale-issue-message: 'Issue has been marked as stale due to a lack of activity.'
+        stale-pr-message: 'Pull request has been marked as stale due to a lack of activity.'
+        stale-issue-label: 'no-issue-activity'
+        stale-pr-label: 'no-pr-activity'
data/CHANGELOG.md CHANGED
@@ -4,6 +4,40 @@ Changes and additions to the library will be listed here.

 ## Unreleased

+## 1.1.0
+
+- Extra sanity checking when marking offsets as processed (#824).
+- Make `verify_hostname` settable for SSL contexts (#828).
+- Instrument `create_time` from last message in batch (#811).
+- Add client function for fetching topic replica count (#822).
+- Allow consumers to refresh the topic lists (#818).
+- Disconnect after leaving a group (#817).
+- Use `max_wait_time` as the sleep instead of hardcoded 2 seconds (#825).
+
+## 1.0.0
+
+- Add client methods to manage configs (#759)
+- Support Kafka 2.3 and 2.4.
+
+## 0.7.10
+
+- Fix logger again (#762)
+
+## 0.7.9
+
+- Fix SSL authentication for ruby < 2.4.0 (#742)
+- Add metrics for prometheus/client (#739)
+- Do not add nil message entries when ignoring old messages (#746)
+- Make SCRAM authentication thread-safe (#743)
+
+## 0.7.8
+- Optionally verify hostname on SSL certs (#733)
+
+## 0.7.7
+- Producer send offsets in transaction (#723)
+- Support zstd compression (#724)
+- Verify SSL certificates (#730)
+
 ## 0.7.6
 - Introduce regex matching in `Consumer#subscribe` (#700)
 - Only rejoin group on error if we're not in shutdown mode (#711)
data/README.md CHANGED
@@ -108,6 +108,26 @@ Or install it yourself as:
     <td>Limited support</td>
     <td>Limited support</td>
   </tr>
+  <tr>
+    <th>Kafka 2.2</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
+  <tr>
+    <th>Kafka 2.3</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
+  <tr>
+    <th>Kafka 2.4</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
+  <tr>
+    <th>Kafka 2.5</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
 </table>

 This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with the v0.5.x series. There's limited support for Kafka 0.8, and things should work with Kafka 0.11, although there may be performance issues due to changes in the protocol.
@@ -119,6 +139,10 @@ This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with t
 - **Kafka 1.0:** Everything that works with Kafka 0.11 should still work, but so far no features specific to Kafka 1.0 have been added.
 - **Kafka 2.0:** Everything that works with Kafka 1.0 should still work, but so far no features specific to Kafka 2.0 have been added.
 - **Kafka 2.1:** Everything that works with Kafka 2.0 should still work, but so far no features specific to Kafka 2.1 have been added.
+- **Kafka 2.2:** Everything that works with Kafka 2.1 should still work, but so far no features specific to Kafka 2.2 have been added.
+- **Kafka 2.3:** Everything that works with Kafka 2.2 should still work, but so far no features specific to Kafka 2.3 have been added.
+- **Kafka 2.4:** Everything that works with Kafka 2.3 should still work, but so far no features specific to Kafka 2.4 have been added.
+- **Kafka 2.5:** Everything that works with Kafka 2.4 should still work, but so far no features specific to Kafka 2.5 have been added.

 This library requires Ruby 2.1 or higher.

@@ -424,6 +448,7 @@ Compression is enabled by passing the `compression_codec` parameter to `#produce
 * `:snappy` for [Snappy](http://google.github.io/snappy/) compression.
 * `:gzip` for [gzip](https://en.wikipedia.org/wiki/Gzip) compression.
 * `:lz4` for [LZ4](https://en.wikipedia.org/wiki/LZ4_(compression_algorithm)) compression.
+* `:zstd` for [zstd](https://facebook.github.io/zstd/) compression.

 By default, all message sets will be compressed if you specify a compression codec. To increase the compression threshold, set `compression_threshold` to an integer value higher than one.

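For reference, enabling the newly added zstd codec on a producer looks like the following minimal sketch (the `:zstd` codec requires the `zstd-ruby` gem; topic name and threshold are illustrative):

```ruby
kafka = Kafka.new(["kafka1:9092"], client_id: "my-app")

# Compress message sets with zstd once at least 10 messages are buffered
# for a partition; smaller sets are sent uncompressed.
producer = kafka.producer(
  compression_codec: :zstd,
  compression_threshold: 10
)

producer.produce("hello", topic: "greetings")
producer.deliver_messages
```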
@@ -927,6 +952,8 @@ This configures the store to look up CA certificates from the system default cer

 In order to authenticate the client to the cluster, you need to pass in a certificate and key created for the client and trusted by the brokers.

+**NOTE**: You can disable hostname validation by passing `verify_hostname: false`.
+
 ```ruby
 kafka = Kafka.new(
   ["kafka1:9092"],
data/lib/kafka/async_producer.rb CHANGED
@@ -103,6 +103,9 @@ module Kafka
     # @raise [BufferOverflow] if the message queue is full.
     # @return [nil]
     def produce(value, topic:, **options)
+      # We want to fail fast if `topic` isn't a String
+      topic = topic.to_str
+
       ensure_threads_running!

       if @queue.size >= @max_queue_size
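Calling `to_str` (rather than `to_s`) makes a non-String topic fail fast in the caller's thread instead of surfacing later on a background delivery thread, because `to_str` is only defined on genuinely string-like objects. The same guard is added to `Client#deliver_message` below. For example:

```ruby
async = kafka.async_producer

# Symbols define to_s but not to_str, so this raises NoMethodError
# immediately in the calling thread:
async.produce("hello", topic: :greetings)

# A real String passes through unchanged:
async.produce("hello", topic: "greetings")
```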
data/lib/kafka/broker.rb CHANGED
@@ -182,6 +182,18 @@ module Kafka
       send_request(request)
     end

+    def add_offsets_to_txn(**options)
+      request = Protocol::AddOffsetsToTxnRequest.new(**options)
+
+      send_request(request)
+    end
+
+    def txn_offset_commit(**options)
+      request = Protocol::TxnOffsetCommitRequest.new(**options)
+
+      send_request(request)
+    end
+
     private

     def send_request(request)
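These two broker methods are the wire-level half of "Producer send offsets in transaction" (#723). At the application level the feature is exercised through the transactional producer; the consume-transform-produce sketch below assumes the producer-side helpers that accompany this change (`transaction` and `send_offsets_to_transaction`) and a hypothetical `transform` function:

```ruby
producer = kafka.producer(
  transactional: true,
  transactional_id: "my-transactional-id"
)

consumer.each_batch do |batch|
  producer.transaction do
    batch.messages.each do |message|
      producer.produce(transform(message.value), topic: "output-topic")
    end
    # Committing the consumed offsets inside the same transaction makes
    # the consume-transform-produce cycle atomic.
    producer.send_offsets_to_transaction(batch: batch, group_id: "my-group")
  end
end
```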
data/lib/kafka/client.rb CHANGED
@@ -65,13 +65,17 @@ module Kafka
     # @param sasl_oauth_token_provider [Object, nil] OAuthBearer Token Provider instance that
     #   implements method token. See {Sasl::OAuth#initialize}
     #
+    # @param verify_hostname [Boolean, true] whether to verify that the host serving
+    #   the SSL certificate and the signing chain of the certificate have the correct domains
+    #   based on the CA certificate
+    #
     # @return [Client]
     def initialize(seed_brokers:, client_id: "ruby-kafka", logger: nil, connect_timeout: nil, socket_timeout: nil,
                    ssl_ca_cert_file_path: nil, ssl_ca_cert: nil, ssl_client_cert: nil, ssl_client_cert_key: nil,
                    ssl_client_cert_key_password: nil, ssl_client_cert_chain: nil, sasl_gssapi_principal: nil,
                    sasl_gssapi_keytab: nil, sasl_plain_authzid: '', sasl_plain_username: nil, sasl_plain_password: nil,
                    sasl_scram_username: nil, sasl_scram_password: nil, sasl_scram_mechanism: nil,
-                   sasl_over_ssl: true, ssl_ca_certs_from_system: false, sasl_oauth_token_provider: nil)
+                   sasl_over_ssl: true, ssl_ca_certs_from_system: false, sasl_oauth_token_provider: nil, ssl_verify_hostname: true)
       @logger = TaggedLogger.new(logger)
       @instrumenter = Instrumenter.new(client_id: client_id)
       @seed_brokers = normalize_seed_brokers(seed_brokers)
@@ -84,6 +88,7 @@ module Kafka
         client_cert_key_password: ssl_client_cert_key_password,
         client_cert_chain: ssl_client_cert_chain,
         ca_certs_from_system: ssl_ca_certs_from_system,
+        verify_hostname: ssl_verify_hostname
       )

       sasl_authenticator = SaslAuthenticator.new(
@@ -137,6 +142,9 @@ module Kafka
     def deliver_message(value, key: nil, headers: {}, topic:, partition: nil, partition_key: nil, retries: 1)
       create_time = Time.now

+      # We want to fail fast if `topic` isn't a String
+      topic = topic.to_str
+
       message = PendingMessage.new(
         value: value,
         key: key,
@@ -233,8 +241,8 @@ module Kafka
     #   result in {BufferOverflow} being raised.
     #
     # @param compression_codec [Symbol, nil] the name of the compression codec to
-    #   use, or nil if no compression should be performed. Valid codecs: `:snappy`
-    #   and `:gzip`.
+    #   use, or nil if no compression should be performed. Valid codecs: `:snappy`,
+    #   `:gzip`, `:lz4`, `:zstd`
     #
     # @param compression_threshold [Integer] the number of messages that needs to
     #   be in a message set before it should be compressed. Note that message sets
@@ -332,15 +340,20 @@ module Kafka
     # @param fetcher_max_queue_size [Integer] max number of items in the fetch queue that
     #   are stored for further processing. Note, that each item in the queue represents a
     #   response from a single broker.
+    # @param refresh_topic_interval [Integer] interval of refreshing the topic list.
+    #   If it is 0, the topic list won't be refreshed (default)
+    #   If it is n (n > 0), the topic list will be refreshed every n seconds
     # @return [Consumer]
     def consumer(
         group_id:,
         session_timeout: 30,
+        rebalance_timeout: 60,
         offset_commit_interval: 10,
         offset_commit_threshold: 0,
         heartbeat_interval: 10,
         offset_retention_time: nil,
-        fetcher_max_queue_size: 100
+        fetcher_max_queue_size: 100,
+        refresh_topic_interval: 0
       )
       cluster = initialize_cluster
@@ -356,6 +369,7 @@ module Kafka
         logger: @logger,
         group_id: group_id,
         session_timeout: session_timeout,
+        rebalance_timeout: rebalance_timeout,
         retention_time: retention_time,
         instrumenter: instrumenter,
       )
@@ -393,6 +407,7 @@ module Kafka
         fetcher: fetcher,
         session_timeout: session_timeout,
         heartbeat: heartbeat,
+        refresh_topic_interval: refresh_topic_interval
       )
     end

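Put together, a consumer that tolerates slow rebalances and picks up newly created topics might be configured as in this sketch (group name, topic pattern, and intervals are illustrative; regex subscriptions combine naturally with topic refreshing):

```ruby
consumer = kafka.consumer(
  group_id: "my-group",
  rebalance_timeout: 120,      # give members up to two minutes to rejoin
  refresh_topic_interval: 30   # re-check the topic list every 30 seconds
)

# With a regex subscription, topics created later that match the pattern
# are picked up on the next refresh.
consumer.subscribe(/events-.*/)

consumer.each_message do |message|
  puts message.value
end
```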
@@ -529,6 +544,24 @@ module Kafka
       end
     end

+    # Describe broker configs
+    #
+    # @param broker_id [int] the id of the broker
+    # @param configs [Array] array of config keys.
+    # @return [Array<Kafka::Protocol::DescribeConfigsResponse::ConfigEntry>]
+    def describe_configs(broker_id, configs = [])
+      @cluster.describe_configs(broker_id, configs)
+    end
+
+    # Alter broker configs
+    #
+    # @param broker_id [int] the id of the broker
+    # @param configs [Array] array of config strings.
+    # @return [nil]
+    def alter_configs(broker_id, configs = [])
+      @cluster.alter_configs(broker_id, configs)
+    end
+
     # Creates a topic in the cluster.
     #
     # @example Creating a topic with log compaction
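A sketch of using the new config helpers against broker 1: `describe_configs` takes config key names per the docstring, while the exact entry format for `alter_configs` is not shown in this hunk, so the name/value pairs below are an assumption:

```ruby
# Read selected config entries from broker 1.
entries = kafka.describe_configs(1, ["max.connections.per.ip"])
entries.each do |entry|
  puts "#{entry.name} = #{entry.value}"
end

# Update a config entry on broker 1. The [name, value] pair format is an
# assumption; the docstring only says "array of config strings".
kafka.alter_configs(1, [["max.connections.per.ip", "100"]])
```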
@@ -614,6 +647,14 @@ module Kafka
       @cluster.describe_group(group_id)
     end

+    # Fetch all committed offsets for a consumer group
+    #
+    # @param group_id [String] the id of the consumer group
+    # @return [Hash<String, Hash<Integer, Kafka::Protocol::OffsetFetchResponse::PartitionOffsetInfo>>]
+    def fetch_group_offsets(group_id)
+      @cluster.fetch_group_offsets(group_id)
+    end
+
     # Create partitions for a topic.
     #
     # @param name [String] the name of the topic.
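The return value is nested topic → partition → offset info, so dumping a group's committed offsets is two loops. A sketch, where the `offset` reader on `PartitionOffsetInfo` is assumed from the response class named in the docstring:

```ruby
offsets = kafka.fetch_group_offsets("my-group")

offsets.each do |topic, partitions|
  partitions.each do |partition, info|
    puts "#{topic}/#{partition}: committed offset #{info.offset}"
  end
end
```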
@@ -662,6 +703,14 @@ module Kafka
       @cluster.partitions_for(topic).count
     end

+    # Counts the number of replicas for a topic's partition
+    #
+    # @param topic [String]
+    # @return [Integer] the number of replica nodes for the topic's partition
+    def replica_count_for(topic)
+      @cluster.partitions_for(topic).first.replicas.count
+    end
+
     # Retrieve the offset of the last message in a partition. If there are no
     # messages in the partition -1 is returned.
     #
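Since `replica_count_for` inspects only the first partition, it reports the topic's replication factor under the usual assumption that all partitions share one. A quick sketch (topic name illustrative):

```ruby
kafka.replica_count_for("greetings")
# => 3, for a topic created with replication_factor: 3
```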