ruby-kafka 0.7.10 → 1.3.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 5aa84a39ade791c3bce57152d426e95cb7d29575af3e8a9cb29fcded1c6cdad5
4
- data.tar.gz: 46be738fb1a2b71bbbd5d5d3b2fc95b705aef5740f666095eeaf40c9a6980fc4
3
+ metadata.gz: b16e9e52014e784610725bb2ba5c5a431694ce6da7878b6c1ff024504d6ef6c4
4
+ data.tar.gz: '039026011e9cd5e5dce59aed4f53b2a8970aa8025fe3b008620bdef8a802bbf5'
5
5
  SHA512:
6
- metadata.gz: 42e8059ab9b49e16a8e9c70e5739c9694d2142d5d440f8e2c18477cc151a24183536ae1548aed66a7e7ee6ea64c96ff2c9aacb63366ed92bd1612af6e52d8300
7
- data.tar.gz: 5008c803cc61fbea993f7f2a22ca7ac3ebb448f91cbdca40d540b9f4604dab8b4e38fcf4b20006a16641b5239e78d6045ccbec70168171219b716090ea72ba09
6
+ metadata.gz: 1d13c2032f4bd38e09a714fe40d59467ff61b2c51e0a604daf7f9e1a3431af32f77e62433b863862a68e6bc74f53ff4eceed7e88145acc102613ac6fa2600b9f
7
+ data.tar.gz: f424e6e2bee5f318766880f3b7eb338ce706783ea647908902a8a6d409fcf8f9ac3f0afe7ca8a932710a9d8f192e44a25a346da6639861fb7d377f3dd3257309
@@ -203,6 +203,102 @@ jobs:
203
203
  - run: bundle install --path vendor/bundle
204
204
  - run: bundle exec rspec --profile --tag functional spec/functional
205
205
 
206
+ kafka-2.3:
207
+ docker:
208
+ - image: circleci/ruby:2.5.1-node
209
+ environment:
210
+ LOG_LEVEL: DEBUG
211
+ - image: wurstmeister/zookeeper
212
+ - image: wurstmeister/kafka:2.12-2.3.1
213
+ environment:
214
+ KAFKA_ADVERTISED_HOST_NAME: localhost
215
+ KAFKA_ADVERTISED_PORT: 9092
216
+ KAFKA_PORT: 9092
217
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
218
+ KAFKA_DELETE_TOPIC_ENABLE: true
219
+ - image: wurstmeister/kafka:2.12-2.3.1
220
+ environment:
221
+ KAFKA_ADVERTISED_HOST_NAME: localhost
222
+ KAFKA_ADVERTISED_PORT: 9093
223
+ KAFKA_PORT: 9093
224
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
225
+ KAFKA_DELETE_TOPIC_ENABLE: true
226
+ - image: wurstmeister/kafka:2.12-2.3.1
227
+ environment:
228
+ KAFKA_ADVERTISED_HOST_NAME: localhost
229
+ KAFKA_ADVERTISED_PORT: 9094
230
+ KAFKA_PORT: 9094
231
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
232
+ KAFKA_DELETE_TOPIC_ENABLE: true
233
+ steps:
234
+ - checkout
235
+ - run: bundle install --path vendor/bundle
236
+ - run: bundle exec rspec --profile --tag functional spec/functional
237
+
238
+ kafka-2.4:
239
+ docker:
240
+ - image: circleci/ruby:2.5.1-node
241
+ environment:
242
+ LOG_LEVEL: DEBUG
243
+ - image: wurstmeister/zookeeper
244
+ - image: wurstmeister/kafka:2.12-2.4.0
245
+ environment:
246
+ KAFKA_ADVERTISED_HOST_NAME: localhost
247
+ KAFKA_ADVERTISED_PORT: 9092
248
+ KAFKA_PORT: 9092
249
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
250
+ KAFKA_DELETE_TOPIC_ENABLE: true
251
+ - image: wurstmeister/kafka:2.12-2.4.0
252
+ environment:
253
+ KAFKA_ADVERTISED_HOST_NAME: localhost
254
+ KAFKA_ADVERTISED_PORT: 9093
255
+ KAFKA_PORT: 9093
256
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
257
+ KAFKA_DELETE_TOPIC_ENABLE: true
258
+ - image: wurstmeister/kafka:2.12-2.4.0
259
+ environment:
260
+ KAFKA_ADVERTISED_HOST_NAME: localhost
261
+ KAFKA_ADVERTISED_PORT: 9094
262
+ KAFKA_PORT: 9094
263
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
264
+ KAFKA_DELETE_TOPIC_ENABLE: true
265
+ steps:
266
+ - checkout
267
+ - run: bundle install --path vendor/bundle
268
+ - run: bundle exec rspec --profile --tag functional spec/functional
269
+
270
+ kafka-2.5:
271
+ docker:
272
+ - image: circleci/ruby:2.5.1-node
273
+ environment:
274
+ LOG_LEVEL: DEBUG
275
+ - image: wurstmeister/zookeeper
276
+ - image: wurstmeister/kafka:2.12-2.5.0
277
+ environment:
278
+ KAFKA_ADVERTISED_HOST_NAME: localhost
279
+ KAFKA_ADVERTISED_PORT: 9092
280
+ KAFKA_PORT: 9092
281
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
282
+ KAFKA_DELETE_TOPIC_ENABLE: true
283
+ - image: wurstmeister/kafka:2.12-2.5.0
284
+ environment:
285
+ KAFKA_ADVERTISED_HOST_NAME: localhost
286
+ KAFKA_ADVERTISED_PORT: 9093
287
+ KAFKA_PORT: 9093
288
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
289
+ KAFKA_DELETE_TOPIC_ENABLE: true
290
+ - image: wurstmeister/kafka:2.12-2.5.0
291
+ environment:
292
+ KAFKA_ADVERTISED_HOST_NAME: localhost
293
+ KAFKA_ADVERTISED_PORT: 9094
294
+ KAFKA_PORT: 9094
295
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
296
+ KAFKA_DELETE_TOPIC_ENABLE: true
297
+ steps:
298
+ - checkout
299
+ - run: bundle install --path vendor/bundle
300
+ - run: bundle exec rspec --profile --tag functional spec/functional
301
+
206
302
  workflows:
207
303
  version: 2
208
304
  test:
@@ -214,3 +310,6 @@ workflows:
214
310
  - kafka-2.0
215
311
  - kafka-2.1
216
312
  - kafka-2.2
313
+ - kafka-2.3
314
+ - kafka-2.4
315
+ - kafka-2.5
@@ -0,0 +1,19 @@
1
+ name: Mark stale issues and pull requests
2
+
3
+ on:
4
+ schedule:
5
+ - cron: "0 0 * * *"
6
+
7
+ jobs:
8
+ stale:
9
+
10
+ runs-on: ubuntu-latest
11
+
12
+ steps:
13
+ - uses: actions/stale@v1
14
+ with:
15
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
16
+ stale-issue-message: 'Issue has been marked as stale due to a lack of activity.'
17
+ stale-pr-message: 'Pull request has been marked as stale due to a lack of activity.'
18
+ stale-issue-label: 'no-issue-activity'
19
+ stale-pr-label: 'no-pr-activity'
@@ -1 +1 @@
1
- 2.5.1
1
+ 2.7.1
@@ -2,6 +2,33 @@
2
2
 
3
3
  Changes and additions to the library will be listed here.
4
4
 
5
+ ## Unreleased
6
+
7
+ ## 1.3.0
8
+
9
+ - Support custom assignment strategy (#846).
10
+ - Improved Exceptions in TransactionManager (#862).
11
+
12
+ ## 1.2.0
13
+
14
+ - Add producer consumer interceptors (#837).
15
+ - Add support for configuring the client partitioner (#848).
16
+
17
+ ## 1.1.0
18
+
19
+ - Extra sanity checking when marking offsets as processed (#824).
20
+ - Make `verify_hostname` settable for SSL contexts (#828).
21
+ - Instrument `create_time` from last message in batch (#811).
22
+ - Add client function for fetching topic replica count (#822).
23
+ - Allow consumers to refresh the topic lists (#818).
24
+ - Disconnect after leaving a group (#817).
25
+ - Use `max_wait_time` as the sleep instead of hardcoded 2 seconds (#825).
26
+
27
+ ## 1.0.0
28
+
29
+ - Add client methods to manage configs (#759)
30
+ - Support Kafka 2.3 and 2.4.
31
+
5
32
  ## 0.7.10
6
33
 
7
34
  - Fix logger again (#762)
data/README.md CHANGED
@@ -26,6 +26,7 @@ A Ruby client library for [Apache Kafka](http://kafka.apache.org/), a distribute
26
26
  4. [Shutting Down a Consumer](#shutting-down-a-consumer)
27
27
  5. [Consuming Messages in Batches](#consuming-messages-in-batches)
28
28
  6. [Balancing Throughput and Latency](#balancing-throughput-and-latency)
29
+ 7. [Customizing Partition Assignment Strategy](#customizing-partition-assignment-strategy)
29
30
  4. [Thread Safety](#thread-safety)
30
31
  5. [Logging](#logging)
31
32
  6. [Instrumentation](#instrumentation)
@@ -113,6 +114,21 @@ Or install it yourself as:
113
114
  <td>Limited support</td>
114
115
  <td>Limited support</td>
115
116
  </tr>
117
+ <tr>
118
+ <th>Kafka 2.3</th>
119
+ <td>Limited support</td>
120
+ <td>Limited support</td>
121
+ </tr>
122
+ <tr>
123
+ <th>Kafka 2.4</th>
124
+ <td>Limited support</td>
125
+ <td>Limited support</td>
126
+ </tr>
127
+ <tr>
128
+ <th>Kafka 2.5</th>
129
+ <td>Limited support</td>
130
+ <td>Limited support</td>
131
+ </tr>
116
132
  </table>
117
133
 
118
134
  This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with the v0.5.x series. There's limited support for Kafka 0.8, and things should work with Kafka 0.11, although there may be performance issues due to changes in the protocol.
@@ -124,6 +140,10 @@ This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with t
124
140
  - **Kafka 1.0:** Everything that works with Kafka 0.11 should still work, but so far no features specific to Kafka 1.0 have been added.
125
141
  - **Kafka 2.0:** Everything that works with Kafka 1.0 should still work, but so far no features specific to Kafka 2.0 have been added.
126
142
  - **Kafka 2.1:** Everything that works with Kafka 2.0 should still work, but so far no features specific to Kafka 2.1 have been added.
143
+ - **Kafka 2.2:** Everything that works with Kafka 2.1 should still work, but so far no features specific to Kafka 2.2 have been added.
144
+ - **Kafka 2.3:** Everything that works with Kafka 2.2 should still work, but so far no features specific to Kafka 2.3 have been added.
145
+ - **Kafka 2.4:** Everything that works with Kafka 2.3 should still work, but so far no features specific to Kafka 2.4 have been added.
146
+ - **Kafka 2.5:** Everything that works with Kafka 2.4 should still work, but so far no features specific to Kafka 2.5 have been added.
127
147
 
128
148
  This library requires Ruby 2.1 or higher.
129
149
 
@@ -330,6 +350,26 @@ partition = PartitioningScheme.assign(partitions, event)
330
350
  producer.produce(event, topic: "events", partition: partition)
331
351
  ```
332
352
 
353
+ Another option is to configure a custom client partitioner that implements `call(partition_count, message)` and uses the same schema as the other client. For example:
354
+
355
+ ```ruby
356
+ class CustomPartitioner
357
+ def call(partition_count, message)
358
+ ...
359
+ end
360
+ end
361
+
362
+ partitioner = CustomPartitioner.new
363
+ Kafka.new(partitioner: partitioner, ...)
364
+ ```
365
+
366
+ Or, simply create a Proc handling the partitioning logic instead of having to add a new class. For example:
367
+
368
+ ```ruby
369
+ partitioner = -> (partition_count, message) { ... }
370
+ Kafka.new(partitioner: partitioner, ...)
371
+ ```
372
+
333
373
  #### Buffering and Error Handling
334
374
 
335
375
  The producer is designed for resilience in the face of temporary network errors, Kafka broker failovers, and other issues that prevent the client from writing messages to the destination topics. It does this by employing local, in-memory buffers. Only when messages are acknowledged by a Kafka broker will they be removed from the buffer.
@@ -704,6 +744,88 @@ consumer.each_message do |message|
704
744
  end
705
745
  ```
706
746
 
747
+ #### Customizing Partition Assignment Strategy
748
+
749
+ In some cases, you might want to assign more partitions to some consumers. For example, in applications inserting records into a database, the consumers running on hosts near the database can process more messages than the consumers running on other hosts.
750
+ You can use a custom assignment strategy by passing an object that implements `#call` as the argument `assignment_strategy` like below:
751
+
752
+ ```ruby
753
+ class CustomAssignmentStrategy
754
+ def initialize(user_data)
755
+ @user_data = user_data
756
+ end
757
+
758
+ # Assign the topic partitions to the group members.
759
+ #
760
+ # @param cluster [Kafka::Cluster]
761
+ # @param members [Hash<String, Kafka::Protocol::JoinGroupResponse::Metadata>] a hash
762
+ # mapping member ids to metadata
763
+ # @param partitions [Array<Kafka::ConsumerGroup::Assignor::Partition>] a list of
764
+ # partitions the consumer group processes
765
+ # @return [Hash<String, Array<Kafka::ConsumerGroup::Assignor::Partition>] a hash
766
+ # mapping member ids to partitions.
767
+ def call(cluster:, members:, partitions:)
768
+ ...
769
+ end
770
+ end
771
+
772
+ strategy = CustomAssignmentStrategy.new("some-host-information")
773
+ consumer = kafka.consumer(group_id: "some-group", assignment_strategy: strategy)
774
+ ```
775
+
776
+ `members` is a hash mapping member IDs to metadata, and `partitions` is a list of partitions the consumer group processes. The method `call` must return a hash mapping member IDs to partitions. For example, the following strategy assigns partitions randomly:
777
+
778
+ ```ruby
779
+ class RandomAssignmentStrategy
780
+ def call(cluster:, members:, partitions:)
781
+ member_ids = members.keys
782
+ partitions.each_with_object(Hash.new {|h, k| h[k] = [] }) do |partition, partitions_per_member|
783
+ partitions_per_member[member_ids[rand(member_ids.count)]] << partition
784
+ end
785
+ end
786
+ end
787
+ ```
788
+
789
+ If the strategy needs user data, you should define the method `user_data` that returns user data on each consumer. For example, the following strategy uses the consumers' IP addresses as user data:
790
+
791
+ ```ruby
792
+ class NetworkTopologyAssignmentStrategy
793
+ def user_data
794
+ Socket.ip_address_list.find(&:ipv4_private?).ip_address
795
+ end
796
+
797
+ def call(cluster:, members:, partitions:)
798
+ # Display the pair of the member ID and IP address
799
+ members.each do |id, metadata|
800
+ puts "#{id}: #{metadata.user_data}"
801
+ end
802
+
803
+ # Assign partitions considering the network topology
804
+ ...
805
+ end
806
+ end
807
+ ```
808
+
809
+ Note that the strategy uses the class name as the default protocol name. You can change it by defining the method `protocol_name`:
810
+
811
+ ```ruby
812
+ class NetworkTopologyAssignmentStrategy
813
+ def protocol_name
814
+ "networktopology"
815
+ end
816
+
817
+ def user_data
818
+ Socket.ip_address_list.find(&:ipv4_private?).ip_address
819
+ end
820
+
821
+ def call(cluster:, members:, partitions:)
822
+ ...
823
+ end
824
+ end
825
+ ```
826
+
827
+ As the method `call` might receive different user data from what it expects, you should avoid using the same protocol name as another strategy that uses different user data.
828
+
707
829
 
708
830
  ### Thread Safety
709
831
 
@@ -933,6 +1055,8 @@ This configures the store to look up CA certificates from the system default cer
933
1055
 
934
1056
  In order to authenticate the client to the cluster, you need to pass in a certificate and key created for the client and trusted by the brokers.
935
1057
 
1058
+ **NOTE**: You can disable hostname validation by passing `ssl_verify_hostname: false`.
1059
+
936
1060
  ```ruby
937
1061
  kafka = Kafka.new(
938
1062
  ["kafka1:9092"],
@@ -940,6 +1064,7 @@ kafka = Kafka.new(
940
1064
  ssl_client_cert: File.read('my_client_cert.pem'),
941
1065
  ssl_client_cert_key: File.read('my_client_cert_key.pem'),
942
1066
  ssl_client_cert_key_password: 'my_client_cert_key_password',
1067
+ ssl_verify_hostname: false,
943
1068
  # ...
944
1069
  )
945
1070
  ```
@@ -59,8 +59,6 @@ module Kafka
59
59
  # producer.shutdown
60
60
  #
61
61
  class AsyncProducer
62
- THREAD_MUTEX = Mutex.new
63
-
64
62
  # Initializes a new AsyncProducer.
65
63
  #
66
64
  # @param sync_producer [Kafka::Producer] the synchronous producer that should
@@ -94,6 +92,8 @@ module Kafka
94
92
 
95
93
  # The timer will no-op if the delivery interval is zero.
96
94
  @timer = Timer.new(queue: @queue, interval: delivery_interval)
95
+
96
+ @thread_mutex = Mutex.new
97
97
  end
98
98
 
99
99
  # Produces a message to the specified topic.
@@ -103,6 +103,9 @@ module Kafka
103
103
  # @raise [BufferOverflow] if the message queue is full.
104
104
  # @return [nil]
105
105
  def produce(value, topic:, **options)
106
+ # We want to fail fast if `topic` isn't a String
107
+ topic = topic.to_str
108
+
106
109
  ensure_threads_running!
107
110
 
108
111
  if @queue.size >= @max_queue_size
@@ -128,6 +131,8 @@ module Kafka
128
131
  # @see Kafka::Producer#deliver_messages
129
132
  # @return [nil]
130
133
  def deliver_messages
134
+ ensure_threads_running!
135
+
131
136
  @queue << [:deliver_messages, nil]
132
137
 
133
138
  nil
@@ -139,6 +144,8 @@ module Kafka
139
144
  # @see Kafka::Producer#shutdown
140
145
  # @return [nil]
141
146
  def shutdown
147
+ ensure_threads_running!
148
+
142
149
  @timer_thread && @timer_thread.exit
143
150
  @queue << [:shutdown, nil]
144
151
  @worker_thread && @worker_thread.join
@@ -149,17 +156,22 @@ module Kafka
149
156
  private
150
157
 
151
158
  def ensure_threads_running!
152
- THREAD_MUTEX.synchronize do
153
- @worker_thread = nil unless @worker_thread && @worker_thread.alive?
154
- @worker_thread ||= Thread.new { @worker.run }
155
- end
159
+ return if worker_thread_alive? && timer_thread_alive?
156
160
 
157
- THREAD_MUTEX.synchronize do
158
- @timer_thread = nil unless @timer_thread && @timer_thread.alive?
159
- @timer_thread ||= Thread.new { @timer.run }
161
+ @thread_mutex.synchronize do
162
+ @worker_thread = Thread.new { @worker.run } unless worker_thread_alive?
163
+ @timer_thread = Thread.new { @timer.run } unless timer_thread_alive?
160
164
  end
161
165
  end
162
166
 
167
+ def worker_thread_alive?
168
+ !!@worker_thread && @worker_thread.alive?
169
+ end
170
+
171
+ def timer_thread_alive?
172
+ !!@timer_thread && @timer_thread.alive?
173
+ end
174
+
163
175
  def buffer_overflow(topic, message)
164
176
  @instrumenter.instrument("buffer_overflow.async_producer", {
165
177
  topic: topic,
@@ -205,7 +217,7 @@ module Kafka
205
217
 
206
218
  case operation
207
219
  when :produce
208
- produce(*payload)
220
+ produce(payload[0], **payload[1])
209
221
  deliver_messages if threshold_reached?
210
222
  when :deliver_messages
211
223
  deliver_messages
@@ -243,10 +255,10 @@ module Kafka
243
255
 
244
256
  private
245
257
 
246
- def produce(*args)
258
+ def produce(value, **kwargs)
247
259
  retries = 0
248
260
  begin
249
- @producer.produce(*args)
261
+ @producer.produce(value, **kwargs)
250
262
  rescue BufferOverflow => e
251
263
  deliver_messages
252
264
  if @max_retries == -1
@@ -62,16 +62,25 @@ module Kafka
62
62
  #
63
63
  # @param sasl_over_ssl [Boolean] whether to enforce SSL with SASL
64
64
  #
65
+ # @param ssl_ca_certs_from_system [Boolean] whether to use the CA certs from the
66
+ # system's default certificate store.
67
+ #
68
+ # @param partitioner [Partitioner, nil] the partitioner that should be used by the client.
69
+ #
65
70
  # @param sasl_oauth_token_provider [Object, nil] OAuthBearer Token Provider instance that
66
71
  # implements method token. See {Sasl::OAuth#initialize}
67
72
  #
73
+ # @param ssl_verify_hostname [Boolean, true] whether to verify that the host serving
74
+ # the SSL certificate and the signing chain of the certificate have the correct domains
75
+ # based on the CA certificate
76
+ #
68
77
  # @return [Client]
69
78
  def initialize(seed_brokers:, client_id: "ruby-kafka", logger: nil, connect_timeout: nil, socket_timeout: nil,
70
79
  ssl_ca_cert_file_path: nil, ssl_ca_cert: nil, ssl_client_cert: nil, ssl_client_cert_key: nil,
71
80
  ssl_client_cert_key_password: nil, ssl_client_cert_chain: nil, sasl_gssapi_principal: nil,
72
81
  sasl_gssapi_keytab: nil, sasl_plain_authzid: '', sasl_plain_username: nil, sasl_plain_password: nil,
73
82
  sasl_scram_username: nil, sasl_scram_password: nil, sasl_scram_mechanism: nil,
74
- sasl_over_ssl: true, ssl_ca_certs_from_system: false, sasl_oauth_token_provider: nil, ssl_verify_hostname: true)
83
+ sasl_over_ssl: true, ssl_ca_certs_from_system: false, partitioner: nil, sasl_oauth_token_provider: nil, ssl_verify_hostname: true)
75
84
  @logger = TaggedLogger.new(logger)
76
85
  @instrumenter = Instrumenter.new(client_id: client_id)
77
86
  @seed_brokers = normalize_seed_brokers(seed_brokers)
@@ -115,6 +124,7 @@ module Kafka
115
124
  )
116
125
 
117
126
  @cluster = initialize_cluster
127
+ @partitioner = partitioner || Partitioner.new
118
128
  end
119
129
 
120
130
  # Delivers a single message to the Kafka cluster.
@@ -138,6 +148,9 @@ module Kafka
138
148
  def deliver_message(value, key: nil, headers: {}, topic:, partition: nil, partition_key: nil, retries: 1)
139
149
  create_time = Time.now
140
150
 
151
+ # We want to fail fast if `topic` isn't a String
152
+ topic = topic.to_str
153
+
141
154
  message = PendingMessage.new(
142
155
  value: value,
143
156
  key: key,
@@ -150,7 +163,7 @@ module Kafka
150
163
 
151
164
  if partition.nil?
152
165
  partition_count = @cluster.partitions_for(topic).count
153
- partition = Partitioner.partition_for_key(partition_count, message)
166
+ partition = @partitioner.call(partition_count, message)
154
167
  end
155
168
 
156
169
  buffer = MessageBuffer.new
@@ -241,6 +254,9 @@ module Kafka
241
254
  # be in a message set before it should be compressed. Note that message sets
242
255
  # are per-partition rather than per-topic or per-producer.
243
256
  #
257
+ # @param interceptors [Array<Object>] a list of producer interceptors that implement
258
+ # `call(Kafka::PendingMessage)`.
259
+ #
244
260
  # @return [Kafka::Producer] the Kafka producer.
245
261
  def producer(
246
262
  compression_codec: nil,
@@ -254,7 +270,8 @@ module Kafka
254
270
  idempotent: false,
255
271
  transactional: false,
256
272
  transactional_id: nil,
257
- transactional_timeout: 60
273
+ transactional_timeout: 60,
274
+ interceptors: []
258
275
  )
259
276
  cluster = initialize_cluster
260
277
  compressor = Compressor.new(
@@ -284,6 +301,8 @@ module Kafka
284
301
  retry_backoff: retry_backoff,
285
302
  max_buffer_size: max_buffer_size,
286
303
  max_buffer_bytesize: max_buffer_bytesize,
304
+ partitioner: @partitioner,
305
+ interceptors: interceptors
287
306
  )
288
307
  end
289
308
 
@@ -333,15 +352,26 @@ module Kafka
333
352
  # @param fetcher_max_queue_size [Integer] max number of items in the fetch queue that
334
353
  # are stored for further processing. Note, that each item in the queue represents a
335
354
  # response from a single broker.
355
+ # @param refresh_topic_interval [Integer] interval of refreshing the topic list.
356
+ # If it is 0, the topic list won't be refreshed (default)
357
+ # If it is n (n > 0), the topic list will be refreshed every n seconds
358
+ # @param interceptors [Array<Object>] a list of consumer interceptors that implement
359
+ # `call(Kafka::FetchedBatch)`.
360
+ # @param assignment_strategy [Object] a partition assignment strategy that
361
+ # implements `protocol_name()`, `user_data()`, and `call(cluster:, members:, partitions:)`
336
362
  # @return [Consumer]
337
363
  def consumer(
338
364
  group_id:,
339
365
  session_timeout: 30,
366
+ rebalance_timeout: 60,
340
367
  offset_commit_interval: 10,
341
368
  offset_commit_threshold: 0,
342
369
  heartbeat_interval: 10,
343
370
  offset_retention_time: nil,
344
- fetcher_max_queue_size: 100
371
+ fetcher_max_queue_size: 100,
372
+ refresh_topic_interval: 0,
373
+ interceptors: [],
374
+ assignment_strategy: nil
345
375
  )
346
376
  cluster = initialize_cluster
347
377
 
@@ -357,8 +387,10 @@ module Kafka
357
387
  logger: @logger,
358
388
  group_id: group_id,
359
389
  session_timeout: session_timeout,
390
+ rebalance_timeout: rebalance_timeout,
360
391
  retention_time: retention_time,
361
392
  instrumenter: instrumenter,
393
+ assignment_strategy: assignment_strategy
362
394
  )
363
395
 
364
396
  fetcher = Fetcher.new(
@@ -394,6 +426,8 @@ module Kafka
394
426
  fetcher: fetcher,
395
427
  session_timeout: session_timeout,
396
428
  heartbeat: heartbeat,
429
+ refresh_topic_interval: refresh_topic_interval,
430
+ interceptors: interceptors
397
431
  )
398
432
  end
399
433
 
@@ -530,6 +564,24 @@ module Kafka
530
564
  end
531
565
  end
532
566
 
567
+ # Describe broker configs
568
+ #
569
+ # @param broker_id [Integer] the id of the broker
570
+ # @param configs [Array] array of config keys.
571
+ # @return [Array<Kafka::Protocol::DescribeConfigsResponse::ConfigEntry>]
572
+ def describe_configs(broker_id, configs = [])
573
+ @cluster.describe_configs(broker_id, configs)
574
+ end
575
+
576
+ # Alter broker configs
577
+ #
578
+ # @param broker_id [Integer] the id of the broker
579
+ # @param configs [Array] array of config strings.
580
+ # @return [nil]
581
+ def alter_configs(broker_id, configs = [])
582
+ @cluster.alter_configs(broker_id, configs)
583
+ end
584
+
533
585
  # Creates a topic in the cluster.
534
586
  #
535
587
  # @example Creating a topic with log compaction
@@ -615,6 +667,14 @@ module Kafka
615
667
  @cluster.describe_group(group_id)
616
668
  end
617
669
 
670
+ # Fetch all committed offsets for a consumer group
671
+ #
672
+ # @param group_id [String] the id of the consumer group
673
+ # @return [Hash<String, Hash<Integer, Kafka::Protocol::OffsetFetchResponse::PartitionOffsetInfo>>]
674
+ def fetch_group_offsets(group_id)
675
+ @cluster.fetch_group_offsets(group_id)
676
+ end
677
+
618
678
  # Create partitions for a topic.
619
679
  #
620
680
  # @param name [String] the name of the topic.
@@ -663,6 +723,14 @@ module Kafka
663
723
  @cluster.partitions_for(topic).count
664
724
  end
665
725
 
726
+ # Counts the number of replicas for a topic's partition
727
+ #
728
+ # @param topic [String]
729
+ # @return [Integer] the number of replica nodes for the topic's partition
730
+ def replica_count_for(topic)
731
+ @cluster.partitions_for(topic).first.replicas.count
732
+ end
733
+
666
734
  # Retrieve the offset of the last message in a partition. If there are no
667
735
  # messages in the partition -1 is returned.
668
736
  #