ruby-kafka 0.7.10 → 1.5.0

Files changed (42)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +179 -0
  3. data/.github/workflows/stale.yml +19 -0
  4. data/.ruby-version +1 -1
  5. data/CHANGELOG.md +40 -0
  6. data/README.md +167 -0
  7. data/lib/kafka/async_producer.rb +60 -42
  8. data/lib/kafka/client.rb +92 -6
  9. data/lib/kafka/cluster.rb +82 -24
  10. data/lib/kafka/connection.rb +3 -0
  11. data/lib/kafka/consumer.rb +61 -11
  12. data/lib/kafka/consumer_group/assignor.rb +63 -0
  13. data/lib/kafka/consumer_group.rb +29 -6
  14. data/lib/kafka/crc32_hash.rb +15 -0
  15. data/lib/kafka/datadog.rb +20 -13
  16. data/lib/kafka/digest.rb +22 -0
  17. data/lib/kafka/fetcher.rb +5 -2
  18. data/lib/kafka/interceptors.rb +33 -0
  19. data/lib/kafka/murmur2_hash.rb +17 -0
  20. data/lib/kafka/offset_manager.rb +12 -1
  21. data/lib/kafka/partitioner.rb +8 -3
  22. data/lib/kafka/producer.rb +13 -5
  23. data/lib/kafka/prometheus.rb +78 -79
  24. data/lib/kafka/protocol/add_offsets_to_txn_response.rb +2 -0
  25. data/lib/kafka/protocol/encoder.rb +1 -1
  26. data/lib/kafka/protocol/join_group_request.rb +8 -2
  27. data/lib/kafka/protocol/join_group_response.rb +9 -1
  28. data/lib/kafka/protocol/metadata_response.rb +1 -1
  29. data/lib/kafka/protocol/offset_fetch_request.rb +3 -1
  30. data/lib/kafka/protocol/record_batch.rb +2 -2
  31. data/lib/kafka/protocol/sasl_handshake_request.rb +1 -1
  32. data/lib/kafka/protocol/sync_group_response.rb +5 -2
  33. data/lib/kafka/protocol/txn_offset_commit_response.rb +34 -5
  34. data/lib/kafka/round_robin_assignment_strategy.rb +37 -39
  35. data/lib/kafka/sasl/awsmskiam.rb +133 -0
  36. data/lib/kafka/sasl_authenticator.rb +15 -2
  37. data/lib/kafka/ssl_context.rb +6 -5
  38. data/lib/kafka/tagged_logger.rb +1 -0
  39. data/lib/kafka/transaction_manager.rb +30 -10
  40. data/lib/kafka/version.rb +1 -1
  41. data/ruby-kafka.gemspec +5 -4
  42. metadata +39 -13
data/lib/kafka/async_producer.rb CHANGED
@@ -59,8 +59,6 @@ module Kafka
   #   producer.shutdown
   #
   class AsyncProducer
-    THREAD_MUTEX = Mutex.new
-
     # Initializes a new AsyncProducer.
     #
     # @param sync_producer [Kafka::Producer] the synchronous producer that should
@@ -94,6 +92,8 @@ module Kafka

       # The timer will no-op if the delivery interval is zero.
       @timer = Timer.new(queue: @queue, interval: delivery_interval)
+
+      @thread_mutex = Mutex.new
     end

     # Produces a message to the specified topic.
@@ -103,6 +103,9 @@ module Kafka
     # @raise [BufferOverflow] if the message queue is full.
     # @return [nil]
     def produce(value, topic:, **options)
+      # We want to fail fast if `topic` isn't a String
+      topic = topic.to_str
+
       ensure_threads_running!

       if @queue.size >= @max_queue_size
@@ -128,6 +131,8 @@ module Kafka
     # @see Kafka::Producer#deliver_messages
     # @return [nil]
     def deliver_messages
+      ensure_threads_running!
+
       @queue << [:deliver_messages, nil]

       nil
@@ -139,6 +144,8 @@ module Kafka
     # @see Kafka::Producer#shutdown
     # @return [nil]
     def shutdown
+      ensure_threads_running!
+
       @timer_thread && @timer_thread.exit
       @queue << [:shutdown, nil]
       @worker_thread && @worker_thread.join
@@ -149,17 +156,22 @@ module Kafka
     private

     def ensure_threads_running!
-      THREAD_MUTEX.synchronize do
-        @worker_thread = nil unless @worker_thread && @worker_thread.alive?
-        @worker_thread ||= Thread.new { @worker.run }
-      end
+      return if worker_thread_alive? && timer_thread_alive?

-      THREAD_MUTEX.synchronize do
-        @timer_thread = nil unless @timer_thread && @timer_thread.alive?
-        @timer_thread ||= Thread.new { @timer.run }
+      @thread_mutex.synchronize do
+        @worker_thread = Thread.new { @worker.run } unless worker_thread_alive?
+        @timer_thread = Thread.new { @timer.run } unless timer_thread_alive?
       end
     end

+    def worker_thread_alive?
+      !!@worker_thread && @worker_thread.alive?
+    end
+
+    def timer_thread_alive?
+      !!@timer_thread && @timer_thread.alive?
+    end
+
     def buffer_overflow(topic, message)
       @instrumenter.instrument("buffer_overflow.async_producer", {
         topic: topic,
@@ -200,31 +212,45 @@ module Kafka
        @logger.push_tags(@producer.to_s)
        @logger.info "Starting async producer in the background..."

+        do_loop
+      rescue Exception => e
+        @logger.error "Unexpected Kafka error #{e.class}: #{e.message}\n#{e.backtrace.join("\n")}"
+        @logger.error "Async producer crashed!"
+      ensure
+        @producer.shutdown
+        @logger.pop_tags
+      end
+
+      private
+
+      def do_loop
        loop do
-          operation, payload = @queue.pop
-
-          case operation
-          when :produce
-            produce(*payload)
-            deliver_messages if threshold_reached?
-          when :deliver_messages
-            deliver_messages
-          when :shutdown
-            begin
-              # Deliver any pending messages first.
-              @producer.deliver_messages
-            rescue Error => e
-              @logger.error("Failed to deliver messages during shutdown: #{e.message}")
-
-              @instrumenter.instrument("drop_messages.async_producer", {
-                message_count: @producer.buffer_size + @queue.size,
-              })
+          begin
+            operation, payload = @queue.pop
+
+            case operation
+            when :produce
+              produce(payload[0], **payload[1])
+              deliver_messages if threshold_reached?
+            when :deliver_messages
+              deliver_messages
+            when :shutdown
+              begin
+                # Deliver any pending messages first.
+                @producer.deliver_messages
+              rescue Error => e
+                @logger.error("Failed to deliver messages during shutdown: #{e.message}")
+
+                @instrumenter.instrument("drop_messages.async_producer", {
+                  message_count: @producer.buffer_size + @queue.size,
+                })
+              end
+
+              # Stop the run loop.
+              break
+            else
+              raise "Unknown operation #{operation.inspect}"
            end
-
-            # Stop the run loop.
-            break
-          else
-            raise "Unknown operation #{operation.inspect}"
          end
        end
      rescue Kafka::Error => e
@@ -233,20 +259,12 @@ module Kafka

        sleep 10
        retry
-      rescue Exception => e
-        @logger.error "Unexpected Kafka error #{e.class}: #{e.message}\n#{e.backtrace.join("\n")}"
-        @logger.error "Async producer crashed!"
-      ensure
-        @producer.shutdown
-        @logger.pop_tags
      end

-      private
-
-      def produce(*args)
+      def produce(value, **kwargs)
        retries = 0
        begin
-          @producer.produce(*args)
+          @producer.produce(value, **kwargs)
        rescue BufferOverflow => e
          deliver_messages
          if @max_retries == -1
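Taken together, the async producer changes above swap the class-level `THREAD_MUTEX` for a per-instance `@thread_mutex`, restart dead worker/timer threads from `deliver_messages` and `shutdown`, and fail fast when `topic` is not String-like. A minimal usage sketch; the broker address and topic name below are placeholders, not part of this diff:

```ruby
require "kafka"

kafka = Kafka.new(["localhost:9092"], client_id: "example-app")

# The worker and timer threads are now (re)started lazily by produce,
# deliver_messages and shutdown, guarded by the per-instance mutex.
producer = kafka.async_producer(delivery_interval: 10)

producer.produce("hello", topic: "greetings") # topic must respond to #to_str
producer.deliver_messages                     # safe even if the threads died (e.g. after a fork)
producer.shutdown
```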
data/lib/kafka/client.rb CHANGED
@@ -1,3 +1,4 @@
+# coding: utf-8
 # frozen_string_literal: true

 require "kafka/ssl_context"
@@ -38,8 +39,8 @@ module Kafka
    # @param ssl_ca_cert [String, Array<String>, nil] a PEM encoded CA cert, or an Array of
    #   PEM encoded CA certs, to use with an SSL connection.
    #
-    # @param ssl_ca_cert_file_path [String, nil] a path on the filesystem to a PEM encoded CA cert
-    #   to use with an SSL connection.
+    # @param ssl_ca_cert_file_path [String, Array<String>, nil] a path on the filesystem, or an
+    #   Array of paths, to PEM encoded CA cert(s) to use with an SSL connection.
    #
    # @param ssl_client_cert [String, nil] a PEM encoded client cert to use with an
    #   SSL connection. Must be used in combination with ssl_client_cert_key.
@@ -62,19 +63,38 @@ module Kafka
    #
    # @param sasl_over_ssl [Boolean] whether to enforce SSL with SASL
    #
+    # @param ssl_ca_certs_from_system [Boolean] whether to use the CA certs from the
+    #   system's default certificate store.
+    #
+    # @param partitioner [Partitioner, nil] the partitioner that should be used by the client.
+    #
    # @param sasl_oauth_token_provider [Object, nil] OAuthBearer Token Provider instance that
    #   implements method token. See {Sasl::OAuth#initialize}
    #
+    # @param ssl_verify_hostname [Boolean, true] whether to verify that the host serving
+    #   the SSL certificate and the signing chain of the certificate have the correct domains
+    #   based on the CA certificate
+    #
+    # @param resolve_seed_brokers [Boolean] whether to resolve each hostname of the seed brokers.
+    #   If a broker is resolved to multiple IP addresses, the client tries to connect to each
+    #   of the addresses until it can connect.
+    #
    # @return [Client]
    def initialize(seed_brokers:, client_id: "ruby-kafka", logger: nil, connect_timeout: nil, socket_timeout: nil,
                   ssl_ca_cert_file_path: nil, ssl_ca_cert: nil, ssl_client_cert: nil, ssl_client_cert_key: nil,
                   ssl_client_cert_key_password: nil, ssl_client_cert_chain: nil, sasl_gssapi_principal: nil,
                   sasl_gssapi_keytab: nil, sasl_plain_authzid: '', sasl_plain_username: nil, sasl_plain_password: nil,
                   sasl_scram_username: nil, sasl_scram_password: nil, sasl_scram_mechanism: nil,
-                   sasl_over_ssl: true, ssl_ca_certs_from_system: false, sasl_oauth_token_provider: nil, ssl_verify_hostname: true)
+                   sasl_aws_msk_iam_access_key_id: nil,
+                   sasl_aws_msk_iam_secret_key_id: nil,
+                   sasl_aws_msk_iam_aws_region: nil,
+                   sasl_aws_msk_iam_session_token: nil,
+                   sasl_over_ssl: true, ssl_ca_certs_from_system: false, partitioner: nil, sasl_oauth_token_provider: nil, ssl_verify_hostname: true,
+                   resolve_seed_brokers: false)
      @logger = TaggedLogger.new(logger)
      @instrumenter = Instrumenter.new(client_id: client_id)
      @seed_brokers = normalize_seed_brokers(seed_brokers)
+      @resolve_seed_brokers = resolve_seed_brokers

      ssl_context = SslContext.build(
        ca_cert_file_path: ssl_ca_cert_file_path,
@@ -96,6 +116,10 @@ module Kafka
        sasl_scram_username: sasl_scram_username,
        sasl_scram_password: sasl_scram_password,
        sasl_scram_mechanism: sasl_scram_mechanism,
+        sasl_aws_msk_iam_access_key_id: sasl_aws_msk_iam_access_key_id,
+        sasl_aws_msk_iam_secret_key_id: sasl_aws_msk_iam_secret_key_id,
+        sasl_aws_msk_iam_aws_region: sasl_aws_msk_iam_aws_region,
+        sasl_aws_msk_iam_session_token: sasl_aws_msk_iam_session_token,
        sasl_oauth_token_provider: sasl_oauth_token_provider,
        logger: @logger
      )
@@ -115,6 +139,7 @@ module Kafka
      )

      @cluster = initialize_cluster
+      @partitioner = partitioner || Partitioner.new
    end

    # Delivers a single message to the Kafka cluster.
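The constructor now accepts `resolve_seed_brokers`, a pluggable `partitioner`, and AWS MSK IAM SASL credentials. A hedged sketch of wiring them up; the host, region, and environment variable names are placeholders, and the lambda relies only on the `call(partition_count, message)` contract used by `deliver_message` below:

```ruby
require "kafka"

# Any object responding to call(partition_count, message) can serve as the partitioner.
custom_partitioner = ->(partition_count, message) do
  (message.partition_key || message.key).to_s.sum % partition_count
end

kafka = Kafka.new(
  ["broker1.example.com:9092"],
  client_id: "example-app",
  resolve_seed_brokers: true,  # try every IP address each seed hostname resolves to
  partitioner: custom_partitioner,
  sasl_aws_msk_iam_access_key_id: ENV["AWS_ACCESS_KEY_ID"],
  sasl_aws_msk_iam_secret_key_id: ENV["AWS_SECRET_ACCESS_KEY"],
  sasl_aws_msk_iam_aws_region: "us-east-1"
)
```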
@@ -138,6 +163,9 @@ module Kafka
    def deliver_message(value, key: nil, headers: {}, topic:, partition: nil, partition_key: nil, retries: 1)
      create_time = Time.now

+      # We want to fail fast if `topic` isn't a String
+      topic = topic.to_str
+
      message = PendingMessage.new(
        value: value,
        key: key,
@@ -150,7 +178,7 @@ module Kafka

      if partition.nil?
        partition_count = @cluster.partitions_for(topic).count
-        partition = Partitioner.partition_for_key(partition_count, message)
+        partition = @partitioner.call(partition_count, message)
      end

      buffer = MessageBuffer.new
@@ -191,6 +219,8 @@ module Kafka
      attempt = 1

      begin
+        @cluster.refresh_metadata_if_necessary!
+
        operation.execute

        unless buffer.empty?
@@ -241,6 +271,9 @@ module Kafka
    #   be in a message set before it should be compressed. Note that message sets
    #   are per-partition rather than per-topic or per-producer.
    #
+    # @param interceptors [Array<Object>] a list of producer interceptors the implement
+    #   `call(Kafka::PendingMessage)`.
+    #
    # @return [Kafka::Producer] the Kafka producer.
    def producer(
      compression_codec: nil,
@@ -254,7 +287,8 @@ module Kafka
      idempotent: false,
      transactional: false,
      transactional_id: nil,
-      transactional_timeout: 60
+      transactional_timeout: 60,
+      interceptors: []
    )
      cluster = initialize_cluster
      compressor = Compressor.new(
@@ -284,6 +318,8 @@ module Kafka
        retry_backoff: retry_backoff,
        max_buffer_size: max_buffer_size,
        max_buffer_bytesize: max_buffer_bytesize,
+        partitioner: @partitioner,
+        interceptors: interceptors
      )
    end
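Per the doc comment above, a producer interceptor only needs to respond to `call(Kafka::PendingMessage)`. A minimal illustrative interceptor (the class name is invented for this sketch); it returns the message so the rest of the chain sees it unchanged:

```ruby
class CountingInterceptor
  attr_reader :count

  def initialize
    @count = 0
  end

  # Called with each Kafka::PendingMessage before it is buffered.
  def call(pending_message)
    @count += 1
    pending_message
  end
end

producer = kafka.producer(interceptors: [CountingInterceptor.new])
```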
@@ -333,15 +369,26 @@ module Kafka
    # @param fetcher_max_queue_size [Integer] max number of items in the fetch queue that
    #   are stored for further processing. Note, that each item in the queue represents a
    #   response from a single broker.
+    # @param refresh_topic_interval [Integer] interval of refreshing the topic list.
+    #   If it is 0, the topic list won't be refreshed (default)
+    #   If it is n (n > 0), the topic list will be refreshed every n seconds
+    # @param interceptors [Array<Object>] a list of consumer interceptors that implement
+    #   `call(Kafka::FetchedBatch)`.
+    # @param assignment_strategy [Object] a partition assignment strategy that
+    #   implements `protocol_type()`, `user_data()`, and `assign(members:, partitions:)`
    # @return [Consumer]
    def consumer(
      group_id:,
      session_timeout: 30,
+      rebalance_timeout: 60,
      offset_commit_interval: 10,
      offset_commit_threshold: 0,
      heartbeat_interval: 10,
      offset_retention_time: nil,
-      fetcher_max_queue_size: 100
+      fetcher_max_queue_size: 100,
+      refresh_topic_interval: 0,
+      interceptors: [],
+      assignment_strategy: nil
    )
      cluster = initialize_cluster

@@ -357,8 +404,10 @@ module Kafka
        logger: @logger,
        group_id: group_id,
        session_timeout: session_timeout,
+        rebalance_timeout: rebalance_timeout,
        retention_time: retention_time,
        instrumenter: instrumenter,
+        assignment_strategy: assignment_strategy
      )

      fetcher = Fetcher.new(
@@ -394,6 +443,8 @@ module Kafka
        fetcher: fetcher,
        session_timeout: session_timeout,
        heartbeat: heartbeat,
+        refresh_topic_interval: refresh_topic_interval,
+        interceptors: interceptors
      )
    end
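`consumer` correspondingly gains `rebalance_timeout`, `refresh_topic_interval`, `interceptors`, and a pluggable `assignment_strategy`. A hedged sketch; the group id and intervals are placeholders, and leaving `assignment_strategy` at its default keeps the built-in round-robin assignment:

```ruby
consumer = kafka.consumer(
  group_id: "example-group",
  rebalance_timeout: 120,      # seconds the coordinator waits for members to join a rebalance
  refresh_topic_interval: 30,  # re-check the subscribed topic list every 30 seconds
  interceptors: []             # objects responding to call(Kafka::FetchedBatch)
)
```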
@@ -530,6 +581,24 @@ module Kafka
      end
    end

+    # Describe broker configs
+    #
+    # @param broker_id [int] the id of the broker
+    # @param configs [Array] array of config keys.
+    # @return [Array<Kafka::Protocol::DescribeConfigsResponse::ConfigEntry>]
+    def describe_configs(broker_id, configs = [])
+      @cluster.describe_configs(broker_id, configs)
+    end
+
+    # Alter broker configs
+    #
+    # @param broker_id [int] the id of the broker
+    # @param configs [Array] array of config strings.
+    # @return [nil]
+    def alter_configs(broker_id, configs = [])
+      @cluster.alter_configs(broker_id, configs)
+    end
+
    # Creates a topic in the cluster.
    #
    # @example Creating a topic with log compaction
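A hedged usage sketch of the new broker-config helper; broker id `1` and the config keys are placeholders, and the sketch assumes each returned `ConfigEntry` exposes `name` and `value`:

```ruby
entries = kafka.describe_configs(1, ["log.retention.hours", "log.segment.bytes"])
entries.each { |entry| puts "#{entry.name} = #{entry.value}" }

# alter_configs takes the same broker id plus the config entries to change;
# see Kafka::Protocol for the exact entry format expected by the request.
```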
@@ -615,6 +684,14 @@ module Kafka
      @cluster.describe_group(group_id)
    end

+    # Fetch all committed offsets for a consumer group
+    #
+    # @param group_id [String] the id of the consumer group
+    # @return [Hash<String, Hash<Integer, Kafka::Protocol::OffsetFetchResponse::PartitionOffsetInfo>>]
+    def fetch_group_offsets(group_id)
+      @cluster.fetch_group_offsets(group_id)
+    end
+
    # Create partitions for a topic.
    #
    # @param name [String] the name of the topic.
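Given the documented return shape (topic => partition => offset info), a hedged sketch of reading a group's committed offsets; the group name is a placeholder and `offset` is assumed to be the accessor on `PartitionOffsetInfo`:

```ruby
offsets = kafka.fetch_group_offsets("example-group")

offsets.each do |topic, partitions|
  partitions.each do |partition, info|
    puts "#{topic}/#{partition}: committed offset #{info.offset}"
  end
end
```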
@@ -663,6 +740,14 @@ module Kafka
      @cluster.partitions_for(topic).count
    end

+    # Counts the number of replicas for a topic's partition
+    #
+    # @param topic [String]
+    # @return [Integer] the number of replica nodes for the topic's partition
+    def replica_count_for(topic)
+      @cluster.partitions_for(topic).first.replicas.count
+    end
+
    # Retrieve the offset of the last message in a partition. If there are no
    # messages in the partition -1 is returned.
    #
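A quick check built on the new helper (the topic name is a placeholder):

```ruby
# The replication factor is read from the first partition's replica list.
replicas = kafka.replica_count_for("greetings")
puts "greetings is replicated to #{replicas} brokers"
```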
@@ -741,6 +826,7 @@ module Kafka
        seed_brokers: @seed_brokers,
        broker_pool: broker_pool,
        logger: @logger,
+        resolve_seed_brokers: @resolve_seed_brokers,
      )
    end

data/lib/kafka/cluster.rb CHANGED
@@ -1,6 +1,7 @@
 # frozen_string_literal: true

 require "kafka/broker_pool"
+require "resolv"
 require "set"

 module Kafka
@@ -18,7 +19,8 @@ module Kafka
    # @param seed_brokers [Array<URI>]
    # @param broker_pool [Kafka::BrokerPool]
    # @param logger [Logger]
-    def initialize(seed_brokers:, broker_pool:, logger:)
+    # @param resolve_seed_brokers [Boolean] See {Kafka::Client#initialize}
+    def initialize(seed_brokers:, broker_pool:, logger:, resolve_seed_brokers: false)
      if seed_brokers.empty?
        raise ArgumentError, "At least one seed broker must be configured"
      end
@@ -26,6 +28,7 @@ module Kafka
      @logger = TaggedLogger.new(logger)
      @seed_brokers = seed_brokers
      @broker_pool = broker_pool
+      @resolve_seed_brokers = resolve_seed_brokers
      @cluster_info = nil
      @stale = true

@@ -45,6 +48,10 @@ module Kafka
      new_topics = topics - @target_topics

      unless new_topics.empty?
+        if new_topics.any? { |topic| topic.nil? or topic.empty? }
+          raise ArgumentError, "Topic must not be nil or empty"
+        end
+
        @logger.info "New topics added to target list: #{new_topics.to_a.join(', ')}"

        @target_topics.merge(new_topics)
@@ -113,7 +120,7 @@ module Kafka

    # Finds the broker acting as the coordinator of the given group.
    #
-    # @param group_id: [String]
+    # @param group_id [String]
    # @return [Broker] the broker that's currently coordinator.
    def get_group_coordinator(group_id:)
      @logger.debug "Getting group coordinator for `#{group_id}`"
@@ -123,7 +130,7 @@ module Kafka

    # Finds the broker acting as the coordinator of the given transaction.
    #
-    # @param transactional_id: [String]
+    # @param transactional_id [String]
    # @return [Broker] the broker that's currently coordinator.
    def get_transaction_coordinator(transactional_id:)
      @logger.debug "Getting transaction coordinator for `#{transactional_id}`"
@@ -139,6 +146,40 @@ module Kafka
      end
    end

+    def describe_configs(broker_id, configs = [])
+      options = {
+        resources: [[Kafka::Protocol::RESOURCE_TYPE_CLUSTER, broker_id.to_s, configs]]
+      }
+
+      info = cluster_info.brokers.find {|broker| broker.node_id == broker_id }
+      broker = @broker_pool.connect(info.host, info.port, node_id: info.node_id)
+
+      response = broker.describe_configs(**options)
+
+      response.resources.each do |resource|
+        Protocol.handle_error(resource.error_code, resource.error_message)
+      end
+
+      response.resources.first.configs
+    end
+
+    def alter_configs(broker_id, configs = [])
+      options = {
+        resources: [[Kafka::Protocol::RESOURCE_TYPE_CLUSTER, broker_id.to_s, configs]]
+      }
+
+      info = cluster_info.brokers.find {|broker| broker.node_id == broker_id }
+      broker = @broker_pool.connect(info.host, info.port, node_id: info.node_id)
+
+      response = broker.alter_configs(**options)
+
+      response.resources.each do |resource|
+        Protocol.handle_error(resource.error_code, resource.error_message)
+      end
+
+      nil
+    end
+
    def partitions_for(topic)
      add_target_topics([topic])
      refresh_metadata_if_necessary!
@@ -252,6 +293,20 @@ module Kafka
      group
    end

+    def fetch_group_offsets(group_id)
+      topics = get_group_coordinator(group_id: group_id)
+        .fetch_offsets(group_id: group_id, topics: nil)
+        .topics
+
+      topics.each do |_, partitions|
+        partitions.each do |_, response|
+          Protocol.handle_error(response.error_code)
+        end
+      end
+
+      topics
+    end
+
    def create_partitions_for(name, num_partitions:, timeout:)
      options = {
        topics: [[name, num_partitions, nil]],
@@ -366,32 +421,35 @@ module Kafka
    # @return [Protocol::MetadataResponse] the cluster metadata.
    def fetch_cluster_info
      errors = []
-
      @seed_brokers.shuffle.each do |node|
-        @logger.info "Fetching cluster metadata from #{node}"
-
-        begin
-          broker = @broker_pool.connect(node.hostname, node.port)
-          cluster_info = broker.fetch_metadata(topics: @target_topics)
-
-          if cluster_info.brokers.empty?
-            @logger.error "No brokers in cluster"
-          else
-            @logger.info "Discovered cluster metadata; nodes: #{cluster_info.brokers.join(', ')}"
-
-            @stale = false
-
-            return cluster_info
+        (@resolve_seed_brokers ? Resolv.getaddresses(node.hostname).shuffle : [node.hostname]).each do |hostname_or_ip|
+          node_info = node.to_s
+          node_info << " (#{hostname_or_ip})" if node.hostname != hostname_or_ip
+          @logger.info "Fetching cluster metadata from #{node_info}"
+
+          begin
+            broker = @broker_pool.connect(hostname_or_ip, node.port)
+            cluster_info = broker.fetch_metadata(topics: @target_topics)
+
+            if cluster_info.brokers.empty?
+              @logger.error "No brokers in cluster"
+            else
+              @logger.info "Discovered cluster metadata; nodes: #{cluster_info.brokers.join(', ')}"
+
+              @stale = false
+
+              return cluster_info
+            end
+          rescue Error => e
+            @logger.error "Failed to fetch metadata from #{node_info}: #{e}"
+            errors << [node_info, e]
+          ensure
+            broker.disconnect unless broker.nil?
          end
-        rescue Error => e
-          @logger.error "Failed to fetch metadata from #{node}: #{e}"
-          errors << [node, e]
-        ensure
-          broker.disconnect unless broker.nil?
        end
      end

-      error_description = errors.map {|node, exception| "- #{node}: #{exception}" }.join("\n")
+      error_description = errors.map {|node_info, exception| "- #{node_info}: #{exception}" }.join("\n")

      raise ConnectionError, "Could not connect to any of the seed brokers:\n#{error_description}"
    end
data/lib/kafka/connection.rb CHANGED
@@ -58,6 +58,9 @@ module Kafka
      @connect_timeout = connect_timeout || CONNECT_TIMEOUT
      @socket_timeout = socket_timeout || SOCKET_TIMEOUT
      @ssl_context = ssl_context
+
+      @socket = nil
+      @last_request = nil
    end

    def to_s