ruby-kafka 1.2.0 → 1.5.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 0f8391cc7b1989cb5f669796bc4ad647b77d882e6506fae42bab18acb8a6bcc6
-  data.tar.gz: 012baaff5d2cc9eb17e3a7b7342f49f7c905a5f91d26078fa0ecf2f0fa81a2ad
+  metadata.gz: '037679129d871686838235c368d870493439212f9aed0e897c1254fbfd5e4751'
+  data.tar.gz: bf61f193e97eb326b62a47b525a865911f1a7307983b6e765c08cc95e368a262
 SHA512:
-  metadata.gz: 7f4e9302ca0ab41a6fded75f95ed866d959a2a027b90051ed7f3d7fba573aa63e57be7692d004dbeb3bacd99fe44b24188f81b0f3bed50f68e1da5189262271f
-  data.tar.gz: 7266bdd50e66a7ab9b3c71025468af0a4bea778fd312c41d4c699e2609420c11c28accb3246407ce98330bf521c109691bd1d4943dd532a44ee6732f1a410922
+  metadata.gz: 8d53ee98b08cda3c8b64dd44ccb27d16fe6f9c5b65986115ce1db8c6d4fae03543fb9f83dc5765f40e4455e23986c323593fc8b9fc89e14a5dae4ec912a83981
+  data.tar.gz: 48229b285251618f66f55963074285e01de96c71be4f65a2340a4c34e394db494aea76c3f8254075eb5670881a43a2ed51b2874d0c188c94d9bac9d7f27d38df
data/.circleci/config.yml CHANGED
@@ -7,6 +7,7 @@ jobs:
           LOG_LEVEL: DEBUG
     steps:
       - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
       - run: bundle install --path vendor/bundle
       - run: bundle exec rspec
       - run: bundle exec rubocop
@@ -40,6 +41,7 @@ jobs:
           KAFKA_DELETE_TOPIC_ENABLE: true
     steps:
       - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
       - run: bundle install --path vendor/bundle
       - run: bundle exec rspec --profile --tag functional spec/functional
 
@@ -72,6 +74,7 @@ jobs:
           KAFKA_DELETE_TOPIC_ENABLE: true
     steps:
      - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
       - run: bundle install --path vendor/bundle
       - run: bundle exec rspec --profile --tag functional spec/functional
 
@@ -104,6 +107,7 @@ jobs:
           KAFKA_DELETE_TOPIC_ENABLE: true
     steps:
       - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
       - run: bundle install --path vendor/bundle
       - run: bundle exec rspec --profile --tag functional spec/functional
 
@@ -136,6 +140,7 @@ jobs:
           KAFKA_DELETE_TOPIC_ENABLE: true
     steps:
       - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
       - run: bundle install --path vendor/bundle
       - run: bundle exec rspec --profile --tag functional spec/functional
 
@@ -168,6 +173,7 @@ jobs:
           KAFKA_DELETE_TOPIC_ENABLE: true
     steps:
       - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
       - run: bundle install --path vendor/bundle
       - run: bundle exec rspec --profile --tag functional spec/functional
 
@@ -200,6 +206,7 @@ jobs:
           KAFKA_DELETE_TOPIC_ENABLE: true
     steps:
       - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
       - run: bundle install --path vendor/bundle
       - run: bundle exec rspec --profile --tag functional spec/functional
 
@@ -232,6 +239,7 @@ jobs:
           KAFKA_DELETE_TOPIC_ENABLE: true
     steps:
       - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
       - run: bundle install --path vendor/bundle
       - run: bundle exec rspec --profile --tag functional spec/functional
 
@@ -264,6 +272,7 @@ jobs:
           KAFKA_DELETE_TOPIC_ENABLE: true
     steps:
       - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
       - run: bundle install --path vendor/bundle
       - run: bundle exec rspec --profile --tag functional spec/functional
 
@@ -296,6 +305,75 @@ jobs:
           KAFKA_DELETE_TOPIC_ENABLE: true
     steps:
       - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.6:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: wurstmeister/zookeeper
+      - image: wurstmeister/kafka:2.13-2.6.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9092
+          KAFKA_PORT: 9092
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.13-2.6.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9093
+          KAFKA_PORT: 9093
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: wurstmeister/kafka:2.13-2.6.0
+        environment:
+          KAFKA_ADVERTISED_HOST_NAME: localhost
+          KAFKA_ADVERTISED_PORT: 9094
+          KAFKA_PORT: 9094
+          KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
+      - run: bundle install --path vendor/bundle
+      - run: bundle exec rspec --profile --tag functional spec/functional
+
+  kafka-2.7:
+    docker:
+      - image: circleci/ruby:2.5.1-node
+        environment:
+          LOG_LEVEL: DEBUG
+      - image: bitnami/zookeeper
+        environment:
+          ALLOW_ANONYMOUS_LOGIN: yes
+      - image: bitnami/kafka:2.7.0
+        environment:
+          ALLOW_PLAINTEXT_LISTENER: yes
+          KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092
+          KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
+          KAFKA_CFG_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: bitnami/kafka:2.7.0
+        environment:
+          ALLOW_PLAINTEXT_LISTENER: yes
+          KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9093
+          KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9093
+          KAFKA_CFG_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+      - image: bitnami/kafka:2.7.0
+        environment:
+          ALLOW_PLAINTEXT_LISTENER: yes
+          KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9094
+          KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9094
+          KAFKA_CFG_ZOOKEEPER_CONNECT: localhost:2181
+          KAFKA_DELETE_TOPIC_ENABLE: true
+    steps:
+      - checkout
+      - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
       - run: bundle install --path vendor/bundle
       - run: bundle exec rspec --profile --tag functional spec/functional
 
@@ -313,3 +391,5 @@ workflows:
       - kafka-2.3
       - kafka-2.4
       - kafka-2.5
+      - kafka-2.6
+      - kafka-2.7
data/CHANGELOG.md CHANGED
@@ -4,6 +4,24 @@ Changes and additions to the library will be listed here.
 
 ## Unreleased
 
+## 1.5.0
+- Add support for AWS IAM Authentication to an MSK cluster (#907).
+- Added session token to the IAM mechanism; necessary for auth via temporary credentials (#937)
+
+## 1.4.0
+
+- Refresh a stale cluster's metadata if necessary on `Kafka::Client#deliver_message` (#901).
+- Fix `Kafka::TransactionManager#send_offsets_to_txn` (#866).
+- Add support for `murmur2` based partitioning.
+- Add `resolve_seed_brokers` option to support seed brokers' hostname with multiple addresses (#877).
+- Handle SyncGroup responses with a non-zero error and no assignments (#896).
+- Add support for non-identical topic subscriptions within the same consumer group (#525 / #764).
+
+## 1.3.0
+
+- Support custom assignment strategy (#846).
+- Improved Exceptions in TransactionManager (#862).
+
 ## 1.2.0
 
 - Add producer consumer interceptors (#837).
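The 1.5.0 entries above cover IAM authentication against an AWS MSK cluster, including the session token needed for temporary credentials (#937). As a rough sketch of how those pieces fit together — the keyword argument names come from the `data/lib/kafka/client.rb` changes further down in this diff, while the broker address and credential values are placeholders:

```ruby
require "kafka"

# Placeholder broker address and credentials; the sasl_aws_msk_iam_* keyword
# arguments are the ones added to Kafka::Client#initialize in this release.
kafka = Kafka.new(
  ["kafka1:9092"],
  client_id: "my-application",
  ssl_ca_certs_from_system: true,
  sasl_aws_msk_iam_access_key_id: 'temporary_access_key',
  sasl_aws_msk_iam_secret_key_id: 'temporary_secret_key',
  sasl_aws_msk_iam_aws_region: 'us-west-2',
  # New in 1.5.0 (#937): the session token that accompanies temporary credentials.
  sasl_aws_msk_iam_session_token: 'temporary_session_token'
)
```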
data/README.md CHANGED
@@ -26,6 +26,7 @@ A Ruby client library for [Apache Kafka](http://kafka.apache.org/), a distribute
     4. [Shutting Down a Consumer](#shutting-down-a-consumer)
     5. [Consuming Messages in Batches](#consuming-messages-in-batches)
     6. [Balancing Throughput and Latency](#balancing-throughput-and-latency)
+    7. [Customizing Partition Assignment Strategy](#customizing-partition-assignment-strategy)
 4. [Thread Safety](#thread-safety)
 5. [Logging](#logging)
 6. [Instrumentation](#instrumentation)
@@ -128,6 +129,16 @@ Or install it yourself as:
     <td>Limited support</td>
     <td>Limited support</td>
   </tr>
+  <tr>
+    <th>Kafka 2.6</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
+  <tr>
+    <th>Kafka 2.7</th>
+    <td>Limited support</td>
+    <td>Limited support</td>
+  </tr>
 </table>
 
 This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with the v0.5.x series. There's limited support for Kafka 0.8, and things should work with Kafka 0.11, although there may be performance issues due to changes in the protocol.
@@ -143,6 +154,8 @@ This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with t
 - **Kafka 2.3:** Everything that works with Kafka 2.2 should still work, but so far no features specific to Kafka 2.3 have been added.
 - **Kafka 2.4:** Everything that works with Kafka 2.3 should still work, but so far no features specific to Kafka 2.4 have been added.
 - **Kafka 2.5:** Everything that works with Kafka 2.4 should still work, but so far no features specific to Kafka 2.5 have been added.
+- **Kafka 2.6:** Everything that works with Kafka 2.5 should still work, but so far no features specific to Kafka 2.6 have been added.
+- **Kafka 2.7:** Everything that works with Kafka 2.6 should still work, but so far no features specific to Kafka 2.7 have been added.
 
 This library requires Ruby 2.1 or higher.
 
@@ -163,6 +176,12 @@ require "kafka"
163
176
  kafka = Kafka.new(["kafka1:9092", "kafka2:9092"], client_id: "my-application")
164
177
  ```
165
178
 
179
+ You can also use a hostname with seed brokers' IP addresses:
180
+
181
+ ```ruby
182
+ kafka = Kafka.new("seed-brokers:9092", client_id: "my-application", resolve_seed_brokers: true)
183
+ ```
184
+
166
185
  ### Producing Messages to Kafka
167
186
 
168
187
  The simplest way to write a message to a Kafka topic is to call `#deliver_message`:
@@ -369,6 +388,16 @@ partitioner = -> (partition_count, message) { ... }
 Kafka.new(partitioner: partitioner, ...)
 ```
 
+##### Supported partitioning schemes
+
+In order for semantic partitioning to work, a `partition_key` must map to the same partition number every time. The general approach, and the one used by this library, is to hash the key and mod it by the number of partitions. There are many different algorithms that can be used to calculate a hash. By default `crc32` is used. `murmur2` is also supported for compatibility with Java-based Kafka producers.
+
+To use `murmur2` hashing, pass it as an argument to `Partitioner`. For example:
+
+```ruby
+Kafka.new(partitioner: Kafka::Partitioner.new(hash_function: :murmur2))
+```
+
 #### Buffering and Error Handling
 
 The producer is designed for resilience in the face of temporary network errors, Kafka broker failovers, and other issues that prevent the client from writing messages to the destination topics. It does this by employing local, in-memory buffers. Only when messages are acknowledged by a Kafka broker will they be removed from the buffer.
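The partitioning paragraph added above boils down to hashing the `partition_key` and taking the result modulo the partition count. A self-contained sketch of that idea, using `crc32` from Ruby's standard library (illustrative only, not the library's exact implementation):

```ruby
require "zlib"

# Stable key-to-partition mapping: hash the key, then mod by the partition count.
def partition_for(partition_key, partition_count)
  Zlib.crc32(partition_key) % partition_count
end

# The same key always maps to the same partition for a fixed partition count.
p partition_for("user-42", 6)  # => an integer between 0 and 5
p partition_for("user-42", 6)  # => the same integer again
```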
@@ -743,6 +772,88 @@ consumer.each_message do |message|
 end
 ```
 
+#### Customizing Partition Assignment Strategy
+
+In some cases, you might want to assign more partitions to some consumers. For example, in applications inserting some records to a database, the consumers running on hosts near the database can process more messages than the consumers running on other hosts.
+You can use a custom assignment strategy by passing an object that implements `#call` as the `assignment_strategy` argument, like below:
+
+```ruby
+class CustomAssignmentStrategy
+  def initialize(user_data)
+    @user_data = user_data
+  end
+
+  # Assign the topic partitions to the group members.
+  #
+  # @param cluster [Kafka::Cluster]
+  # @param members [Hash<String, Kafka::Protocol::JoinGroupResponse::Metadata>] a hash
+  #   mapping member ids to metadata
+  # @param partitions [Array<Kafka::ConsumerGroup::Assignor::Partition>] a list of
+  #   partitions the consumer group processes
+  # @return [Hash<String, Array<Kafka::ConsumerGroup::Assignor::Partition>>] a hash
+  #   mapping member ids to partitions.
+  def call(cluster:, members:, partitions:)
+    ...
+  end
+end
+
+strategy = CustomAssignmentStrategy.new("some-host-information")
+consumer = kafka.consumer(group_id: "some-group", assignment_strategy: strategy)
+```
+
+`members` is a hash mapping member IDs to metadata, and `partitions` is a list of partitions the consumer group processes. The method `call` must return a hash mapping member IDs to partitions. For example, the following strategy assigns partitions randomly:
+
+```ruby
+class RandomAssignmentStrategy
+  def call(cluster:, members:, partitions:)
+    member_ids = members.keys
+    partitions.each_with_object(Hash.new {|h, k| h[k] = [] }) do |partition, partitions_per_member|
+      partitions_per_member[member_ids[rand(member_ids.count)]] << partition
+    end
+  end
+end
+```
+
+If the strategy needs user data, you should define the method `user_data` that returns user data on each consumer. For example, the following strategy uses the consumers' IP addresses as user data:
+
+```ruby
+class NetworkTopologyAssignmentStrategy
+  def user_data
+    Socket.ip_address_list.find(&:ipv4_private?).ip_address
+  end
+
+  def call(cluster:, members:, partitions:)
+    # Display the pair of the member ID and IP address
+    members.each do |id, metadata|
+      puts "#{id}: #{metadata.user_data}"
+    end
+
+    # Assign partitions considering the network topology
+    ...
+  end
+end
+```
+
+Note that the strategy uses the class name as the default protocol name. You can change it by defining the method `protocol_name`:
+
+```ruby
+class NetworkTopologyAssignmentStrategy
+  def protocol_name
+    "networktopology"
+  end
+
+  def user_data
+    Socket.ip_address_list.find(&:ipv4_private?).ip_address
+  end
+
+  def call(cluster:, members:, partitions:)
+    ...
+  end
+end
+```
+
+As the method `call` might receive different user data from what it expects, you should avoid using the same protocol name as another strategy that uses different user data.
+
 
 ### Thread Safety
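Returning to the "Customizing Partition Assignment Strategy" section added above: its opening paragraph motivates giving some consumers more partitions than others, but the examples leave that part as `...`. A minimal sketch of one way to do it, assuming each member reports an integer weight through `user_data` (the class name, the environment variable, and the weighting policy are illustrative, not part of the library):

```ruby
# Assigns partitions roughly in proportion to a per-member weight reported via user_data.
class WeightedAssignmentStrategy
  def user_data
    # e.g. hosts close to the database could report a higher weight.
    ENV.fetch("CONSUMER_WEIGHT", "1")
  end

  def call(cluster:, members:, partitions:)
    weights = {}
    members.each { |id, metadata| weights[id] = [metadata.user_data.to_i, 1].max }
    total = weights.values.inject(:+).to_f

    assignment = Hash.new { |hash, member_id| hash[member_id] = [] }
    partitions.each do |partition|
      # Pick the member that is currently furthest below its weighted share.
      member_id, _ = weights.min_by { |id, weight| assignment[id].size - partitions.size * (weight / total) }
      assignment[member_id] << partition
    end
    assignment
  end
end
```

This keeps the same `user_data`/`call` contract as the examples above; only the weighting policy is new.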
 
@@ -972,7 +1083,7 @@ This configures the store to look up CA certificates from the system default cer
 
 In order to authenticate the client to the cluster, you need to pass in a certificate and key created for the client and trusted by the brokers.
 
-**NOTE**: You can disable hostname validation by passing `verify_hostname: false`.
+**NOTE**: You can disable hostname validation by passing `ssl_verify_hostname: false`.
 
 ```ruby
 kafka = Kafka.new(
@@ -981,6 +1092,7 @@ kafka = Kafka.new(
   ssl_client_cert: File.read('my_client_cert.pem'),
   ssl_client_cert_key: File.read('my_client_cert_key.pem'),
   ssl_client_cert_key_password: 'my_client_cert_key_password',
+  ssl_verify_hostname: false,
   # ...
 )
 ```
@@ -1009,6 +1121,20 @@ kafka = Kafka.new(
 )
 ```
 
+##### AWS MSK (IAM)
+In order to authenticate using IAM with an AWS MSK cluster, set your access key, secret key, and region when initializing the Kafka client:
+
+```ruby
+k = Kafka.new(
+  ["kafka1:9092"],
+  sasl_aws_msk_iam_access_key_id: 'iam_access_key',
+  sasl_aws_msk_iam_secret_key_id: 'iam_secret_key',
+  sasl_aws_msk_iam_aws_region: 'us-west-2',
+  ssl_ca_certs_from_system: true,
+  # ...
+)
+```
+
 ##### PLAIN
 In order to authenticate using PLAIN, you must set your username and password when initializing the Kafka client:
 
data/lib/kafka/async_producer.rb CHANGED
@@ -59,8 +59,6 @@ module Kafka
   #     producer.shutdown
   #
   class AsyncProducer
-    THREAD_MUTEX = Mutex.new
-
     # Initializes a new AsyncProducer.
     #
     # @param sync_producer [Kafka::Producer] the synchronous producer that should
@@ -94,6 +92,8 @@ module Kafka
 
       # The timer will no-op if the delivery interval is zero.
       @timer = Timer.new(queue: @queue, interval: delivery_interval)
+
+      @thread_mutex = Mutex.new
     end
 
     # Produces a message to the specified topic.
@@ -131,6 +131,8 @@ module Kafka
     # @see Kafka::Producer#deliver_messages
     # @return [nil]
     def deliver_messages
+      ensure_threads_running!
+
       @queue << [:deliver_messages, nil]
 
       nil
@@ -142,6 +144,8 @@ module Kafka
     # @see Kafka::Producer#shutdown
     # @return [nil]
     def shutdown
+      ensure_threads_running!
+
       @timer_thread && @timer_thread.exit
       @queue << [:shutdown, nil]
       @worker_thread && @worker_thread.join
@@ -152,17 +156,22 @@ module Kafka
     private
 
     def ensure_threads_running!
-      THREAD_MUTEX.synchronize do
-        @worker_thread = nil unless @worker_thread && @worker_thread.alive?
-        @worker_thread ||= Thread.new { @worker.run }
-      end
+      return if worker_thread_alive? && timer_thread_alive?
 
-      THREAD_MUTEX.synchronize do
-        @timer_thread = nil unless @timer_thread && @timer_thread.alive?
-        @timer_thread ||= Thread.new { @timer.run }
+      @thread_mutex.synchronize do
+        @worker_thread = Thread.new { @worker.run } unless worker_thread_alive?
+        @timer_thread = Thread.new { @timer.run } unless timer_thread_alive?
       end
     end
 
+    def worker_thread_alive?
+      !!@worker_thread && @worker_thread.alive?
+    end
+
+    def timer_thread_alive?
+      !!@timer_thread && @timer_thread.alive?
+    end
+
     def buffer_overflow(topic, message)
       @instrumenter.instrument("buffer_overflow.async_producer", {
         topic: topic,
@@ -203,31 +212,45 @@ module Kafka
        @logger.push_tags(@producer.to_s)
        @logger.info "Starting async producer in the background..."
 
+        do_loop
+      rescue Exception => e
+        @logger.error "Unexpected Kafka error #{e.class}: #{e.message}\n#{e.backtrace.join("\n")}"
+        @logger.error "Async producer crashed!"
+      ensure
+        @producer.shutdown
+        @logger.pop_tags
+      end
+
+      private
+
+      def do_loop
        loop do
-          operation, payload = @queue.pop
-
-          case operation
-          when :produce
-            produce(*payload)
-            deliver_messages if threshold_reached?
-          when :deliver_messages
-            deliver_messages
-          when :shutdown
-            begin
-              # Deliver any pending messages first.
-              @producer.deliver_messages
-            rescue Error => e
-              @logger.error("Failed to deliver messages during shutdown: #{e.message}")
-
-              @instrumenter.instrument("drop_messages.async_producer", {
-                message_count: @producer.buffer_size + @queue.size,
-              })
+          begin
+            operation, payload = @queue.pop
+
+            case operation
+            when :produce
+              produce(payload[0], **payload[1])
+              deliver_messages if threshold_reached?
+            when :deliver_messages
+              deliver_messages
+            when :shutdown
+              begin
+                # Deliver any pending messages first.
+                @producer.deliver_messages
+              rescue Error => e
+                @logger.error("Failed to deliver messages during shutdown: #{e.message}")
+
+                @instrumenter.instrument("drop_messages.async_producer", {
+                  message_count: @producer.buffer_size + @queue.size,
+                })
+              end
+
+              # Stop the run loop.
+              break
+            else
+              raise "Unknown operation #{operation.inspect}"
            end
-
-          # Stop the run loop.
-          break
-        else
-          raise "Unknown operation #{operation.inspect}"
          end
        end
      rescue Kafka::Error => e
@@ -236,16 +259,8 @@ module Kafka
 
        sleep 10
        retry
-      rescue Exception => e
-        @logger.error "Unexpected Kafka error #{e.class}: #{e.message}\n#{e.backtrace.join("\n")}"
-        @logger.error "Async producer crashed!"
-      ensure
-        @producer.shutdown
-        @logger.pop_tags
      end
 
-      private
-
      def produce(value, **kwargs)
        retries = 0
        begin
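The AsyncProducer changes above replace the class-level `THREAD_MUTEX` with a per-instance mutex and (re)start the worker and timer threads from `deliver_messages`/`shutdown` whenever they are no longer alive. Distilled into a standalone sketch (names and structure simplified; this shows the pattern, not the library code):

```ruby
# Lazily starts a background worker thread and restarts it if it has died,
# guarding the check-and-start with a per-instance mutex, as in the diff above.
class BackgroundRunner
  def initialize(&work)
    @work = work
    @thread_mutex = Mutex.new
    @worker_thread = nil
  end

  def ensure_running!
    return if worker_thread_alive?

    @thread_mutex.synchronize do
      @worker_thread = Thread.new(&@work) unless worker_thread_alive?
    end
  end

  private

  def worker_thread_alive?
    !!@worker_thread && @worker_thread.alive?
  end
end

runner = BackgroundRunner.new { sleep 0.1 }
runner.ensure_running!  # starts the thread
runner.ensure_running!  # no-op while the thread is alive; restarts it once it dies
```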
data/lib/kafka/client.rb CHANGED
@@ -1,3 +1,4 @@
+# coding: utf-8
 # frozen_string_literal: true
 
 require "kafka/ssl_context"
@@ -38,8 +39,8 @@ module Kafka
     # @param ssl_ca_cert [String, Array<String>, nil] a PEM encoded CA cert, or an Array of
     #   PEM encoded CA certs, to use with an SSL connection.
     #
-    # @param ssl_ca_cert_file_path [String, nil] a path on the filesystem to a PEM encoded CA cert
-    #   to use with an SSL connection.
+    # @param ssl_ca_cert_file_path [String, Array<String>, nil] a path on the filesystem, or an
+    #   Array of paths, to PEM encoded CA cert(s) to use with an SSL connection.
     #
     # @param ssl_client_cert [String, nil] a PEM encoded client cert to use with an
     #   SSL connection. Must be used in combination with ssl_client_cert_key.
@@ -74,16 +75,26 @@ module Kafka
     #   the SSL certificate and the signing chain of the certificate have the correct domains
     #   based on the CA certificate
     #
+    # @param resolve_seed_brokers [Boolean] whether to resolve each hostname of the seed brokers.
+    #   If a broker is resolved to multiple IP addresses, the client tries to connect to each
+    #   of the addresses until it can connect.
+    #
     # @return [Client]
     def initialize(seed_brokers:, client_id: "ruby-kafka", logger: nil, connect_timeout: nil, socket_timeout: nil,
                    ssl_ca_cert_file_path: nil, ssl_ca_cert: nil, ssl_client_cert: nil, ssl_client_cert_key: nil,
                    ssl_client_cert_key_password: nil, ssl_client_cert_chain: nil, sasl_gssapi_principal: nil,
                    sasl_gssapi_keytab: nil, sasl_plain_authzid: '', sasl_plain_username: nil, sasl_plain_password: nil,
                    sasl_scram_username: nil, sasl_scram_password: nil, sasl_scram_mechanism: nil,
-                   sasl_over_ssl: true, ssl_ca_certs_from_system: false, partitioner: nil, sasl_oauth_token_provider: nil, ssl_verify_hostname: true)
+                   sasl_aws_msk_iam_access_key_id: nil,
+                   sasl_aws_msk_iam_secret_key_id: nil,
+                   sasl_aws_msk_iam_aws_region: nil,
+                   sasl_aws_msk_iam_session_token: nil,
+                   sasl_over_ssl: true, ssl_ca_certs_from_system: false, partitioner: nil, sasl_oauth_token_provider: nil, ssl_verify_hostname: true,
+                   resolve_seed_brokers: false)
       @logger = TaggedLogger.new(logger)
       @instrumenter = Instrumenter.new(client_id: client_id)
       @seed_brokers = normalize_seed_brokers(seed_brokers)
+      @resolve_seed_brokers = resolve_seed_brokers
 
       ssl_context = SslContext.build(
         ca_cert_file_path: ssl_ca_cert_file_path,
@@ -105,6 +116,10 @@ module Kafka
         sasl_scram_username: sasl_scram_username,
         sasl_scram_password: sasl_scram_password,
         sasl_scram_mechanism: sasl_scram_mechanism,
+        sasl_aws_msk_iam_access_key_id: sasl_aws_msk_iam_access_key_id,
+        sasl_aws_msk_iam_secret_key_id: sasl_aws_msk_iam_secret_key_id,
+        sasl_aws_msk_iam_aws_region: sasl_aws_msk_iam_aws_region,
+        sasl_aws_msk_iam_session_token: sasl_aws_msk_iam_session_token,
         sasl_oauth_token_provider: sasl_oauth_token_provider,
         logger: @logger
       )
@@ -204,6 +219,8 @@ module Kafka
       attempt = 1
 
       begin
+        @cluster.refresh_metadata_if_necessary!
+
        operation.execute
 
        unless buffer.empty?
@@ -357,6 +374,8 @@ module Kafka
     #   If it is n (n > 0), the topic list will be refreshed every n seconds
     # @param interceptors [Array<Object>] a list of consumer interceptors that implement
     #   `call(Kafka::FetchedBatch)`.
+    # @param assignment_strategy [Object] a partition assignment strategy that
+    #   implements `protocol_type()`, `user_data()`, and `assign(members:, partitions:)`
     # @return [Consumer]
     def consumer(
       group_id:,
@@ -368,7 +387,8 @@ module Kafka
       offset_retention_time: nil,
       fetcher_max_queue_size: 100,
       refresh_topic_interval: 0,
-      interceptors: []
+      interceptors: [],
+      assignment_strategy: nil
     )
       cluster = initialize_cluster
 
@@ -387,6 +407,7 @@ module Kafka
        rebalance_timeout: rebalance_timeout,
        retention_time: retention_time,
        instrumenter: instrumenter,
+        assignment_strategy: assignment_strategy
      )
 
      fetcher = Fetcher.new(
@@ -805,6 +826,7 @@ module Kafka
        seed_brokers: @seed_brokers,
        broker_pool: broker_pool,
        logger: @logger,
+        resolve_seed_brokers: @resolve_seed_brokers,
      )
    end
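Taken together, the client.rb changes above add `resolve_seed_brokers:` to `Kafka.new` and thread the new `assignment_strategy:` option through `#consumer`. A short sketch combining both, in the spirit of the README examples earlier in this diff (the hostname and group id are placeholders, and the strategy class here is a made-up round-robin illustration of the `#call` contract):

```ruby
require "kafka"

# Minimal strategy implementing the #call contract described in the README diff above:
# spread partitions over the members in round-robin order.
class EvenSpreadAssignmentStrategy
  def call(cluster:, members:, partitions:)
    member_ids = members.keys
    partitions.each_with_index.each_with_object(Hash.new { |h, k| h[k] = [] }) do |(partition, i), acc|
      acc[member_ids[i % member_ids.size]] << partition
    end
  end
end

# "seed-brokers" is a placeholder hostname that resolves to several broker IPs.
kafka = Kafka.new(
  "seed-brokers:9092",
  client_id: "my-application",
  resolve_seed_brokers: true
)

consumer = kafka.consumer(
  group_id: "my-group",
  assignment_strategy: EvenSpreadAssignmentStrategy.new
)
```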