ruby-kafka 0.7.10 → 1.5.0

Files changed (42)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +179 -0
  3. data/.github/workflows/stale.yml +19 -0
  4. data/.ruby-version +1 -1
  5. data/CHANGELOG.md +40 -0
  6. data/README.md +167 -0
  7. data/lib/kafka/async_producer.rb +60 -42
  8. data/lib/kafka/client.rb +92 -6
  9. data/lib/kafka/cluster.rb +82 -24
  10. data/lib/kafka/connection.rb +3 -0
  11. data/lib/kafka/consumer.rb +61 -11
  12. data/lib/kafka/consumer_group/assignor.rb +63 -0
  13. data/lib/kafka/consumer_group.rb +29 -6
  14. data/lib/kafka/crc32_hash.rb +15 -0
  15. data/lib/kafka/datadog.rb +20 -13
  16. data/lib/kafka/digest.rb +22 -0
  17. data/lib/kafka/fetcher.rb +5 -2
  18. data/lib/kafka/interceptors.rb +33 -0
  19. data/lib/kafka/murmur2_hash.rb +17 -0
  20. data/lib/kafka/offset_manager.rb +12 -1
  21. data/lib/kafka/partitioner.rb +8 -3
  22. data/lib/kafka/producer.rb +13 -5
  23. data/lib/kafka/prometheus.rb +78 -79
  24. data/lib/kafka/protocol/add_offsets_to_txn_response.rb +2 -0
  25. data/lib/kafka/protocol/encoder.rb +1 -1
  26. data/lib/kafka/protocol/join_group_request.rb +8 -2
  27. data/lib/kafka/protocol/join_group_response.rb +9 -1
  28. data/lib/kafka/protocol/metadata_response.rb +1 -1
  29. data/lib/kafka/protocol/offset_fetch_request.rb +3 -1
  30. data/lib/kafka/protocol/record_batch.rb +2 -2
  31. data/lib/kafka/protocol/sasl_handshake_request.rb +1 -1
  32. data/lib/kafka/protocol/sync_group_response.rb +5 -2
  33. data/lib/kafka/protocol/txn_offset_commit_response.rb +34 -5
  34. data/lib/kafka/round_robin_assignment_strategy.rb +37 -39
  35. data/lib/kafka/sasl/awsmskiam.rb +133 -0
  36. data/lib/kafka/sasl_authenticator.rb +15 -2
  37. data/lib/kafka/ssl_context.rb +6 -5
  38. data/lib/kafka/tagged_logger.rb +1 -0
  39. data/lib/kafka/transaction_manager.rb +30 -10
  40. data/lib/kafka/version.rb +1 -1
  41. data/ruby-kafka.gemspec +5 -4
  42. metadata +39 -13
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 5aa84a39ade791c3bce57152d426e95cb7d29575af3e8a9cb29fcded1c6cdad5
- data.tar.gz: 46be738fb1a2b71bbbd5d5d3b2fc95b705aef5740f666095eeaf40c9a6980fc4
+ metadata.gz: '037679129d871686838235c368d870493439212f9aed0e897c1254fbfd5e4751'
+ data.tar.gz: bf61f193e97eb326b62a47b525a865911f1a7307983b6e765c08cc95e368a262
  SHA512:
- metadata.gz: 42e8059ab9b49e16a8e9c70e5739c9694d2142d5d440f8e2c18477cc151a24183536ae1548aed66a7e7ee6ea64c96ff2c9aacb63366ed92bd1612af6e52d8300
- data.tar.gz: 5008c803cc61fbea993f7f2a22ca7ac3ebb448f91cbdca40d540b9f4604dab8b4e38fcf4b20006a16641b5239e78d6045ccbec70168171219b716090ea72ba09
+ metadata.gz: 8d53ee98b08cda3c8b64dd44ccb27d16fe6f9c5b65986115ce1db8c6d4fae03543fb9f83dc5765f40e4455e23986c323593fc8b9fc89e14a5dae4ec912a83981
+ data.tar.gz: 48229b285251618f66f55963074285e01de96c71be4f65a2340a4c34e394db494aea76c3f8254075eb5670881a43a2ed51b2874d0c188c94d9bac9d7f27d38df
data/.circleci/config.yml CHANGED
@@ -7,6 +7,7 @@ jobs:
        LOG_LEVEL: DEBUG
    steps:
    - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
    - run: bundle install --path vendor/bundle
    - run: bundle exec rspec
    - run: bundle exec rubocop
@@ -40,6 +41,7 @@ jobs:
        KAFKA_DELETE_TOPIC_ENABLE: true
    steps:
    - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
    - run: bundle install --path vendor/bundle
    - run: bundle exec rspec --profile --tag functional spec/functional

@@ -72,6 +74,7 @@ jobs:
        KAFKA_DELETE_TOPIC_ENABLE: true
    steps:
    - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
    - run: bundle install --path vendor/bundle
    - run: bundle exec rspec --profile --tag functional spec/functional

@@ -104,6 +107,7 @@ jobs:
        KAFKA_DELETE_TOPIC_ENABLE: true
    steps:
    - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
    - run: bundle install --path vendor/bundle
    - run: bundle exec rspec --profile --tag functional spec/functional

@@ -136,6 +140,7 @@ jobs:
        KAFKA_DELETE_TOPIC_ENABLE: true
    steps:
    - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
    - run: bundle install --path vendor/bundle
    - run: bundle exec rspec --profile --tag functional spec/functional

@@ -168,6 +173,7 @@ jobs:
        KAFKA_DELETE_TOPIC_ENABLE: true
    steps:
    - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
    - run: bundle install --path vendor/bundle
    - run: bundle exec rspec --profile --tag functional spec/functional

@@ -200,6 +206,174 @@ jobs:
        KAFKA_DELETE_TOPIC_ENABLE: true
    steps:
    - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
+   - run: bundle install --path vendor/bundle
+   - run: bundle exec rspec --profile --tag functional spec/functional
+
+ kafka-2.3:
+   docker:
+   - image: circleci/ruby:2.5.1-node
+     environment:
+       LOG_LEVEL: DEBUG
+   - image: wurstmeister/zookeeper
+   - image: wurstmeister/kafka:2.12-2.3.1
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9092
+       KAFKA_PORT: 9092
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   - image: wurstmeister/kafka:2.12-2.3.1
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9093
+       KAFKA_PORT: 9093
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   - image: wurstmeister/kafka:2.12-2.3.1
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9094
+       KAFKA_PORT: 9094
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   steps:
+   - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
+   - run: bundle install --path vendor/bundle
+   - run: bundle exec rspec --profile --tag functional spec/functional
+
+ kafka-2.4:
+   docker:
+   - image: circleci/ruby:2.5.1-node
+     environment:
+       LOG_LEVEL: DEBUG
+   - image: wurstmeister/zookeeper
+   - image: wurstmeister/kafka:2.12-2.4.0
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9092
+       KAFKA_PORT: 9092
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   - image: wurstmeister/kafka:2.12-2.4.0
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9093
+       KAFKA_PORT: 9093
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   - image: wurstmeister/kafka:2.12-2.4.0
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9094
+       KAFKA_PORT: 9094
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   steps:
+   - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
+   - run: bundle install --path vendor/bundle
+   - run: bundle exec rspec --profile --tag functional spec/functional
+
+ kafka-2.5:
+   docker:
+   - image: circleci/ruby:2.5.1-node
+     environment:
+       LOG_LEVEL: DEBUG
+   - image: wurstmeister/zookeeper
+   - image: wurstmeister/kafka:2.12-2.5.0
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9092
+       KAFKA_PORT: 9092
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   - image: wurstmeister/kafka:2.12-2.5.0
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9093
+       KAFKA_PORT: 9093
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   - image: wurstmeister/kafka:2.12-2.5.0
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9094
+       KAFKA_PORT: 9094
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   steps:
+   - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
+   - run: bundle install --path vendor/bundle
+   - run: bundle exec rspec --profile --tag functional spec/functional
+
+ kafka-2.6:
+   docker:
+   - image: circleci/ruby:2.5.1-node
+     environment:
+       LOG_LEVEL: DEBUG
+   - image: wurstmeister/zookeeper
+   - image: wurstmeister/kafka:2.13-2.6.0
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9092
+       KAFKA_PORT: 9092
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   - image: wurstmeister/kafka:2.13-2.6.0
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9093
+       KAFKA_PORT: 9093
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   - image: wurstmeister/kafka:2.13-2.6.0
+     environment:
+       KAFKA_ADVERTISED_HOST_NAME: localhost
+       KAFKA_ADVERTISED_PORT: 9094
+       KAFKA_PORT: 9094
+       KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   steps:
+   - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
+   - run: bundle install --path vendor/bundle
+   - run: bundle exec rspec --profile --tag functional spec/functional
+
+ kafka-2.7:
+   docker:
+   - image: circleci/ruby:2.5.1-node
+     environment:
+       LOG_LEVEL: DEBUG
+   - image: bitnami/zookeeper
+     environment:
+       ALLOW_ANONYMOUS_LOGIN: yes
+   - image: bitnami/kafka:2.7.0
+     environment:
+       ALLOW_PLAINTEXT_LISTENER: yes
+       KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092
+       KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
+       KAFKA_CFG_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   - image: bitnami/kafka:2.7.0
+     environment:
+       ALLOW_PLAINTEXT_LISTENER: yes
+       KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9093
+       KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9093
+       KAFKA_CFG_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   - image: bitnami/kafka:2.7.0
+     environment:
+       ALLOW_PLAINTEXT_LISTENER: yes
+       KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9094
+       KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9094
+       KAFKA_CFG_ZOOKEEPER_CONNECT: localhost:2181
+       KAFKA_DELETE_TOPIC_ENABLE: true
+   steps:
+   - checkout
+   - run: sudo apt-get update && sudo apt-get install -y cmake # For installing snappy
    - run: bundle install --path vendor/bundle
    - run: bundle exec rspec --profile --tag functional spec/functional

@@ -214,3 +388,8 @@ workflows:
    - kafka-2.0
    - kafka-2.1
    - kafka-2.2
+   - kafka-2.3
+   - kafka-2.4
+   - kafka-2.5
+   - kafka-2.6
+   - kafka-2.7
data/.github/workflows/stale.yml ADDED
@@ -0,0 +1,19 @@
+ name: Mark stale issues and pull requests
+
+ on:
+   schedule:
+   - cron: "0 0 * * *"
+
+ jobs:
+   stale:
+
+     runs-on: ubuntu-latest
+
+     steps:
+     - uses: actions/stale@v1
+       with:
+         repo-token: ${{ secrets.GITHUB_TOKEN }}
+         stale-issue-message: 'Issue has been marked as stale due to a lack of activity.'
+         stale-pr-message: 'Pull request has been marked as stale due to a lack of activity.'
+         stale-issue-label: 'no-issue-activity'
+         stale-pr-label: 'no-pr-activity'
data/.ruby-version CHANGED
@@ -1 +1 @@
- 2.5.1
+ 2.7.1
data/CHANGELOG.md CHANGED
@@ -2,6 +2,46 @@
 
  Changes and additions to the library will be listed here.
 
+ ## Unreleased
+
+ ## 1.5.0
+
+ - Add support for AWS IAM authentication to an MSK cluster (#907).
+ - Add a session token to the IAM mechanism, needed for authentication via temporary credentials (#937).
+
+ ## 1.4.0
+
+ - Refresh a stale cluster's metadata if necessary on `Kafka::Client#deliver_message` (#901).
+ - Fix `Kafka::TransactionManager#send_offsets_to_txn` (#866).
+ - Add support for `murmur2` based partitioning.
+ - Add `resolve_seed_brokers` option to support seed brokers' hostname with multiple addresses (#877).
+ - Handle SyncGroup responses with a non-zero error and no assignments (#896).
+ - Add support for non-identical topic subscriptions within the same consumer group (#525 / #764).
+
+ ## 1.3.0
+
+ - Support custom assignment strategy (#846).
+ - Improve exceptions in TransactionManager (#862).
+
+ ## 1.2.0
+
+ - Add producer and consumer interceptors (#837).
+ - Add support for configuring the client partitioner (#848).
+
+ ## 1.1.0
+
+ - Extra sanity checking when marking offsets as processed (#824).
+ - Make `verify_hostname` settable for SSL contexts (#828).
+ - Instrument `create_time` from last message in batch (#811).
+ - Add client function for fetching topic replica count (#822).
+ - Allow consumers to refresh the topic lists (#818).
+ - Disconnect after leaving a group (#817).
+ - Use `max_wait_time` as the sleep instead of hardcoded 2 seconds (#825).
+
+ ## 1.0.0
+
+ - Add client methods to manage configs (#759).
+ - Support Kafka 2.3 and 2.4.
+
  ## 0.7.10
 
  - Fix logger again (#762)
data/README.md CHANGED
@@ -26,6 +26,7 @@ A Ruby client library for [Apache Kafka](http://kafka.apache.org/), a distribute
      4. [Shutting Down a Consumer](#shutting-down-a-consumer)
      5. [Consuming Messages in Batches](#consuming-messages-in-batches)
      6. [Balancing Throughput and Latency](#balancing-throughput-and-latency)
+     7. [Customizing Partition Assignment Strategy](#customizing-partition-assignment-strategy)
   4. [Thread Safety](#thread-safety)
   5. [Logging](#logging)
   6. [Instrumentation](#instrumentation)
@@ -113,6 +114,31 @@ Or install it yourself as:
      <td>Limited support</td>
      <td>Limited support</td>
    </tr>
+   <tr>
+     <th>Kafka 2.3</th>
+     <td>Limited support</td>
+     <td>Limited support</td>
+   </tr>
+   <tr>
+     <th>Kafka 2.4</th>
+     <td>Limited support</td>
+     <td>Limited support</td>
+   </tr>
+   <tr>
+     <th>Kafka 2.5</th>
+     <td>Limited support</td>
+     <td>Limited support</td>
+   </tr>
+   <tr>
+     <th>Kafka 2.6</th>
+     <td>Limited support</td>
+     <td>Limited support</td>
+   </tr>
+   <tr>
+     <th>Kafka 2.7</th>
+     <td>Limited support</td>
+     <td>Limited support</td>
+   </tr>
  </table>
 
  This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with the v0.5.x series. There's limited support for Kafka 0.8, and things should work with Kafka 0.11, although there may be performance issues due to changes in the protocol.
@@ -124,6 +150,12 @@ This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with t
  - **Kafka 1.0:** Everything that works with Kafka 0.11 should still work, but so far no features specific to Kafka 1.0 have been added.
  - **Kafka 2.0:** Everything that works with Kafka 1.0 should still work, but so far no features specific to Kafka 2.0 have been added.
  - **Kafka 2.1:** Everything that works with Kafka 2.0 should still work, but so far no features specific to Kafka 2.1 have been added.
+ - **Kafka 2.2:** Everything that works with Kafka 2.1 should still work, but so far no features specific to Kafka 2.2 have been added.
+ - **Kafka 2.3:** Everything that works with Kafka 2.2 should still work, but so far no features specific to Kafka 2.3 have been added.
+ - **Kafka 2.4:** Everything that works with Kafka 2.3 should still work, but so far no features specific to Kafka 2.4 have been added.
+ - **Kafka 2.5:** Everything that works with Kafka 2.4 should still work, but so far no features specific to Kafka 2.5 have been added.
+ - **Kafka 2.6:** Everything that works with Kafka 2.5 should still work, but so far no features specific to Kafka 2.6 have been added.
+ - **Kafka 2.7:** Everything that works with Kafka 2.6 should still work, but so far no features specific to Kafka 2.7 have been added.
 
  This library requires Ruby 2.1 or higher.
 
@@ -144,6 +176,12 @@ require "kafka"
  kafka = Kafka.new(["kafka1:9092", "kafka2:9092"], client_id: "my-application")
  ```
 
+ You can also use a single hostname that resolves to several seed broker addresses by setting `resolve_seed_brokers: true`:
+
+ ```ruby
+ kafka = Kafka.new("seed-brokers:9092", client_id: "my-application", resolve_seed_brokers: true)
+ ```
+
  ### Producing Messages to Kafka
 
  The simplest way to write a message to a Kafka topic is to call `#deliver_message`:
@@ -330,6 +368,36 @@ partition = PartitioningScheme.assign(partitions, event)
  producer.produce(event, topic: "events", partition: partition)
  ```
 
+ Another option is to configure a custom client partitioner that implements `call(partition_count, message)` and uses the same scheme as the other client. For example:
+
+ ```ruby
+ class CustomPartitioner
+   def call(partition_count, message)
+     ...
+   end
+ end
+
+ partitioner = CustomPartitioner.new
+ Kafka.new(partitioner: partitioner, ...)
+ ```
+
+ Or simply create a Proc that handles the partitioning logic, instead of adding a new class. For example:
+
+ ```ruby
+ partitioner = -> (partition_count, message) { ... }
+ Kafka.new(partitioner: partitioner, ...)
+ ```
+
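To make the `call(partition_count, message)` contract above concrete, here is a minimal runnable sketch. The partition-by-key-length rule is invented for illustration; the `partition_key`/`key` accessors and the `partitioner:` option are taken from the README text above, everything else is an assumption:

```ruby
# Illustrative only: route messages to a partition by the byte length of
# their key, falling back to partition 0 for keyless messages.
class LengthBasedPartitioner
  def call(partition_count, message)
    key = message.partition_key || message.key
    return 0 if key.nil? # keyless messages all land on partition 0

    key.bytesize % partition_count
  end
end

kafka = Kafka.new(["kafka1:9092"], partitioner: LengthBasedPartitioner.new)
```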
+ ##### Supported partitioning schemes
+
+ In order for semantic partitioning to work, a `partition_key` must map to the same partition number every time. The general approach, and the one used by this library, is to hash the key and mod it by the number of partitions. There are many different algorithms that can be used to calculate a hash. By default `crc32` is used; `murmur2` is also supported, for compatibility with Java-based Kafka producers.
+
+ To use `murmur2` hashing, pass it as an argument to `Partitioner`. For example:
+
+ ```ruby
+ Kafka.new(partitioner: Kafka::Partitioner.new(hash_function: :murmur2))
+ ```
+
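As a reference point, the hash-and-mod scheme described above can be sketched with Ruby's standard `zlib` library. This mirrors the default `crc32` behaviour but is a sketch, not the library's exact implementation:

```ruby
require "zlib"

partition_count = 12
partition_key = "order-4711"

# Hash the key and mod by the partition count: the same key always maps
# to the same partition, which is exactly what semantic partitioning needs.
partition = Zlib.crc32(partition_key) % partition_count
puts partition
```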
  #### Buffering and Error Handling
 
  The producer is designed for resilience in the face of temporary network errors, Kafka broker failovers, and other issues that prevent the client from writing messages to the destination topics. It does this by employing local, in-memory buffers. Only when messages are acknowledged by a Kafka broker will they be removed from the buffer.
@@ -704,6 +772,88 @@ consumer.each_message do |message|
  end
  ```
 
+ #### Customizing Partition Assignment Strategy
+
+ In some cases, you might want to assign more partitions to some consumers. For example, in applications that insert records into a database, consumers running on hosts near the database can process more messages than consumers running on other hosts.
+ You can use a custom assignment strategy by passing an object that implements `#call` as the `assignment_strategy` argument, like below:
+
+ ```ruby
+ class CustomAssignmentStrategy
+   def initialize(user_data)
+     @user_data = user_data
+   end
+
+   # Assign the topic partitions to the group members.
+   #
+   # @param cluster [Kafka::Cluster]
+   # @param members [Hash<String, Kafka::Protocol::JoinGroupResponse::Metadata>] a hash
+   #   mapping member ids to metadata
+   # @param partitions [Array<Kafka::ConsumerGroup::Assignor::Partition>] a list of
+   #   partitions the consumer group processes
+   # @return [Hash<String, Array<Kafka::ConsumerGroup::Assignor::Partition>>] a hash
+   #   mapping member ids to partitions.
+   def call(cluster:, members:, partitions:)
+     ...
+   end
+ end
+
+ strategy = CustomAssignmentStrategy.new("some-host-information")
+ consumer = kafka.consumer(group_id: "some-group", assignment_strategy: strategy)
+ ```
+
+ `members` is a hash mapping member IDs to metadata, and `partitions` is a list of partitions the consumer group processes. The method `call` must return a hash mapping member IDs to partitions. For example, the following strategy assigns partitions randomly:
+
+ ```ruby
+ class RandomAssignmentStrategy
+   def call(cluster:, members:, partitions:)
+     member_ids = members.keys
+     partitions.each_with_object(Hash.new { |h, k| h[k] = [] }) do |partition, partitions_per_member|
+       partitions_per_member[member_ids[rand(member_ids.count)]] << partition
+     end
+   end
+ end
+ ```
+
+ If the strategy needs user data, you should define the method `user_data` that returns user data on each consumer. For example, the following strategy uses the consumers' IP addresses as user data:
+
+ ```ruby
+ class NetworkTopologyAssignmentStrategy
+   def user_data
+     Socket.ip_address_list.find(&:ipv4_private?).ip_address
+   end
+
+   def call(cluster:, members:, partitions:)
+     # Display each member ID and its IP address
+     members.each do |id, metadata|
+       puts "#{id}: #{metadata.user_data}"
+     end
+
+     # Assign partitions considering the network topology
+     ...
+   end
+ end
+ ```
+
+ Note that the strategy uses the class name as the default protocol name. You can change it by defining the method `protocol_name`:
+
+ ```ruby
+ class NetworkTopologyAssignmentStrategy
+   def protocol_name
+     "networktopology"
+   end
+
+   def user_data
+     Socket.ip_address_list.find(&:ipv4_private?).ip_address
+   end
+
+   def call(cluster:, members:, partitions:)
+     ...
+   end
+ end
+ ```
+
+ As the method `call` might receive different user data from what it expects, you should avoid using the same protocol name as another strategy that uses different user data.
+
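Tying back to the motivating example at the top of this section (consumers near the database should take more partitions), here is a hedged sketch of a weight-based strategy. The numeric-weight-in-`user_data` convention and the `CONSUMER_WEIGHT` variable are invented for this example; only the `#call`/`#user_data` interface comes from the README text above:

```ruby
# Sketch: members advertise a numeric weight via user_data; a member with
# weight N receives roughly N times as many partitions as a weight-1 member.
class WeightedAssignmentStrategy
  def user_data
    ENV.fetch("CONSUMER_WEIGHT", "1") # invented convention, not part of the library
  end

  def call(cluster:, members:, partitions:)
    # Repeat each member ID according to its weight, then deal partitions
    # round-robin over the weighted pool.
    pool = members.flat_map { |id, metadata| [id] * [metadata.user_data.to_i, 1].max }
    assignment = Hash.new { |h, k| h[k] = [] }
    partitions.each_with_index { |partition, i| assignment[pool[i % pool.size]] << partition }
    assignment
  end
end
```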
 
  ### Thread Safety
 
@@ -933,6 +1083,8 @@ This configures the store to look up CA certificates from the system default cer
 
  In order to authenticate the client to the cluster, you need to pass in a certificate and key created for the client and trusted by the brokers.
 
+ **NOTE**: You can disable hostname validation by passing `ssl_verify_hostname: false`.
+
  ```ruby
  kafka = Kafka.new(
    ["kafka1:9092"],
@@ -940,6 +1092,7 @@ kafka = Kafka.new(
    ssl_client_cert: File.read('my_client_cert.pem'),
    ssl_client_cert_key: File.read('my_client_cert_key.pem'),
    ssl_client_cert_key_password: 'my_client_cert_key_password',
+   ssl_verify_hostname: false,
    # ...
  )
  ```
@@ -968,6 +1121,20 @@ kafka = Kafka.new(
  )
  ```
 
+ ##### AWS MSK (IAM)
+
+ In order to authenticate using IAM with an AWS MSK cluster, set your access key, secret key, and region when initializing the Kafka client:
+
+ ```ruby
+ k = Kafka.new(
+   ["kafka1:9092"],
+   sasl_aws_msk_iam_access_key_id: 'iam_access_key',
+   sasl_aws_msk_iam_secret_key_id: 'iam_secret_key',
+   sasl_aws_msk_iam_aws_region: 'us-west-2',
+   ssl_ca_certs_from_system: true,
+   # ...
+ )
+ ```
+
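The 1.5.0 changelog above also mentions a session token for authenticating via temporary credentials (#937), which this README hunk does not show. Assuming the option follows the same keyword naming pattern, usage would look roughly like the sketch below; the `sasl_aws_msk_iam_session_token` name is an assumption, not confirmed by this diff:

```ruby
# Sketch only: the session-token keyword is assumed, not taken from this diff.
k = Kafka.new(
  ["kafka1:9092"],
  sasl_aws_msk_iam_access_key_id: 'temporary_access_key',
  sasl_aws_msk_iam_secret_key_id: 'temporary_secret_key',
  sasl_aws_msk_iam_session_token: 'session_token', # assumed keyword
  sasl_aws_msk_iam_aws_region: 'us-west-2',
  ssl_ca_certs_from_system: true,
)
```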
  ##### PLAIN
 
  In order to authenticate using PLAIN, you must set your username and password when initializing the Kafka client: