ruby-kafka 0.7.8 → 1.1.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: f85ddf1d516d86a5cedcf4de17f400cc93e8e20846dd657b3acb85ba6aca1bf6
4
- data.tar.gz: d7a55e0fad7767fed575ac2d05b3cba3837402dc97c908e7f47709bd550a484d
3
+ metadata.gz: 22b958f3d452f35c3c6084a5b8b790f370b96069e87ef5974838e5ea4b6945e1
4
+ data.tar.gz: eb6d704dbaace9c13a99bc93924d8a6b84ee5eb686c9c863a81d085ffeb7e92d
5
5
  SHA512:
6
- metadata.gz: 1c4a94639c6c6a889d4be943c4201c60d6d29d872d20fde38b86c54168cb98ea1bfa0b1f151595033841792ee64db73cfdf1138ccd3ad51180a18ad742613230
7
- data.tar.gz: 4bde456d53429e192dc2d7dae7a75e009c15d1b793be658fb7402831376556cf9b2ea44eeaf858229a5d2b5e8fa75c2fa74e1ea1f89777a2b5728838e79422d2
6
+ metadata.gz: 22c1d59bcdd42849849122b559f0d161653a5cfa492ffb36f28a875b109444d670a76bc62ba0856ae2ebce95fd10abce9e16834928fc7fe6eb2ee006827d307c
7
+ data.tar.gz: 3a51df6b1d40e1edbd96c06f3473319f0ac1b072b040b676a8e1d980fbaad114ad0248a7670cb7ae0a51a2c79ac390f2f4cfe8f65eedb5dd55be020c6011bc18
@@ -145,21 +145,149 @@ jobs:
145
145
  environment:
146
146
  LOG_LEVEL: DEBUG
147
147
  - image: wurstmeister/zookeeper
148
- - image: wurstmeister/kafka:2.12-2.1.0
148
+ - image: wurstmeister/kafka:2.12-2.1.1
149
149
  environment:
150
150
  KAFKA_ADVERTISED_HOST_NAME: localhost
151
151
  KAFKA_ADVERTISED_PORT: 9092
152
152
  KAFKA_PORT: 9092
153
153
  KAFKA_ZOOKEEPER_CONNECT: localhost:2181
154
154
  KAFKA_DELETE_TOPIC_ENABLE: true
155
- - image: wurstmeister/kafka:2.12-2.1.0
155
+ - image: wurstmeister/kafka:2.12-2.1.1
156
156
  environment:
157
157
  KAFKA_ADVERTISED_HOST_NAME: localhost
158
158
  KAFKA_ADVERTISED_PORT: 9093
159
159
  KAFKA_PORT: 9093
160
160
  KAFKA_ZOOKEEPER_CONNECT: localhost:2181
161
161
  KAFKA_DELETE_TOPIC_ENABLE: true
162
- - image: wurstmeister/kafka:2.12-2.1.0
162
+ - image: wurstmeister/kafka:2.12-2.1.1
163
+ environment:
164
+ KAFKA_ADVERTISED_HOST_NAME: localhost
165
+ KAFKA_ADVERTISED_PORT: 9094
166
+ KAFKA_PORT: 9094
167
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
168
+ KAFKA_DELETE_TOPIC_ENABLE: true
169
+ steps:
170
+ - checkout
171
+ - run: bundle install --path vendor/bundle
172
+ - run: bundle exec rspec --profile --tag functional spec/functional
173
+
174
+ kafka-2.2:
175
+ docker:
176
+ - image: circleci/ruby:2.5.1-node
177
+ environment:
178
+ LOG_LEVEL: DEBUG
179
+ - image: wurstmeister/zookeeper
180
+ - image: wurstmeister/kafka:2.12-2.2.1
181
+ environment:
182
+ KAFKA_ADVERTISED_HOST_NAME: localhost
183
+ KAFKA_ADVERTISED_PORT: 9092
184
+ KAFKA_PORT: 9092
185
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
186
+ KAFKA_DELETE_TOPIC_ENABLE: true
187
+ - image: wurstmeister/kafka:2.12-2.2.1
188
+ environment:
189
+ KAFKA_ADVERTISED_HOST_NAME: localhost
190
+ KAFKA_ADVERTISED_PORT: 9093
191
+ KAFKA_PORT: 9093
192
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
193
+ KAFKA_DELETE_TOPIC_ENABLE: true
194
+ - image: wurstmeister/kafka:2.12-2.2.1
195
+ environment:
196
+ KAFKA_ADVERTISED_HOST_NAME: localhost
197
+ KAFKA_ADVERTISED_PORT: 9094
198
+ KAFKA_PORT: 9094
199
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
200
+ KAFKA_DELETE_TOPIC_ENABLE: true
201
+ steps:
202
+ - checkout
203
+ - run: bundle install --path vendor/bundle
204
+ - run: bundle exec rspec --profile --tag functional spec/functional
205
+
206
+ kafka-2.3:
207
+ docker:
208
+ - image: circleci/ruby:2.5.1-node
209
+ environment:
210
+ LOG_LEVEL: DEBUG
211
+ - image: wurstmeister/zookeeper
212
+ - image: wurstmeister/kafka:2.12-2.3.1
213
+ environment:
214
+ KAFKA_ADVERTISED_HOST_NAME: localhost
215
+ KAFKA_ADVERTISED_PORT: 9092
216
+ KAFKA_PORT: 9092
217
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
218
+ KAFKA_DELETE_TOPIC_ENABLE: true
219
+ - image: wurstmeister/kafka:2.12-2.3.1
220
+ environment:
221
+ KAFKA_ADVERTISED_HOST_NAME: localhost
222
+ KAFKA_ADVERTISED_PORT: 9093
223
+ KAFKA_PORT: 9093
224
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
225
+ KAFKA_DELETE_TOPIC_ENABLE: true
226
+ - image: wurstmeister/kafka:2.12-2.3.1
227
+ environment:
228
+ KAFKA_ADVERTISED_HOST_NAME: localhost
229
+ KAFKA_ADVERTISED_PORT: 9094
230
+ KAFKA_PORT: 9094
231
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
232
+ KAFKA_DELETE_TOPIC_ENABLE: true
233
+ steps:
234
+ - checkout
235
+ - run: bundle install --path vendor/bundle
236
+ - run: bundle exec rspec --profile --tag functional spec/functional
237
+
238
+ kafka-2.4:
239
+ docker:
240
+ - image: circleci/ruby:2.5.1-node
241
+ environment:
242
+ LOG_LEVEL: DEBUG
243
+ - image: wurstmeister/zookeeper
244
+ - image: wurstmeister/kafka:2.12-2.4.0
245
+ environment:
246
+ KAFKA_ADVERTISED_HOST_NAME: localhost
247
+ KAFKA_ADVERTISED_PORT: 9092
248
+ KAFKA_PORT: 9092
249
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
250
+ KAFKA_DELETE_TOPIC_ENABLE: true
251
+ - image: wurstmeister/kafka:2.12-2.4.0
252
+ environment:
253
+ KAFKA_ADVERTISED_HOST_NAME: localhost
254
+ KAFKA_ADVERTISED_PORT: 9093
255
+ KAFKA_PORT: 9093
256
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
257
+ KAFKA_DELETE_TOPIC_ENABLE: true
258
+ - image: wurstmeister/kafka:2.12-2.4.0
259
+ environment:
260
+ KAFKA_ADVERTISED_HOST_NAME: localhost
261
+ KAFKA_ADVERTISED_PORT: 9094
262
+ KAFKA_PORT: 9094
263
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
264
+ KAFKA_DELETE_TOPIC_ENABLE: true
265
+ steps:
266
+ - checkout
267
+ - run: bundle install --path vendor/bundle
268
+ - run: bundle exec rspec --profile --tag functional spec/functional
269
+
270
+ kafka-2.5:
271
+ docker:
272
+ - image: circleci/ruby:2.5.1-node
273
+ environment:
274
+ LOG_LEVEL: DEBUG
275
+ - image: wurstmeister/zookeeper
276
+ - image: wurstmeister/kafka:2.12-2.5.0
277
+ environment:
278
+ KAFKA_ADVERTISED_HOST_NAME: localhost
279
+ KAFKA_ADVERTISED_PORT: 9092
280
+ KAFKA_PORT: 9092
281
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
282
+ KAFKA_DELETE_TOPIC_ENABLE: true
283
+ - image: wurstmeister/kafka:2.12-2.5.0
284
+ environment:
285
+ KAFKA_ADVERTISED_HOST_NAME: localhost
286
+ KAFKA_ADVERTISED_PORT: 9093
287
+ KAFKA_PORT: 9093
288
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
289
+ KAFKA_DELETE_TOPIC_ENABLE: true
290
+ - image: wurstmeister/kafka:2.12-2.5.0
163
291
  environment:
164
292
  KAFKA_ADVERTISED_HOST_NAME: localhost
165
293
  KAFKA_ADVERTISED_PORT: 9094
@@ -181,3 +309,7 @@ workflows:
181
309
  - kafka-1.1
182
310
  - kafka-2.0
183
311
  - kafka-2.1
312
+ - kafka-2.2
313
+ - kafka-2.3
314
+ - kafka-2.4
315
+ - kafka-2.5
@@ -0,0 +1,19 @@
1
+ name: Mark stale issues and pull requests
2
+
3
+ on:
4
+ schedule:
5
+ - cron: "0 0 * * *"
6
+
7
+ jobs:
8
+ stale:
9
+
10
+ runs-on: ubuntu-latest
11
+
12
+ steps:
13
+ - uses: actions/stale@v1
14
+ with:
15
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
16
+ stale-issue-message: 'Issue has been marked as stale due to a lack of activity.'
17
+ stale-pr-message: 'Pull request has been marked as stale due to a lack of activity.'
18
+ stale-issue-label: 'no-issue-activity'
19
+ stale-pr-label: 'no-pr-activity'
@@ -4,6 +4,32 @@ Changes and additions to the library will be listed here.
4
4
 
5
5
  ## Unreleased
6
6
 
7
+ ## 1.1.0
8
+
9
+ - Extra sanity checking when marking offsets as processed (#824).
10
+ - Make `verify_hostname` settable for SSL contexts (#828).
11
+ - Instrument `create_time` from last message in batch (#811).
12
+ - Add client function for fetching topic replica count (#822).
13
+ - Allow consumers to refresh the topic lists (#818).
14
+ - Disconnect after leaving a group (#817).
15
+ - Use `max_wait_time` as the sleep instead of hardcoded 2 seconds (#825).
16
+
17
+ ## 1.0.0
18
+
19
+ - Add client methods to manage configs (#759)
20
+ - Support Kafka 2.3 and 2.4.
21
+
22
+ ## 0.7.10
23
+
24
+ - Fix logger again (#762)
25
+
26
+ ## 0.7.9
27
+
28
+ - Fix SSL authentication for ruby < 2.4.0 (#742)
29
+ - Add metrics for prometheus/client (#739)
30
+ - Do not add nil message entries when ignoring old messages (#746)
31
+ - Scram authentication thread safe (#743)
32
+
7
33
  ## 0.7.8
8
34
  - Optionally verify hostname on SSL certs (#733)
9
35
 
data/README.md CHANGED
@@ -108,6 +108,26 @@ Or install it yourself as:
108
108
  <td>Limited support</td>
109
109
  <td>Limited support</td>
110
110
  </tr>
111
+ <tr>
112
+ <th>Kafka 2.2</th>
113
+ <td>Limited support</td>
114
+ <td>Limited support</td>
115
+ </tr>
116
+ <tr>
117
+ <th>Kafka 2.3</th>
118
+ <td>Limited support</td>
119
+ <td>Limited support</td>
120
+ </tr>
121
+ <tr>
122
+ <th>Kafka 2.4</th>
123
+ <td>Limited support</td>
124
+ <td>Limited support</td>
125
+ </tr>
126
+ <tr>
127
+ <th>Kafka 2.5</th>
128
+ <td>Limited support</td>
129
+ <td>Limited support</td>
130
+ </tr>
111
131
  </table>
112
132
 
113
133
  This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with the v0.5.x series. There's limited support for Kafka 0.8, and things should work with Kafka 0.11, although there may be performance issues due to changes in the protocol.
@@ -119,6 +139,10 @@ This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with t
119
139
  - **Kafka 1.0:** Everything that works with Kafka 0.11 should still work, but so far no features specific to Kafka 1.0 have been added.
120
140
  - **Kafka 2.0:** Everything that works with Kafka 1.0 should still work, but so far no features specific to Kafka 2.0 have been added.
121
141
  - **Kafka 2.1:** Everything that works with Kafka 2.0 should still work, but so far no features specific to Kafka 2.1 have been added.
142
+ - **Kafka 2.2:** Everything that works with Kafka 2.1 should still work, but so far no features specific to Kafka 2.2 have been added.
143
+ - **Kafka 2.3:** Everything that works with Kafka 2.2 should still work, but so far no features specific to Kafka 2.3 have been added.
144
+ - **Kafka 2.4:** Everything that works with Kafka 2.3 should still work, but so far no features specific to Kafka 2.4 have been added.
145
+ - **Kafka 2.5:** Everything that works with Kafka 2.4 should still work, but so far no features specific to Kafka 2.5 have been added.
122
146
 
123
147
  This library requires Ruby 2.1 or higher.
124
148
 
@@ -928,6 +952,8 @@ This configures the store to look up CA certificates from the system default cer
928
952
 
929
953
  In order to authenticate the client to the cluster, you need to pass in a certificate and key created for the client and trusted by the brokers.
930
954
 
955
+ **NOTE**: You can disable hostname validation by passing `verify_hostname: false`.
956
+
931
957
  ```ruby
932
958
  kafka = Kafka.new(
933
959
  ["kafka1:9092"],
@@ -103,6 +103,9 @@ module Kafka
103
103
  # @raise [BufferOverflow] if the message queue is full.
104
104
  # @return [nil]
105
105
  def produce(value, topic:, **options)
106
+ # We want to fail fast if `topic` isn't a String
107
+ topic = topic.to_str
108
+
106
109
  ensure_threads_running!
107
110
 
108
111
  if @queue.size >= @max_queue_size
@@ -65,6 +65,10 @@ module Kafka
65
65
  # @param sasl_oauth_token_provider [Object, nil] OAuthBearer Token Provider instance that
66
66
  # implements method token. See {Sasl::OAuth#initialize}
67
67
  #
68
+ # @param verify_hostname [Boolean, true] whether to verify that the host serving
69
+ # the SSL certificate and the signing chain of the certificate have the correct domains
70
+ # based on the CA certificate
71
+ #
68
72
  # @return [Client]
69
73
  def initialize(seed_brokers:, client_id: "ruby-kafka", logger: nil, connect_timeout: nil, socket_timeout: nil,
70
74
  ssl_ca_cert_file_path: nil, ssl_ca_cert: nil, ssl_client_cert: nil, ssl_client_cert_key: nil,
@@ -138,6 +142,9 @@ module Kafka
138
142
  def deliver_message(value, key: nil, headers: {}, topic:, partition: nil, partition_key: nil, retries: 1)
139
143
  create_time = Time.now
140
144
 
145
+ # We want to fail fast if `topic` isn't a String
146
+ topic = topic.to_str
147
+
141
148
  message = PendingMessage.new(
142
149
  value: value,
143
150
  key: key,
@@ -333,15 +340,20 @@ module Kafka
333
340
  # @param fetcher_max_queue_size [Integer] max number of items in the fetch queue that
334
341
  # are stored for further processing. Note, that each item in the queue represents a
335
342
  # response from a single broker.
343
+ # @param refresh_topic_interval [Integer] interval of refreshing the topic list.
344
+ # If it is 0, the topic list won't be refreshed (default)
345
+ # If it is n (n > 0), the topic list will be refreshed every n seconds
336
346
  # @return [Consumer]
337
347
  def consumer(
338
348
  group_id:,
339
349
  session_timeout: 30,
350
+ rebalance_timeout: 60,
340
351
  offset_commit_interval: 10,
341
352
  offset_commit_threshold: 0,
342
353
  heartbeat_interval: 10,
343
354
  offset_retention_time: nil,
344
- fetcher_max_queue_size: 100
355
+ fetcher_max_queue_size: 100,
356
+ refresh_topic_interval: 0
345
357
  )
346
358
  cluster = initialize_cluster
347
359
 
@@ -357,6 +369,7 @@ module Kafka
357
369
  logger: @logger,
358
370
  group_id: group_id,
359
371
  session_timeout: session_timeout,
372
+ rebalance_timeout: rebalance_timeout,
360
373
  retention_time: retention_time,
361
374
  instrumenter: instrumenter,
362
375
  )
@@ -394,6 +407,7 @@ module Kafka
394
407
  fetcher: fetcher,
395
408
  session_timeout: session_timeout,
396
409
  heartbeat: heartbeat,
410
+ refresh_topic_interval: refresh_topic_interval
397
411
  )
398
412
  end
399
413
 
@@ -530,6 +544,24 @@ module Kafka
530
544
  end
531
545
  end
532
546
 
547
+ # Describe broker configs
548
+ #
549
+ # @param broker_id [int] the id of the broker
550
+ # @param configs [Array] array of config keys.
551
+ # @return [Array<Kafka::Protocol::DescribeConfigsResponse::ConfigEntry>]
552
+ def describe_configs(broker_id, configs = [])
553
+ @cluster.describe_configs(broker_id, configs)
554
+ end
555
+
556
+ # Alter broker configs
557
+ #
558
+ # @param broker_id [int] the id of the broker
559
+ # @param configs [Array] array of config strings.
560
+ # @return [nil]
561
+ def alter_configs(broker_id, configs = [])
562
+ @cluster.alter_configs(broker_id, configs)
563
+ end
564
+
533
565
  # Creates a topic in the cluster.
534
566
  #
535
567
  # @example Creating a topic with log compaction
@@ -615,6 +647,14 @@ module Kafka
615
647
  @cluster.describe_group(group_id)
616
648
  end
617
649
 
650
+ # Fetch all committed offsets for a consumer group
651
+ #
652
+ # @param group_id [String] the id of the consumer group
653
+ # @return [Hash<String, Hash<Integer, Kafka::Protocol::OffsetFetchResponse::PartitionOffsetInfo>>]
654
+ def fetch_group_offsets(group_id)
655
+ @cluster.fetch_group_offsets(group_id)
656
+ end
657
+
618
658
  # Create partitions for a topic.
619
659
  #
620
660
  # @param name [String] the name of the topic.
@@ -663,6 +703,14 @@ module Kafka
663
703
  @cluster.partitions_for(topic).count
664
704
  end
665
705
 
706
+ # Counts the number of replicas for a topic's partition
707
+ #
708
+ # @param topic [String]
709
+ # @return [Integer] the number of replica nodes for the topic's partition
710
+ def replica_count_for(topic)
711
+ @cluster.partitions_for(topic).first.replicas.count
712
+ end
713
+
666
714
  # Retrieve the offset of the last message in a partition. If there are no
667
715
  # messages in the partition -1 is returned.
668
716
  #
@@ -45,6 +45,10 @@ module Kafka
45
45
  new_topics = topics - @target_topics
46
46
 
47
47
  unless new_topics.empty?
48
+ if new_topics.any? { |topic| topic.nil? or topic.empty? }
49
+ raise ArgumentError, "Topic must not be nil or empty"
50
+ end
51
+
48
52
  @logger.info "New topics added to target list: #{new_topics.to_a.join(', ')}"
49
53
 
50
54
  @target_topics.merge(new_topics)
@@ -139,6 +143,40 @@ module Kafka
139
143
  end
140
144
  end
141
145
 
146
+ def describe_configs(broker_id, configs = [])
147
+ options = {
148
+ resources: [[Kafka::Protocol::RESOURCE_TYPE_CLUSTER, broker_id.to_s, configs]]
149
+ }
150
+
151
+ info = cluster_info.brokers.find {|broker| broker.node_id == broker_id }
152
+ broker = @broker_pool.connect(info.host, info.port, node_id: info.node_id)
153
+
154
+ response = broker.describe_configs(**options)
155
+
156
+ response.resources.each do |resource|
157
+ Protocol.handle_error(resource.error_code, resource.error_message)
158
+ end
159
+
160
+ response.resources.first.configs
161
+ end
162
+
163
+ def alter_configs(broker_id, configs = [])
164
+ options = {
165
+ resources: [[Kafka::Protocol::RESOURCE_TYPE_CLUSTER, broker_id.to_s, configs]]
166
+ }
167
+
168
+ info = cluster_info.brokers.find {|broker| broker.node_id == broker_id }
169
+ broker = @broker_pool.connect(info.host, info.port, node_id: info.node_id)
170
+
171
+ response = broker.alter_configs(**options)
172
+
173
+ response.resources.each do |resource|
174
+ Protocol.handle_error(resource.error_code, resource.error_message)
175
+ end
176
+
177
+ nil
178
+ end
179
+
142
180
  def partitions_for(topic)
143
181
  add_target_topics([topic])
144
182
  refresh_metadata_if_necessary!
@@ -252,6 +290,20 @@ module Kafka
252
290
  group
253
291
  end
254
292
 
293
+ def fetch_group_offsets(group_id)
294
+ topics = get_group_coordinator(group_id: group_id)
295
+ .fetch_offsets(group_id: group_id, topics: nil)
296
+ .topics
297
+
298
+ topics.each do |_, partitions|
299
+ partitions.each do |_, response|
300
+ Protocol.handle_error(response.error_code)
301
+ end
302
+ end
303
+
304
+ topics
305
+ end
306
+
255
307
  def create_partitions_for(name, num_partitions:, timeout:)
256
308
  options = {
257
309
  topics: [[name, num_partitions, nil]],