ruby-kafka 0.4.3 → 0.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 6c934181fd72d8f78eb9b4438b7297a6a71a51eb
- data.tar.gz: e3a970abf6839ab03bdd34caeb53dfe9fe91666d
+ metadata.gz: 3f09ed3cd80d943c7aa595e43c3de6fd1bd0f883
+ data.tar.gz: 5906a940b05edf1ee429c184e03d4f2490b2454a
  SHA512:
- metadata.gz: 266b2624a721b56c991797a6848531e00675274a24241db59de6d8649ac70ba41799b40d39efbfd8c90a1b467aa142fe68526533e23933c606fbaac7b9ae780d
- data.tar.gz: 1d41e52b87201e803a3edf62a56a0086c1f8aed86dc255a9c73eee5b295acc27113e7d82adba4558666142509664610f5531c654049e596c546c89e468f153b5
+ metadata.gz: e352a9d47347f413ffafe9e89fca1369a6aa2de17da5b65de2adf3ba659784aea6939b127411626f7889941e084d57d4329b047e5aba26f83d31a5f312b3caaf
+ data.tar.gz: c1e97414fa1a314ec8e6f9f63fe992544d50c3dd6c0650ac1c2f76ba5866a0c3ed3457f38d71021f9669e6dcc904498a2f9c6ee63542234e94fd5ff000ff4c27
CHANGELOG.md CHANGED
@@ -4,10 +4,13 @@ Changes and additions to the library will be listed here.

  ## Unreleased

+ ## v0.4.4
+
+ - Include the offset lag in batch consumer metrics (Statsd).
+
  ## v0.4.3

  - Restart the async producer thread automatically after errors.
- - Include the offset lag in batch consumer metrics (Statsd).
  - Make the default `max_wait_time` more sane.
  - Fix issue with cached default offset lookups (#431).
  - Upgrade to Datadog client version 3.
examples/consumer-group.rb ADDED
@@ -0,0 +1,26 @@
+ $LOAD_PATH.unshift(File.expand_path("../../lib", __FILE__))
+
+ require "kafka"
+
+ logger = Logger.new(STDOUT)
+ brokers = ENV.fetch("KAFKA_BROKERS", "localhost:9092").split(",")
+
+ # Make sure to create this topic in your Kafka cluster or configure the
+ # cluster to auto-create topics.
+ topic = "text"
+
+ kafka = Kafka.new(
+   seed_brokers: brokers,
+   client_id: "test",
+   socket_timeout: 20,
+   logger: logger,
+ )
+
+ consumer = kafka.consumer(group_id: "test")
+ consumer.subscribe(topic)
+
+ trap("TERM") { consumer.stop }
+
+ consumer.each_message do |message|
+   puts message.value
+ end
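The new `examples/consumer-group.rb` blocks until messages arrive on the `text` topic. A minimal companion producer, sketched here rather than taken from the gem, can feed it via `Kafka::Client#deliver_message`, the synchronous single-message API:

```ruby
# Hypothetical companion script, not part of the release: writes a few
# messages to the "text" topic so the consumer example has data to read.
$LOAD_PATH.unshift(File.expand_path("../../lib", __FILE__))

require "kafka"

brokers = ENV.fetch("KAFKA_BROKERS", "localhost:9092").split(",")
kafka = Kafka.new(seed_brokers: brokers, client_id: "test")

10.times do |i|
  # deliver_message blocks until the message has been written.
  kafka.deliver_message("hello ##{i}", topic: "text")
end
```

To stop the consumer, send the process SIGTERM; the `trap("TERM")` handler calls `consumer.stop`, which lets the `each_message` loop exit cleanly.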
lib/kafka.rb CHANGED
@@ -17,6 +17,10 @@ module Kafka
    end
  end

+ # A fetch operation was executed with no partitions specified.
+ class NoPartitionsToFetchFrom < Error
+ end
+
  # Subclasses of this exception class map to an error code described in the
  # Kafka protocol specification.
  #
@@ -221,9 +225,6 @@ module Kafka
  class FetchError < Error
  end

- class NoPartitionsAssignedError < Error
- end
-
  # Initializes a new Kafka client.
  #
  # @see Client#initialize
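Taken together, the two hunks above rename and relocate an error: `NoPartitionsAssignedError` is gone, and the new `NoPartitionsToFetchFrom` is raised by the fetch operation instead (see `lib/kafka/fetch_operation.rb` below). The consumer now rescues it and retries after a backoff rather than propagating it to the caller (see `lib/kafka/consumer.rb` below).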
lib/kafka/broker.rb CHANGED
@@ -10,6 +10,10 @@ module Kafka
    @logger = logger
  end

+ def address_match?(host, port)
+   @connection.address_match?(host, port)
+ end
+
  # @return [String]
  def to_s
    "#{@connection} (node_id=#{@node_id.inspect})"
lib/kafka/broker_pool.rb CHANGED
@@ -9,7 +9,12 @@ module Kafka
  end

  def connect(host, port, node_id: nil)
-   return @brokers.fetch(node_id) if @brokers.key?(node_id)
+   if @brokers.key?(node_id)
+     broker = @brokers.fetch(node_id)
+     return broker if broker.address_match?(host, port)
+     broker.disconnect
+     @brokers[node_id] = nil
+   end

    broker = Broker.new(
      connection: @connection_builder.build_connection(host, port),
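This guards against a stale cached connection: cluster metadata can begin reporting a different host and port for an existing node id, for example after a broker is replaced. A standalone sketch of the same reuse-or-reconnect decision, using a hypothetical `Broker` stand-in rather than the library's own classes:

```ruby
# Sketch only: mirrors the pool's decision, not the real Kafka::BrokerPool.
Broker = Struct.new(:host, :port) do
  def address_match?(other_host, other_port)
    host == other_host && port == other_port
  end

  def disconnect
    puts "disconnecting #{host}:#{port}"
  end
end

def connect(brokers, host, port, node_id)
  if brokers.key?(node_id)
    cached = brokers.fetch(node_id)
    return cached if cached.address_match?(host, port)

    # The node id now resolves to a different address, so drop the
    # old connection before opening a new one.
    cached.disconnect
    brokers[node_id] = nil
  end

  brokers[node_id] = Broker.new(host, port)
end

brokers = {}
connect(brokers, "kafka1.example.com", 9092, 1) # caches a new broker
connect(brokers, "kafka1.example.com", 9092, 1) # reuses the cached broker
connect(brokers, "kafka2.example.com", 9092, 1) # disconnects, then replaces it
```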
lib/kafka/cluster.rb CHANGED
@@ -93,9 +93,20 @@ module Kafka
  Protocol.handle_error(response.error_code)

  coordinator_id = response.coordinator_id
- coordinator = connect_to_broker(coordinator_id)

- @logger.debug "Coordinator for group `#{group_id}` is #{coordinator}"
+ @logger.debug "Coordinator for group `#{group_id}` is #{coordinator_id}. Connecting..."
+
+ # It's possible that a new broker is introduced to the cluster and
+ # becomes the coordinator before we have a chance to refresh_metadata.
+ coordinator = begin
+   connect_to_broker(coordinator_id)
+ rescue Kafka::NoSuchBroker
+   @logger.debug "Broker #{coordinator_id} missing from broker cache, refreshing"
+   refresh_metadata!
+   connect_to_broker(coordinator_id)
+ end
+
+ @logger.debug "Connected to coordinator: #{coordinator} for group `#{group_id}`"

  return coordinator
  rescue GroupCoordinatorNotAvailable
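This fixes a race during coordinator discovery: a broker can join the cluster and become a group's coordinator before the client refreshes its metadata, in which case `connect_to_broker` fails with `Kafka::NoSuchBroker`. The fix refreshes the metadata once and retries; a second failure still propagates. A generic sketch of the same shape, with hypothetical names:

```ruby
# Hypothetical helper, not library API: look a key up in a cache,
# refreshing the cache once before giving up.
def lookup_with_refresh(cache, key)
  cache.fetch(key)
rescue KeyError
  cache.refresh!   # assumed to repopulate the cache from a fresh source
  cache.fetch(key) # a second KeyError propagates to the caller
end
```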
lib/kafka/connection.rb CHANGED
@@ -59,6 +59,10 @@ module Kafka
    @sasl_authenticator = sasl_authenticator
  end

+ def address_match?(host, port)
+   @host == host && @port == port
+ end
+
  def to_s
    "#{@host}:#{@port}"
  end
lib/kafka/consumer.rb CHANGED
@@ -216,6 +216,8 @@

  # We may not have received any messages, but it's still a good idea to
  # commit offsets if we've processed messages in the last set of batches.
+ # This also ensures the offsets are retained if we haven't read any messages
+ # since the offset retention period has elapsed.
  @offset_manager.commit_offsets_if_necessary
  end
  end
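The same two comment lines are added at both commit sites (here and in the batch loop below): committing on a quiet consumer re-registers the offsets with the broker, so they survive even when nothing has been read for longer than the broker's offset retention period. How often `commit_offsets_if_necessary` actually commits is governed by the consumer's commit settings; a sketch using options documented in the ruby-kafka README, with illustrative values:

```ruby
consumer = kafka.consumer(
  group_id: "test",
  offset_commit_interval: 10,   # commit at most every 10 seconds...
  offset_commit_threshold: 100, # ...or after every 100 processed messages
)
```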
@@ -279,6 +281,12 @@

  return if !@running
  end
+
+ # We may not have received any messages, but it's still a good idea to
+ # commit offsets if we've processed messages in the last set of batches.
+ # This also ensures the offsets are retained if we haven't read any messages
+ # since the offset retention period has elapsed.
+ @offset_manager.commit_offsets_if_necessary
  end
  end

@@ -377,8 +385,6 @@

  @heartbeat.send_if_necessary

- raise NoPartitionsAssignedError if subscribed_partitions.empty?
-
  operation = FetchOperation.new(
    cluster: @cluster,
    logger: @logger,
@@ -401,6 +407,13 @@
  end

  operation.execute
+ rescue NoPartitionsToFetchFrom
+   backoff = max_wait_time > 0 ? max_wait_time : 1
+
+   @logger.info "There are no partitions to fetch from, sleeping for #{backoff}s"
+   sleep backoff
+
+   retry
  rescue OffsetOutOfRange => e
    @logger.error "Invalid offset for #{e.topic}/#{e.partition}, resetting to default offset"

lib/kafka/fetch_operation.rb CHANGED
@@ -46,6 +46,10 @@ module Kafka

  topics_by_broker = {}

+ if @topics.none? {|topic, partitions| partitions.any? }
+   raise NoPartitionsToFetchFrom
+ end
+
  @topics.each do |topic, partitions|
    partitions.each do |partition, options|
      broker = @cluster.get_leader(topic, partition)
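This guard is the raising side of the retry loop added to `lib/kafka/consumer.rb` above: when the consumer currently has no partitions to fetch from (for instance, a group member left without an assignment because the group has more members than partitions), the fetch operation raises `NoPartitionsToFetchFrom`, and the consumer sleeps for `max_wait_time` (or 1 second if `max_wait_time` is 0) and retries, instead of raising to the caller as 0.4.3 did.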
lib/kafka/statsd.rb CHANGED
@@ -97,6 +97,7 @@ module Kafka
  end

  def process_batch(event)
+   lag = event.payload.fetch(:offset_lag)
    messages = event.payload.fetch(:message_count)
    client = event.payload.fetch(:client_id)
    group_id = event.payload.fetch(:group_id)
@@ -109,6 +110,8 @@ module Kafka
    timing("consumer.#{client}.#{group_id}.#{topic}.#{partition}.process_batch.latency", event.duration)
    count("consumer.#{client}.#{group_id}.#{topic}.#{partition}.messages", messages)
  end
+
+ gauge("consumer.#{client}.#{group_id}.#{topic}.#{partition}.lag", lag)
  end

  def join_group(event)
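With `offset_lag` read from the instrumentation payload, every processed batch now also emits a per-partition gauge named `consumer.<client_id>.<group_id>.<topic>.<partition>.lag`. Reporting is enabled by requiring the Statsd module; the configuration below follows the ruby-kafka README, with placeholder values:

```ruby
require "kafka/statsd"

# All three settings are optional; the defaults are 127.0.0.1, port 8125,
# and the "ruby_kafka" namespace.
Kafka::Statsd.host = "statsd.example.com"
Kafka::Statsd.port = 8125
Kafka::Statsd.namespace = "ruby_kafka"
```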
lib/kafka/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Kafka
-   VERSION = "0.4.3"
+   VERSION = "0.4.4"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-kafka
  version: !ruby/object:Gem::Version
- version: 0.4.3
+ version: 0.4.4
  platform: ruby
  authors:
  - Daniel Schierbeck
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2017-10-20 00:00:00.000000000 Z
+ date: 2017-11-06 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: bundler
@@ -272,6 +272,7 @@ files:
  - ci/init.rb
  - ci/producer.rb
  - circle.yml
+ - examples/consumer-group.rb
  - examples/firehose-consumer.rb
  - examples/firehose-producer.rb
  - examples/simple-consumer.rb
@@ -380,7 +381,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
    version: '0'
  requirements: []
  rubyforge_project:
- rubygems_version: 2.6.11
+ rubygems_version: 2.6.13
  signing_key:
  specification_version: 4
  summary: A client library for the Kafka distributed commit log.