ruby-kafka 0.3.11 → 0.3.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 7b1f414825e2d64312d9b4ad80747fe56b591408
-  data.tar.gz: ed5ec3a67e16f375061a5040829c2830e0b57395
+  metadata.gz: 29c5259a5c2650e5a9eec0c513aa997a90834f9d
+  data.tar.gz: b4eb6a8de42d27f2a3b215cb690d7c803f706bad
 SHA512:
-  metadata.gz: a2cadfb3b2be1d37d79c6139f9fc5081c5d1315d60306c9c2ce9dd325a94d5d0e720cd0b826624e14003bbd89cab2b916f337bc671b273ec947f50f2149cc9bf
-  data.tar.gz: 1a64728b38159b9a26685563826a970882262c46449c1cebc2470a90e68dc8ec52fdd3a3626d620d6047cabf2fe8627ddc48dea5f72f75d4e81c4ae259eae4c9
+  metadata.gz: 43c219f2e1956264a864a9ce41e75f2a3b46ac6cc5c248cdc99a28a15b2871ed8fdb1554422797c9ca8e913996874981aadf3f35f23fea30110e616bfc3013e0
+  data.tar.gz: ad849644e4f75761164f41ffad9d69e67e0ac71eb2796d222afc224f1745bc9199491165d38fd346ebf0b8f7da6ea25e60fb9fe7a853b12bc1699f069331d8d5
CHANGELOG.md CHANGED
@@ -4,6 +4,10 @@ Changes and additions to the library will be listed here.
 
 ## Unreleased
 
+## v0.3.12
+
+- Fix a bug in the consumer.
+
 ## v0.3.11
 
 - Fix bug in the simple consumer loop.
lib/kafka/client.rb CHANGED
@@ -60,6 +60,21 @@ module Kafka
       @cluster = initialize_cluster
     end
 
+    # Delivers a single message to the Kafka cluster.
+    #
+    # **Note:** Only use this API for low-throughput scenarios. If you want to deliver
+    # many messages at a high rate, or if you want to configure the way messages are
+    # sent, use the {#producer} or {#async_producer} APIs instead.
+    #
+    # @param value [String, nil] the message value.
+    # @param key [String, nil] the message key.
+    # @param topic [String] the topic that the message should be written to.
+    # @param partition [Integer, nil] the partition that the message should be written
+    #   to, or `nil` if either `partition_key` is passed or the partition should be
+    #   chosen at random.
+    # @param partition_key [String] a value used to deterministically choose a
+    #   partition to write to.
+    # @return [nil]
     def deliver_message(value, key: nil, topic:, partition: nil, partition_key: nil)
       create_time = Time.now
 
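For reference, the new API is called directly on a client instance. A minimal usage sketch — the broker address and topic name below are made up for illustration, not taken from the diff:

    require "kafka"

    kafka = Kafka.new(seed_brokers: ["kafka1:9092"])

    # Write a single message synchronously. Fine for low throughput;
    # use #producer or #async_producer for anything high-volume.
    kafka.deliver_message("hello", topic: "greetings")

    # With a partition_key, the same key always maps to the same partition.
    kafka.deliver_message("hi", key: "user-42", topic: "greetings", partition_key: "user-42")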
lib/kafka/connection.rb CHANGED
@@ -123,6 +123,11 @@ module Kafka
       raise ConnectionError, e
     end
 
+    def reopen
+      close
+      open
+    end
+
     # Writes a request over the connection.
     #
     # @param request [#encode] the request that should be encoded and written.
@@ -140,14 +145,29 @@ module Kafka
       )
 
       data = Kafka::Protocol::Encoder.encode_with(message)
+      retried = false
       notification[:request_size] = data.bytesize
 
-      @encoder.write_bytes(data)
+      begin
+        @encoder.write_bytes(data)
+      rescue Errno::ETIMEDOUT
+        @logger.error "Timed out while writing request #{@correlation_id}"
+        raise
+      rescue Errno::EPIPE, Errno::ECONNRESET, EOFError
+        # Kafka brokers automatically close client connections after a period of
+        # inactivity. If this has happened, it's safe to re-open the connection
+        # and retry the request.
+        if retried
+          raise
+        else
+          @logger.warn "Connection has been closed by the server, retrying..."
+          retried = true
+          reopen
+          retry
+        end
+      end
 
       nil
-    rescue Errno::ETIMEDOUT
-      @logger.error "Timed out while writing request #{@correlation_id}"
-      raise
     end
 
     # Reads a response from the connection.
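The retry shape here is deliberate: the request is retried at most once, and only for errors that indicate the broker closed an idle connection; a write timeout is still re-raised immediately. A stripped-down sketch of the same retry-once pattern — the names `send_data` and `reconnect` are hypothetical, not the gem's API:

    def send_data(socket, data, logger)
      retried = false
      begin
        socket.write(data)
      rescue Errno::EPIPE, Errno::ECONNRESET, EOFError
        # Only retry once; a second failure means something worse than
        # an idle connection being reaped by the broker.
        raise if retried
        logger.warn "Connection closed by server, reconnecting..."
        retried = true
        socket = reconnect(socket)  # hypothetical helper returning a fresh socket
        retry
      end
    end

Per the comment in the diff, the retry is considered safe because these errors surface when the send itself fails on an already-closed connection, so the broker never saw the request.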
lib/kafka/consumer.rb CHANGED
@@ -130,6 +130,10 @@ module Kafka
           return if !@running
         end
       end
+
+      # We may not have received any messages, but it's still a good idea to
+      # commit offsets if we've processed messages in the last set of batches.
+      @offset_manager.commit_offsets_if_necessary
     end
   end
 
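The loop now flushes offsets on the way out via `commit_offsets_if_necessary`. A sketch of what such a policy typically checks — a commit interval and a count of uncommitted messages; the names `@commit_interval` and `@commit_threshold` are assumptions for illustration, not lifted from the gem:

    def commit_offsets_if_necessary
      seconds_since_last_commit = Time.now - @last_commit

      # Commit when enough time has passed or enough messages have piled up.
      if seconds_since_last_commit >= @commit_interval ||
          @uncommitted_offsets >= @commit_threshold
        commit_offsets
      end
    end

This plausibly also motivates the `Time.at(0)` → `Time.now` change below: seeding the timer with the epoch made the very first interval check fire immediately, even before anything had been processed.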
lib/kafka/offset_manager.rb CHANGED
@@ -11,7 +11,7 @@ module Kafka
       @processed_offsets = {}
       @default_offsets = {}
       @committed_offsets = nil
-      @last_commit = Time.at(0)
+      @last_commit = Time.now
     end
 
     def set_default_offset(topic, default_offset)
@@ -21,7 +21,8 @@ module Kafka
     def mark_as_processed(topic, partition, offset)
       @uncommitted_offsets += 1
       @processed_offsets[topic] ||= {}
-      @processed_offsets[topic][partition] = offset + 1
+      @processed_offsets[topic][partition] = offset
+      @logger.debug "Marking #{topic}/#{partition}:#{offset} as committed"
     end
 
     def next_offset_for(topic, partition)
@@ -32,20 +33,21 @@ module Kafka
       # A negative offset means that no offset has been committed, so we need to
       # resolve the default offset for the topic.
       if offset < 0
-        offset = @default_offsets.fetch(topic)
-        offset = @cluster.resolve_offset(topic, partition, offset)
-
-        # Make sure we commit this offset so that we don't have to
-        # resolve the default offset every time.
-        mark_as_processed(topic, partition, offset - 1)
+        default_offset = @default_offsets.fetch(topic)
+        @cluster.resolve_offset(topic, partition, default_offset)
+      else
+        # The next offset is the last offset plus one.
+        offset + 1
       end
-
-      offset
     end
 
     def commit_offsets
       unless @processed_offsets.empty?
-        @logger.info "Committing offsets for #{@uncommitted_offsets} messages"
+        pretty_offsets = @processed_offsets.flat_map {|topic, partitions|
+          partitions.map {|partition, offset| "#{topic}/#{partition}:#{offset}" }
+        }.join(", ")
+
+        @logger.info "Committing offsets: #{pretty_offsets}"
 
         @group.commit_offsets(@processed_offsets)
 
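The bookkeeping change in `mark_as_processed` and `next_offset_for` is the core of the fix: the manager now stores the offset that was actually processed and adds one only when computing the next fetch position. A worked example under the new semantics (topic and numbers are illustrative):

    manager.mark_as_processed("greetings", 0, 41)  # stores 41, the processed offset
    manager.next_offset_for("greetings", 0)        # => 42, i.e. last processed + 1

    # Previously, mark_as_processed stored offset + 1 (42) and next_offset_for
    # returned the stored value as-is; resolving a default offset then had to
    # round-trip through mark_as_processed(topic, partition, offset - 1) to compensate.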
lib/kafka/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Kafka
-  VERSION = "0.3.11"
+  VERSION = "0.3.12"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-kafka
 version: !ruby/object:Gem::Version
-  version: 0.3.11
+  version: 0.3.12
 platform: ruby
 authors:
 - Daniel Schierbeck
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2016-07-14 00:00:00.000000000 Z
+date: 2016-07-28 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler