ruby-kafka 0.3.16.beta2 → 0.3.16
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +1 -1
- data/lib/kafka/consumer.rb +8 -7
- data/lib/kafka/consumer_group.rb +4 -0
- data/lib/kafka/produce_operation.rb +10 -7
- data/lib/kafka/version.rb +1 -1
- metadata +4 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 02443c4134a1dd98934b6bd2f73b2edc09fe4cf5
|
4
|
+
data.tar.gz: b03995318665f91f070ecb00a14aa483e4c4444e
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 2c69cb9baa6eed992cdd7dc66b14547df32059144748b0c12fe269ec126bbd4ed62b18300aacb955f62b17235e26e605cfea53425adf278a57edcb99d4e7c272
|
7
|
+
data.tar.gz: d2b7dd37bc7eb3ac6666808172d289334d926d9cb17b01690c19e1f67174af4f054c003fcc204fb9f711441d4207a4693e2d54419af873930306ae9f2fe8d1a0
|
data/CHANGELOG.md
CHANGED
data/lib/kafka/consumer.rb
CHANGED
@@ -67,7 +67,7 @@ module Kafka
|
|
67
67
|
# Typically you either want to start reading messages from the very
|
68
68
|
# beginning of the topic's partitions or you simply want to wait for new
|
69
69
|
# messages to be written. In the former case, set `start_from_beginning`
|
70
|
-
# true (the default); in the latter, set it to false.
|
70
|
+
# to true (the default); in the latter, set it to false.
|
71
71
|
#
|
72
72
|
# @param topic [String] the name of the topic to subscribe to.
|
73
73
|
# @param default_offset [Symbol] whether to start from the beginning or the
|
@@ -105,7 +105,8 @@ module Kafka
|
|
105
105
|
#
|
106
106
|
# @param topic [String]
|
107
107
|
# @param partition [Integer]
|
108
|
-
# @param timeout [Integer] the number of seconds to pause the partition for
|
108
|
+
# @param timeout [Integer] the number of seconds to pause the partition for,
|
109
|
+
# or `nil` if the partition should not be automatically resumed.
|
109
110
|
# @return [nil]
|
110
111
|
def pause(topic, partition, timeout: nil)
|
111
112
|
@paused_partitions[topic] ||= {}
|
@@ -164,7 +165,7 @@ module Kafka
|
|
164
165
|
# @param min_bytes [Integer] the minimum number of bytes to read before
|
165
166
|
# returning messages from the server; if `max_wait_time` is reached, this
|
166
167
|
# is ignored.
|
167
|
-
# @param max_wait_time [Integer] the maximum duration of time to wait before
|
168
|
+
# @param max_wait_time [Integer, Float] the maximum duration of time to wait before
|
168
169
|
# returning messages from the server, in seconds.
|
169
170
|
# @yieldparam message [Kafka::FetchedMessage] a message fetched from Kafka.
|
170
171
|
# @raise [Kafka::ProcessingError] if there was an error processing a message.
|
@@ -227,7 +228,7 @@ module Kafka
|
|
227
228
|
# @param min_bytes [Integer] the minimum number of bytes to read before
|
228
229
|
# returning messages from the server; if `max_wait_time` is reached, this
|
229
230
|
# is ignored.
|
230
|
-
# @param max_wait_time [Integer] the maximum duration of time to wait before
|
231
|
+
# @param max_wait_time [Integer, Float] the maximum duration of time to wait before
|
231
232
|
# returning messages from the server, in seconds.
|
232
233
|
# @yieldparam batch [Kafka::FetchedBatch] a message batch fetched from Kafka.
|
233
234
|
# @return [nil]
|
@@ -318,11 +319,11 @@ module Kafka
|
|
318
319
|
def fetch_batches(min_bytes:, max_wait_time:)
|
319
320
|
join_group unless @group.member?
|
320
321
|
|
321
|
-
|
322
|
+
subscribed_partitions = @group.subscribed_partitions
|
322
323
|
|
323
324
|
@heartbeat.send_if_necessary
|
324
325
|
|
325
|
-
raise "No partitions assigned!" if @group.subscribed_partitions.empty?
|
326
|
+
raise "No partitions assigned!" if subscribed_partitions.empty?
|
326
327
|
|
327
328
|
operation = FetchOperation.new(
|
328
329
|
cluster: @cluster,
|
@@ -331,7 +332,7 @@ module Kafka
|
|
331
332
|
max_wait_time: max_wait_time,
|
332
333
|
)
|
333
334
|
|
334
|
-
|
335
|
+
subscribed_partitions.each do |topic, partitions|
|
335
336
|
partitions.each do |partition|
|
336
337
|
offset = @offset_manager.next_offset_for(topic, partition)
|
337
338
|
max_bytes = @max_bytes.fetch(topic)
|
data/lib/kafka/produce_operation.rb
CHANGED
@@ -7,22 +7,24 @@ module Kafka
|
|
7
7
|
#
|
8
8
|
# ## Instrumentation
|
9
9
|
#
|
10
|
-
# When executing the operation, an `
|
11
|
-
# emitted for each message
|
10
|
+
# When executing the operation, an `ack_message.producer.kafka` notification will be
|
11
|
+
# emitted for each message that was successfully appended to a topic partition.
|
12
12
|
# The following keys will be found in the payload:
|
13
13
|
#
|
14
14
|
# * `:topic` — the topic that was written to.
|
15
15
|
# * `:partition` — the partition that the message set was appended to.
|
16
|
-
# * `:offset` — the offset of the
|
17
|
-
# * `:
|
16
|
+
# * `:offset` — the offset of the message in the partition.
|
17
|
+
# * `:key` — the message key.
|
18
|
+
# * `:value` — the message value.
|
19
|
+
# * `:delay` — the time between the message was produced and when it was acknowledged.
|
18
20
|
#
|
19
21
|
# In addition to these notifications, a `send_messages.producer.kafka` notification will
|
20
22
|
# be emitted after the operation completes, regardless of whether it succeeds. This
|
21
23
|
# notification will have the following keys:
|
22
24
|
#
|
23
|
-
# *
|
25
|
+
# * `:message_count` – the total number of messages that the operation tried to
|
24
26
|
# send. Note that not all messages may get delivered.
|
25
|
-
# *
|
27
|
+
# * `:sent_message_count` – the number of messages that were successfully sent.
|
26
28
|
#
|
27
29
|
class ProduceOperation
|
28
30
|
def initialize(cluster:, buffer:, compressor:, required_acks:, ack_timeout:, logger:, instrumenter:)
|
@@ -125,12 +127,13 @@ module Kafka
|
|
125
127
|
raise e
|
126
128
|
end
|
127
129
|
|
128
|
-
messages.
|
130
|
+
messages.each_with_index do |message, index|
|
129
131
|
@instrumenter.instrument("ack_message.producer", {
|
130
132
|
key: message.key,
|
131
133
|
value: message.value,
|
132
134
|
topic: topic,
|
133
135
|
partition: partition,
|
136
|
+
offset: partition_info.offset + index,
|
134
137
|
delay: ack_time - message.create_time,
|
135
138
|
})
|
136
139
|
end
|
data/lib/kafka/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: ruby-kafka
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.3.16.beta2
|
4
|
+
version: 0.3.16
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Daniel Schierbeck
|
8
8
|
autorequire:
|
9
9
|
bindir: exe
|
10
10
|
cert_chain: []
|
11
|
-
date:
|
11
|
+
date: 2017-01-20 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: bundler
|
@@ -314,9 +314,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
|
|
314
314
|
version: 2.1.0
|
315
315
|
required_rubygems_version: !ruby/object:Gem::Requirement
|
316
316
|
requirements:
|
317
|
-
- - ">"
|
317
|
+
- - ">="
|
318
318
|
- !ruby/object:Gem::Version
|
319
|
-
version: 1.3.1
|
319
|
+
version: '0'
|
320
320
|
requirements: []
|
321
321
|
rubyforge_project:
|
322
322
|
rubygems_version: 2.4.5.1
|
@@ -324,4 +324,3 @@ signing_key:
|
|
324
324
|
specification_version: 4
|
325
325
|
summary: A client library for the Kafka distributed commit log.
|
326
326
|
test_files: []
|
327
|
-
has_rdoc:
|