ruby-kafka 0.1.3 → 0.1.4
- checksums.yaml +4 -4
- data/README.md +17 -3
- data/Rakefile +1 -1
- data/examples/simple-consumer.rb +48 -0
- data/examples/simple-producer.rb +7 -1
- data/lib/kafka.rb +1 -1
- data/lib/kafka/broker.rb +32 -0
- data/lib/kafka/broker_pool.rb +16 -102
- data/lib/kafka/client.rb +91 -9
- data/lib/kafka/cluster.rb +130 -0
- data/lib/kafka/connection.rb +1 -3
- data/lib/kafka/fetch_operation.rb +127 -0
- data/lib/kafka/fetched_message.rb +27 -0
- data/lib/kafka/instrumentation.rb +1 -1
- data/lib/kafka/message_buffer.rb +8 -1
- data/lib/kafka/partitioner.rb +13 -8
- data/lib/kafka/pending_message.rb +13 -0
- data/lib/kafka/produce_operation.rb +116 -0
- data/lib/kafka/producer.rb +64 -30
- data/lib/kafka/protocol.rb +9 -0
- data/lib/kafka/protocol/decoder.rb +7 -0
- data/lib/kafka/protocol/fetch_request.rb +53 -0
- data/lib/kafka/protocol/fetch_response.rb +75 -0
- data/lib/kafka/protocol/list_offset_request.rb +41 -0
- data/lib/kafka/protocol/list_offset_response.rb +82 -0
- data/lib/kafka/protocol/message.rb +15 -0
- data/lib/kafka/protocol/message_set.rb +25 -0
- data/lib/kafka/protocol/metadata_response.rb +3 -1
- data/lib/kafka/protocol/produce_request.rb +3 -0
- data/lib/kafka/protocol/topic_metadata_request.rb +4 -0
- data/lib/kafka/socket_with_timeout.rb +3 -3
- data/lib/kafka/version.rb +1 -1
- data/ruby-kafka.gemspec +3 -1
- metadata +14 -4
- data/lib/kafka/transmission.rb +0 -76
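
The headline change in 0.1.4 is consumer-side support: a fetch path (lib/kafka/fetch_operation.rb, lib/kafka/fetched_message.rb, and the Fetch/ListOffset protocol classes) now sits alongside the existing producer, and the old Transmission class is replaced by produce_operation.rb. The sketch below shows roughly how the new fetch API is meant to be driven; it is modeled on the bundled examples/simple-consumer.rb, but the method and option names here are assumptions rather than verbatim from that file.

    # Rough sketch of the new consumer-side flow (names assumed, not copied
    # from examples/simple-consumer.rb).
    require "kafka"

    kafka = Kafka.new(seed_brokers: ["localhost:9092"], client_id: "simple-consumer")

    offset = :earliest

    loop do
      # Assumed signature: fetch one batch of messages from a single partition.
      messages = kafka.fetch_messages(topic: "greetings", partition: 0, offset: offset)

      messages.each do |message|
        puts "#{message.offset}: #{message.value}"
        offset = message.offset + 1
      end
    end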
data/lib/kafka/protocol/message.rb CHANGED
@@ -36,6 +36,21 @@ module Kafka
         @key == other.key && @value == other.value && @attributes == other.attributes
       end
 
+      def self.decode(decoder)
+        crc = decoder.int32
+        magic_byte = decoder.int8
+
+        unless magic_byte == MAGIC_BYTE
+          raise Kafka::Error, "Invalid magic byte: #{magic_byte}"
+        end
+
+        attributes = decoder.int8
+        key = decoder.bytes
+        value = decoder.bytes
+
+        new(key: key, value: value, attributes: attributes)
+      end
+
       private
 
       def encode_without_crc
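
For reference, Message.decode above reads the Kafka 0.8 message wire layout: a CRC (int32), a magic byte (int8), attributes (int8), then the key and value as int32-length-prefixed byte strings. A standalone sketch of that layout using plain pack/unpack (not the gem's Encoder/Decoder classes):

    require "zlib"

    # Encode a byte string as int32 length + bytes, with -1 signalling null.
    def encode_bytes(data)
      data.nil? ? [-1].pack("l>") : [data.bytesize].pack("l>") + data
    end

    body  = [0].pack("c")          # magic byte
    body += [0].pack("c")          # attributes (no compression)
    body += encode_bytes(nil)      # key (null)
    body += encode_bytes("hello")  # value

    message = [Zlib.crc32(body)].pack("L>") + body

    # Decoding walks the same fields in order, just like Message.decode:
    crc, magic_byte, attributes = message.unpack("L>cc")
    puts "crc=#{crc} magic=#{magic_byte} attributes=#{attributes}"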
data/lib/kafka/protocol/message_set.rb ADDED
@@ -0,0 +1,25 @@
+module Kafka
+  module Protocol
+    class MessageSet
+      attr_reader :messages
+
+      def initialize(messages:)
+        @messages = messages
+      end
+
+      def self.decode(decoder)
+        fetched_messages = []
+
+        until decoder.eof?
+          offset = decoder.int64
+          message_decoder = Decoder.from_string(decoder.bytes)
+          message = Message.decode(message_decoder)
+
+          fetched_messages << [offset, message]
+        end
+
+        new(messages: fetched_messages)
+      end
+    end
+  end
+end
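
Note that MessageSet#messages holds [offset, message] pairs once decoded, so a caller walking a decoded set iterates tuples rather than bare messages. A small usage sketch (assuming `decoder` is a Kafka::Protocol::Decoder positioned at the start of a message set):

    # Sketch: iterate a decoded message set; each entry is an
    # [offset, Kafka::Protocol::Message] pair, per MessageSet.decode above.
    message_set = Kafka::Protocol::MessageSet.decode(decoder)

    message_set.messages.each do |offset, message|
      puts "offset=#{offset} key=#{message.key.inspect} value=#{message.value.inspect}"
    end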
data/lib/kafka/protocol/metadata_response.rb CHANGED
@@ -40,7 +40,7 @@ module Kafka
         @port = port
       end
 
-      def
+      def to_s
         "#{host}:#{port} (node_id=#{node_id})"
       end
     end
@@ -131,6 +131,8 @@ module Kafka
         raise UnknownTopicOrPartition, "unknown topic #{topic_name}"
       end
 
+      Protocol.handle_error(topic.topic_error_code)
+
       topic.partitions
     end
 
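
With this addition, a topic-level error code in the metadata response raises through Protocol.handle_error instead of being silently ignored, so callers looking up a topic's partitions should be prepared to rescue errors such as Kafka::UnknownTopicOrPartition or Kafka::LeaderNotAvailable. A sketch of that caller-side handling (the method name `partitions_for` and the retry policy are assumptions):

    begin
      partitions = metadata_response.partitions_for("greetings")
    rescue Kafka::LeaderNotAvailable, Kafka::UnknownTopicOrPartition => e
      # The topic metadata isn't usable yet (e.g. the topic is still being
      # auto-created); wait briefly and try again, refreshing metadata in a
      # real client.
      logger.warn "Metadata for greetings not ready: #{e}"
      sleep 1
      retry
    end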
data/lib/kafka/protocol/produce_request.rb CHANGED
@@ -27,6 +27,9 @@ module Kafka
     class ProduceRequest
       attr_reader :required_acks, :timeout, :messages_for_topics
 
+      # @param required_acks [Integer]
+      # @param timeout [Integer]
+      # @param messages_for_topics [Hash]
       def initialize(required_acks:, timeout:, messages_for_topics:)
         @required_acks = required_acks
         @timeout = timeout
data/lib/kafka/socket_with_timeout.rb CHANGED
@@ -28,15 +28,15 @@ module Kafka
       @socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
 
       begin
-        # Initiate the socket connection in the background. If it doesn't fail
-        # immediately it will raise an IO::WaitWritable (Errno::EINPROGRESS)
+        # Initiate the socket connection in the background. If it doesn't fail
+        # immediately it will raise an IO::WaitWritable (Errno::EINPROGRESS)
         # indicating the connection is in progress.
         @socket.connect_nonblock(sockaddr)
       rescue IO::WaitWritable
         # IO.select will block until the socket is writable or the timeout
         # is exceeded, whichever comes first.
         unless IO.select(nil, [@socket], nil, connect_timeout)
-          # IO.select returns nil when the socket is not ready before timeout
+          # IO.select returns nil when the socket is not ready before timeout
           # seconds have elapsed
           @socket.close
           raise Errno::ETIMEDOUT
data/lib/kafka/version.rb CHANGED
data/ruby-kafka.gemspec CHANGED
@@ -9,7 +9,7 @@ Gem::Specification.new do |spec|
   spec.authors = ["Daniel Schierbeck"]
   spec.email = ["daniel.schierbeck@gmail.com"]
 
-  spec.summary =
+  spec.summary = "A client library for the Kafka distributed commit log."
 
   spec.description = <<-DESC.gsub(/^ /, "").strip
     A client library for the Kafka distributed commit log.
@@ -20,6 +20,8 @@ Gem::Specification.new do |spec|
   spec.homepage = "https://github.com/zendesk/ruby-kafka"
   spec.license = "Apache License Version 2.0"
 
+  spec.required_ruby_version = '>= 2.1.0'
+
   spec.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
   spec.bindir = "exe"
   spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-kafka
 version: !ruby/object:Gem::Version
-  version: 0.1.3
+  version: 0.1.4
 platform: ruby
 authors:
 - Daniel Schierbeck
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2016-02-
+date: 2016-02-15 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -148,27 +148,37 @@ files:
 - bin/console
 - bin/setup
 - circle.yml
+- examples/simple-consumer.rb
 - examples/simple-producer.rb
 - lib/kafka.rb
 - lib/kafka/broker.rb
 - lib/kafka/broker_pool.rb
 - lib/kafka/client.rb
+- lib/kafka/cluster.rb
 - lib/kafka/connection.rb
+- lib/kafka/fetch_operation.rb
+- lib/kafka/fetched_message.rb
 - lib/kafka/instrumentation.rb
 - lib/kafka/message_buffer.rb
 - lib/kafka/partitioner.rb
+- lib/kafka/pending_message.rb
+- lib/kafka/produce_operation.rb
 - lib/kafka/producer.rb
 - lib/kafka/protocol.rb
 - lib/kafka/protocol/decoder.rb
 - lib/kafka/protocol/encoder.rb
+- lib/kafka/protocol/fetch_request.rb
+- lib/kafka/protocol/fetch_response.rb
+- lib/kafka/protocol/list_offset_request.rb
+- lib/kafka/protocol/list_offset_response.rb
 - lib/kafka/protocol/message.rb
+- lib/kafka/protocol/message_set.rb
 - lib/kafka/protocol/metadata_response.rb
 - lib/kafka/protocol/produce_request.rb
 - lib/kafka/protocol/produce_response.rb
 - lib/kafka/protocol/request_message.rb
 - lib/kafka/protocol/topic_metadata_request.rb
 - lib/kafka/socket_with_timeout.rb
-- lib/kafka/transmission.rb
 - lib/kafka/version.rb
 - lib/ruby-kafka.rb
 - ruby-kafka.gemspec
@@ -184,7 +194,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version:
+      version: 2.1.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
data/lib/kafka/transmission.rb DELETED
@@ -1,76 +0,0 @@
-module Kafka
-  class Transmission
-    def initialize(broker_pool:, buffer:, required_acks:, ack_timeout:, logger:)
-      @broker_pool = broker_pool
-      @buffer = buffer
-      @required_acks = required_acks
-      @ack_timeout = ack_timeout
-      @logger = logger
-    end
-
-    def send_messages
-      messages_for_broker = {}
-
-      @buffer.each do |topic, partition, messages|
-        broker = @broker_pool.get_leader(topic, partition)
-
-        @logger.debug "Current leader for #{topic}/#{partition} is node #{broker}"
-
-        messages_for_broker[broker] ||= MessageBuffer.new
-        messages_for_broker[broker].concat(messages, topic: topic, partition: partition)
-      end
-
-      messages_for_broker.each do |broker, message_set|
-        begin
-          response = broker.produce(
-            messages_for_topics: message_set.to_h,
-            required_acks: @required_acks,
-            timeout: @ack_timeout * 1000, # Kafka expects the timeout in milliseconds.
-          )
-
-          handle_response(response) if response
-        rescue ConnectionError => e
-          @logger.error "Could not connect to broker #{broker}: #{e}"
-
-          # Mark the broker pool as stale in order to force a cluster metadata refresh.
-          @broker_pool.mark_as_stale!
-        end
-      end
-    end
-
-    private
-
-    def handle_response(response)
-      response.each_partition do |topic_info, partition_info|
-        topic = topic_info.topic
-        partition = partition_info.partition
-
-        begin
-          Protocol.handle_error(partition_info.error_code)
-        rescue Kafka::CorruptMessage
-          @logger.error "Corrupt message when writing to #{topic}/#{partition}"
-        rescue Kafka::UnknownTopicOrPartition
-          @logger.error "Unknown topic or partition #{topic}/#{partition}"
-        rescue Kafka::LeaderNotAvailable
-          @logger.error "Leader currently not available for #{topic}/#{partition}"
-          @broker_pool.mark_as_stale!
-        rescue Kafka::NotLeaderForPartition
-          @logger.error "Broker not currently leader for #{topic}/#{partition}"
-          @broker_pool.mark_as_stale!
-        rescue Kafka::RequestTimedOut
-          @logger.error "Timed out while writing to #{topic}/#{partition}"
-        rescue Kafka::NotEnoughReplicas
-          @logger.error "Not enough in-sync replicas for #{topic}/#{partition}"
-        rescue Kafka::NotEnoughReplicasAfterAppend
-          @logger.error "Messages written, but to fewer in-sync replicas than required for #{topic}/#{partition}"
-        else
-          offset = partition_info.offset
-          @logger.info "Successfully sent messages for #{topic}/#{partition}; new offset is #{offset}"
-
-          # The messages were successfully written; clear them from the buffer.
-          @buffer.clear_messages(topic: topic, partition: partition)
-        end
-      end
-    end
-  end
-end