codeclimate-poseidon 0.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +21 -0
- data/.rspec +2 -0
- data/.travis.yml +14 -0
- data/.yardopts +8 -0
- data/CHANGES.md +31 -0
- data/Gemfile +13 -0
- data/LICENSE.txt +22 -0
- data/README.md +72 -0
- data/Rakefile +20 -0
- data/TODO.md +27 -0
- data/examples/consumer.rb +18 -0
- data/examples/producer.rb +9 -0
- data/lib/poseidon.rb +120 -0
- data/lib/poseidon/broker_pool.rb +86 -0
- data/lib/poseidon/cluster_metadata.rb +94 -0
- data/lib/poseidon/compressed_value.rb +23 -0
- data/lib/poseidon/compression.rb +30 -0
- data/lib/poseidon/compression/gzip_codec.rb +23 -0
- data/lib/poseidon/compression/snappy_codec.rb +29 -0
- data/lib/poseidon/connection.rb +169 -0
- data/lib/poseidon/fetched_message.rb +37 -0
- data/lib/poseidon/message.rb +151 -0
- data/lib/poseidon/message_conductor.rb +86 -0
- data/lib/poseidon/message_set.rb +80 -0
- data/lib/poseidon/message_to_send.rb +33 -0
- data/lib/poseidon/messages_for_broker.rb +56 -0
- data/lib/poseidon/messages_to_send.rb +47 -0
- data/lib/poseidon/messages_to_send_batch.rb +27 -0
- data/lib/poseidon/partition_consumer.rb +225 -0
- data/lib/poseidon/producer.rb +199 -0
- data/lib/poseidon/producer_compression_config.rb +37 -0
- data/lib/poseidon/protocol.rb +122 -0
- data/lib/poseidon/protocol/protocol_struct.rb +256 -0
- data/lib/poseidon/protocol/request_buffer.rb +77 -0
- data/lib/poseidon/protocol/response_buffer.rb +72 -0
- data/lib/poseidon/sync_producer.rb +161 -0
- data/lib/poseidon/topic_metadata.rb +89 -0
- data/lib/poseidon/version.rb +4 -0
- data/log/.gitkeep +0 -0
- data/poseidon.gemspec +27 -0
- data/spec/integration/multiple_brokers/consumer_spec.rb +45 -0
- data/spec/integration/multiple_brokers/metadata_failures_spec.rb +144 -0
- data/spec/integration/multiple_brokers/rebalance_spec.rb +69 -0
- data/spec/integration/multiple_brokers/round_robin_spec.rb +41 -0
- data/spec/integration/multiple_brokers/spec_helper.rb +60 -0
- data/spec/integration/simple/compression_spec.rb +23 -0
- data/spec/integration/simple/connection_spec.rb +35 -0
- data/spec/integration/simple/multiple_brokers_spec.rb +10 -0
- data/spec/integration/simple/simple_producer_and_consumer_spec.rb +121 -0
- data/spec/integration/simple/spec_helper.rb +16 -0
- data/spec/integration/simple/truncated_messages_spec.rb +46 -0
- data/spec/integration/simple/unavailable_broker_spec.rb +72 -0
- data/spec/spec_helper.rb +32 -0
- data/spec/test_cluster.rb +211 -0
- data/spec/unit/broker_pool_spec.rb +98 -0
- data/spec/unit/cluster_metadata_spec.rb +46 -0
- data/spec/unit/compression/gzip_codec_spec.rb +34 -0
- data/spec/unit/compression/snappy_codec_spec.rb +49 -0
- data/spec/unit/compression_spec.rb +17 -0
- data/spec/unit/connection_spec.rb +4 -0
- data/spec/unit/fetched_message_spec.rb +11 -0
- data/spec/unit/message_conductor_spec.rb +164 -0
- data/spec/unit/message_set_spec.rb +42 -0
- data/spec/unit/message_spec.rb +129 -0
- data/spec/unit/message_to_send_spec.rb +10 -0
- data/spec/unit/messages_for_broker_spec.rb +54 -0
- data/spec/unit/messages_to_send_batch_spec.rb +25 -0
- data/spec/unit/messages_to_send_spec.rb +63 -0
- data/spec/unit/partition_consumer_spec.rb +142 -0
- data/spec/unit/producer_compression_config_spec.rb +42 -0
- data/spec/unit/producer_spec.rb +51 -0
- data/spec/unit/protocol/request_buffer_spec.rb +16 -0
- data/spec/unit/protocol_spec.rb +54 -0
- data/spec/unit/sync_producer_spec.rb +156 -0
- data/spec/unit/topic_metadata_spec.rb +43 -0
- metadata +225 -0
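
The files below are the RSpec suite for the poseidon Kafka client. For orientation, a minimal sketch of the produce/consume round trip the integration specs exercise, assuming a local broker on localhost:9092 as in the simple specs; the client and topic names here are placeholders, not taken from the gem:

    require 'poseidon'

    # Synchronous producer pointed at one local broker (mirrors the simple specs below).
    producer = Poseidon::Producer.new(["localhost:9092"], "example_client", :type => :sync)
    producer.send_messages([Poseidon::MessageToSend.new("example_topic", "Hello World")])

    # Read partition 0 of the same topic back from the earliest offset (-2).
    consumer = Poseidon::PartitionConsumer.new("example_consumer", "localhost", 9092,
                                               "example_topic", 0, -2)
    consumer.fetch.each { |m| puts m.value }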
data/spec/integration/multiple_brokers/metadata_failures_spec.rb
@@ -0,0 +1,144 @@
require 'integration/multiple_brokers/spec_helper'

# Created because you can't use a block form for receive with and_call_original
# https://github.com/rspec/rspec-mocks/issues/774
RSpec::Matchers.define :a_broker_id_of do |id|
  match { |actual| actual.broker_id == id }
end

RSpec::Matchers.define :a_broker_id_not do |id|
  match { |actual| actual.broker_id != id }
end

RSpec::Matchers.define :needing_metadata do |val|
  match { |actual| actual.send(:needs_metadata?) == val }
end

RSpec.describe "handling failures", :type => :request do
  include_context "a multiple broker cluster"

  describe "metadata failures" do
    before(:each) do
      @messages_to_send = [
        MessageToSend.new("topic1", "hello"),
        MessageToSend.new("topic2", "hello")
      ]
    end

    describe "unable to connect to brokers" do
      before(:each) do
        @p = Producer.new(["localhost:1092","localhost:1093","localhost:1094"], "producer")
      end

      it "triggers callback failures for both topics" do
        expect {
          @p.send_messages(@messages_to_send)
        }.to raise_error(Poseidon::Errors::UnableToFetchMetadata)
      end
    end
  end

  describe "unknown topic" do
    it "receives error callback" do
      pending "need a way to turn off auto-topic creation just for this test"
      @p = Producer.new(["localhost:9092","localhost:9093","localhost:9094"], "producer")

      expect {
        @p.send_messages([MessageToSend.new("imnothere", "hello")])
      }.to raise_error(Poseidon::Errors::UnableToFetchMetadata)
    end
  end

  describe "leader node loss" do
    let(:topic) { "testing" }
    let(:kafka_partitions) { 1 }

    it "is still able to send messages" do
      @p = Producer.new(["localhost:9092","localhost:9093","localhost:9094"], "producer", :required_acks => 1)

      # Send one to force topic creation
      expect(@p.send_messages([MessageToSend.new(topic, "hello")])).to be_truthy

      @producer = @p.instance_variable_get(:@producer)
      @cluster_metadata = @producer.instance_variable_get(:@cluster_metadata)
      topic_metadata = @cluster_metadata.metadata_for_topics([topic])[topic]

      expect(topic_metadata.available_partitions.length).to be(kafka_partitions)

      # Now, let's kill the topic leader
      leader = topic_metadata.available_partitions.first.leader
      broker = $tc.brokers[topic_metadata.available_partitions.first.leader]
      expect(broker.id).to be(leader)

      broker.without_process do
        expect(@cluster_metadata.metadata_for_topics([topic])[topic].available_partitions.first.leader).to be(leader)
        expect(@producer.send(:refresh_interval_elapsed?)).to be_falsy

        # Set up expectations that the producer updates its info
        expect(@producer).to receive(:ensure_metadata_available_for_topics).with(needing_metadata(false)).ordered.and_call_original
        expect(@producer).to receive(:send_to_broker).with(a_broker_id_of(leader)).ordered.and_call_original

        expect(@producer).to receive(:reset_metadata).ordered.and_call_original
        expect(@producer).to receive(:ensure_metadata_available_for_topics).ordered.and_call_original
        expect(@producer).to receive(:send_to_broker).with(a_broker_id_not(leader)).ordered.and_call_original

        expect(@p.send_messages([MessageToSend.new(topic, "hello")])).to be_truthy
      end
    end
  end

  describe "partition replica loss" do
    let(:topic) { "testing" }
    let(:kafka_partitions) { 1 }

    it "refreshes metadata correctly" do
      @p = Producer.new(["localhost:9092","localhost:9093","localhost:9094"], "producer", :required_acks => 1)

      # Send one to force topic creation
      expect(@p.send_messages([MessageToSend.new(topic, "hello")])).to be_truthy

      @producer = @p.instance_variable_get(:@producer)
      @cluster_metadata = @producer.instance_variable_get(:@cluster_metadata)
      topic_metadata = @cluster_metadata.metadata_for_topics([topic])[topic]

      expect(topic_metadata.available_partitions.length).to be(kafka_partitions)
      expect(topic_metadata.available_partitions.first.error).to be(0)

      # Now, let's kill a replica that is not the partition leader
      partition_metadata = topic_metadata.available_partitions.first
      expect(partition_metadata.replicas.length).to be(2)
      leader = partition_metadata.leader
      replica = (partition_metadata.replicas - [leader]).first

      broker = $tc.brokers[replica]
      expect(broker.id).to_not be(partition_metadata.leader)
      expect(broker.id).to be(replica)

      broker.without_process do
        expect(@cluster_metadata.metadata_for_topics([topic])[topic].available_partitions.first.replicas).to include(replica)
        expect(@producer.send(:refresh_interval_elapsed?)).to be_falsy

        Timecop.travel(@producer.metadata_refresh_interval_ms) do
          expect(@producer.send(:refresh_interval_elapsed?)).to be_truthy

          # Set up expectations that the producer updates its info
          expect(@producer).to receive(:refresh_metadata).with(Set.new([topic])).ordered.and_call_original
          expect(@producer).to receive(:ensure_metadata_available_for_topics).with(needing_metadata(false)).ordered.and_call_original
          expect(@producer).to receive(:send_to_broker).with(a_broker_id_of(partition_metadata.leader)).ordered.and_call_original

          # Make sure we don't error out
          expect(@producer).to_not receive(:reset_metadata)

          expect(@p.send_messages([MessageToSend.new(topic, "hello")])).to be_truthy

          # Check the valid metadata
          updated_topic_metadata = @cluster_metadata.metadata_for_topics([topic])[topic]
          expect(updated_topic_metadata.available_partitions.length).to be(kafka_partitions)
          expect(updated_topic_metadata.available_partitions.first.leader).to be(leader)
          expect(updated_topic_metadata.available_partitions.first.replicas).to eq([leader])
          expect(updated_topic_metadata.available_partitions.first.error).to be(9)
        end
      end
    end
  end
end
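
A note on the custom matchers at the top of this spec: as the linked rspec-mocks issue explains, a receive expectation cannot combine a block implementation with and_call_original, so the argument checks are written as custom argument matchers instead. A minimal sketch of the pattern in isolation (the producer object here is hypothetical, not code from this gem):

    # Not supported: a block implementation together with and_call_original.
    # expect(producer).to receive(:send_to_broker) { |arg| arg.broker_id == 0 }.and_call_original

    # Workaround used above: a custom matcher constrains the argument while the
    # real method still runs.
    RSpec::Matchers.define :a_broker_id_of do |id|
      match { |actual| actual.broker_id == id }
    end

    expect(producer).to receive(:send_to_broker).with(a_broker_id_of(0)).and_call_original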
data/spec/integration/multiple_brokers/rebalance_spec.rb
@@ -0,0 +1,69 @@
require 'integration/multiple_brokers/spec_helper'

RSpec.describe "producer handles rebalancing", :type => :request do
  include_context "a multiple broker cluster"

  before(:each) do
    # autocreate the topic by asking for information about it
    @c = Connection.new("localhost", 9093, "metadata_fetcher", 10_000)
    @c.topic_metadata(["failure_spec"])
    sleep 1
  end

  def current_leadership_mapping(c)
    metadata = c.topic_metadata(["failure_spec"])
    topic_metadata = metadata.topics.find { |t| t.name == "failure_spec" }
    (0..2).map { |p| topic_metadata.partition_leader(p) }
  end

  it "produces a bunch of messages and consumes all without error" do
    @p = Producer.new(["localhost:9092","localhost:9093","localhost:9094"], "test",
                      :required_acks => -1)

    1.upto(25) do |n|
      @p.send_messages([MessageToSend.new("failure_spec", n.to_s)])
    end

    # The goal here is to have the producer attempt to send messages
    # to a broker which is no longer the leader for the partition.
    #
    # We accomplish this by turning off a broker which causes leadership
    # to failover. Then we turn that broker back on and begin sending
    # messages. While sending messages, the kafka cluster should rebalance
    # the partitions causing leadership to switch back to the original
    # broker in the midst of messages being sent.
    #
    # We compare leadership before and after the message sending period
    # to make sure we were successful.
    $tc.stop_first_broker
    sleep 30
    SPEC_LOGGER.info "Pre start #{current_leadership_mapping(@c).inspect}"
    $tc.start_first_broker

    pre_send_leadership = current_leadership_mapping(@c)
    SPEC_LOGGER.info "Pre send #{pre_send_leadership.inspect}"
    26.upto(50) do |n|
      sleep 0.5
      @p.send_messages([MessageToSend.new("failure_spec", n.to_s)])
    end
    post_send_leadership = current_leadership_mapping(@c)
    SPEC_LOGGER.info "Post send #{post_send_leadership.inspect}"

    expect(pre_send_leadership).to_not eq(post_send_leadership)

    messages = []
    0.upto(2) do |partition|
      consumer = PartitionConsumer.consumer_for_partition("consumer_failure_spect",
                                                          ["localhost:9092","localhost:9093","localhost:9094"],
                                                          "failure_spec",
                                                          partition,
                                                          :earliest_offset)
      while (fetched = consumer.fetch).any?
        messages.push(*fetched)
      end
    end

    expect(messages.size).to eq(50)
    expect(messages.map { |m| m.value.to_i }.sort).to eq((1..50).to_a)
  end
end
data/spec/integration/multiple_brokers/round_robin_spec.rb
@@ -0,0 +1,41 @@
require 'integration/multiple_brokers/spec_helper'

RSpec.describe "round robin sending", :type => :request do
  include_context "a multiple broker cluster"

  describe "with small message batches" do
    it "evenly distributes messages across brokers" do
      c = Connection.new("localhost", 9092, "metadata_fetcher", 10_000)
      md = c.topic_metadata(["test"])
      sleep 1
      md = c.topic_metadata(["test"])

      test_topic = md.topics.first

      consumers = test_topic.send(:partitions).map do |partition|
        leader_id = partition.leader
        broker = md.brokers.find { |b| b.id == leader_id }
        PartitionConsumer.new("test_consumer_#{partition.id}", broker.host,
                              broker.port, "test", partition.id, -1)
      end

      # Update offsets to current position before adding test messages
      consumers.each do |c|
        c.fetch
      end

      @p = Producer.new(["localhost:9092","localhost:9093","localhost:9094"], "test",
                        :required_acks => 1)
      24.times do
        @p.send_messages([MessageToSend.new("test", "hello")])
      end

      sleep 5

      consumers.each do |c|
        messages = c.fetch
        expect(messages.size).to eq(8)
      end
    end
  end
end
data/spec/integration/multiple_brokers/spec_helper.rb
@@ -0,0 +1,60 @@
require 'spec_helper'

require 'test_cluster'

class ThreeBrokerCluster
  attr_reader :brokers, :zookeeper

  def initialize(partitions, properties = {})
    @zookeeper = ZookeeperRunner.new
    @brokers = (9092..9094).map { |port| BrokerRunner.new(port - 9092, port,
                                                          partitions,
                                                          2,
                                                          properties) }
  end

  def start
    @zookeeper.start
    @brokers.each(&:start)
    sleep 5
  end

  def stop
    SPEC_LOGGER.info "Stopping three broker cluster"
    SPEC_LOGGER.info "Stopping brokers"
    @brokers.each(&:stop)
    sleep 5

    SPEC_LOGGER.info "Stopping ZK"
    @zookeeper.stop
    sleep 5
  end

  def stop_first_broker
    SPEC_LOGGER.info "Stopping first broker"
    @brokers.first.stop
    sleep 5
  end

  def start_first_broker
    SPEC_LOGGER.info "Starting first broker"
    @brokers.first.start
  end
end

RSpec.shared_context "a multiple broker cluster" do
  let(:kafka_partitions) { 3 }

  before(:each) do
    JavaRunner.remove_tmp
    JavaRunner.set_kafka_path!
    $tc = ThreeBrokerCluster.new(kafka_partitions)
    $tc.start
    SPEC_LOGGER.info "Waiting on cluster"
    sleep 10 # wait for cluster to come up
  end

  after(:each) do
    $tc.stop if $tc
  end
end
data/spec/integration/simple/compression_spec.rb
@@ -0,0 +1,23 @@
require 'integration/simple/spec_helper'

RSpec.describe "compression", :type => :request do
  include_context "a single broker cluster"

  it "roundtrips" do
    i = rand(1000)

    @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                      "test12", 0, -2)

    @producer = Producer.new(["localhost:9092"],
                             "test_client",
                             :type => :sync,
                             :compression_codec => :gzip)
    messages = [MessageToSend.new("test12", "Hello World: #{i}")]

    expect(@producer.send_messages(messages)).to eq(true)
    sleep 1
    messages = @consumer.fetch
    expect(messages.last.value).to eq("Hello World: #{i}")
  end
end
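
This spec only covers the gzip codec, but the file listing above also includes data/lib/poseidon/compression/snappy_codec.rb. Assuming the codec is selected the same way (the :snappy symbol is an assumption, not confirmed by this diff), the producer setup would look like:

    # Hypothetical: same options as the spec above, with the Snappy codec swapped in.
    @producer = Producer.new(["localhost:9092"],
                             "test_client",
                             :type => :sync,
                             :compression_codec => :snappy)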
data/spec/integration/simple/connection_spec.rb
@@ -0,0 +1,35 @@
require 'integration/simple/spec_helper'

include Protocol
RSpec.describe Connection, :type => :request do
  include_context "a single broker cluster"

  before(:each) do
    @connection = Connection.new("localhost", 9092, "test", 10_000)
  end

  it 'sends and parses topic metadata requests' do
    @connection.topic_metadata(["test2"])
  end

  it 'sends and parses produce requests' do
    message = MessageStruct.new(0, 0, nil, "hello")
    message_with_offset = MessageWithOffsetStruct.new(0, message)
    message_set = MessageSetStruct.new([message_with_offset])
    messages_for_partitions = [MessagesForPartition.new(0,message_set)]
    messages_for_topics = [MessagesForTopic.new("test2",messages_for_partitions)]
    @connection.produce(1, 10_000, messages_for_topics)
  end

  it 'sends and parses fetch requests' do
    partition_fetches = [PartitionFetch.new(0,0,1024*1024)]
    topic_fetches = [TopicFetch.new("test2", partition_fetches)]
    @connection.fetch(1000, 0, topic_fetches)
  end

  it 'sends and parses offset requests' do
    partition_offset_requests = [PartitionOffsetRequest.new(0,-1,1000)]
    offset_topic_requests = [TopicOffsetRequest.new("test2", partition_offset_requests)]
    @connection.offset(offset_topic_requests)
  end
end
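
The spec above drives the low-level Connection API with positional arguments. A hedged reading of what those positions mean, inferred from how the spec uses them (the argument names are descriptive guesses, not taken from the gem's documentation):

    # Connection.new(host, port, client_id, socket_timeout_ms)
    conn = Connection.new("localhost", 9092, "test", 10_000)

    # produce(required_acks, timeout_ms, messages_for_topics) -- payload built as in the spec above
    conn.produce(1, 10_000, messages_for_topics)

    # fetch(max_wait_ms, min_bytes, topic_fetches)
    conn.fetch(1000, 0, topic_fetches)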
data/spec/integration/simple/simple_producer_and_consumer_spec.rb
@@ -0,0 +1,121 @@
require 'integration/simple/spec_helper'

RSpec.describe "simple producer and consumer", :type => :request do
  include_context "a single broker cluster"

  describe "writing and consuming one topic" do
    it "fetches produced messages" do
      @producer = Producer.new(["localhost:9092"],
                               "test_client",
                               :type => :sync)


      messages = [MessageToSend.new("topic_simple_producer_and_consumer", "Hello World")]
      expect(@producer.send_messages(messages)).to eq(true)

      @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                        "topic_simple_producer_and_consumer", 0, -2)
      messages = @consumer.fetch
      expect(messages.last.value).to eq("Hello World")

      @producer.close
    end

    it "fetches only messages since the last offset" do
      @producer = Producer.new(["localhost:9092"],
                               "test_client",
                               :type => :sync,
                               :required_acks => 1)

      @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                        "topic_simple_producer_and_consumer", 0, -1)

      # Read up to the end of the current messages (if there are any)
      begin
        @consumer.fetch
      rescue Errors::UnknownTopicOrPartition
      end

      # First Batch
      messages = [MessageToSend.new("topic_simple_producer_and_consumer", "Hello World")]
      expect(@producer.send_messages(messages)).to eq(true)

      messages = @consumer.fetch
      expect(messages.last.value).to eq("Hello World")

      # Second Batch
      messages = [MessageToSend.new("topic_simple_producer_and_consumer", "Hello World Again")]
      expect(@producer.send_messages(messages)).to eq(true)

      messages = @consumer.fetch
      expect(messages.map(&:value)).to eq(["Hello World Again"])

      # Empty Batch
      messages = @consumer.fetch
      expect(messages.empty?).to eq(true)
    end

    it "waits for messages" do
      # Create topic
      @c = Connection.new("localhost", 9092, "metadata_fetcher", 10_000)
      @c.topic_metadata(["simple_wait_test"])

      sleep 5
      @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                        "simple_wait_test", 0, :earliest_offset,
                                        :max_wait_ms => 2500)

      require 'benchmark'
      n = Benchmark.realtime do
        @consumer.fetch
      end
      expect(n).to be_within(0.25).of(2.5)
    end

    # Not sure what's going on here, will revisit.
=begin
    it "fetches larger messages with a larger max bytes size" do
      @producer = Producer.new(["localhost:9092"],
                               "test_client",
                               :type => :sync,
                               :required_acks => 1)

      @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                        "topic_simple_producer_and_consumer", 0, -2)

      messages = []
      2000.times do
        messages << MessageToSend.new("topic_simple_producer_and_consumer",'KcjNyFBtqfSbpwjjcGKckMKLUCWz83IVcp21C8FQzs8JJKKTTrc4OLxSjLpYc5z7fsncX59te2cBn0sWDRaYmRuZyttRMLMHvXrM5o3QReKPIYUKzVCFahC4cb3Ivcbb5ZuS98Ohnb7Io42Bz9FucXwwGkQyFhJwyn3nD3BYs5r8TZM8Q76CGR2kTH1rjnFeB7J3hrRKukztxCrDY3smrQE1bbVR80IF3yWlhzkdfv3cpfwnD0TKadtt21sFJANFmORAJ0HKs6Z2262hcBQyF7WcWypC2RoLWVgKVQxbouVUP7yV6YYOAQEevYrl9sOB0Yi6h1mS8fTBUmRTmWLqyl8KzwbnbQvmCvgnX26F5JEzIoXsVaoDT2ks5eep9RyE1zm5yPtbYVmd2Sz7t5ru0wj6YiAmbF7Xgiw2l4VpNOxG0Ec6rFxXRXs0bahyBd2YtxpGyZBeruIK1RAN4n0t97xVXgZG5CGoVhL1oRDxw2pTbwEO1cvwHiiYXpXSqaxF7G9kiiPsQt24Vu7chXrJT7Xqv4RIg1aOT5Os5JVlISaJCmx8ZLtbC3OjAdGtF1ZkDuUeQHHohqeKh0qBJjw7Rv1oSDwcM0MRazjF36jijpYg26Qml9lSEnGYIFLQWHVDWKqqhl2GIntjxDXn1IyI')
      end
      expect(@producer.send_messages(messages)).to eq(true)

      messages = @consumer.fetch
      expect(messages.length).to be > 2

      @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                        "topic_simple_producer_and_consumer", 0, -2)
      messages = @consumer.fetch(:max_bytes => 1400000)
      expect(messages.length).to be > 2
    end
=end
  end

  describe "broker that becomes unavailable" do
    it "fails the fetch" do
      @producer = Producer.new(["localhost:9092"],
                               "test_client",
                               :type => :sync)


      messages = [MessageToSend.new("topic_simple_producer_and_consumer", "Hello World")]
      expect(@producer.send_messages(messages)).to eq(true)

      @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                        "topic_simple_producer_and_consumer", 0, -2)

      $tc.broker.without_process do
        expect { @consumer.fetch }.to raise_error(Connection::ConnectionFailedError)
      end
    end
  end
end