codeclimate-poseidon 0.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +21 -0
- data/.rspec +2 -0
- data/.travis.yml +14 -0
- data/.yardopts +8 -0
- data/CHANGES.md +31 -0
- data/Gemfile +13 -0
- data/LICENSE.txt +22 -0
- data/README.md +72 -0
- data/Rakefile +20 -0
- data/TODO.md +27 -0
- data/examples/consumer.rb +18 -0
- data/examples/producer.rb +9 -0
- data/lib/poseidon.rb +120 -0
- data/lib/poseidon/broker_pool.rb +86 -0
- data/lib/poseidon/cluster_metadata.rb +94 -0
- data/lib/poseidon/compressed_value.rb +23 -0
- data/lib/poseidon/compression.rb +30 -0
- data/lib/poseidon/compression/gzip_codec.rb +23 -0
- data/lib/poseidon/compression/snappy_codec.rb +29 -0
- data/lib/poseidon/connection.rb +169 -0
- data/lib/poseidon/fetched_message.rb +37 -0
- data/lib/poseidon/message.rb +151 -0
- data/lib/poseidon/message_conductor.rb +86 -0
- data/lib/poseidon/message_set.rb +80 -0
- data/lib/poseidon/message_to_send.rb +33 -0
- data/lib/poseidon/messages_for_broker.rb +56 -0
- data/lib/poseidon/messages_to_send.rb +47 -0
- data/lib/poseidon/messages_to_send_batch.rb +27 -0
- data/lib/poseidon/partition_consumer.rb +225 -0
- data/lib/poseidon/producer.rb +199 -0
- data/lib/poseidon/producer_compression_config.rb +37 -0
- data/lib/poseidon/protocol.rb +122 -0
- data/lib/poseidon/protocol/protocol_struct.rb +256 -0
- data/lib/poseidon/protocol/request_buffer.rb +77 -0
- data/lib/poseidon/protocol/response_buffer.rb +72 -0
- data/lib/poseidon/sync_producer.rb +161 -0
- data/lib/poseidon/topic_metadata.rb +89 -0
- data/lib/poseidon/version.rb +4 -0
- data/log/.gitkeep +0 -0
- data/poseidon.gemspec +27 -0
- data/spec/integration/multiple_brokers/consumer_spec.rb +45 -0
- data/spec/integration/multiple_brokers/metadata_failures_spec.rb +144 -0
- data/spec/integration/multiple_brokers/rebalance_spec.rb +69 -0
- data/spec/integration/multiple_brokers/round_robin_spec.rb +41 -0
- data/spec/integration/multiple_brokers/spec_helper.rb +60 -0
- data/spec/integration/simple/compression_spec.rb +23 -0
- data/spec/integration/simple/connection_spec.rb +35 -0
- data/spec/integration/simple/multiple_brokers_spec.rb +10 -0
- data/spec/integration/simple/simple_producer_and_consumer_spec.rb +121 -0
- data/spec/integration/simple/spec_helper.rb +16 -0
- data/spec/integration/simple/truncated_messages_spec.rb +46 -0
- data/spec/integration/simple/unavailable_broker_spec.rb +72 -0
- data/spec/spec_helper.rb +32 -0
- data/spec/test_cluster.rb +211 -0
- data/spec/unit/broker_pool_spec.rb +98 -0
- data/spec/unit/cluster_metadata_spec.rb +46 -0
- data/spec/unit/compression/gzip_codec_spec.rb +34 -0
- data/spec/unit/compression/snappy_codec_spec.rb +49 -0
- data/spec/unit/compression_spec.rb +17 -0
- data/spec/unit/connection_spec.rb +4 -0
- data/spec/unit/fetched_message_spec.rb +11 -0
- data/spec/unit/message_conductor_spec.rb +164 -0
- data/spec/unit/message_set_spec.rb +42 -0
- data/spec/unit/message_spec.rb +129 -0
- data/spec/unit/message_to_send_spec.rb +10 -0
- data/spec/unit/messages_for_broker_spec.rb +54 -0
- data/spec/unit/messages_to_send_batch_spec.rb +25 -0
- data/spec/unit/messages_to_send_spec.rb +63 -0
- data/spec/unit/partition_consumer_spec.rb +142 -0
- data/spec/unit/producer_compression_config_spec.rb +42 -0
- data/spec/unit/producer_spec.rb +51 -0
- data/spec/unit/protocol/request_buffer_spec.rb +16 -0
- data/spec/unit/protocol_spec.rb +54 -0
- data/spec/unit/sync_producer_spec.rb +156 -0
- data/spec/unit/topic_metadata_spec.rb +43 -0
- metadata +225 -0
data/spec/integration/simple/spec_helper.rb
ADDED
@@ -0,0 +1,16 @@
+require 'spec_helper'
+
+require 'test_cluster'
+
+RSpec.shared_context "a single broker cluster" do
+  before(:each) do
+    JavaRunner.remove_tmp
+    JavaRunner.set_kafka_path!
+    $tc = TestCluster.new
+    $tc.start
+  end
+
+  after(:each) do
+    $tc.stop
+  end
+end
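Note that this shared context parks the running cluster in the global `$tc`, so individual examples and helpers can reach it once `include_context "a single broker cluster"` pulls the hooks in; the specs below, for instance, lean on `$tc.broker.without_process` to simulate outages.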
data/spec/integration/simple/truncated_messages_spec.rb
ADDED
@@ -0,0 +1,46 @@
+require 'integration/simple/spec_helper'
+
+RSpec.describe "truncated messages", :type => :request do
+  include_context "a single broker cluster"
+
+  before(:each) do
+    @s1 = "a" * 335
+    @s2 = "b" * 338
+
+    @producer = Producer.new(["localhost:9092"],
+                             "test_client",
+                             :type => :sync)
+
+    @producer.send_messages([Message.new(:topic => 'test_max_bytes', :value => @s1), Message.new(:topic => 'test_max_bytes', :value => @s2)])
+  end
+
+  it "correctly handles max_byte lengths smaller than a message" do
+    0.upto(360) do |n|
+      consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
+                                       "test_max_bytes", 0, :earliest_offset)
+      expect(consumer.fetch(:max_bytes => n)).to eq([])
+    end
+  end
+
+  it "correctly handles max_byte lengths that should return a single message" do
+    361.upto(724) do |n|
+      consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
+                                       "test_max_bytes", 0, :earliest_offset)
+
+      messages = consumer.fetch(:max_bytes => n)
+      expect(messages.size).to eq(1)
+      expect(messages.first.value).to eq(@s1)
+    end
+  end
+
+  it "correctly handles max_byte lengths that should return two messages" do
+    725.upto(1000) do |n|
+      consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
+                                       "test_max_bytes", 0, :earliest_offset)
+
+      messages = consumer.fetch(:max_bytes => n)
+      expect(messages.size).to eq(2)
+      expect(messages.map(&:value)).to eq([@s1, @s2])
+    end
+  end
+end
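The fetch windows in this spec follow from the per-message framing of the Kafka 0.8-era wire format. Assuming 26 bytes of overhead per message (8-byte offset, 4-byte message size, 4-byte CRC, 1-byte magic, 1-byte attributes, 4-byte key length, 4-byte value length), the boundaries work out as sketched below; the constant is an inference from the spec's numbers, not something the diff itself states.

    # Hypothetical framing arithmetic behind the 361/725 boundaries above:
    OVERHEAD = 26         # offset(8) + size(4) + crc(4) + magic(1) + attrs(1) + key_len(4) + value_len(4)
    s1 = OVERHEAD + 335   # => 361, the smallest max_bytes that returns @s1
    s2 = OVERHEAD + 338   # => 364
    s1 + s2               # => 725, the smallest max_bytes that returns both messages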
data/spec/integration/simple/unavailable_broker_spec.rb
ADDED
@@ -0,0 +1,72 @@
+require 'integration/simple/spec_helper'
+
+RSpec.describe "unavailable broker scenarios:", :type => :request do
+  include_context "a single broker cluster"
+
+  context "producer with a dead broker in bootstrap list" do
+    before(:each) do
+      @p = Producer.new(["localhost:9091","localhost:9092"], "test")
+    end
+
+    it "successfully sends a message" do
+      expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)
+
+      pc = PartitionConsumer.new("test_consumer", "localhost",
+                                 9092, "test", 0, -2)
+
+      messages = pc.fetch
+      expect(messages.last.value).to eq("hello")
+    end
+  end
+
+  context "producer with required_acks set to 1" do
+    before(:each) do
+      @p = Producer.new(["localhost:9092"], "test", :required_acks => 1)
+    end
+
+    context "broker stops running" do
+      it "fails to send" do
+        expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)
+
+        $tc.broker.without_process do
+          expect {
+            @p.send_messages([MessageToSend.new("test", "hello")])
+          }.to raise_error(Poseidon::Errors::UnableToFetchMetadata)
+        end
+      end
+    end
+
+    context "broker stops running but starts again" do
+      it "sends successfully once broker returns" do
+        expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)
+
+        $tc.broker.without_process do
+          expect {
+            @p.send_messages([MessageToSend.new("test", "hello")])
+          }.to raise_error(Poseidon::Errors::UnableToFetchMetadata)
+        end
+
+        expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)
+      end
+    end
+  end
+
+  context "producer with required_acks set to 0" do
+    before(:each) do
+      @p = Producer.new(["localhost:9092"], "test", :required_acks => 0)
+    end
+
+    context "broker stops running" do
+      it "fails to send" do
+        expect(@p.send_messages([MessageToSend.new("test", "hello_a")])).to eq(true)
+
+        $tc.broker.without_process do
+          @p.send_messages([MessageToSend.new("test", "hello_b")])
+          expect {
+            @p.send_messages([MessageToSend.new("test", "hello_b")])
+          }.to raise_error(Poseidon::Errors::UnableToFetchMetadata)
+        end
+      end
+    end
+  end
+end
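The asymmetry between the `:required_acks => 1` and `:required_acks => 0` contexts is deliberate: with acks set to 0 the producer fires and forgets, so the first send to a dead broker appears to succeed and only a subsequent send surfaces `UnableToFetchMetadata`, which is why that spec sends twice inside `without_process`. A minimal sketch of the two constructions these specs use:

    # Waits for the partition leader's acknowledgment; a dead broker
    # fails the send that hits it.
    acked = Producer.new(["localhost:9092"], "test", :required_acks => 1)

    # Fire-and-forget; a send to a dead broker cannot fail synchronously,
    # so the error only shows up on a later send.
    fire_and_forget = Producer.new(["localhost:9092"], "test", :required_acks => 0)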
data/spec/spec_helper.rb
ADDED
@@ -0,0 +1,32 @@
+# This file was generated by the `rspec --init` command. Conventionally, all
+# specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`.
+# Require this file using `require "spec_helper"` to ensure that it is only
+# loaded once.
+#
+# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration
+RSpec.configure do |config|
+  # Run specs in random order to surface order dependencies. If you find an
+  # order dependency and want to debug it, you can fix the order by providing
+  # the seed, which is printed after each run.
+  # --seed 1234
+  config.order = 'random'
+
+  config.disable_monkey_patching!
+end
+
+POSEIDON_PATH = File.absolute_path(File.dirname(__FILE__) + "/../")
+
+require 'logger'
+SPEC_LOGGER = Logger.new(File.join(POSEIDON_PATH, "log", "spec.log"))
+
+require 'simplecov'
+SimpleCov.start
+
+require 'poseidon'
+include Poseidon
+
+require 'coveralls'
+Coveralls.wear!
+
+require 'timecop'
+Timecop.safe_mode = true
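The last line, `Timecop.safe_mode = true`, keeps stubbed time from leaking across examples: in safe mode Timecop only permits time travel inside a block. A small illustration, assuming the timecop gem's documented safe-mode behavior:

    Timecop.safe_mode = true

    # Block form: the clock is restored when the block exits.
    Timecop.freeze(Time.utc(2014, 1, 1)) do
      Time.now  # => 2014-01-01 00:00:00 UTC
    end

    # Blockless form raises Timecop::SafeModeException under safe mode.
    Timecop.freeze(Time.utc(2014, 1, 1))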
data/spec/test_cluster.rb
ADDED
@@ -0,0 +1,211 @@
+class TestCluster
+  attr_reader :broker, :zookeeper
+  def initialize
+    @zookeeper = ZookeeperRunner.new
+    @broker = BrokerRunner.new(0, 9092)
+  end
+
+  def start
+    @zookeeper.start
+    @broker.start
+    sleep 5
+  end
+
+  def stop
+    # The broker will end up in a state where it ignores SIGTERM
+    # if zookeeper is stopped before the broker.
+    @broker.stop
+    sleep 5
+
+    @zookeeper.stop
+  end
+end
+
+class JavaRunner
+  def self.remove_tmp
+    FileUtils.rm_rf("#{POSEIDON_PATH}/tmp")
+  end
+
+  def self.set_kafka_path!
+    if ENV['KAFKA_PATH']
+      JavaRunner.kafka_path = ENV['KAFKA_PATH']
+    else
+      puts "******To run integration specs you must set KAFKA_PATH to kafka src directory. See README*****"
+      exit
+    end
+  end
+
+  def self.kafka_path=(kafka_path)
+    @kafka_path = kafka_path
+  end
+
+  def self.kafka_path
+    @kafka_path
+  end
+
+  def initialize(id, start_cmd, pid_cmd, kill_signal, properties = {})
+    @id = id
+    @properties = properties
+    @start_cmd = start_cmd
+    @pid_cmd = pid_cmd
+    @kill_signal = kill_signal
+    @stopped = false
+  end
+
+  def start
+    write_properties
+    run
+  end
+
+  def stop
+    if !@stopped
+      killed_at = Time.now
+      loop do
+        if (pid = `#{@pid_cmd}`.to_i) == 0
+          SPEC_LOGGER.info "Killed."
+          break
+        end
+
+        if Time.now - killed_at > 30
+          raise "Failed to kill process!"
+        end
+
+        SPEC_LOGGER.info "Sending #{@kill_signal} To #{pid}"
+        SPEC_LOGGER.info "(#{@start_cmd})"
+        `kill -#{@kill_signal} #{pid}`
+
+        sleep 5
+      end
+      @stopped = true
+    end
+  end
+
+  def without_process
+    stop
+    sleep 5
+    begin
+      yield
+    ensure
+      start
+      sleep 5
+    end
+  end
+
+  private
+
+  def run
+    FileUtils.mkdir_p(log_dir)
+    `LOG_DIR=#{log_dir} #{@start_cmd} #{config_path}`
+    @stopped = false
+  end
+
+  def write_properties
+    FileUtils.mkdir_p(config_dir)
+    File.open(config_path, "w+") do |f|
+      @properties.each do |k,v|
+        f.puts "#{k}=#{v}"
+      end
+    end
+  end
+
+  def log_dir
+    "#{file_path}/log"
+  end
+
+  def config_path
+    "#{config_dir}/#{@id}.properties"
+  end
+
+  def config_dir
+    "#{file_path}/config"
+  end
+
+  def file_path
+    POSEIDON_PATH + "/tmp/"
+  end
+end
+
+class BrokerRunner
+  DEFAULT_PROPERTIES = {
+    "broker.id" => 0,
+    "port" => 9092,
+    "num.network.threads" => 2,
+    "num.io.threads" => 2,
+    "socket.send.buffer.bytes" => 1048576,
+    "socket.receive.buffer.bytes" => 1048576,
+    "socket.request.max.bytes" => 104857600,
+    "log.dir" => "#{POSEIDON_PATH}/tmp/kafka-logs",
+    "num.partitions" => 1,
+    "log.flush.interval.messages" => 10000,
+    "log.flush.interval.ms" => 1000,
+    "log.retention.hours" => 168,
+    "log.segment.bytes" => 536870912,
+    #"log.cleanup.interval.mins" => 1,
+    "zookeeper.connect" => "localhost:2181",
+    "zookeeper.connection.timeout.ms" => 1000000,
+    #"kafka.metrics.polling.interval.secs" => 5,
+    #"kafka.metrics.reporters" => "kafka.metrics.KafkaCSVMetricsReporter",
+    #"kafka.csv.metrics.dir" => "#{POSEIDON_PATH}/tmp/kafka_metrics",
+    #"kafka.csv.metrics.reporter.enabled" => "false",
+    "auto.create.topics.enable" => "true",
+
+    # Trigger rebalances often to catch edge cases.
+    "auto.leader.rebalance.enable" => "true",
+    "leader.imbalance.check.interval.seconds" => 5
+  }
+
+  attr_reader :id
+
+  def initialize(id, port, partition_count = 1, replication_factor = 1, properties = {})
+    @id = id
+    @port = port
+    @jr = JavaRunner.new("broker_#{id}",
+                         "#{ENV['KAFKA_PATH']}/bin/kafka-run-class.sh -daemon -name broker_#{id} kafka.Kafka",
+                         "ps ax | grep -i 'kafka\.Kafka' | grep java | grep broker_#{id} | grep -v grep | awk '{print $1}'",
+                         "SIGTERM",
+                         DEFAULT_PROPERTIES.merge(
+                           "broker.id" => id,
+                           "port" => port,
+                           "log.dir" => "#{POSEIDON_PATH}/tmp/kafka-logs_#{id}",
+                           "default.replication.factor" => replication_factor,
+                           "num.partitions" => partition_count
+                         ).merge(properties))
+  end
+
+  def start
+    @jr.start
+  end
+
+  def stop
+    @jr.stop
+  end
+
+  def without_process
+    @jr.without_process { yield }
+  end
+end
+
+
+class ZookeeperRunner
+  def initialize
+    @jr = JavaRunner.new("zookeeper",
+                         "#{ENV['KAFKA_PATH']}/bin/zookeeper-server-start.sh -daemon",
+                         "ps ax | grep -i 'zookeeper' | grep -v grep | awk '{print $1}'",
+                         "SIGKILL",
+                         :dataDir => "#{POSEIDON_PATH}/tmp/zookeeper",
+                         :clientPort => 2181,
+                         :maxClientCnxns => 0)
+  end
+
+  def pid
+    @jr.pid
+  end
+
+  def start
+    @jr.start
+  end
+
+  def stop
+    @jr.stop
+  end
+end
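Put together, the shared contexts above drive this harness roughly as follows (a sketch, assuming `KAFKA_PATH` points at a Kafka source checkout, per `set_kafka_path!`):

    JavaRunner.remove_tmp       # wipe tmp/ from a previous run
    JavaRunner.set_kafka_path!  # exits with a README pointer unless ENV['KAFKA_PATH'] is set

    $tc = TestCluster.new       # one ZookeeperRunner plus one BrokerRunner on port 9092
    $tc.start                   # boots zookeeper, then the broker, then sleeps to settle

    # Simulate an outage for the duration of the block; the ensure clause
    # in JavaRunner#without_process restarts the broker afterwards.
    $tc.broker.without_process do
      # exercise failure handling here
    end

    $tc.stop                    # broker first, then zookeeper (see TestCluster#stop)

One wrinkle worth flagging: `ZookeeperRunner#pid` delegates to `@jr.pid`, a method `JavaRunner` never defines; nothing in these specs calls it, so the gap goes unnoticed.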
data/spec/unit/broker_pool_spec.rb
ADDED
@@ -0,0 +1,98 @@
+require 'spec_helper'
+
+RSpec.describe BrokerPool do
+  context "empty broker list" do
+    it "raises UnknownBroker error when trying to produce data" do
+      expect { BrokerPool.new("test_client", [], 10_000).execute_api_call(0, :produce) }.to raise_error(BrokerPool::UnknownBroker)
+    end
+  end
+
+  describe "fetching metadata" do
+    context "single broker" do
+      it "initializes connection properly" do
+        @broker_pool = BrokerPool.new("test_client", ["localhost:9092"], 2_000)
+        @broker = double('Poseidon::Connection', :topic_metadata => nil)
+
+        expected_args = ["localhost", "9092", "test_client", 2_000]
+        connection = double('conn').as_null_object
+
+        expect(Connection).to receive(:new).with(*expected_args).and_return(connection)
+
+        @broker_pool.fetch_metadata(Set.new)
+      end
+    end
+
+    context "no seed brokers" do
+      it "raises Error" do
+        @broker_pool = BrokerPool.new("test_client", [], 10_000)
+        expect { @broker_pool.fetch_metadata(Set.new) }.to raise_error(Errors::UnableToFetchMetadata)
+      end
+    end
+
+    context "2 seed brokers" do
+      before(:each) do
+        @broker_pool = BrokerPool.new("test_client", ["first:9092","second:9092"], 10_000)
+        @broker_1 = double('Poseidon::Connection_1', :topic_metadata => nil, :close => nil)
+        @broker_2 = double('Poseidon::Connection_2', :topic_metadata => double('topic_metadata').as_null_object, :close => nil)
+        allow(Connection).to receive(:new).and_return(@broker_1, @broker_2)
+      end
+
+      context ", first doesn't have metadata" do
+        it "asks the second" do
+          expect(@broker_2).to receive(:topic_metadata)
+
+          @broker_pool.fetch_metadata(Set.new)
+        end
+      end
+
+      it "cleans up its connections" do
+        expect(@broker_1).to receive(:close)
+        expect(@broker_2).to receive(:close)
+
+        @broker_pool.fetch_metadata(Set.new)
+      end
+    end
+  end
+
+  context "which knows about two brokers" do
+    before(:each) do
+      @broker_pool = BrokerPool.new("test_client", [], 10_000)
+      @broker_pool.update_known_brokers({0 => { :host => "localhost", :port => 9092 }, 1 => {:host => "localhost", :port => 9093 }})
+    end
+
+    describe "when executing a call" do
+
+      it "creates a connection for the correct broker" do
+        c = double('conn').as_null_object
+        expected_args = ["localhost", 9092, "test_client", 10_000]
+
+        expect(Connection).to receive(:new).with(*expected_args).and_return(c)
+        @broker_pool.execute_api_call(0, :produce)
+      end
+
+      it "does so on the correct broker" do
+        c = double('conn').as_null_object
+        allow(Connection).to receive(:new).and_return(c)
+
+        expect(c).to receive(:produce)
+        @broker_pool.execute_api_call(0, :produce)
+      end
+    end
+
+    describe "when executing two calls" do
+      it "reuses the connection" do
+        c = double('conn').as_null_object
+
+        expect(Connection).to receive(:new).once.and_return(c)
+        @broker_pool.execute_api_call(0, :produce)
+        @broker_pool.execute_api_call(0, :produce)
+      end
+    end
+
+    describe "executing a call for an unknown broker" do
+      it "raises UnknownBroker" do
+        expect { @broker_pool.execute_api_call(2, :produce) }.to raise_error(BrokerPool::UnknownBroker)
+      end
+    end
+  end
+end
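Read together, these examples pin down the BrokerPool contract: it is constructed with a client id, a seed broker list, and a socket timeout; metadata is fetched by walking the seed list (closing every probe connection); and API calls are routed to known brokers, with connections created lazily and cached per broker id. A usage sketch implied by the spec (an inference, not an excerpt from the library):

    pool = BrokerPool.new("test_client", ["localhost:9092"], 10_000)

    # Walk the seed brokers until one returns topic metadata.
    metadata = pool.fetch_metadata(Set.new)  # Set of topic names to look up

    # Routing table: broker id => address, typically taken from metadata.
    pool.update_known_brokers({0 => { :host => "localhost", :port => 9092 }})

    pool.execute_api_call(0, :produce)  # lazily opens and caches a Connection
    pool.execute_api_call(2, :produce)  # unknown id => BrokerPool::UnknownBroker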