poseidon 0.0.1
Diff of the published contents of poseidon 0.0.1 as released to its public registry. Every file below is newly added in this version.
- data/.gitignore +19 -0
- data/.rspec +2 -0
- data/.travis.yml +12 -0
- data/.yardopts +8 -0
- data/Gemfile +13 -0
- data/LICENSE.txt +22 -0
- data/README.md +71 -0
- data/Rakefile +17 -0
- data/TODO.md +27 -0
- data/examples/consumer.rb +18 -0
- data/examples/producer.rb +9 -0
- data/lib/poseidon/broker_pool.rb +72 -0
- data/lib/poseidon/cluster_metadata.rb +63 -0
- data/lib/poseidon/compressed_value.rb +23 -0
- data/lib/poseidon/compression/gzip_codec.rb +23 -0
- data/lib/poseidon/compression/snappy_codec.rb +17 -0
- data/lib/poseidon/compression.rb +30 -0
- data/lib/poseidon/connection.rb +138 -0
- data/lib/poseidon/fetched_message.rb +37 -0
- data/lib/poseidon/message.rb +151 -0
- data/lib/poseidon/message_conductor.rb +84 -0
- data/lib/poseidon/message_set.rb +80 -0
- data/lib/poseidon/message_to_send.rb +33 -0
- data/lib/poseidon/messages_for_broker.rb +39 -0
- data/lib/poseidon/messages_to_send.rb +47 -0
- data/lib/poseidon/messages_to_send_batch.rb +27 -0
- data/lib/poseidon/partition_consumer.rb +154 -0
- data/lib/poseidon/producer.rb +193 -0
- data/lib/poseidon/producer_compression_config.rb +36 -0
- data/lib/poseidon/protocol/protocol_struct.rb +238 -0
- data/lib/poseidon/protocol/request_buffer.rb +78 -0
- data/lib/poseidon/protocol/response_buffer.rb +72 -0
- data/lib/poseidon/protocol.rb +122 -0
- data/lib/poseidon/sync_producer.rb +117 -0
- data/lib/poseidon/topic_metadata.rb +65 -0
- data/lib/poseidon/version.rb +4 -0
- data/lib/poseidon.rb +102 -0
- data/poseidon.gemspec +24 -0
- data/spec/bin/kafka-run-class.sh +65 -0
- data/spec/integration/multiple_brokers/round_robin_spec.rb +39 -0
- data/spec/integration/multiple_brokers/spec_helper.rb +34 -0
- data/spec/integration/simple/compression_spec.rb +20 -0
- data/spec/integration/simple/connection_spec.rb +33 -0
- data/spec/integration/simple/multiple_brokers_spec.rb +8 -0
- data/spec/integration/simple/simple_producer_and_consumer_spec.rb +97 -0
- data/spec/integration/simple/spec_helper.rb +17 -0
- data/spec/integration/simple/unavailable_broker_spec.rb +77 -0
- data/spec/spec_helper.rb +32 -0
- data/spec/test_cluster.rb +205 -0
- data/spec/unit/broker_pool_spec.rb +77 -0
- data/spec/unit/cluster_metadata_spec.rb +41 -0
- data/spec/unit/compression_spec.rb +17 -0
- data/spec/unit/connection_spec.rb +4 -0
- data/spec/unit/fetched_message_spec.rb +11 -0
- data/spec/unit/message_conductor_spec.rb +147 -0
- data/spec/unit/message_set_spec.rb +42 -0
- data/spec/unit/message_spec.rb +112 -0
- data/spec/unit/message_to_send_spec.rb +10 -0
- data/spec/unit/messages_for_broker_spec.rb +54 -0
- data/spec/unit/messages_to_send_batch_spec.rb +25 -0
- data/spec/unit/messages_to_send_spec.rb +63 -0
- data/spec/unit/partition_consumer_spec.rb +124 -0
- data/spec/unit/producer_compression_config_spec.rb +35 -0
- data/spec/unit/producer_spec.rb +45 -0
- data/spec/unit/protocol_spec.rb +54 -0
- data/spec/unit/sync_producer_spec.rb +141 -0
- data/spec/unit/topic_metadata_spec.rb +17 -0
- metadata +206 -0
data/spec/integration/simple/simple_producer_and_consumer_spec.rb
ADDED
@@ -0,0 +1,97 @@
require 'integration/simple/spec_helper'

describe "simple producer and consumer" do

  describe "writing and consuming one topic" do
    it "fetches produced messages" do
      @producer = Producer.new(["localhost:9092"],
                               "test_client",
                               :type => :sync)

      messages = [MessageToSend.new("topic_simple_producer_and_consumer", "Hello World")]
      expect(@producer.send_messages(messages)).to eq(true)

      @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                        "topic_simple_producer_and_consumer", 0, -2)
      messages = @consumer.fetch
      expect(messages.last.value).to eq("Hello World")

      @producer.shutdown
    end

    it "fetches only messages since the last offset" do
      @producer = Producer.new(["localhost:9092"],
                               "test_client",
                               :type => :sync,
                               :required_acks => 1)

      @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                        "topic_simple_producer_and_consumer", 0, -1)

      # Read up to the end of the current messages
      @consumer.fetch

      # First Batch
      messages = [MessageToSend.new("topic_simple_producer_and_consumer", "Hello World")]
      expect(@producer.send_messages(messages)).to eq(true)

      messages = @consumer.fetch
      expect(messages.last.value).to eq("Hello World")

      # Second Batch
      messages = [MessageToSend.new("topic_simple_producer_and_consumer", "Hello World Again")]
      expect(@producer.send_messages(messages)).to eq(true)

      messages = @consumer.fetch
      expect(messages.map(&:value)).to eq(["Hello World Again"])

      # Empty Batch
      messages = @consumer.fetch
      expect(messages.empty?).to eq(true)
    end

    it "fetches larger messages with a larger max bytes size" do
      @producer = Producer.new(["localhost:9092"],
                               "test_client",
                               :type => :sync,
                               :required_acks => 1)

      @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                        "topic_simple_producer_and_consumer", 0, -2)

      messages = []
      2000.times do
        messages << MessageToSend.new("topic_simple_producer_and_consumer",'KcjNyFBtqfSbpwjjcGKckMKLUCWz83IVcp21C8FQzs8JJKKTTrc4OLxSjLpYc5z7fsncX59te2cBn0sWDRaYmRuZyttRMLMHvXrM5o3QReKPIYUKzVCFahC4cb3Ivcbb5ZuS98Ohnb7Io42Bz9FucXwwGkQyFhJwyn3nD3BYs5r8TZM8Q76CGR2kTH1rjnFeB7J3hrRKukztxCrDY3smrQE1bbVR80IF3yWlhzkdfv3cpfwnD0TKadtt21sFJANFmORAJ0HKs6Z2262hcBQyF7WcWypC2RoLWVgKVQxbouVUP7yV6YYOAQEevYrl9sOB0Yi6h1mS8fTBUmRTmWLqyl8KzwbnbQvmCvgnX26F5JEzIoXsVaoDT2ks5eep9RyE1zm5yPtbYVmd2Sz7t5ru0wj6YiAmbF7Xgiw2l4VpNOxG0Ec6rFxXRXs0bahyBd2YtxpGyZBeruIK1RAN4n0t97xVXgZG5CGoVhL1oRDxw2pTbwEO1cvwHiiYXpXSqaxF7G9kiiPsQt24Vu7chXrJT7Xqv4RIg1aOT5Os5JVlISaJCmx8ZLtbC3OjAdGtF1ZkDuUeQHHohqeKh0qBJjw7Rv1oSDwcM0MRazjF36jijpYg26Qml9lSEnGYIFLQWHVDWKqqhl2GIntjxDXn1IyI')
      end
      expect(@producer.send_messages(messages)).to eq(true)

      messages = @consumer.fetch
      expect(messages.length).to be > 2

      @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                        "topic_simple_producer_and_consumer", 0, -2)
      messages = @consumer.fetch(:max_bytes => 1400000)
      expect(messages.length).to be > 2
    end
  end

  describe "broker that becomes unavailable" do
    it "fails the fetch" do
      @producer = Producer.new(["localhost:9092"],
                               "test_client",
                               :type => :sync)

      messages = [MessageToSend.new("topic_simple_producer_and_consumer", "Hello World")]
      expect(@producer.send_messages(messages)).to eq(true)

      @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
                                        "topic_simple_producer_and_consumer", 0, -2)

      $tc.broker.without_process do
        expect { @consumer.fetch }.to raise_error(Connection::ConnectionFailedError)
      end
    end
  end
end
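Note: the final two arguments to PartitionConsumer.new above are the partition number and the starting offset, where -2 and -1 are Kafka's special offset values (-2 means the earliest available message, -1 means only messages produced after the consumer attaches). A minimal consumption sketch based on the spec above, assuming the same local broker and topic ("sketch_consumer" is a hypothetical client id):

require 'poseidon'

consumer = Poseidon::PartitionConsumer.new("sketch_consumer", "localhost", 9092,
                                           "topic_simple_producer_and_consumer",
                                           0,    # partition
                                           -2)   # -2 = earliest offset, -1 = latest

# fetch returns FetchedMessage objects (topic, value, key, offset)
consumer.fetch.each do |message|
  puts "#{message.offset}: #{message.value}"
end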
data/spec/integration/simple/spec_helper.rb
ADDED
@@ -0,0 +1,17 @@
require 'spec_helper'

require 'test_cluster'

RSpec.configure do |config|
  config.before(:suite) do
    JavaRunner.remove_tmp
    JavaRunner.set_kafka_path!
    $tc = TestCluster.new
    $tc.start
    sleep 5
  end

  config.after(:suite) do
    $tc.stop
  end
end
data/spec/integration/simple/unavailable_broker_spec.rb
ADDED
@@ -0,0 +1,77 @@
require 'integration/simple/spec_helper'

describe "unavailable broker scenarios:" do
  context "producer with a dead broker in bootstrap list" do
    before(:each) do
      @p = Producer.new(["localhost:9091","localhost:9092"], "test")
    end

    it "successfully sends a message" do
      expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)

      pc = PartitionConsumer.new("test_consumer", "localhost",
                                 9092, "test", 0, -2)

      messages = pc.fetch
      expect(messages.last.value).to eq("hello")
    end
  end

  context "producer with required_acks set to 1" do
    before(:each) do
      @p = Producer.new(["localhost:9092"], "test", :required_acks => 1)
    end

    context "broker stops running" do
      it "fails to send" do
        expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)

        $tc.broker.without_process do
          expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(false)
        end
      end
    end

    context "broker stops running but starts again" do
      it "sends successfully once broker returns" do
        expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)

        $tc.broker.without_process do
          expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(false)
        end

        expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)
      end
    end
  end

  context "producer with required_acks set to 0" do
    before(:each) do
      @p = Producer.new(["localhost:9092"], "test", :required_acks => 0)
    end

    context "broker stops running" do
      it "fails to send" do
        expect(@p.send_messages([MessageToSend.new("test", "hello_a")])).to eq(true)

        $tc.broker.without_process do
          @p.send_messages([MessageToSend.new("test", "hello_b")])
          expect(@p.send_messages([MessageToSend.new("test", "hello_b")])).to eq(false)
        end
      end
    end

    context "broker stops running but starts again" do
      it "sends successfully once broker returns" do
        expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)

        $tc.broker.without_process do
          @p.send_messages([MessageToSend.new("test", "hello")])
          expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(false)
        end

        expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)
      end
    end
  end
end
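Note: the required_acks => 0 contexts fire one throwaway send inside without_process before asserting false, presumably because with no broker acknowledgment a failure only surfaces once the dead socket is noticed on a later write. A hedged sketch of that behavior, assuming the same local broker ("sketch_client" is a hypothetical client id):

require 'poseidon'

producer = Poseidon::Producer.new(["localhost:9092"], "sketch_client",
                                  :required_acks => 0)

# With :required_acks => 0 no response is read from the broker, so the first
# send after the broker dies may still return true; the failure is only
# reported on a subsequent send, once the broken connection is detected.
producer.send_messages([Poseidon::MessageToSend.new("test", "fire-and-forget")])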
data/spec/spec_helper.rb
ADDED
@@ -0,0 +1,32 @@
# This file was generated by the `rspec --init` command. Conventionally, all
# specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`.
# Require this file using `require "spec_helper"` to ensure that it is only
# loaded once.
#
# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration
RSpec.configure do |config|
  config.treat_symbols_as_metadata_keys_with_true_values = true
  config.run_all_when_everything_filtered = true
  config.filter_run :focus

  # Run specs in random order to surface order dependencies. If you find an
  # order dependency and want to debug it, you can fix the order by providing
  # the seed, which is printed after each run.
  #     --seed 1234
  config.order = 'random'

  config.expect_with :rspec do |c|
    c.syntax = :expect
  end
end

POSEIDON_PATH = File.absolute_path(File.dirname(__FILE__) + "/../")

require 'simplecov'
SimpleCov.start

require 'poseidon'
include Poseidon

require 'coveralls'
Coveralls.wear!
data/spec/test_cluster.rb
ADDED
@@ -0,0 +1,205 @@
require 'daemon_controller'

class TestCluster
  attr_reader :broker, :zookeeper
  def initialize
    @zookeeper = ZookeeperRunner.new
    @broker = BrokerRunner.new(0, 9092)
  end

  def start
    @zookeeper.start
    @broker.start
  end

  def stop
    @zookeeper.stop
    @broker.stop
  end
end

class JavaRunner
  def self.remove_tmp
    FileUtils.rm_rf("#{POSEIDON_PATH}/tmp")
  end

  def self.set_kafka_path!
    if ENV['KAFKA_PATH']
      JavaRunner.kafka_path = ENV['KAFKA_PATH']
    else
      puts "******To run integration specs you must set KAFKA_PATH to kafka src directory. See README*****"
      exit
    end
  end

  def self.kafka_path=(kafka_path)
    @kafka_path = kafka_path
  end

  def self.kafka_path
    @kafka_path
  end

  attr_reader :pid
  def initialize(id, start_cmd, port, properties = {})
    @id = id
    @properties = properties
    @pid = nil
    @start_cmd = start_cmd
    @port = port
  end

  def start
    write_properties
    run
  end

  def stop
    daemon_controller.stop
  end

  def without_process
    stop
    begin
      yield
    ensure
      start
      sleep 5
    end
  end

  private

  def daemon_controller
    @dc ||= DaemonController.new(
      :identifier => @id,
      :start_command => "#{@start_cmd} #{config_path} >>#{log_path} 2>&1 & echo $! > #{pid_path}",
      :ping_command => [:tcp, '127.0.0.1', @port],
      :pid_file => pid_path,
      :log_file => log_path,
      :start_timeout => 25
    )
  end

  def run
    FileUtils.mkdir_p(log_dir)
    FileUtils.mkdir_p(pid_dir)
    daemon_controller.start
  end

  def write_properties
    FileUtils.mkdir_p(config_dir)
    File.open(config_path, "w+") do |f|
      @properties.each do |k,v|
        f.puts "#{k}=#{v}"
      end
    end
  end

  def pid_path
    "#{pid_dir}/#{@id}.pid"
  end

  def pid_dir
    "#{file_path}/pid"
  end

  def log_path
    "#{log_dir}/#{@id}.log"
  end

  def log_dir
    "#{file_path}/log"
  end

  def config_path
    "#{config_dir}/#{@id}.properties"
  end

  def config_dir
    "#{file_path}/config"
  end

  def file_path
    POSEIDON_PATH + "/tmp/"
  end
end

class BrokerRunner
  DEFAULT_PROPERTIES = {
    "broker.id" => 0,
    "port" => 9092,
    "num.network.threads" => 2,
    "num.io.threads" => 2,
    "socket.send.buffer.bytes" => 1048576,
    "socket.receive.buffer.bytes" => 1048576,
    "socket.request.max.bytes" => 104857600,
    "log.dir" => "#{POSEIDON_PATH}/tmp/kafka-logs",
    "num.partitions" => 1,
    "log.flush.interval.messages" => 10000,
    "log.flush.interval.ms" => 1000,
    "log.retention.hours" => 168,
    "log.segment.bytes" => 536870912,
    "log.cleanup.interval.mins" => 1,
    "zookeeper.connect" => "localhost:2181",
    "zookeeper.connection.timeout.ms" => 1000000,
    "kafka.metrics.polling.interval.secs" => 5,
    "kafka.metrics.reporters" => "kafka.metrics.KafkaCSVMetricsReporter",
    "kafka.csv.metrics.dir" => "#{POSEIDON_PATH}/tmp/kafka_metrics",
    "kafka.csv.metrics.reporter.enabled" => "false",
  }

  def initialize(id, port, partition_count = 1)
    @id = id
    @port = port
    @jr = JavaRunner.new("broker_#{id}",
                         "#{POSEIDON_PATH}/spec/bin/kafka-run-class.sh kafka.Kafka",
                         port,
                         DEFAULT_PROPERTIES.merge(
                           "broker.id" => id,
                           "port" => port,
                           "log.dir" => "#{POSEIDON_PATH}/tmp/kafka-logs_#{id}",
                           "num.partitions" => partition_count
                         ))
  end

  def pid
    @jr.pid
  end

  def start
    @jr.start
  end

  def stop
    @jr.stop
  end

  def without_process
    @jr.without_process { yield }
  end
end


class ZookeeperRunner
  def initialize
    @jr = JavaRunner.new("zookeeper",
                         "#{POSEIDON_PATH}/spec/bin/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain",
                         2181,
                         :dataDir => "#{POSEIDON_PATH}/tmp/zookeeper",
                         :clientPort => 2181,
                         :maxClientCnxns => 0)
  end

  def pid
    @jr.pid
  end

  def start
    @jr.start
  end

  def stop
    @jr.stop
  end
end
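Note: a minimal sketch of driving TestCluster outside the RSpec suite hooks shown earlier, assuming a local Kafka source checkout in KAFKA_PATH; the POSEIDON_PATH constant and the require_relative path are assumptions here (POSEIDON_PATH is normally defined by spec/spec_helper.rb):

require 'fileutils'
POSEIDON_PATH = File.absolute_path(File.dirname(__FILE__))  # normally set in spec_helper.rb
require_relative 'spec/test_cluster'

JavaRunner.kafka_path = ENV.fetch('KAFKA_PATH')  # path to a Kafka source checkout

cluster = TestCluster.new
cluster.start   # boots ZooKeeper on 2181, then a single broker on 9092
sleep 5         # same settle delay the suite-level hooks use

cluster.broker.without_process do
  # the broker is down inside this block; it is restarted (plus a
  # 5-second sleep) when the block exits, even on error
end

cluster.stop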
data/spec/unit/broker_pool_spec.rb
ADDED
@@ -0,0 +1,77 @@
require 'spec_helper'

describe BrokerPool do
  context "empty broker list" do
    it "raises UnknownBroker error when trying to produce data" do
      expect { BrokerPool.new("test_client", []).execute_api_call(0, :produce) }.to raise_error(BrokerPool::UnknownBroker)
    end
  end

  describe "fetching metadata" do
    context "no seed brokers" do
      it "raises Error" do
        @broker_pool = BrokerPool.new("test_client", [])
        expect { @broker_pool.fetch_metadata(Set.new) }.to raise_error(Errors::UnableToFetchMetadata)
      end
    end

    context "2 seed brokers" do
      before(:each) do
        @broker_pool = BrokerPool.new("test_client", ["first:9092","second:9092"])
        @broker_1 = double('Poseidon::Connection_1', :topic_metadata => nil)
        @broker_2 = double('Poseidon::Connection_2', :topic_metadata => double('topic_metadata').as_null_object)
        Connection.stub!(:new).and_return(@broker_1, @broker_2)
      end

      context ", first doesn't have metadata" do
        it "asks the second" do
          @broker_2.should_receive(:topic_metadata)

          @broker_pool.fetch_metadata(Set.new)
        end
      end
    end
  end

  context "which knows about two brokers" do
    before(:each) do
      @broker_pool = BrokerPool.new("test_client", [])
      @broker_pool.update_known_brokers({0 => { :host => "localhost", :port => 9092 }, 1 => {:host => "localhost", :port => 9093 }})
    end

    describe "when executing a call" do

      it "creates a connection for the correct broker" do
        c = stub('conn').as_null_object
        expected_args = ["localhost", 9092, "test_client"]

        Connection.should_receive(:new).with(*expected_args).and_return(c)
        @broker_pool.execute_api_call(0, :produce)
      end

      it "does so on the correct broker" do
        c = stub('conn').as_null_object
        Connection.stub(:new).and_return(c)

        c.should_receive(:produce)
        @broker_pool.execute_api_call(0, :produce)
      end
    end

    describe "when executing two calls" do
      it "reuses the connection" do
        c = stub('conn').as_null_object

        Connection.should_receive(:new).once.and_return(c)
        @broker_pool.execute_api_call(0, :produce)
        @broker_pool.execute_api_call(0, :produce)
      end
    end

    describe "executing a call for an unknown broker" do
      it "raises UnknownBroker" do
        expect { @broker_pool.execute_api_call(2, :produce) }.to raise_error(BrokerPool::UnknownBroker)
      end
    end
  end
end
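Note: the bootstrap flow these specs pin down, as a hedged sketch (real API calls forward protocol arguments after the call name, omitted in the specs above because the connections are stubbed):

require 'set'
require 'poseidon'

pool = Poseidon::BrokerPool.new("sketch_client", ["localhost:9092"])

# Seed brokers exist only to answer the first metadata request; with no
# seeds, or none answering, this raises Errors::UnableToFetchMetadata.
pool.fetch_metadata(Set.new(["test"]))

# Routing table: broker id => address. Later api calls are routed by broker
# id, reuse one cached Connection per id, and raise BrokerPool::UnknownBroker
# for ids not present here.
pool.update_known_brokers(0 => { :host => "localhost", :port => 9092 })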
data/spec/unit/cluster_metadata_spec.rb
ADDED
@@ -0,0 +1,41 @@
require 'spec_helper'

include Protocol
describe ClusterMetadata do
  describe "populated" do
    before(:each) do
      partitions = [
        PartitionMetadata.new(1, 1, [1,2], [1,2], nil),
        PartitionMetadata.new(2, 2, [2,1], [2,1], nil)
      ]
      topics = [TopicMetadata.new(TopicMetadataStruct.new(nil, "test", partitions))]

      brokers = [Broker.new(1, "host1", 1), Broker.new(2, "host2", 2)]

      @mr = MetadataResponse.new(nil, brokers, topics)

      @cm = ClusterMetadata.new
      @cm.update(@mr)
    end

    it "knows when it has metadata for a set of topics" do
      have_metadata = @cm.have_metadata_for_topics?(Set.new(["test"]))
      expect(have_metadata).to eq(true)
    end

    it "knows when it doesn't have metadata for a topic" do
      have_metadata = @cm.have_metadata_for_topics?(Set.new(["test", "no_data"]))
      expect(have_metadata).to eq(false)
    end

    it "provides topic metadata for a set of topics" do
      topic_metadata = @cm.metadata_for_topics(Set.new(["test"]))
      expect(topic_metadata).to eq({ "test" => @mr.topics.first })
    end

    it "provides broker information" do
      broker = @cm.broker(1)
      expect(broker).to eq(@mr.brokers.first)
    end
  end
end
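Note: the update/query cycle this spec sets up, condensed into a standalone sketch using the same protocol structs (arities taken from the spec; values are illustrative):

require 'set'
require 'poseidon'
include Poseidon
include Poseidon::Protocol

partitions = [PartitionMetadata.new(1, 1, [1, 2], [1, 2], nil)]
topics     = [TopicMetadata.new(TopicMetadataStruct.new(nil, "test", partitions))]
brokers    = [Broker.new(1, "host1", 9092)]

# A MetadataResponse from any broker seeds the cache, which can then
# answer routing questions without another network round trip.
cm = ClusterMetadata.new
cm.update(MetadataResponse.new(nil, brokers, topics))

cm.have_metadata_for_topics?(Set.new(["test"]))  # => true
cm.broker(1)                                     # => brokers.first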
data/spec/unit/compression_spec.rb
ADDED
@@ -0,0 +1,17 @@
require 'spec_helper'

describe Compression do
  it 'returns GzipCompressor for codec_id of 1' do
    codec = Compression.find_codec(1)
    expect(codec).to eq(Compression::GzipCodec)
  end

  it 'returns SnappyCompressor for codec_id of 2' do
    codec = Compression.find_codec(2)
    expect(codec).to eq(Compression::SnappyCodec)
  end

  it 'raises UnrecognizedCompressionCodec for codec_id of 3' do
    expect { Compression.find_codec(3) }.to raise_error(Compression::UnrecognizedCompressionCodec)
  end
end
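Note: for quick reference, the mapping asserted above follows Kafka's compression attribute ids:

Poseidon::Compression.find_codec(1)  # => Poseidon::Compression::GzipCodec
Poseidon::Compression.find_codec(2)  # => Poseidon::Compression::SnappyCodec
Poseidon::Compression.find_codec(3)  # raises Compression::UnrecognizedCompressionCodec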
data/spec/unit/fetched_message_spec.rb
ADDED
@@ -0,0 +1,11 @@
require 'spec_helper'

describe FetchedMessage do
  it "provides access to topic, value, key, offset" do
    mts = FetchedMessage.new("hello_topic", "Hello World", "key", 0)
    expect(mts.topic).to eq("hello_topic")
    expect(mts.value).to eq("Hello World")
    expect(mts.key).to eq("key")
    expect(mts.offset).to eq(0)
  end
end