poseidon 0.0.4 → 0.0.5.pre1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/.travis.yml +2 -0
  4. data/CHANGES.md +4 -0
  5. data/README.md +4 -9
  6. data/Rakefile +3 -0
  7. data/lib/poseidon.rb +41 -24
  8. data/lib/poseidon/broker_pool.rb +7 -3
  9. data/lib/poseidon/cluster_metadata.rb +17 -1
  10. data/lib/poseidon/connection.rb +33 -11
  11. data/lib/poseidon/message_conductor.rb +2 -2
  12. data/lib/poseidon/messages_for_broker.rb +17 -0
  13. data/lib/poseidon/messages_to_send.rb +4 -4
  14. data/lib/poseidon/partition_consumer.rb +67 -24
  15. data/lib/poseidon/producer.rb +4 -1
  16. data/lib/poseidon/protocol/request_buffer.rb +12 -4
  17. data/lib/poseidon/sync_producer.rb +55 -22
  18. data/lib/poseidon/topic_metadata.rb +23 -8
  19. data/lib/poseidon/version.rb +1 -1
  20. data/log/.gitkeep +0 -0
  21. data/poseidon.gemspec +2 -2
  22. data/spec/integration/multiple_brokers/consumer_spec.rb +1 -1
  23. data/spec/integration/multiple_brokers/metadata_failures_spec.rb +35 -0
  24. data/spec/integration/multiple_brokers/rebalance_spec.rb +67 -0
  25. data/spec/integration/multiple_brokers/round_robin_spec.rb +4 -4
  26. data/spec/integration/multiple_brokers/spec_helper.rb +29 -7
  27. data/spec/integration/simple/compression_spec.rb +1 -0
  28. data/spec/integration/simple/connection_spec.rb +1 -1
  29. data/spec/integration/simple/simple_producer_and_consumer_spec.rb +25 -2
  30. data/spec/integration/simple/spec_helper.rb +2 -2
  31. data/spec/integration/simple/truncated_messages_spec.rb +1 -1
  32. data/spec/integration/simple/unavailable_broker_spec.rb +9 -16
  33. data/spec/spec_helper.rb +3 -0
  34. data/spec/test_cluster.rb +51 -48
  35. data/spec/unit/broker_pool_spec.rb +28 -7
  36. data/spec/unit/cluster_metadata_spec.rb +3 -3
  37. data/spec/unit/message_conductor_spec.rb +27 -14
  38. data/spec/unit/messages_to_send_spec.rb +3 -3
  39. data/spec/unit/partition_consumer_spec.rb +28 -10
  40. data/spec/unit/sync_producer_spec.rb +16 -12
  41. metadata +24 -35
  42. data/spec/bin/kafka-run-class.sh +0 -65
@@ -14,6 +14,7 @@ describe "compression" do
14
14
  messages = [MessageToSend.new("test12", "Hello World: #{i}")]
15
15
 
16
16
  expect(@producer.send_messages(messages)).to eq(true)
17
+ sleep 1
17
18
  messages = @consumer.fetch
18
19
  expect(messages.last.value).to eq("Hello World: #{i}")
19
20
  end
@@ -3,7 +3,7 @@ require 'integration/simple/spec_helper'
3
3
  include Protocol
4
4
  describe Connection do
5
5
  before(:each) do
6
- @connection = Connection.new("localhost", 9092, "test")
6
+ @connection = Connection.new("localhost", 9092, "test", 10_000)
7
7
  end
8
8
 
9
9
  it 'sends and parses topic metadata requests' do
@@ -29,8 +29,11 @@ describe "simple producer and consumer" do
29
29
  @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
30
30
  "topic_simple_producer_and_consumer", 0, -1)
31
31
 
32
- # Read up to the end of the current messages
33
- @consumer.fetch
32
+ # Read up to the end of the current messages (if there are any)
33
+ begin
34
+ @consumer.fetch
35
+ rescue Errors::UnknownTopicOrPartition
36
+ end
34
37
 
35
38
  # First Batch
36
39
  messages = [MessageToSend.new("topic_simple_producer_and_consumer", "Hello World")]
@@ -51,6 +54,25 @@ describe "simple producer and consumer" do
51
54
  expect(messages.empty?).to eq(true)
52
55
  end
53
56
 
57
+ it "waits for messages" do
58
+ # Create topic
59
+ @c = Connection.new("localhost", 9092, "metadata_fetcher", 10_000)
60
+ @c.topic_metadata(["simple_wait_test"])
61
+
62
+ sleep 5
63
+ @consumer = PartitionConsumer.new("test_consumer", "localhost", 9092,
64
+ "simple_wait_test", 0, :earliest_offset,
65
+ :max_wait_ms => 2500)
66
+
67
+ require 'benchmark'
68
+ n = Benchmark.realtime do
69
+ @consumer.fetch
70
+ end
71
+ expect(n).to be_within(0.25).of(2.5)
72
+ end
73
+
74
+ # Not sure what's going on here, will revisit.
75
+ =begin
54
76
  it "fetches larger messages with a larger max bytes size" do
55
77
  @producer = Producer.new(["localhost:9092"],
56
78
  "test_client",
@@ -74,6 +96,7 @@ describe "simple producer and consumer" do
74
96
  messages = @consumer.fetch(:max_bytes => 1400000)
75
97
  expect(messages.length).to be > 2
76
98
  end
99
+ =end
77
100
  end
78
101
 
79
102
  describe "broker that becomes unavailable" do
@@ -3,7 +3,7 @@ require 'spec_helper'
3
3
  require 'test_cluster'
4
4
 
5
5
  RSpec.configure do |config|
6
- config.before(:suite) do
6
+ config.before(:each) do
7
7
  JavaRunner.remove_tmp
8
8
  JavaRunner.set_kafka_path!
9
9
  $tc = TestCluster.new
@@ -11,7 +11,7 @@ RSpec.configure do |config|
11
11
  sleep 5
12
12
  end
13
13
 
14
- config.after(:suite) do
14
+ config.after(:each) do
15
15
  $tc.stop
16
16
  end
17
17
  end
@@ -1,7 +1,7 @@
1
1
  require 'integration/simple/spec_helper'
2
2
 
3
3
  describe "truncated messages" do
4
- before(:all) do
4
+ before(:each) do
5
5
  @s1 = "a" * 335
6
6
  @s2 = "b" * 338
7
7
 
@@ -27,7 +27,9 @@ describe "unavailable broker scenarios:" do
27
27
  expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)
28
28
 
29
29
  $tc.broker.without_process do
30
- expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(false)
30
+ expect {
31
+ @p.send_messages([MessageToSend.new("test", "hello")])
32
+ }.to raise_error(Poseidon::Errors::UnableToFetchMetadata)
31
33
  end
32
34
  end
33
35
  end
@@ -37,7 +39,9 @@ describe "unavailable broker scenarios:" do
37
39
  expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)
38
40
 
39
41
  $tc.broker.without_process do
40
- expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(false)
42
+ expect {
43
+ @p.send_messages([MessageToSend.new("test", "hello")])
44
+ }.to raise_error(Poseidon::Errors::UnableToFetchMetadata)
41
45
  end
42
46
 
43
47
  expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)
@@ -56,22 +60,11 @@ describe "unavailable broker scenarios:" do
56
60
 
57
61
  $tc.broker.without_process do
58
62
  @p.send_messages([MessageToSend.new("test", "hello_b")])
59
- expect(@p.send_messages([MessageToSend.new("test", "hello_b")])).to eq(false)
63
+ expect {
64
+ @p.send_messages([MessageToSend.new("test", "hello_b")])
65
+ }.to raise_error(Poseidon::Errors::UnableToFetchMetadata)
60
66
  end
61
67
  end
62
68
  end
63
-
64
- context "broker stops running but starts again" do
65
- it "sends succesfully once broker returns" do
66
- expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)
67
-
68
- $tc.broker.without_process do
69
- @p.send_messages([MessageToSend.new("test", "hello")])
70
- expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(false)
71
- end
72
-
73
- expect(@p.send_messages([MessageToSend.new("test", "hello")])).to eq(true)
74
- end
75
- end
76
69
  end
77
70
  end
@@ -22,6 +22,9 @@ end
22
22
 
23
23
  POSEIDON_PATH = File.absolute_path(File.dirname(__FILE__) + "/../")
24
24
 
25
+ require 'logger'
26
+ SPEC_LOGGER = Logger.new(File.join(POSEIDON_PATH, "log", "spec.log"))
27
+
25
28
  require 'simplecov'
26
29
  SimpleCov.start
27
30
 
@@ -1,5 +1,3 @@
1
- require 'daemon_controller'
2
-
3
1
  class TestCluster
4
2
  attr_reader :broker, :zookeeper
5
3
  def initialize
@@ -13,8 +11,12 @@ class TestCluster
13
11
  end
14
12
 
15
13
  def stop
16
- @zookeeper.stop
14
+ # The broker will end up in a state where it ignores SIGTERM
15
+ # if zookeeper is stopped before the broker.
17
16
  @broker.stop
17
+ sleep 5
18
+
19
+ @zookeeper.stop
18
20
  end
19
21
  end
20
22
 
@@ -40,13 +42,13 @@ class JavaRunner
40
42
  @kafka_path
41
43
  end
42
44
 
43
- attr_reader :pid
44
- def initialize(id, start_cmd, port, properties = {})
45
+ def initialize(id, start_cmd, pid_cmd, kill_signal, properties = {})
45
46
  @id = id
46
47
  @properties = properties
47
- @pid = nil
48
48
  @start_cmd = start_cmd
49
- @port = port
49
+ @pid_cmd = pid_cmd
50
+ @kill_signal = kill_signal
51
+ @stopped = false
50
52
  end
51
53
 
52
54
  def start
@@ -55,11 +57,31 @@ class JavaRunner
55
57
  end
56
58
 
57
59
  def stop
58
- daemon_controller.stop
60
+ if !@stopped
61
+ killed_at = Time.now
62
+ loop do
63
+ if (pid = `#{@pid_cmd}`.to_i) == 0
64
+ SPEC_LOGGER.info "Killed."
65
+ break
66
+ end
67
+
68
+ if Time.now - killed_at > 30
69
+ raise "Failed to kill process!"
70
+ end
71
+
72
+ SPEC_LOGGER.info "Sending #{@kill_signal} To #{pid}"
73
+ SPEC_LOGGER.info "(#{@start_cmd})"
74
+ `kill -#{@kill_signal} #{pid}`
75
+
76
+ sleep 5
77
+ end
78
+ @stopped = true
79
+ end
59
80
  end
60
81
 
61
82
  def without_process
62
83
  stop
84
+ sleep 5
63
85
  begin
64
86
  yield
65
87
  ensure
@@ -70,21 +92,10 @@ class JavaRunner
70
92
 
71
93
  private
72
94
 
73
- def daemon_controller
74
- @dc ||= DaemonController.new(
75
- :identifier => @id,
76
- :start_command => "#{@start_cmd} #{config_path} >>#{log_path} 2>&1 & echo $! > #{pid_path}",
77
- :ping_command => [:tcp, '127.0.0.1', @port],
78
- :pid_file => pid_path,
79
- :log_file => log_path,
80
- :start_timeout => 25
81
- )
82
- end
83
-
84
95
  def run
85
96
  FileUtils.mkdir_p(log_dir)
86
- FileUtils.mkdir_p(pid_dir)
87
- daemon_controller.start
97
+ `LOG_DIR=#{log_dir} #{@start_cmd} #{config_path}`
98
+ @stopped = false
88
99
  end
89
100
 
90
101
  def write_properties
@@ -96,18 +107,6 @@ class JavaRunner
96
107
  end
97
108
  end
98
109
 
99
- def pid_path
100
- "#{pid_dir}/#{@id}.pid"
101
- end
102
-
103
- def pid_dir
104
- "#{file_path}/pid"
105
- end
106
-
107
- def log_path
108
- "#{log_dir}/#{@id}.log"
109
- end
110
-
111
110
  def log_dir
112
111
  "#{file_path}/log"
113
112
  end
@@ -140,31 +139,34 @@ class BrokerRunner
140
139
  "log.flush.interval.ms" => 1000,
141
140
  "log.retention.hours" => 168,
142
141
  "log.segment.bytes" => 536870912,
143
- "log.cleanup.interval.mins" => 1,
142
+ #"log.cleanup.interval.mins" => 1,
144
143
  "zookeeper.connect" => "localhost:2181",
145
144
  "zookeeper.connection.timeout.ms" => 1000000,
146
- "kafka.metrics.polling.interval.secs" => 5,
147
- "kafka.metrics.reporters" => "kafka.metrics.KafkaCSVMetricsReporter",
148
- "kafka.csv.metrics.dir" => "#{POSEIDON_PATH}/tmp/kafka_metrics",
149
- "kafka.csv.metrics.reporter.enabled" => "false",
145
+ #"kafka.metrics.polling.interval.secs" => 5,
146
+ #"kafka.metrics.reporters" => "kafka.metrics.KafkaCSVMetricsReporter",
147
+ #"kafka.csv.metrics.dir" => "#{POSEIDON_PATH}/tmp/kafka_metrics",
148
+ #"kafka.csv.metrics.reporter.enabled" => "false",
149
+ "auto.create.topics.enable" => "true",
150
+
151
+ # Trigger rebalances often to catch edge cases.
152
+ "auto.leader.rebalance.enable" => "true",
153
+ "leader.imbalance.check.interval.seconds" => 5
150
154
  }
151
155
 
152
- def initialize(id, port, partition_count = 1)
156
+ def initialize(id, port, partition_count = 1, replication_factor = 1, properties = {})
153
157
  @id = id
154
158
  @port = port
155
159
  @jr = JavaRunner.new("broker_#{id}",
156
- "#{POSEIDON_PATH}/spec/bin/kafka-run-class.sh kafka.Kafka",
157
- port,
160
+ "#{ENV['KAFKA_PATH']}/bin/kafka-run-class.sh -daemon -name broker_#{id} kafka.Kafka",
161
+ "ps ax | grep -i 'kafka\.Kafka' | grep java | grep broker_#{id} | grep -v grep | awk '{print $1}'",
162
+ "SIGTERM",
158
163
  DEFAULT_PROPERTIES.merge(
159
164
  "broker.id" => id,
160
165
  "port" => port,
161
166
  "log.dir" => "#{POSEIDON_PATH}/tmp/kafka-logs_#{id}",
167
+ "default.replication.factor" => replication_factor,
162
168
  "num.partitions" => partition_count
163
- ))
164
- end
165
-
166
- def pid
167
- @jr.pid
169
+ ).merge(properties))
168
170
  end
169
171
 
170
172
  def start
@@ -184,8 +186,9 @@ end
184
186
  class ZookeeperRunner
185
187
  def initialize
186
188
  @jr = JavaRunner.new("zookeeper",
187
- "#{POSEIDON_PATH}/spec/bin/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain",
188
- 2181,
189
+ "#{ENV['KAFKA_PATH']}/bin/zookeeper-server-start.sh -daemon",
190
+ "ps ax | grep -i 'zookeeper' | grep -v grep | awk '{print $1}'",
191
+ "SIGKILL",
189
192
  :dataDir => "#{POSEIDON_PATH}/tmp/zookeeper",
190
193
  :clientPort => 2181,
191
194
  :maxClientCnxns => 0)
@@ -3,23 +3,37 @@ require 'spec_helper'
3
3
  describe BrokerPool do
4
4
  context "empty broker list" do
5
5
  it "raises UnknownBroker error when trying to produce data" do
6
- expect { BrokerPool.new("test_client", []).execute_api_call(0, :produce) }.to raise_error(BrokerPool::UnknownBroker)
6
+ expect { BrokerPool.new("test_client", [], 10_000).execute_api_call(0, :produce) }.to raise_error(BrokerPool::UnknownBroker)
7
7
  end
8
8
  end
9
9
 
10
10
  describe "fetching metadata" do
11
+ context "single broker" do
12
+ it "initializes connection properly" do
13
+ @broker_pool = BrokerPool.new("test_client", ["localhost:9092"], 2_000)
14
+ @broker = double('Poseidon::Connection', :topic_metadata => nil)
15
+
16
+ expected_args = ["localhost", "9092", "test_client", 2_000]
17
+ connection = stub('conn').as_null_object
18
+
19
+ Connection.should_receive(:new).with(*expected_args).and_return(connection)
20
+
21
+ @broker_pool.fetch_metadata(Set.new)
22
+ end
23
+ end
24
+
11
25
  context "no seed brokers" do
12
26
  it "raises Error" do
13
- @broker_pool = BrokerPool.new("test_client", [])
27
+ @broker_pool = BrokerPool.new("test_client", [], 10_000)
14
28
  expect { @broker_pool.fetch_metadata(Set.new) }.to raise_error(Errors::UnableToFetchMetadata)
15
29
  end
16
30
  end
17
31
 
18
32
  context "2 seed brokers" do
19
33
  before(:each) do
20
- @broker_pool = BrokerPool.new("test_client", ["first:9092","second:9092"])
21
- @broker_1 = double('Poseidon::Connection_1', :topic_metadata => nil)
22
- @broker_2 = double('Poseidon::Connection_2', :topic_metadata => double('topic_metadata').as_null_object)
34
+ @broker_pool = BrokerPool.new("test_client", ["first:9092","second:9092"], 10_000)
35
+ @broker_1 = double('Poseidon::Connection_1', :topic_metadata => nil, :close => nil)
36
+ @broker_2 = double('Poseidon::Connection_2', :topic_metadata => double('topic_metadata').as_null_object, :close => nil)
23
37
  Connection.stub!(:new).and_return(@broker_1, @broker_2)
24
38
  end
25
39
 
@@ -30,12 +44,19 @@ describe BrokerPool do
30
44
  @broker_pool.fetch_metadata(Set.new)
31
45
  end
32
46
  end
47
+
48
+ it "cleans up its connections" do
49
+ @broker_1.should_receive(:close)
50
+ @broker_2.should_receive(:close)
51
+
52
+ @broker_pool.fetch_metadata(Set.new)
53
+ end
33
54
  end
34
55
  end
35
56
 
36
57
  context "which knowns about two brokers" do
37
58
  before(:each) do
38
- @broker_pool = BrokerPool.new("test_client", [])
59
+ @broker_pool = BrokerPool.new("test_client", [], 10_000)
39
60
  @broker_pool.update_known_brokers({0 => { :host => "localhost", :port => 9092 }, 1 => {:host => "localhost", :port => 9093 }})
40
61
  end
41
62
 
@@ -43,7 +64,7 @@ describe BrokerPool do
43
64
 
44
65
  it "creates a connection for the correct broker" do
45
66
  c = stub('conn').as_null_object
46
- expected_args = ["localhost", 9092, "test_client"]
67
+ expected_args = ["localhost", 9092, "test_client", 10_000]
47
68
 
48
69
  Connection.should_receive(:new).with(*expected_args).and_return(c)
49
70
  @broker_pool.execute_api_call(0, :produce)
@@ -5,10 +5,10 @@ describe ClusterMetadata do
5
5
  describe "populated" do
6
6
  before(:each) do
7
7
  partitions = [
8
- PartitionMetadata.new(nil, 1, 1, [1,2], [1,2]),
9
- PartitionMetadata.new(nil, 2, 2, [2,1], [2,1])
8
+ PartitionMetadata.new(0, 1, 1, [1,2], [1,2]),
9
+ PartitionMetadata.new(0, 2, 2, [2,1], [2,1])
10
10
  ]
11
- topics = [TopicMetadata.new(TopicMetadataStruct.new(nil, "test", partitions))]
11
+ topics = [TopicMetadata.new(TopicMetadataStruct.new(0, "test", partitions))]
12
12
 
13
13
  brokers = [Broker.new(1, "host1", 1), Broker.new(2, "host2", 2)]
14
14
 
@@ -2,16 +2,18 @@ require 'spec_helper'
2
2
 
3
3
  include Protocol
4
4
  describe MessageConductor do
5
- context "two avialable partitions" do
5
+ context "two available partitions" do
6
6
  before(:each) do
7
7
  partitions = [
8
- PartitionMetadata.new(nil, 0, 1, [1,2], [1,2]),
9
- PartitionMetadata.new(nil, 1, 2, [2,1], [2,1])
8
+ # These are intentionally not ordered by partition_id.
9
+ # [:error, :id, :leader, :replicas, :isr]
10
+ PartitionMetadata.new(0, 1, 2, [2,1], [2,1]),
11
+ PartitionMetadata.new(0, 0, 1, [1,2], [1,2])
10
12
  ]
11
- topics = [TopicMetadata.new(TopicMetadataStruct.new(nil, "test", partitions))]
13
+ topics = [TopicMetadata.new(TopicMetadataStruct.new(0, "test", partitions))]
12
14
  brokers = [Broker.new(1, "host1", 1), Broker.new(2, "host2", 2)]
13
15
 
14
- @mr = MetadataResponse.new(nil, brokers, topics)
16
+ @mr = MetadataResponse.new(0, brokers, topics)
15
17
 
16
18
  @cm = ClusterMetadata.new
17
19
  @cm.update(@mr)
@@ -69,6 +71,17 @@ describe MessageConductor do
69
71
  end
70
72
  end
71
73
 
74
+ context "partitioner always sends to partition 1" do
75
+ before(:each) do
76
+ partitioner = Proc.new { 1 }
77
+ @mc = MessageConductor.new(@cm, partitioner)
78
+ end
79
+
80
+ it "sends to partition 1 on broker 2" do
81
+ expect(@mc.destination("test", "2_hello")).to eq([1,2])
82
+ end
83
+ end
84
+
72
85
  context "broken partitioner" do
73
86
  before(:each) do
74
87
  partitioner = Proc.new { |key, count| count + 1 }
@@ -84,13 +97,13 @@ describe MessageConductor do
84
97
  context "two partitions, one is unavailable" do
85
98
  before(:each) do
86
99
  partitions = [
87
- Protocol::PartitionMetadata.new(nil, 0, 1, [1,2], [1,2]),
88
- Protocol::PartitionMetadata.new(nil, 1, nil, [2,1], [2,1])
100
+ Protocol::PartitionMetadata.new(0, 0, 1, [1,2], [1,2]),
101
+ Protocol::PartitionMetadata.new(0, 1, -1, [2,1], [2,1])
89
102
  ]
90
- topics = [TopicMetadata.new(TopicMetadataStruct.new(nil, "test", partitions))]
103
+ topics = [TopicMetadata.new(TopicMetadataStruct.new(0, "test", partitions))]
91
104
  brokers = [Broker.new(1, "host1", 1), Broker.new(2, "host2", 2)]
92
105
 
93
- @mr = MetadataResponse.new(nil, brokers, topics)
106
+ @mr = MetadataResponse.new(0, brokers, topics)
94
107
 
95
108
  @cm = ClusterMetadata.new
96
109
  @cm.update(@mr)
@@ -120,13 +133,13 @@ describe MessageConductor do
120
133
  context "no available partitions" do
121
134
  before(:each) do
122
135
  partitions = [
123
- Protocol::PartitionMetadata.new(nil, 0, nil, [1,2], [1,2]),
124
- Protocol::PartitionMetadata.new(nil, 1, nil, [2,1], [2,1])
136
+ Protocol::PartitionMetadata.new(0, 0, -1, [1,2], [1,2]),
137
+ Protocol::PartitionMetadata.new(0, 1, -1, [2,1], [2,1])
125
138
  ]
126
- topics = [TopicMetadata.new(TopicMetadataStruct.new(nil, "test", partitions))]
139
+ topics = [TopicMetadata.new(TopicMetadataStruct.new(0, "test", partitions))]
127
140
  brokers = [Broker.new(1, "host1", 1), Broker.new(2, "host2", 2)]
128
141
 
129
- @mr = MetadataResponse.new(nil, brokers, topics)
142
+ @mr = MetadataResponse.new(0, brokers, topics)
130
143
 
131
144
  @cm = ClusterMetadata.new
132
145
  @cm.update(@mr)
@@ -136,7 +149,7 @@ describe MessageConductor do
136
149
 
137
150
  context "keyless message" do
138
151
  it "return -1 for broker and partition" do
139
- expect(@mc.destination("test").first).to eq(-1)
152
+ expect(@mc.destination("test")).to eq([-1,-1])
140
153
  end
141
154
  end
142
155