kafkat-onfocusio 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. checksums.yaml +7 -0
  2. data/.gitignore +8 -0
  3. data/.rspec +2 -0
  4. data/.simplecov +5 -0
  5. data/.travis.yml +10 -0
  6. data/CHANGELOG.md +12 -0
  7. data/Gemfile +2 -0
  8. data/LICENSE.txt +202 -0
  9. data/README.md +72 -0
  10. data/Rakefile +25 -0
  11. data/_kafkat +21 -0
  12. data/bin/kafkat +4 -0
  13. data/kafkat.gemspec +34 -0
  14. data/lib/kafkat/cli.rb +71 -0
  15. data/lib/kafkat/cluster/assignment.rb +4 -0
  16. data/lib/kafkat/cluster/broker.rb +4 -0
  17. data/lib/kafkat/cluster/partition.rb +11 -0
  18. data/lib/kafkat/cluster/topic.rb +4 -0
  19. data/lib/kafkat/cluster.rb +4 -0
  20. data/lib/kafkat/command/brokers.rb +16 -0
  21. data/lib/kafkat/command/clean-indexes.rb +30 -0
  22. data/lib/kafkat/command/cluster_restart.rb +336 -0
  23. data/lib/kafkat/command/controller.rb +18 -0
  24. data/lib/kafkat/command/drain.rb +109 -0
  25. data/lib/kafkat/command/elect-leaders.rb +31 -0
  26. data/lib/kafkat/command/partitions.rb +50 -0
  27. data/lib/kafkat/command/reassign.rb +80 -0
  28. data/lib/kafkat/command/resign-rewrite.rb +76 -0
  29. data/lib/kafkat/command/set-replication-factor.rb +173 -0
  30. data/lib/kafkat/command/shutdown.rb +30 -0
  31. data/lib/kafkat/command/topics.rb +16 -0
  32. data/lib/kafkat/command/verify-reassign.rb +18 -0
  33. data/lib/kafkat/command/verify-replicas.rb +92 -0
  34. data/lib/kafkat/command.rb +70 -0
  35. data/lib/kafkat/config.rb +50 -0
  36. data/lib/kafkat/interface/admin.rb +115 -0
  37. data/lib/kafkat/interface/kafka_logs.rb +54 -0
  38. data/lib/kafkat/interface/zookeeper.rb +178 -0
  39. data/lib/kafkat/interface.rb +3 -0
  40. data/lib/kafkat/reboot.rb +0 -0
  41. data/lib/kafkat/utility/command_io.rb +21 -0
  42. data/lib/kafkat/utility/formatting.rb +68 -0
  43. data/lib/kafkat/utility/logging.rb +7 -0
  44. data/lib/kafkat/utility.rb +4 -0
  45. data/lib/kafkat/version.rb +3 -0
  46. data/lib/kafkat.rb +14 -0
  47. data/spec/factories/topic.rb +53 -0
  48. data/spec/lib/kafkat/command/cluster_restart_spec.rb +197 -0
  49. data/spec/lib/kafkat/command/drain_spec.rb +59 -0
  50. data/spec/lib/kafkat/command/verify-replicas_spec.rb +50 -0
  51. data/spec/spec_helper.rb +102 -0
  52. metadata +294 -0
@@ -0,0 +1,178 @@
1
require 'thread'
require 'zk'

module Kafkat
  module Interface
    # Thin wrapper around ZooKeeper that reads Kafka's cluster metadata
    # (brokers, topics, partitions, controller) and can rewrite a
    # partition's leader with a versioned (compare-and-set) write.
    class Zookeeper
      # Raised when a requested znode does not exist.
      class NotFoundError < StandardError; end
      # Raised when a versioned write is rejected because the znode
      # changed between the read and the write.
      class WriteConflictError < StandardError; end

      attr_reader :zk_path

      # config must respond to #zk_path (ZooKeeper connection string).
      def initialize(config)
        @zk_path = config.zk_path
      end

      # Returns the ids of all registered brokers (as strings).
      def get_broker_ids
        zk.children(brokers_path)
      end

      # Fetches brokers in parallel; +ids+ defaults to all registered ids.
      # Returns a hash of integer broker id => Broker. Brokers that fail
      # to load (e.g. deregistered mid-fetch) are silently skipped.
      def get_brokers(ids = nil)
        brokers = {}
        ids ||= zk.children(brokers_path)

        threads = ids.map do |id|
          id = id.to_i
          Thread.new do
            begin
              brokers[id] = get_broker(id)
            rescue StandardError
              # Best-effort: a broker can disappear while we read it.
            end
          end
        end
        threads.each(&:join)

        brokers
      end

      # Reads one broker's registration JSON and builds a Broker.
      # Raises NotFoundError if the broker znode is missing.
      def get_broker(id)
        path = broker_path(id)
        string = zk.get(path).first
        json = JSON.parse(string)
        host, port = json['host'], json['port']
        Broker.new(id, host, port)
      rescue ZK::Exceptions::NoNode
        raise NotFoundError
      end

      # Returns the names of all topics.
      def get_topic_names
        zk.children(topics_path)
      end

      # Fetches topics in parallel; +names+ defaults to all topics.
      # Exits the process with status 1 if any topic fails to load.
      def get_topics(names = nil)
        error_msgs = {}
        topics = {}

        if names.nil?
          pool.with_connection do |cnx|
            names = cnx.children(topics_path)
          end
        end

        threads = names.map do |name|
          Thread.new do
            begin
              topics[name] = get_topic(name)
            rescue => e
              error_msgs[name] = e
            end
          end
        end
        threads.each(&:join)

        unless error_msgs.empty?
          STDERR.print "ERROR: zk cmds failed on get_topics: \n#{error_msgs.values.join("\n")}\n"
          exit 1
        end
        topics
      end

      # Builds a Topic with its partitions (replicas, leader, ISR),
      # reading each partition's state znode in parallel.
      # Raises NotFoundError if the topic does not exist.
      def get_topic(name)
        partition_queue = Queue.new
        path1 = topic_path(name)
        path2 = topic_partitions_path(name)

        partitions = []
        topic_string = pool.with_connection { |cnx| cnx.get(path1).first }
        partition_ids = pool.with_connection { |cnx| cnx.children(path2) }

        topic_json = JSON.parse(topic_string)

        threads = partition_ids.map do |id|
          id = id.to_i

          Thread.new do
            path3 = topic_partition_state_path(name, id)
            partition_string = pool.with_connection { |cnx| cnx.get(path3).first }
            partition_json = JSON.parse(partition_string)
            # Replica list lives in the topic znode, keyed by partition id
            # as a string; leader/isr live in the partition state znode.
            replicas = topic_json['partitions'][id.to_s]
            leader = partition_json['leader']
            isr = partition_json['isr']

            partition_queue << Partition.new(name, id, replicas, leader, isr)
          end
        end
        threads.each(&:join)

        until partition_queue.empty? do
          partitions << partition_queue.pop
        end

        partitions.sort_by!(&:id)
        Topic.new(name, partitions)
      rescue ZK::Exceptions::NoNode
        raise NotFoundError
      end

      # Returns the Broker currently acting as cluster controller.
      # Raises NotFoundError if no controller is registered.
      def get_controller
        string = zk.get(controller_path).first
        controller_json = JSON.parse(string)
        controller_id = controller_json['brokerid']
        get_broker(controller_id)
      rescue ZK::Exceptions::NoNode
        raise NotFoundError
      end

      # Rewrites the partition's leader using a versioned (CAS) write.
      # Raises WriteConflictError if the state znode changed since read.
      def write_leader(partition, broker_id)
        path = topic_partition_state_path(partition.topic_name, partition.id)
        string, stat = zk.get(path)

        partition_json = JSON.parse(string)
        partition_json['leader'] = broker_id
        new_string = JSON.dump(partition_json)

        unless zk.set(path, new_string, version: stat.version)
          # BUGFIX: previously raised ChangedDuringUpdateError, which is
          # not defined in this file — raising it would itself fail with
          # NameError. WriteConflictError (defined above, previously
          # unused) appears to be the intended conflict error.
          raise WriteConflictError
        end
      end

      private

      # Connection pool used by the parallel topic readers.
      def pool
        @pool ||= ZK.new_pool(zk_path, :min_clients => 10, :max_clients => 300, :timeout => 1)
      end

      # Single shared connection for the simple accessors.
      def zk
        @zk ||= ZK.new(zk_path)
      end

      def brokers_path
        '/brokers/ids'
      end

      def broker_path(id)
        "/brokers/ids/#{id}"
      end

      def topics_path
        '/brokers/topics'
      end

      def topic_path(name)
        "/brokers/topics/#{name}"
      end

      def topic_partitions_path(name)
        "/brokers/topics/#{name}/partitions"
      end

      def topic_partition_state_path(name, id)
        "/brokers/topics/#{name}/partitions/#{id}/state"
      end

      def controller_path
        "/controller"
      end
    end
  end
end
@@ -0,0 +1,3 @@
1
+ require 'kafkat/interface/admin'
2
+ require 'kafkat/interface/kafka_logs'
3
+ require 'kafkat/interface/zookeeper'
File without changes
@@ -0,0 +1,21 @@
1
module Kafkat
  # Mixin providing the interactive confirm-and-execute flow used by
  # commands that submit partition reassignments. Expects the including
  # class to provide #print_assignment_header, #print_assignment (from
  # Formatting), #admin (an Interface::Admin), and HighLine's #agree.
  module CommandIO
    # Prints the proposed assignments, asks the operator to confirm,
    # and submits them via #admin. Returns without submitting if the
    # operator declines.
    def prompt_and_execute_assignments(assignments)
      print "This operation executes the following assignments:\n\n"
      print_assignment_header
      assignments.each { |a| print_assignment(a) }
      print "\n"

      return unless agree("Proceed (y/n)?")

      result = nil
      begin
        print "\nBeginning.\n"
        result = admin.reassign!(assignments)
        print "Started.\n"
      rescue Interface::Admin::ExecutionFailedError
        # NOTE(review): when reassign! raises, +result+ is still nil, so
        # this prints nothing useful — confirm whether the intent was to
        # print the exception/partial output instead.
        print result
      end
    end
  end
end
@@ -0,0 +1,68 @@
1
module Kafkat
  # Helpers for rendering aligned, tab-separated CLI tables. Mixed into
  # command classes that print brokers, topics, partitions and
  # reassignment plans.
  module Formatting
    # Converts +field+ to a string and pads it with enough tab characters
    # to fill roughly +width+ tab stops (8 columns each); always appends
    # at least one tab so adjacent columns never touch.
    def justify(field, width=2)
      text = field.to_s
      tab_count = width - (text.length / 8)
      tab_count = 1 if tab_count < 1
      text + ("\t" * tab_count)
    end

    # One broker row: id and host:port socket address.
    def print_broker(broker)
      socket = "#{broker.host}:#{broker.port}"
      print justify(broker.id), justify(socket), "\n"
    end

    # Header row matching #print_broker.
    def print_broker_header
      print justify('Broker'), justify('Socket'), "\n"
    end

    # One topic row (name only).
    def print_topic(topic)
      print justify(topic.name), "\n"
    end

    # One row for a bare topic name string.
    def print_topic_name(topic_name)
      print justify(topic_name), "\n"
    end

    # Header row matching #print_topic / #print_topic_name.
    def print_topic_header
      print justify('Topic'), "\n"
    end

    # One partition row; replica/ISR lists get wider (7-stop) columns.
    # A nil leader is rendered as the literal string 'none'.
    def print_partition(partition)
      print justify(partition.topic_name),
            justify(partition.id),
            justify(partition.leader || 'none'),
            justify(partition.replicas.inspect, 7),
            justify(partition.isr.inspect, 7),
            "\n"
    end

    # Header row matching #print_partition.
    def print_partition_header
      print justify('Topic'),
            justify('Partition'),
            justify('Leader'),
            justify('Replicas', 7),
            justify('ISRs', 7),
            "\n"
    end

    # One reassignment row: topic, partition id, target replica list.
    def print_assignment(assignment)
      print justify(assignment.topic_name),
            justify(assignment.partition_id),
            justify(assignment.replicas.inspect),
            "\n"
    end

    # Header row matching #print_assignment.
    def print_assignment_header
      print justify('Topic'), justify('Partition'), justify('Replicas'), "\n"
    end
  end
end
@@ -0,0 +1,7 @@
1
module Kafkat
  # Minimal logging mixin shared by command classes.
  module Logging
    # Writes +message+ to the process's standard error stream.
    # No newline or formatting is added.
    def print_err(message)
      STDERR.print(message)
    end
  end
end
@@ -0,0 +1,4 @@
1
+ require 'kafkat/utility/formatting'
2
+ require 'kafkat/utility/command_io'
3
+ require 'kafkat/utility/logging'
4
+
@@ -0,0 +1,3 @@
1
module Kafkat
  # Gem version string (MAJOR.MINOR.PATCH style).
  VERSION = '0.3.1'
end
data/lib/kafkat.rb ADDED
@@ -0,0 +1,14 @@
1
+ require 'zk'
2
+ require 'json'
3
+ require 'trollop'
4
+ require 'retryable'
5
+ require 'highline/import'
6
+ require 'colored'
7
+
8
+ require 'kafkat/version'
9
+ require 'kafkat/utility'
10
+ require 'kafkat/config'
11
+ require 'kafkat/cluster'
12
+ require 'kafkat/interface'
13
+ require 'kafkat/command'
14
+ require 'kafkat/cli'
@@ -0,0 +1,53 @@
1
module Kafkat
  # FactoryGirl factories building Topic fixtures with various
  # replication layouts for the command specs. Partition.new arguments
  # are (topic_name, id, replicas, leader, isr), matching the order used
  # by Interface::Zookeeper#get_topic.
  FactoryGirl.define do
    factory :topic, class:Topic do
      name "topic_name"

      # Five single-replica partitions on brokers 0 and 1 only,
      # leaving broker 2 empty.
      factory :topic_with_one_empty_broker do
        partitions {[Partition.new(name, 0, [0], 0, [0]),
                     Partition.new(name, 1, [1], 1, [1]),
                     Partition.new(name, 2, [0], 0, [0]),
                     Partition.new(name, 3, [0], 0, [0]),
                     Partition.new(name, 4, [1], 1, [1])]}
      end

      # Five single-replica partitions spread across brokers 0-2.
      factory :topic_rep_factor_one do
        partitions {[Partition.new(name, 0, [0], 0, [0]),
                     Partition.new(name, 1, [1], 1, [1]),
                     Partition.new(name, 2, [2], 2, [2]),
                     Partition.new(name, 3, [0], 0, [0]),
                     Partition.new(name, 4, [1], 1, [1])]}
      end

      # Five partitions with two replicas each across brokers 0-2.
      factory :topic_rep_factor_two do
        partitions {[Partition.new(name, 0, [0, 1], 0, [0]),
                     Partition.new(name, 1, [0, 2], 2, [2]),
                     Partition.new(name, 2, [1, 2], 1, [1]),
                     Partition.new(name, 3, [0, 1], 0, [0]),
                     Partition.new(name, 4, [0, 2], 2, [2])]}
      end

      # Five partitions fully replicated on brokers 0-2.
      factory :topic_rep_factor_three do
        partitions {[Partition.new(name, 0, [0, 1, 2], 0, [0]),
                     Partition.new(name, 1, [0, 1, 2], 1, [1]),
                     Partition.new(name, 2, [0, 1, 2], 2, [2]),
                     Partition.new(name, 3, [0, 1, 2], 0, [0]),
                     Partition.new(name, 4, [0, 1, 2], 1, [1])]}
      end

      # Partition 1 carries an extra (fourth) replica on broker 6 —
      # used to exercise replica-count mismatch detection.
      factory :topic_rep_factor_three_with_four_replicas_in_partition1 do
        name "topic_name1"
        partitions [Partition.new("topic_name1", 0, [0, 1, 2], 0, [0]),
                    Partition.new("topic_name1", 1, [0, 1, 2, 6], 1, [1]),
                    Partition.new("topic_name1", 2, [0, 1, 2], 2, [2])]
      end

      # A second topic living entirely on brokers 3-5.
      factory :topic2_rep_factor_three do
        name "topic_name2"
        partitions [Partition.new("topic_name2", 0, [3, 4, 5], 0, [0]),
                    Partition.new("topic_name2", 1, [3, 4, 5], 0, [0]),
                    Partition.new("topic_name2", 2, [3, 4, 5], 1, [1])]
      end
    end
  end
end
@@ -0,0 +1,197 @@
1
require 'spec_helper'

module Kafkat
  module ClusterRestart

    # Specs for the rolling-restart helpers: Session state tracking,
    # the Next/Good subcommands, and the cost-based broker selection in
    # ClusterRestartHelper. The around hook isolates Session's on-disk
    # state by pointing HOME at a throwaway temp dir for each example.
    describe Kafkat::ClusterRestart do
      around(:all) do |example|
        prev_home = ENV['HOME']
        tmp = Dir.mktmpdir
        ENV['HOME'] = tmp
        begin
          example.run
        ensure
          FileUtils.rm_rf tmp
          ENV['HOME'] = prev_home
        end
      end

      describe Session do

        describe '#allBrokersRestarted?' do
          context 'when some brokers have not been restarted' do
            let (:session) {
              Session.new('broker_states' => {'1' => Session::STATE_NOT_RESTARTED, '2' => Session::STATE_RESTARTED})
            }

            it do
              expect(session.all_restarted?).to be false
            end
          end

          context 'when all brokers have been restarted' do
            let (:session) {
              Session.new('broker_states' => {'1' => Session::STATE_RESTARTED, '2' => Session::STATE_RESTARTED})
            }

            it do
              expect(session.all_restarted?).to be true
            end
          end
        end

        describe '#update_states!' do
          let (:session) {
            Session.new('broker_states' => {'1' => Session::STATE_NOT_RESTARTED, '2' => Session::STATE_RESTARTED})
          }

          # Unknown state names are rejected.
          it 'validates state' do
            expect { session.update_states!('my_state', []) }.to raise_error UnknownStateError
          end

          # Broker ids not present in the session are rejected.
          it 'validates broker ids' do
            expect { session.update_states!(Session::STATE_RESTARTED, ['1', '4']) }.to raise_error UnknownBrokerError
          end

          it 'changes the states' do
            session.update_states!(Session::STATE_PENDING, ['1'])
            expect(session.broker_states['1']).to eql(Session::STATE_PENDING)
          end
        end

      end


      describe Subcommands do
        # Fixture cluster: brokers 1-3 lead one partition each; broker 4
        # leads nothing (cheapest to restart first).
        let(:p1) { Partition.new('topic1', 'p1', ['1', '2', '3'], '1', 1) }
        let(:p2) { Partition.new('topic1', 'p2', ['1', '2', '3'], '2', 1) }
        let(:p3) { Partition.new('topic1', 'p3', ['2', '3', '4'], '3', 1) }
        let (:topics) {
          {
            'topic1' => Topic.new('topic1', [p1, p2, p3])
          }
        }
        let(:zookeeper) { double('zookeeper') }
        let(:broker_ids) { ['1', '2', '3', '4'] }
        let(:broker_4) { Broker.new('4', 'i-xxxxxx.inst.aws.airbnb.com', 9092) }
        let(:session) { Session.from_brokers(broker_ids) }

        describe Subcommands::Next do

          let(:next_command) { Subcommands::Next.new({}) }

          # `next` should pick the zero-leader broker (4) and mark it
          # pending, persisting the session.
          it 'execute next with 4 brokers and 3 partitions' do
            allow(zookeeper).to receive(:get_broker_ids).and_return(broker_ids)
            allow(zookeeper).to receive(:get_broker).and_return(broker_4)
            allow(zookeeper).to receive(:get_topics).and_return(topics)
            allow(Session).to receive(:exists?).and_return(true)
            allow(Session).to receive(:load!).and_return(session)
            allow(session).to receive(:save!)
            allow(next_command).to receive(:zookeeper).and_return(zookeeper)

            expect(Session).to receive(:load!)
            expect(session).to receive(:save!)

            next_command.run
            expect(next_command.session.broker_states['4']).to eq(Session::STATE_PENDING)
          end
        end

        describe Subcommands::Good do
          let(:good_command) { Subcommands::Good.new({}) }

          let(:session){
            Session.new('broker_states' => {'1' => Session::STATE_PENDING})
          }

          # `good` transitions a pending broker to restarted and saves.
          it 'set one broker to be restarted' do
            allow(Session).to receive(:exists?).and_return(true)
            allow(Session).to receive(:load!).and_return(session)
            allow(session).to receive(:save!)

            # expect(Session).to receive(:load!)
            expect(session).to receive(:save!)
            good_command.restart('1')
            expect(good_command.session.broker_states['1']).to eq(Session::STATE_RESTARTED)
          end
        end
      end

      describe ClusterRestartHelper do
        let(:p1) { Partition.new('topic1', 'p1', ['1', '2', '3'], '1', 1) }
        let(:p2) { Partition.new('topic1', 'p2', ['1', '2', '3'], '2', 1) }
        let(:p3) { Partition.new('topic1', 'p3', ['2', '3', '4'], '3', 1) }
        let (:topics) {
          {
            'topic1' => Topic.new('topic1', [p1, p2, p3])
          }
        }
        let(:zookeeper) { double('zookeeper') }
        let(:broker_ids) { ['1', '2', '3', '4'] }

        describe '#get_broker_to_leader_partition_mapping' do
          # Maps every broker id to the partitions it leads; brokers
          # leading nothing map to an empty list.
          it 'initialize a new mapping with 4 nodes' do
            broker_to_partition = ClusterRestartHelper.get_broker_to_leader_partition_mapping(topics)

            expect(broker_to_partition['1']).to eq([p1])
            expect(broker_to_partition['2']).to eq([p2])
            expect(broker_to_partition['3']).to eq([p3])
            expect(broker_to_partition['4']).to eq([])
          end
        end


        describe '#calculate_costs' do
          context 'when no restarted brokers' do
            it do
              broker_to_partition = ClusterRestartHelper.get_broker_to_leader_partition_mapping(topics)
              session = Session.from_brokers(broker_ids)

              expect(ClusterRestartHelper.calculate_cost('1', broker_to_partition['1'], session)).to eq(3)
              expect(ClusterRestartHelper.calculate_cost('2', broker_to_partition['2'], session)).to eq(3)
              expect(ClusterRestartHelper.calculate_cost('3', broker_to_partition['3'], session)).to eq(3)
              expect(ClusterRestartHelper.calculate_cost('4', broker_to_partition['4'], session)).to eq(0)
            end
          end

          # Restarted replicas no longer count toward a broker's cost.
          context 'when one broker has been restarted' do
            it do
              broker_to_partition = ClusterRestartHelper.get_broker_to_leader_partition_mapping(topics)
              session = Session.from_brokers(broker_ids)
              session.update_states!(Session::STATE_RESTARTED, ['4'])

              expect(ClusterRestartHelper.calculate_cost('1', broker_to_partition['1'], session)).to eq(3)
              expect(ClusterRestartHelper.calculate_cost('2', broker_to_partition['2'], session)).to eq(3)
              expect(ClusterRestartHelper.calculate_cost('3', broker_to_partition['3'], session)).to eq(2)
              expect(ClusterRestartHelper.calculate_cost('4', broker_to_partition['4'], session)).to eq(0)
            end
          end
        end

        describe '#select_broker_with_min_cost' do
          context 'when no restarted brokers' do
            it do
              session = Session.from_brokers(broker_ids)

              broker_id, cost = ClusterRestartHelper.select_broker_with_min_cost(session, topics)
              expect(broker_id).to eq('4')
              expect(cost).to eq(0)
            end
          end

          context 'when next selection after one broker is restarted' do
            it do
              session = Session.from_brokers(broker_ids)
              session.update_states!(Session::STATE_RESTARTED, ['4'])

              broker_id, cost = ClusterRestartHelper.select_broker_with_min_cost(session, topics)
              expect(broker_id).to eq('3')
              expect(cost).to eq(2)
            end
          end
        end
      end
    end
  end
end
@@ -0,0 +1,59 @@
1
require 'spec_helper'

module Kafkat
  # Specs for Command::Drain#generate_assignments: moving every replica
  # off a drained broker (id 0) onto the destination brokers (1 and 2),
  # preferring whichever destination currently holds fewer replicas.
  # Topic fixtures come from spec/factories/topic.rb.
  RSpec.describe Command::Drain do
    let(:drain) { Command::Drain.new({}) }
    let(:broker_id) { 0 }
    let(:destination_broker_ids) { [1, 2] }

    context 'three nodes with replication factor 1' do
      let(:topic_rep_factor_one) { FactoryGirl.build(:topic_rep_factor_one) }

      it 'should put replicas to broker with lowest number of replicas' do
        assignments = drain.generate_assignments(broker_id,
                                                 {"topic_name" => topic_rep_factor_one},
                                                 destination_broker_ids)
        expect(assignments).to have_exactly(2).items
        expect(assignments[0].replicas).to eq([2])
        expect(assignments[1].replicas).to eq([1])
      end
    end

    context 'three nodes with replication factor 2' do
      let(:topic_rep_factor_two) { FactoryGirl.build(:topic_rep_factor_two) }
      it 'should put replicas to broker with lowest number of replicas' do
        assignments = drain.generate_assignments(broker_id,
                                                 {"topic_name" => topic_rep_factor_two},
                                                 destination_broker_ids)
        expect(assignments).to have_exactly(4).items
        expect(assignments[0].replicas).to eq([2, 1])
        expect(assignments[1].replicas).to eq([1, 2])
        expect(assignments[2].replicas).to eq([2, 1])
        expect(assignments[3].replicas).to eq([1, 2])
      end
    end

    # Replication factor 3 cannot fit on 2 destination brokers, so the
    # command aborts the process (SystemExit) rather than shrinking ISR.
    context 'not enough brokers to keep all replicas' do
      let(:topic_rep_factor_three) { FactoryGirl.build(:topic_rep_factor_three) }

      it 'should raise SystemExit' do
        expect do
          drain.generate_assignments(broker_id,
                                     {"topic_name" => topic_rep_factor_three},
                                     destination_broker_ids)
        end.to raise_error(SystemExit)
      end
    end

    context 'one destination broker is empty' do
      let(:topic_with_one_empty_broker) { FactoryGirl.build(:topic_with_one_empty_broker) }
      it 'should not raise exception' do
        expect do
          drain.generate_assignments(broker_id,
                                     {"topic_name" => topic_with_one_empty_broker},
                                     destination_broker_ids)
        end.not_to raise_error
      end
    end
  end
end
@@ -0,0 +1,50 @@
1
require 'spec_helper'

module Kafkat
  # Specs for Command::VerifyReplicas#verify_replicas. As exercised
  # here it returns two hashes: per-topic {partition id => replica
  # count} and per-topic {replica count => number of partitions with
  # that count}. The first argument restricts the check to partitions
  # hosted on one broker id (nil = all brokers).
  RSpec.describe Command::VerifyReplicas do
    let(:verify_replicas) { Command::VerifyReplicas.new({}) }

    context 'two topics with replication factor 3' do
      # topic_name1 has an extra replica (broker 6) in partition 1.
      let(:topic_rep_factor_three_with_four_replicas_in_partition1) {
        FactoryGirl.build(:topic_rep_factor_three_with_four_replicas_in_partition1) }
      let(:topic2_rep_factor_three) { FactoryGirl.build(:topic2_rep_factor_three) }

      it 'should return empty mismatched partitions for all brokers' do
        partition_replica_size, partition_replica_size_stat = verify_replicas.verify_replicas(nil,
          {"topic_name2" => topic2_rep_factor_three})

        expect(partition_replica_size).to eq({"topic_name2" => {0 => 3, 1 => 3, 2 => 3}})
        expect(partition_replica_size_stat).to eq({"topic_name2" => {3 => 3}})
      end

      it 'should return topic 1 partition 1 for all brokers' do
        partition_replica_size, partition_replica_size_stat = verify_replicas.verify_replicas(nil,
          {"topic_name1" => topic_rep_factor_three_with_four_replicas_in_partition1,
           "topic_name2" => topic2_rep_factor_three})

        expect(partition_replica_size).to eq({"topic_name1" => {0 => 3, 1 => 4, 2 => 3}, "topic_name2" => {0 => 3, 1 => 3, 2 => 3}})
        expect(partition_replica_size_stat).to eq({"topic_name1" => {3 => 2, 4 => 1}, "topic_name2" => {3 => 3}})
      end

      # Broker 6 only hosts the over-replicated partition.
      it 'should return topic 1 partition 1 for broker 6' do
        partition_replica_size, partition_replica_size_stat = verify_replicas.verify_replicas(6,
          {"topic_name1" => topic_rep_factor_three_with_four_replicas_in_partition1,
           "topic_name2" => topic2_rep_factor_three})

        expect(partition_replica_size).to eq({"topic_name1" => {1 => 4}, "topic_name2" => {}})
        expect(partition_replica_size_stat).to eq({"topic_name1" => {4 => 1}, "topic_name2" => {}})
      end

      # Broker 3 hosts only topic_name2, which is consistently replicated.
      it 'should return empty mismatched partition for broker 3' do
        partition_replica_size, partition_replica_size_stat = verify_replicas.verify_replicas(3,
          {"topic_name1" => topic_rep_factor_three_with_four_replicas_in_partition1,
           "topic_name2" => topic2_rep_factor_three})

        expect(partition_replica_size).to eq({"topic_name1" => {}, "topic_name2" => {0 => 3, 1 => 3, 2 => 3}})
        expect(partition_replica_size_stat).to eq({"topic_name1" => {}, "topic_name2" => {3 => 3}})
      end
    end


  end
end