kafka-consumer 0.0.1

data/lib/kazoo/cluster.rb ADDED
@@ -0,0 +1,82 @@
+ module Kazoo
+   class Cluster
+
+     attr_reader :zookeeper, :chroot
+
+     def initialize(zookeeper, chroot: "")
+       @zookeeper, @chroot = zookeeper, chroot
+       @zk_mutex, @brokers_mutex, @topics_mutex, @consumergroups_mutex = Mutex.new, Mutex.new, Mutex.new, Mutex.new
+     end
+
+     def zk
+       @zk_mutex.synchronize do
+         @zk ||= Zookeeper.new(zookeeper)
+       end
+     end
+
+     def brokers
+       @brokers_mutex.synchronize do
+         @brokers ||= begin
+           brokers = zk.get_children(path: node_with_chroot("/brokers/ids"))
+           result, threads, mutex = {}, ThreadGroup.new, Mutex.new
+           brokers.fetch(:children).each do |id|
+             t = Thread.new do
+               broker_info = zk.get(path: node_with_chroot("/brokers/ids/#{id}"))
+               broker = Kazoo::Broker.from_json(self, id, JSON.parse(broker_info.fetch(:data)))
+               mutex.synchronize { result[id.to_i] = broker }
+             end
+             threads.add(t)
+           end
+           threads.list.each(&:join)
+           result
+         end
+       end
+     end
+
+     def consumergroups
+       @consumergroups ||= begin
+         consumers = zk.get_children(path: node_with_chroot("/consumers"))
+         consumers.fetch(:children).map { |name| Kazoo::Consumergroup.new(self, name) }
+       end
+     end
+
+     def topics
+       @topics_mutex.synchronize do
+         @topics ||= begin
+           topics = zk.get_children(path: node_with_chroot("/brokers/topics"))
+           result, threads, mutex = {}, ThreadGroup.new, Mutex.new
+           topics.fetch(:children).each do |name|
+             t = Thread.new do
+               topic_info = zk.get(path: node_with_chroot("/brokers/topics/#{name}"))
+               topic = Kazoo::Topic.from_json(self, name, JSON.parse(topic_info.fetch(:data)))
+               mutex.synchronize { result[name] = topic }
+             end
+             threads.add(t)
+           end
+           threads.list.each(&:join)
+           result
+         end
+       end
+     end
+
+     def partitions
+       topics.values.flat_map(&:partitions)
+     end
+
+     def reset_metadata
+       @topics, @brokers = nil, nil
+     end
+
+     def under_replicated?
+       partitions.any?(&:under_replicated?)
+     end
+
+     def node_with_chroot(path)
+       "#{@chroot}#{path}"
+     end
+
+     def close
+       zk.close
+     end
+   end
+ end
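For orientation (outside the diff itself), here is a minimal usage sketch of the Cluster API added above; the ZooKeeper connection string is an illustrative assumption:

require 'kazoo'

# Connect to the ZooKeeper ensemble backing the Kafka cluster
# (the connection string is an assumption for illustration).
cluster = Kazoo::Cluster.new("localhost:2181")

# Broker and topic metadata is fetched lazily and memoized per accessor.
cluster.topics.each_value do |topic|
  puts "#{topic.name}: #{topic.partitions.length} partitions"
end

puts "Cluster has under-replicated partitions" if cluster.under_replicated?

cluster.close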
data/lib/kazoo/consumergroup.rb ADDED
@@ -0,0 +1,229 @@
+ module Kazoo
+   class Consumergroup
+     attr_reader :cluster, :name
+
+     def initialize(cluster, name)
+       @cluster, @name = cluster, name
+     end
+
+     def create
+       cluster.zk.create(path: cluster.node_with_chroot("/consumers/#{name}"))
+       cluster.zk.create(path: cluster.node_with_chroot("/consumers/#{name}/ids"))
+       cluster.zk.create(path: cluster.node_with_chroot("/consumers/#{name}/owners"))
+       cluster.zk.create(path: cluster.node_with_chroot("/consumers/#{name}/offsets"))
+     end
+
+     def exists?
+       stat = cluster.zk.stat(path: cluster.node_with_chroot("/consumers/#{name}"))
+       stat.fetch(:stat).exists?
+     end
+
+     def instantiate(id: nil)
+       Instance.new(self, id: id)
+     end
+
+     def instances
+       instances = cluster.zk.get_children(path: cluster.node_with_chroot("/consumers/#{name}/ids"))
+       instances.fetch(:children).map { |id| Instance.new(self, id: id) }
+     end
+
+     def watch_instances(&block)
+       cb = Zookeeper::Callbacks::WatcherCallback.create(&block)
+       result = cluster.zk.get_children(
+         path: cluster.node_with_chroot("/consumers/#{name}/ids"),
+         watcher: cb,
+       )
+
+       instances = result.fetch(:children).map { |id| Instance.new(self, id: id) }
+
+       if result.fetch(:rc) != Zookeeper::Constants::ZOK
+         raise Kazoo::Error, "Failed to watch instances. Error code: #{result.fetch(:rc)}"
+       end
+
+       [instances, cb]
+     end
+
+     def watch_partition_claim(partition, &block)
+       cb = Zookeeper::Callbacks::WatcherCallback.create(&block)
+
+       result = cluster.zk.get(
+         path: cluster.node_with_chroot("/consumers/#{name}/owners/#{partition.topic.name}/#{partition.id}"),
+         watcher: cb
+       )
+
+       case result.fetch(:rc)
+       when Zookeeper::Constants::ZNONODE # Nobody is claiming this partition yet
+         [nil, nil]
+       when Zookeeper::Constants::ZOK
+         [Kazoo::Consumergroup::Instance.new(self, id: result.fetch(:data)), cb]
+       else
+         raise Kazoo::Error, "Failed to set watch for partition claim of #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
+       end
+     end
+
+     def retrieve_offset(partition)
+       result = cluster.zk.get(path: cluster.node_with_chroot("/consumers/#{name}/offsets/#{partition.topic.name}/#{partition.id}"))
+       case result.fetch(:rc)
+       when Zookeeper::Constants::ZOK
+         result.fetch(:data).to_i
+       when Zookeeper::Constants::ZNONODE
+         nil
+       else
+         raise Kazoo::Error, "Failed to retrieve offset for partition #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
+       end
+     end
+
+     def commit_offset(partition, offset)
+       result = cluster.zk.set(
+         path: cluster.node_with_chroot("/consumers/#{name}/offsets/#{partition.topic.name}/#{partition.id}"),
+         data: (offset + 1).to_s
+       )
+
+       if result.fetch(:rc) == Zookeeper::Constants::ZNONODE
+         result = cluster.zk.create(path: cluster.node_with_chroot("/consumers/#{name}/offsets/#{partition.topic.name}"))
+         case result.fetch(:rc)
+         when Zookeeper::Constants::ZOK, Zookeeper::Constants::ZNODEEXISTS
+         else
+           raise Kazoo::Error, "Failed to commit offset #{offset} for partition #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
+         end
+
+         result = cluster.zk.create(
+           path: cluster.node_with_chroot("/consumers/#{name}/offsets/#{partition.topic.name}/#{partition.id}"),
+           data: (offset + 1).to_s
+         )
+       end
+
+       if result.fetch(:rc) != Zookeeper::Constants::ZOK
+         raise Kazoo::Error, "Failed to commit offset #{offset} for partition #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
+       end
+     end
+
+     def reset_offsets
+       result = cluster.zk.get_children(path: cluster.node_with_chroot("/consumers/#{name}/offsets"))
+       raise Kazoo::Error unless result.fetch(:rc) == Zookeeper::Constants::ZOK
+
+       result.fetch(:children).each do |topic|
+         result = cluster.zk.get_children(path: cluster.node_with_chroot("/consumers/#{name}/offsets/#{topic}"))
+         raise Kazoo::Error unless result.fetch(:rc) == Zookeeper::Constants::ZOK
+
+         result.fetch(:children).each do |partition|
+           result = cluster.zk.delete(path: cluster.node_with_chroot("/consumers/#{name}/offsets/#{topic}/#{partition}"))
+           raise Kazoo::Error unless result.fetch(:rc) == Zookeeper::Constants::ZOK
+         end
+
+         result = cluster.zk.delete(path: cluster.node_with_chroot("/consumers/#{name}/offsets/#{topic}"))
+         raise Kazoo::Error unless result.fetch(:rc) == Zookeeper::Constants::ZOK
+       end
+     end
+
+     def inspect
+       "#<Kazoo::Consumergroup name=#{name}>"
+     end
+
+     def eql?(other)
+       other.kind_of?(Kazoo::Consumergroup) && cluster == other.cluster && name == other.name
+     end
+
+     alias_method :==, :eql?
+
+     def hash
+       [cluster, name].hash
+     end
+
+     class Instance
+
+       def self.generate_id
+         "#{Socket.gethostname}:#{SecureRandom.uuid}"
+       end
+
+       attr_reader :group, :id
+
+       def initialize(group, id: nil)
+         @group = group
+         @id = id || self.class.generate_id
+       end
+
+       def registered?
+         stat = cluster.zk.stat(path: cluster.node_with_chroot("/consumers/#{group.name}/ids/#{id}"))
+         stat.fetch(:stat).exists?
+       end
+
+       def register(subscription)
+         result = cluster.zk.create(
+           path: cluster.node_with_chroot("/consumers/#{group.name}/ids/#{id}"),
+           ephemeral: true,
+           data: JSON.generate({
+             version: 1,
+             timestamp: Time.now.to_i,
+             pattern: "static",
+             subscription: Hash[*subscription.flat_map { |topic| [topic.name, 1] }]
+           })
+         )
+
+         if result.fetch(:rc) != Zookeeper::Constants::ZOK
+           raise Kazoo::ConsumerInstanceRegistrationFailed, "Failed to register instance #{id} for consumer group #{group.name}! Error code: #{result.fetch(:rc)}"
+         end
+
+         subscription.each do |topic|
+           stat = cluster.zk.stat(path: cluster.node_with_chroot("/consumers/#{group.name}/owners/#{topic.name}"))
+           unless stat.fetch(:stat).exists?
+             result = cluster.zk.create(path: cluster.node_with_chroot("/consumers/#{group.name}/owners/#{topic.name}"))
+             if result.fetch(:rc) != Zookeeper::Constants::ZOK
+               raise Kazoo::ConsumerInstanceRegistrationFailed, "Failed to register subscription of #{topic.name} for consumer group #{group.name}! Error code: #{result.fetch(:rc)}"
+             end
+           end
+         end
+       end
+
+       def deregister
+         cluster.zk.delete(path: cluster.node_with_chroot("/consumers/#{group.name}/ids/#{id}"))
+       end
+
+       def claim_partition(partition)
+         result = cluster.zk.create(
+           path: cluster.node_with_chroot("/consumers/#{group.name}/owners/#{partition.topic.name}/#{partition.id}"),
+           ephemeral: true,
+           data: id,
+         )
+
+         case result.fetch(:rc)
+         when Zookeeper::Constants::ZOK
+           return true
+         when Zookeeper::Constants::ZNODEEXISTS
+           raise Kazoo::PartitionAlreadyClaimed, "Partition #{partition.topic.name}/#{partition.id} is already claimed!"
+         else
+           raise Kazoo::Error, "Failed to claim partition #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
+         end
+       end
+
+       def release_partition(partition)
+         result = cluster.zk.delete(path: cluster.node_with_chroot("/consumers/#{group.name}/owners/#{partition.topic.name}/#{partition.id}"))
+         if result.fetch(:rc) != Zookeeper::Constants::ZOK
+           raise Kazoo::Error, "Failed to release partition #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
+         end
+       end
+
+       def inspect
+         "#<Kazoo::Consumergroup::Instance group=#{group.name} id=#{id}>"
+       end
+
+       def hash
+         [group, id].hash
+       end
+
+       def eql?(other)
+         other.kind_of?(Kazoo::Consumergroup::Instance) && group == other.group && id == other.id
+       end
+
+       alias_method :==, :eql?
+
+       private
+
+       def cluster
+         group.cluster
+       end
+     end
+   end
+ end
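A hedged sketch of driving the Consumergroup and Instance classes above, reusing the illustrative cluster object from the previous sketch; the group name is also an assumption:

group = Kazoo::Consumergroup.new(cluster, "my-consumer")
group.create unless group.exists?

instance = group.instantiate
instance.register(cluster.topics.values)          # subscribe to every topic

partition = cluster.topics.values.first.partitions.first
begin
  instance.claim_partition(partition)             # ephemeral claim in ZooKeeper
  offset = group.retrieve_offset(partition)       # nil until an offset has been committed
  group.commit_offset(partition, offset.to_i)     # commit_offset stores offset + 1
ensure
  instance.release_partition(partition)
  instance.deregister
end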
data/lib/kazoo/partition.rb ADDED
@@ -0,0 +1,62 @@
+ module Kazoo
+   class Partition
+     attr_reader :topic, :id, :replicas
+
+     def initialize(topic, id, replicas: nil)
+       @topic, @id, @replicas = topic, id, replicas
+       @mutex = Mutex.new
+     end
+
+     def cluster
+       topic.cluster
+     end
+
+     def replication_factor
+       replicas.length
+     end
+
+     def leader
+       @mutex.synchronize do
+         refresh_state if @leader.nil?
+         @leader
+       end
+     end
+
+     def isr
+       @mutex.synchronize do
+         refresh_state if @isr.nil?
+         @isr
+       end
+     end
+
+     def under_replicated?
+       isr.length < replication_factor
+     end
+
+     def inspect
+       "#<Kazoo::Partition #{topic.name}/#{id}>"
+     end
+
+     def eql?(other)
+       other.kind_of?(Kazoo::Partition) && topic == other.topic && id == other.id
+     end
+
+     alias_method :==, :eql?
+
+     def hash
+       [topic, id].hash
+     end
+
+     protected
+
+     def refresh_state
+       state_json = cluster.zk.get(path: cluster.node_with_chroot("/brokers/topics/#{topic.name}/partitions/#{id}/state"))
+       set_state(JSON.parse(state_json.fetch(:data)))
+     end
+
+     def set_state(json)
+       @leader = cluster.brokers.fetch(json.fetch('leader'))
+       @isr = json.fetch('isr').map { |r| cluster.brokers.fetch(r) }
+     end
+   end
+ end
data/lib/kazoo/topic.rb ADDED
@@ -0,0 +1,46 @@
+ module Kazoo
+   class Topic
+
+     attr_reader :cluster, :name
+     attr_accessor :partitions
+
+     def initialize(cluster, name, partitions: nil)
+       @cluster, @name, @partitions = cluster, name, partitions
+     end
+
+     def self.from_json(cluster, name, json)
+       topic = new(cluster, name)
+       topic.partitions = json.fetch('partitions').map do |(id, replicas)|
+         topic.partition(id.to_i, replicas: replicas.map { |b| cluster.brokers[b] })
+       end.sort_by(&:id)
+
+       return topic
+     end
+
+     def partition(*args)
+       Kazoo::Partition.new(self, *args)
+     end
+
+     def replication_factor
+       partitions.map(&:replication_factor).min
+     end
+
+     def under_replicated?
+       partitions.any?(&:under_replicated?)
+     end
+
+     def inspect
+       "#<Kazoo::Topic #{name}>"
+     end
+
+     def eql?(other)
+       other.kind_of?(Kazoo::Topic) && cluster == other.cluster && name == other.name
+     end
+
+     alias_method :==, :eql?
+
+     def hash
+       [cluster, name].hash
+     end
+   end
+ end
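A short sketch of inspecting partition state with the Topic and Partition classes above, again reusing the illustrative cluster object; the topic name is an assumption:

topic = cluster.topics.fetch("test.4")
partition = topic.partitions.first

puts partition.leader.addr                # leader broker is resolved lazily from ZooKeeper
puts partition.isr.map(&:addr).inspect    # current in-sync replica set
puts "under-replicated" if partition.under_replicated?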
data/lib/kazoo/version.rb ADDED
@@ -0,0 +1,3 @@
+ module Kazoo
+   VERSION = "0.1.0"
+ end
data/lib/kazoo.rb ADDED
@@ -0,0 +1,19 @@
+ require 'zookeeper'
+ require 'json'
+ require 'thread'
+ require 'socket'
+ require 'securerandom'
+
+ module Kazoo
+   Error = Class.new(StandardError)
+   ConsumerInstanceRegistrationFailed = Class.new(Kazoo::Error)
+   PartitionAlreadyClaimed = Class.new(Kazoo::Error)
+   ReleasePartitionFailure = Class.new(Kazoo::Error)
+ end
+
+ require 'kazoo/cluster'
+ require 'kazoo/broker'
+ require 'kazoo/topic'
+ require 'kazoo/partition'
+ require 'kazoo/consumergroup'
+ require 'kazoo/version'
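All of the error classes above descend from Kazoo::Error, so callers can rescue narrowly or broadly; a brief hedged example using the instance and partition from the earlier sketches:

begin
  instance.claim_partition(partition)
rescue Kazoo::PartitionAlreadyClaimed
  sleep 1
  retry                               # another instance currently owns the claim; try again
rescue Kazoo::Error => e
  warn "ZooKeeper operation failed: #{e.message}"
end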
@@ -0,0 +1,45 @@
+ require 'test_helper'
+
+ class BrokerTest < Minitest::Test
+   include MockCluster
+
+   def setup
+     @cluster = mock_cluster
+   end
+
+   def test_broker_critical?
+     refute @cluster.brokers[1].critical?(replicas: 1), "We have 2 in-sync replicas for everything so we can lose 1."
+     assert @cluster.brokers[2].critical?(replicas: 2), "We only have 2 replicas so we can never lose 2."
+
+     # Simulate losing a broker from the ISR for a partition.
+     # This partition lives on broker 1 and 3.
+     @cluster.topics['test.4'].partitions[2].expects(:isr).returns([@cluster.brokers[1]])
+
+     assert @cluster.brokers[1].critical?(replicas: 1), "Final remaining broker for this partition's ISR set, cannot lose"
+     refute @cluster.brokers[2].critical?(replicas: 1), "Not related to the under-replicated partitions"
+     refute @cluster.brokers[3].critical?(replicas: 1), "Already down, so not critical"
+   end
+
+   def test_from_json
+     json_payload = '{"jmx_port":9999,"timestamp":"1431719964125","host":"kafka03.example.com","version":1,"port":9092}'
+     broker = Kazoo::Broker.from_json(mock('cluster'), 3, JSON.parse(json_payload))
+
+     assert_equal 3, broker.id
+     assert_equal 'kafka03.example.com', broker.host
+     assert_equal 9092, broker.port
+     assert_equal 9999, broker.jmx_port
+     assert_equal "kafka03.example.com:9092", broker.addr
+   end
+
+   def test_replicated_partitions
+     assert_equal 3, @cluster.brokers[1].replicated_partitions.length
+     assert_equal 4, @cluster.brokers[2].replicated_partitions.length
+     assert_equal 3, @cluster.brokers[3].replicated_partitions.length
+   end
+
+   def test_led_partitions
+     assert_equal 2, @cluster.brokers[1].led_partitions.length
+     assert_equal 2, @cluster.brokers[2].led_partitions.length
+     assert_equal 1, @cluster.brokers[3].led_partitions.length
+   end
+ end
@@ -0,0 +1,16 @@
+ require 'test_helper'
+
+ class ClusterTest < Minitest::Test
+   include MockCluster
+
+   def setup
+     @cluster = mock_cluster
+   end
+
+   def test_cluster_under_replicated?
+     refute @cluster.under_replicated?
+
+     @cluster.topics['test.4'].partitions[2].expects(:isr).returns([@cluster.brokers[1]])
+     assert @cluster.under_replicated?
+   end
+ end
@@ -0,0 +1,117 @@
+ require 'test_helper'
+
+ class PartitionDistributionTest < Minitest::Test
+
+   def setup
+     @cluster = Object.new
+     @group = Kazoo::Consumergroup.new(@cluster, 'test.ruby')
+     @topic = Kazoo::Topic.new(@cluster, 'test.4')
+     @partitions = [
+       Kazoo::Partition.new(@topic, 0),
+       Kazoo::Partition.new(@topic, 1),
+       Kazoo::Partition.new(@topic, 2),
+       Kazoo::Partition.new(@topic, 3),
+     ]
+   end
+
+   def test_single_consumer_gets_everything
+     instances = [Kazoo::Consumergroup::Instance.new(@group, id: '1')]
+     instance_copy = Kazoo::Consumergroup::Instance.new(@group, id: '1')
+
+     distribution = Kafka::Consumer.distribute_partitions(instances, @partitions)
+     assert_equal Set.new(instances), Set.new(distribution.keys)
+     assert_equal Set.new(@partitions), Set.new(distribution.values.flatten)
+
+     assert_equal @partitions, distribution[instance_copy]
+   end
+
+   def test_two_consumers_split_the_load
+     instances = [
+       Kazoo::Consumergroup::Instance.new(@group, id: '1'),
+       Kazoo::Consumergroup::Instance.new(@group, id: '2'),
+     ]
+
+     distribution = Kafka::Consumer.distribute_partitions(instances, @partitions)
+     assert_equal Set.new(instances), Set.new(distribution.keys)
+     assert_equal Set.new(@partitions), Set.new(distribution.values.flatten)
+
+     assert_equal 2, distribution[instances[0]].length
+     assert_equal 2, distribution[instances[1]].length
+   end
+
+   def test_three_consumers_split_the_load
+     instances = [
+       Kazoo::Consumergroup::Instance.new(@group, id: '1'),
+       Kazoo::Consumergroup::Instance.new(@group, id: '2'),
+       Kazoo::Consumergroup::Instance.new(@group, id: '3'),
+     ]
+
+     distribution = Kafka::Consumer.distribute_partitions(instances, @partitions)
+     assert_equal Set.new(instances), Set.new(distribution.keys)
+     assert_equal Set.new(@partitions), Set.new(distribution.values.flatten)
+
+     assert_equal 2, distribution[instances[0]].length
+     assert_equal 1, distribution[instances[1]].length
+     assert_equal 1, distribution[instances[2]].length
+   end
+
+   def test_four_consumers_split_the_load
+     instances = [
+       Kazoo::Consumergroup::Instance.new(@group, id: '1'),
+       Kazoo::Consumergroup::Instance.new(@group, id: '2'),
+       Kazoo::Consumergroup::Instance.new(@group, id: '3'),
+       Kazoo::Consumergroup::Instance.new(@group, id: '4'),
+     ]
+
+     distribution = Kafka::Consumer.distribute_partitions(instances, @partitions)
+     assert_equal Set.new(instances), Set.new(distribution.keys)
+     assert_equal Set.new(@partitions), Set.new(distribution.values.flatten)
+
+     assert_equal 1, distribution[instances[0]].length
+     assert_equal 1, distribution[instances[1]].length
+     assert_equal 1, distribution[instances[2]].length
+     assert_equal 1, distribution[instances[3]].length
+   end
+
+   def test_four_consumers_split_the_load_and_one_is_standby
+     instances = [
+       Kazoo::Consumergroup::Instance.new(@group, id: '1'),
+       Kazoo::Consumergroup::Instance.new(@group, id: '2'),
+       Kazoo::Consumergroup::Instance.new(@group, id: '3'),
+       Kazoo::Consumergroup::Instance.new(@group, id: '4'),
+       Kazoo::Consumergroup::Instance.new(@group, id: '5'),
+     ]
+
+     distribution = Kafka::Consumer.distribute_partitions(instances, @partitions)
+     assert_equal Set.new(instances[0..3]), Set.new(distribution.keys)
+     assert_equal Set.new(@partitions), Set.new(distribution.values.flatten)
+
+     assert_equal 1, distribution[instances[0]].length
+     assert_equal 1, distribution[instances[1]].length
+     assert_equal 1, distribution[instances[2]].length
+     assert_equal 1, distribution[instances[3]].length
+
+     assert_nil distribution[instances[4]]
+   end
+
+   def test_assign_everything_with_random_number_of_partitions_and_instances
+     partitions = (0 .. rand(500)).map { |i| Kazoo::Partition.new(@topic, i) }
+     instances = (0 .. rand(100)).map { |i| Kazoo::Consumergroup::Instance.new(@group, id: i.to_s) }
+
+     distribution = Kafka::Consumer.distribute_partitions(instances, partitions)
+     assert_equal [partitions.length, instances.length].min, distribution.keys.length
+     assert_equal Set.new(partitions), Set.new(distribution.values.flatten)
+   end
+
+   def test_assign_zero_partitions
+     instances = [Kazoo::Consumergroup::Instance.new(@group, id: '1')]
+     distribution = Kafka::Consumer.distribute_partitions(instances, [])
+     assert distribution.empty?
+   end
+
+   def test_assign_to_zero_instances
+     distribution = Kafka::Consumer.distribute_partitions([], @partitions)
+     assert distribution.empty?
+   end
+ end
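The assertions above pin down the contract of Kafka::Consumer.distribute_partitions, which is not part of this diff. A hypothetical round-robin sketch that would satisfy these tests (not the gem's actual implementation, and shown standalone to avoid reopening the real class):

# Assign partitions to instances round-robin, in the order given.
# Instances beyond the number of partitions receive no key at all.
def distribute_partitions_sketch(instances, partitions)
  return {} if instances.empty? || partitions.empty?

  distribution = {}
  partitions.each_with_index do |partition, index|
    instance = instances[index % instances.length]   # round-robin assignment
    (distribution[instance] ||= []) << partition     # standby instances never appear as keys
  end
  distribution
end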
@@ -0,0 +1,25 @@
+ require 'test_helper'
+
+ class PartitionTest < Minitest::Test
+   include MockCluster
+
+   def setup
+     @cluster = mock_cluster
+   end
+
+   def test_replication_factor
+     assert_equal 2, @cluster.topics['test.1'].partitions[0].replication_factor
+   end
+
+   def test_state
+     partition = @cluster.topics['test.1'].partitions[0]
+     partition.unstub(:leader)
+     partition.unstub(:isr)
+
+     json_payload = '{"controller_epoch":157,"leader":1,"version":1,"leader_epoch":8,"isr":[3,2,1]}'
+     @cluster.zk.expects(:get).with(path: "/brokers/topics/test.1/partitions/0/state").returns(data: json_payload)
+
+     assert_equal 1, partition.leader.id
+     assert_equal [3,2,1], partition.isr.map(&:id)
+   end
+ end