kazoo-ruby 0.1.0 → 0.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 8459c642f0d085f68811488f9d6ab92f186fd296
-  data.tar.gz: 2f187078b0e87d345e71277195750ebecbd3b5b4
+  metadata.gz: 87213b930132989503c56e226e6755b8c2f51559
+  data.tar.gz: 5c62c5a979ad421634272a4839b746777ed63cc6
 SHA512:
-  metadata.gz: e22aa111a85057bd295d80485371c46c84b1306d89160e04e2d2a21b985c407da14c59c4936d49239232f53e0309fb59fd9ca71f8affcc6cab8dd3f428ab55b7
-  data.tar.gz: 570508f6fe5f8d10bb8081b06c43f44b5f20da4e28d57801fcc5c6204f97a1f4557ac7234fd67bc72c7173111d145a42f6a5eabc5479267ccf9eb27095706fac
+  metadata.gz: d3a2b95c9e360fdf62c98a68ef752b220e35ddd4d9f8e05081a4eb95176a6e08d753125c47354a98ad171490458d3596199adf8eaea0edfa4768203c244302ab
+  data.tar.gz: e5af8de385e6946a53aa4a276c2708fb772d611cbf6afb99135c8af4c8f3a61969a52baf9c9fca8ea1f75b70c8c602689ec00024b163b748702b4e7411f47dbf
data/.gitignore CHANGED
@@ -7,6 +7,7 @@
 /pkg/
 /spec/reports/
 /tmp/
+/confluent/
 *.bundle
 *.so
 *.o
data/.travis.yml CHANGED
@@ -1,6 +1,14 @@
 language: ruby
 sudo: false
 
+env:
+  global:
+    - ZOOKEEPER_PEERS=localhost:2181
+    - DEBUG=true
+
+before_script:
+  - make confluent/kafka/start
+
 rvm:
 - "2.0"
 - "2.1"
data/Makefile ADDED
@@ -0,0 +1,53 @@
+.PHONY: confluent/kafka/* confluent/zookeeper/* confluent/registry/* confluent/start confluent/stop
+
+
+# Confluent platform tasks
+
+confluent/start: confluent/rest/start
+
+confluent/stop: confluent/rest/stop confluent/registry/stop confluent/kafka/stop confluent/zookeeper/stop
+
+# Download & extract tasks
+
+confluent/confluent.tgz:
+	mkdir -p confluent && wget http://packages.confluent.io/archive/1.0/confluent-1.0-2.10.4.tar.gz -O confluent/confluent.tgz
+
+confluent/EXTRACTED: confluent/confluent.tgz
+	tar xzf confluent/confluent.tgz -C confluent --strip-components 1 && mkdir confluent/logs && touch confluent/EXTRACTED
+	echo "delete.topic.enable=true" >> confluent/etc/kafka/server.properties
+
+# Zookeeper tasks
+
+confluent/zookeeper/start: confluent/EXTRACTED
+	nohup confluent/bin/zookeeper-server-start confluent/etc/kafka/zookeeper.properties 2> confluent/logs/zookeeper.err > confluent/logs/zookeeper.out < /dev/null &
+	while ! nc localhost 2181 </dev/null; do echo "Waiting for zookeeper..."; sleep 1; done
+
+confluent/zookeeper/stop: confluent/EXTRACTED
+	confluent/bin/zookeeper-server-stop
+
+# Kafka tasks
+
+confluent/kafka/start: confluent/zookeeper/start confluent/EXTRACTED
+	nohup confluent/bin/kafka-server-start confluent/etc/kafka/server.properties 2> confluent/logs/kafka.err > confluent/logs/kafka.out < /dev/null &
+	while ! nc localhost 9092 </dev/null; do echo "Waiting for Kafka..."; sleep 1; done
+
+confluent/kafka/stop: confluent/EXTRACTED
+	confluent/bin/kafka-server-stop
+
+# schema-registry tasks
+
+confluent/registry/start: confluent/kafka/start confluent/EXTRACTED
+	nohup confluent/bin/schema-registry-start confluent/etc/schema-registry/schema-registry.properties 2> confluent/logs/schema-registry.err > confluent/logs/schema-registry.out < /dev/null &
+	while ! nc localhost 8081 </dev/null; do echo "Waiting for schema registry..."; sleep 1; done
+
+confluent/registry/stop: confluent/EXTRACTED
+	confluent/bin/kafka-server-stop
+
+# REST proxy tasks
+
+confluent/rest/start: confluent/registry/start confluent/EXTRACTED
+	nohup confluent/bin/kafka-rest-start confluent/etc/kafka-rest/kafka-rest.properties 2> confluent/logs/kafka-rest.err > confluent/logs/kafka-rest.out < /dev/null &
+	while ! nc localhost 8082 </dev/null; do echo "Waiting for REST proxy..."; sleep 1; done
+
+confluent/rest/stop: confluent/EXTRACTED
+	confluent/bin/kafka-rest-stop
data/lib/kazoo.rb CHANGED
@@ -7,10 +7,15 @@ require 'securerandom'
 module Kazoo
   Error = Class.new(StandardError)
 
+  ValidationError = Class.new(Kazoo::Error)
   NoClusterRegistered = Class.new(Kazoo::Error)
   ConsumerInstanceRegistrationFailed = Class.new(Kazoo::Error)
   PartitionAlreadyClaimed = Class.new(Kazoo::Error)
   ReleasePartitionFailure = Class.new(Kazoo::Error)
+
+  def self.connect(zookeeper)
+    Kazoo::Cluster.new(zookeeper)
+  end
 end
 
 require 'kazoo/cluster'
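
The new Kazoo.connect helper is just sugar for Kazoo::Cluster.new. A minimal usage sketch (assuming a reachable ZooKeeper ensemble at localhost:2181; the printed topic names are whatever the cluster actually has):

    require 'kazoo'

    # Connect to the ZooKeeper ensemble that backs the Kafka cluster.
    cluster = Kazoo.connect("localhost:2181")

    # List the topics registered in ZooKeeper, then release the connection.
    cluster.topics.each_key { |name| puts name }
    cluster.close
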
data/lib/kazoo/cli.rb CHANGED
@@ -3,7 +3,7 @@ require 'thor'
 
 module Kazoo
   class CLI < Thor
-    class_option :zookeeper, :type => :string, :default => ENV['ZOOKEEPER']
+    class_option :zookeeper, type: :string, default: ENV['ZOOKEEPER']
 
     desc "cluster", "Describes the Kafka cluster as registered in Zookeeper"
     def cluster
@@ -23,7 +23,25 @@ module Kazoo
       end
     end
 
-    option :topic, :type => :string
+    desc "create_topic", "Creates a new topic"
+    option :name, type: :string, required: true
+    option :partitions, type: :numeric, required: true
+    option :replication_factor, type: :numeric, required: true
+    def create_topic
+      validate_class_options!
+
+      kafka_cluster.create_topic(options[:name], partitions: options[:partitions], replication_factor: options[:replication_factor])
+    end
+
+    desc "delete_topic", "Removes a topic"
+    option :name, type: :string, required: true
+    def delete_topic
+      validate_class_options!
+
+      kafka_cluster.topics.fetch(options[:name]).destroy
+    end
+
+    option :topic, type: :string
     desc "partitions", "Lists partitions"
     def partitions
       validate_class_options!
@@ -39,7 +57,7 @@ module Kazoo
       end
     end
 
-    option :replicas, :type => :numeric, :default => 1
+    option :replicas, type: :numeric, default: 1
     desc "critical <broker>", "Determine whether a broker is critical"
    def critical(broker_name)
       validate_class_options!
@@ -52,6 +70,65 @@ module Kazoo
     end
 
 
+    desc "consumergroups", "Lists the consumergroups registered for this Kafka cluster"
+    def consumergroups
+      validate_class_options!
+
+      kafka_cluster.consumergroups.each do |group|
+        instances = group.instances
+        if instances.length == 0
+          puts "- #{group.name}: inactive"
+        else
+          puts "- #{group.name}: #{instances.length} running instances"
+        end
+      end
+    end
+
+    desc "consumergroup", "Prints information about a consumer group"
+    option :name, type: :string, required: true
+    def consumergroup
+      validate_class_options!
+
+      cg = kafka_cluster.consumergroup(options[:name])
+      raise Kazoo::Error, "Consumergroup #{options[:name]} is not registered in Zookeeper" unless cg.exists?
+
+      puts "Consumer group: #{cg.name}\n"
+
+      if cg.active?
+        puts "Running instances:"
+        cg.instances.each do |instance|
+          puts "- #{instance.id}"
+        end
+      else
+        puts "This consumer group is inactive."
+      end
+    end
+
+    desc "delete_consumergroup", "Removes a consumer group from Zookeeper"
+    option :name, type: :string, required: true
+    def delete_consumergroup
+      validate_class_options!
+
+      cg = kafka_cluster.consumergroup(options[:name])
+      raise Kazoo::Error, "Consumergroup #{options[:name]} is not registered in Zookeeper" unless cg.exists?
+      raise Kazoo::Error, "Cannot remove consumergroup #{cg.name} because it's still active" if cg.active?
+
+      cg.destroy
+    end
+
+    desc "reset_consumergroup", "Resets all the offsets stored for a consumergroup"
+    option :name, type: :string, required: true
+    def reset_consumergroup
+      validate_class_options!
+
+      cg = kafka_cluster.consumergroup(options[:name])
+      raise Kazoo::Error, "Consumergroup #{options[:name]} is not registered in Zookeeper" unless cg.exists?
+      raise Kazoo::Error, "Cannot remove consumergroup #{cg.name} because it's still active" if cg.active?
+
+      cg.reset_all_offsets
+    end
+
+
     private
 
     def validate_class_options!
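
The new Thor subcommands above (create_topic, delete_topic, consumergroups, consumergroup, delete_consumergroup, reset_consumergroup) sit next to the existing cluster, partitions and critical commands. A hedged sketch of driving them through Thor's standard start method instead of the bin/kazoo executable (the topic name and ZooKeeper address are illustrative):

    require 'kazoo'
    require 'kazoo/cli'

    # Equivalent to: kazoo create_topic --name test.example --partitions 4 \
    #   --replication_factor 2 --zookeeper localhost:2181
    Kazoo::CLI.start(%w[
      create_topic --name test.example --partitions 4
      --replication_factor 2 --zookeeper localhost:2181
    ])

    # List consumer groups and how many instances each has running.
    Kazoo::CLI.start(%w[consumergroups --zookeeper localhost:2181])
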
data/lib/kazoo/cluster.rb CHANGED
@@ -47,6 +47,10 @@ module Kazoo
       end
     end
 
+    def consumergroup(name)
+      Kazoo::Consumergroup.new(self, name)
+    end
+
     def topics
       @topics_mutex.synchronize do
         @topics ||= begin
@@ -70,6 +74,17 @@ module Kazoo
       end
     end
 
+    def topic(name)
+      Kazoo::Topic.new(self, name)
+    end
+
+    def create_topic(name, partitions: nil, replication_factor: nil)
+      raise ArgumentError, "partitions must be a positive integer" if Integer(partitions) <= 0
+      raise ArgumentError, "replication_factor must be a positive integer" if Integer(replication_factor) <= 0
+
+      Kazoo::Topic.create(self, name, partitions: Integer(partitions), replication_factor: Integer(replication_factor))
+    end
+
     def partitions
       topics.values.flat_map(&:partitions)
     end
@@ -85,5 +100,42 @@ module Kazoo
     def close
       zk.close
     end
+
+    protected
+
+    def recursive_create(path: nil)
+      raise ArgumentError, "path is a required argument" if path.nil?
+
+      result = zk.stat(path: path)
+      case result.fetch(:rc)
+      when Zookeeper::Constants::ZOK
+        return
+      when Zookeeper::Constants::ZNONODE
+        recursive_create(path: File.dirname(path))
+        result = zk.create(path: path)
+        raise Kazoo::Error, "Failed to create node #{path}. Result code: #{result.fetch(:rc)}" unless result.fetch(:rc) == Zookeeper::Constants::ZOK
+      else
+        raise Kazoo::Error, "Failed to create node #{path}. Result code: #{result.fetch(:rc)}"
+      end
+    end
+
+    def recursive_delete(path: nil)
+      raise ArgumentError, "path is a required argument" if path.nil?
+
+      result = zk.get_children(path: path)
+      raise Kazoo::Error, "Failed to list children of #{path} to delete them. Result code: #{result.fetch(:rc)}" if result.fetch(:rc) != Zookeeper::Constants::ZOK
+
+      threads = []
+      result.fetch(:children).each do |name|
+        threads << Thread.new do
+          Thread.abort_on_exception = true
+          recursive_delete(path: File.join(path, name))
+        end
+        threads.each(&:join)
+      end
+
+      result = zk.delete(path: path)
+      raise Kazoo::Error, "Failed to delete node #{path}. Result code: #{result.fetch(:rc)}" if result.fetch(:rc) != Zookeeper::Constants::ZOK
+    end
   end
 end
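
Cluster#create_topic coerces and checks its arguments before delegating to Kazoo::Topic.create, and the protected recursive_create/recursive_delete helpers let other classes build or tear down whole ZooKeeper subtrees. A small sketch of the public side (assuming a running cluster; the topic names are illustrative):

    cluster = Kazoo.connect("localhost:2181")

    # Both keyword arguments are run through Integer() and must be positive.
    topic = cluster.create_topic("test.example", partitions: 4, replication_factor: 1)

    # Invalid values are rejected before anything is written to ZooKeeper.
    begin
      cluster.create_topic("test.broken", partitions: 0, replication_factor: 1)
    rescue ArgumentError => e
      puts e.message   # "partitions must be a positive integer"
    end
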
data/lib/kazoo/consumergroup.rb CHANGED
@@ -7,10 +7,12 @@ module Kazoo
     end
 
     def create
-      cluster.zk.create(path: "/consumers/#{name}")
-      cluster.zk.create(path: "/consumers/#{name}/ids")
-      cluster.zk.create(path: "/consumers/#{name}/owners")
-      cluster.zk.create(path: "/consumers/#{name}/offsets")
+      cluster.send(:recursive_create, path: "/consumers/#{name}/ids")
+      cluster.send(:recursive_create, path: "/consumers/#{name}/owners")
+    end
+
+    def destroy
+      cluster.send(:recursive_delete, path: "/consumers/#{name}")
     end
 
     def exists?
@@ -23,9 +25,20 @@
       Instance.new(self, id: id)
     end
 
+    def active?
+      instances.length > 0
+    end
+
     def instances
-      instances = cluster.zk.get_children(path: "/consumers/#{name}/ids")
-      instances.fetch(:children).map { |id| Instance.new(self, id: id) }
+      result = cluster.zk.get_children(path: "/consumers/#{name}/ids")
+      case result.fetch(:rc)
+      when Zookeeper::Constants::ZOK
+        result.fetch(:children).map { |id| Instance.new(self, id: id) }
+      when Zookeeper::Constants::ZNONODE
+        []
+      else
+        raise Kazoo::Error, "Failed getting a list of running instances for #{name}. Error code: #{result.fetch(:rc)}"
+      end
     end
 
     def watch_instances(&block)
@@ -52,58 +65,82 @@
       when Zookeeper::Constants::ZOK
        [Kazoo::Consumergroup::Instance.new(self, id: result.fetch(:data)), cb]
       else
-        raise Kazoo::Error, "Failed set watch for partition claim of #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
+        raise Kazoo::Error, "Failed to set watch for partition claim of #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
      end
     end
 
     def retrieve_offset(partition)
       result = cluster.zk.get(path: "/consumers/#{name}/offsets/#{partition.topic.name}/#{partition.id}")
       case result.fetch(:rc)
-        when Zookeeper::Constants::ZOK;
-          result.fetch(:data).to_i
-        when Zookeeper::Constants::ZNONODE;
-          nil
-        else
-          raise Kazoo::Error, "Failed to retrieve offset for partition #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
+      when Zookeeper::Constants::ZOK;
+        result.fetch(:data).to_i
+      when Zookeeper::Constants::ZNONODE;
+        nil
+      else
+        raise Kazoo::Error, "Failed to retrieve offset for partition #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
       end
     end
 
-    def commit_offset(partition, offset)
-      result = cluster.zk.set(path: "/consumers/#{name}/offsets/#{partition.topic.name}/#{partition.id}", data: (offset + 1).to_s)
-      if result.fetch(:rc) == Zookeeper::Constants::ZNONODE
-        result = cluster.zk.create(path: "/consumers/#{name}/offsets/#{partition.topic.name}")
-        case result.fetch(:rc)
-        when Zookeeper::Constants::ZOK, Zookeeper::Constants::ZNODEEXISTS
-        else
-          raise Kazoo::Error, "Failed to commit offset #{offset} for partition #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
-        end
+    def retrieve_all_offsets
+      offsets, threads, mutex = {}, [], Mutex.new
 
-        result = cluster.zk.create(path: "/consumers/#{name}/offsets/#{partition.topic.name}/#{partition.id}", data: (offset + 1).to_s)
+      topic_result = cluster.zk.get_children(path: "/consumers/#{name}/offsets")
+      case topic_result.fetch(:rc)
+      when Zookeeper::Constants::ZOK;
+        # continue
+      when Zookeeper::Constants::ZNONODE;
+        return offsets
+      else
+        raise Kazoo::Error, "Failed to retrieve offset for partition #{partition.topic.name}/#{partition.id}. Error code: #{topic_result.fetch(:rc)}"
       end
 
-      if result.fetch(:rc) != Zookeeper::Constants::ZOK
-        raise Kazoo::Error, "Failed to commit offset #{offset} for partition #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
-      end
-    end
+      topic_result.fetch(:children).each do |topic_name|
+        threads << Thread.new do
+          Thread.abort_on_exception = true
+
+          topic = Kazoo::Topic.new(cluster, topic_name)
+          partition_result = cluster.zk.get_children(path: "/consumers/#{name}/offsets/#{topic.name}")
+          raise Kazoo::Error, "Failed to retrieve offsets. Error code: #{partition_result.fetch(:rc)}" if partition_result.fetch(:rc) != Zookeeper::Constants::ZOK
 
-    def reset_offsets
-      result = cluster.zk.get_children(path: "/consumers/#{name}/offsets")
-      raise Kazoo::Error unless result.fetch(:rc) == Zookeeper::Constants::ZOK
+          partition_threads = []
+          partition_result.fetch(:children).each do |partition_id|
+            partition_threads << Thread.new do
+              Thread.abort_on_exception = true
 
-      result.fetch(:children).each do |topic|
-        result = cluster.zk.get_children(path: "/consumers/#{name}/offsets/#{topic}")
-        raise Kazoo::Error unless result.fetch(:rc) == Zookeeper::Constants::ZOK
+              partition = topic.partition(partition_id.to_i)
+              offset_result = cluster.zk.get(path: "/consumers/#{name}/offsets/#{topic.name}/#{partition.id}")
+              raise Kazoo::Error, "Failed to retrieve offsets. Error code: #{offset_result.fetch(:rc)}" if offset_result.fetch(:rc) != Zookeeper::Constants::ZOK
 
-        result.fetch(:children).each do |partition|
-          cluster.zk.delete(path: "/consumers/#{name}/offsets/#{topic}/#{partition}")
-          raise Kazoo::Error unless result.fetch(:rc) == Zookeeper::Constants::ZOK
+              mutex.synchronize { offsets[partition] = offset_result.fetch(:data).to_i }
+            end
+          end
+          partition_threads.each(&:join)
         end
+      end
+
+      threads.each(&:join)
+      return offsets
+    end
 
-        cluster.zk.delete(path: "/consumers/#{name}/offsets/#{topic}")
-        raise Kazoo::Error unless result.fetch(:rc) == Zookeeper::Constants::ZOK
+    def commit_offset(partition, offset)
+      partition_offset_path = "/consumers/#{name}/offsets/#{partition.topic.name}/#{partition.id}"
+      next_offset_data = (offset + 1).to_s
+
+      result = cluster.zk.set(path: partition_offset_path, data: next_offset_data)
+      if result.fetch(:rc) == Zookeeper::Constants::ZNONODE
+        cluster.send(:recursive_create, path: File.dirname(partition_offset_path))
+        result = cluster.zk.create(path: partition_offset_path, data: next_offset_data)
+      end
+
+      if result.fetch(:rc) != Zookeeper::Constants::ZOK
+        raise Kazoo::Error, "Failed to commit offset #{offset} for partition #{partition.topic.name}/#{partition.id}. Error code: #{result.fetch(:rc)}"
       end
     end
 
+    def reset_all_offsets
+      cluster.send(:recursive_delete, path: "/consumers/#{name}/offsets")
+    end
+
     def inspect
       "#<Kazoo::Consumergroup name=#{name}>"
     end
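
Offset handling in Kazoo::Consumergroup now leans on those recursive helpers: commit_offset creates missing parent nodes on demand, retrieve_all_offsets walks every topic and partition under /consumers/<group>/offsets with one thread per node, and reset_all_offsets deletes the whole subtree. A hedged usage sketch (group and topic names are illustrative):

    cluster = Kazoo.connect("localhost:2181")
    group   = cluster.consumergroup("example-processor")
    group.create unless group.exists?

    partition = cluster.topic("test.example").partition(0)

    # Kazoo stores offset + 1, i.e. the next offset to consume.
    group.commit_offset(partition, 41)
    group.retrieve_offset(partition)   # => 42

    group.retrieve_all_offsets         # => { #<Kazoo::Partition test.example/0> => 42 }
    group.reset_all_offsets            # removes /consumers/example-processor/offsets
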
data/lib/kazoo/partition.rb CHANGED
@@ -33,6 +33,19 @@ module Kazoo
       isr.length < replication_factor
     end
 
+    def validate
+      raise Kazoo::ValidationError, "No replicas defined for #{topic.name}/#{id}" if replicas.length == 0
+      raise Kazoo::ValidationError, "The replicas of #{topic.name}/#{id} should be assigned to different brokers" if replicas.length > replicas.uniq.length
+
+      true
+    end
+
+    def valid?
+      validate
+    rescue Kazoo::ValidationError
+      false
+    end
+
     def inspect
       "#<Kazoo::Partition #{topic.name}/#{id}>"
     end
@@ -47,6 +60,19 @@ module Kazoo
       [topic, id].hash
     end
 
+    def wait_for_leader
+      current_leader = nil
+      while current_leader.nil?
+        current_leader = begin
+          leader
+        rescue Kazoo::Error
+          nil
+        end
+
+        sleep(0.1) if current_leader.nil?
+      end
+    end
+
     protected
 
     def refresh_state
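
Partition#validate and #valid? back the topic-level validation below, and #wait_for_leader polls every 100 ms until ZooKeeper reports a leader, which Topic#create uses to block until a freshly created topic is actually usable. A small sketch (assuming a connected cluster; broker and topic names are illustrative):

    cluster = Kazoo.connect("localhost:2181")
    topic   = cluster.topic("test.example")

    # Two replicas on the same broker fail validation.
    partition = topic.partition(0, replicas: [cluster.brokers[1], cluster.brokers[1]])
    partition.valid?     # => false
    partition.validate   # raises Kazoo::ValidationError

    # Blocks, retrying every 0.1 s, until a leader is elected for the partition.
    topic.partition(0, replicas: [cluster.brokers[1]]).wait_for_leader
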
data/lib/kazoo/topic.rb CHANGED
@@ -1,11 +1,14 @@
 module Kazoo
   class Topic
+    VALID_TOPIC_NAMES = %r{\A[a-zA-Z0-9\\._\\-]+\z}
+    BLACKLISTED_TOPIC_NAMES = %r{\A\.\.?\z}
 
     attr_reader :cluster, :name
     attr_accessor :partitions
 
-    def initialize(cluster, name, partitions: nil)
-      @cluster, @name, @partitions = cluster, name, partitions
+    def initialize(cluster, name)
+      @cluster, @name = cluster, name
+      @partitions = []
     end
 
     def self.from_json(cluster, name, json)
@@ -42,5 +45,117 @@ module Kazoo
     def hash
       [cluster, name].hash
     end
+
+    def exists?
+      stat = cluster.zk.stat(path: "/brokers/topics/#{name}")
+      stat.fetch(:stat).exists?
+    end
+
+    def validate
+      raise Kazoo::ValidationError, "#{name} is not a valid topic name" if VALID_TOPIC_NAMES !~ name
+      raise Kazoo::ValidationError, "#{name} is not a valid topic name" if BLACKLISTED_TOPIC_NAMES =~ name
+      raise Kazoo::ValidationError, "#{name} is too long" if name.length > 255
+      raise Kazoo::ValidationError, "The topic has no partitions defined" if partitions.length == 0
+      partitions.each(&:validate)
+
+      true
+    end
+
+    def valid?
+      validate
+    rescue Kazoo::ValidationError
+      false
+    end
+
+    def create
+      raise Kazoo::Error, "The topic #{name} already exists!" if exists?
+      validate
+
+      result = cluster.zk.create(
+        path: "/brokers/topics/#{name}",
+        data: JSON.dump(version: 1, partitions: partitions_as_json)
+      )
+
+      if result.fetch(:rc) != Zookeeper::Constants::ZOK
+        raise Kazoo::Error, "Failed to create topic #{name}. Error code: #{result.fetch(:rc)}"
+      end
+
+      wait_for_partitions
+    end
+
+    def destroy
+      t = Thread.current
+      cb = Zookeeper::Callbacks::WatcherCallback.create do |event|
+        case event.type
+        when Zookeeper::Constants::ZOO_DELETED_EVENT
+          t.run if t.status == 'sleep'
+        else
+          raise Kazoo::Error, "Unexpected Zookeeper event: #{event.type}"
+        end
+      end
+
+      result = cluster.zk.stat(path: "/brokers/topics/#{name}", watcher: cb)
+      case result.fetch(:rc)
+      when Zookeeper::Constants::ZOK
+        # continue
+      when Zookeeper::Constants::ZNONODE
+        raise Kazoo::Error, "Topic #{name} does not exist!"
+      else
+        raise Kazoo::Error, "Failed to monitor topic"
+      end
+
+
+      result = cluster.zk.create(path: "/admin/delete_topics/#{name}")
+      case result.fetch(:rc)
+      when Zookeeper::Constants::ZOK
+        Thread.stop unless cb.completed?
+      when Zookeeper::Constants::ZNODEEXISTS
+        raise Kazoo::Error, "The topic #{name} is already marked for deletion!"
+      else
+        raise Kazoo::Error, "Failed to delete topic #{name}. Error code: #{result.fetch(:rc)}"
+      end
+    end
+
+    def self.create(cluster, name, partitions: nil, replication_factor: nil)
+      topic = new(cluster, name)
+      topic.send(:sequentially_assign_partitions, partitions, replication_factor)
+      topic.create
+      topic
+    end
+
+    protected
+
+    def wait_for_partitions
+      threads = []
+      partitions.each do |partition|
+        threads << Thread.new do
+          Thread.abort_on_exception = true
+          partition.wait_for_leader
+        end
+      end
+      threads.each(&:join)
+    end
+
+    def sequentially_assign_partitions(partition_count, replication_factor, brokers: nil)
+      brokers = cluster.brokers.values if brokers.nil?
+      raise ArgumentError, "replication_factor should be smaller or equal to the number of brokers" if replication_factor > brokers.length
+
+      # Sequentially assign replicas to brokers. There might be a better way.
+      @partitions = 0.upto(partition_count - 1).map do |partition_index|
+        replicas = 0.upto(replication_factor - 1).map do |replica_index|
+          broker_index = (partition_index + replica_index) % brokers.length
+          brokers[broker_index]
+        end
+
+        self.partition(partition_index, replicas: replicas)
+      end
+    end
+
+    def partitions_as_json
+      partitions.inject({}) do |hash, partition|
+        hash[partition.id] = partition.replicas.map(&:id)
+        hash
+      end
+    end
   end
 end
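
Topic.create assigns replicas to brokers round-robin, validates the assignment, writes it to /brokers/topics/<name>, and then waits for every partition to get a leader; #destroy writes a node under /admin/delete_topics and blocks on a ZooKeeper watch until the brokers have removed the topic (which requires delete.topic.enable=true, as the Makefile above sets for the test cluster). A hedged sketch of using Kazoo::Topic directly (the topic name is illustrative):

    cluster = Kazoo.connect("localhost:2181")

    topic = Kazoo::Topic.create(cluster, "test.example", partitions: 2, replication_factor: 1)
    topic.exists?   # => true

    # Name and partition checks run before anything is written to ZooKeeper.
    Kazoo::Topic.new(cluster, "..").valid?   # => false

    topic.destroy   # blocks until the brokers complete the deletion
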
data/lib/kazoo/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Kazoo
-  VERSION = "0.1.0"
+  VERSION = "0.2.0"
 end
data/test/functional/functional_consumergroup_test.rb ADDED
@@ -0,0 +1,93 @@
+require 'test_helper'
+
+class FunctionalConsumergroupTest < Minitest::Test
+  def setup
+    zookeeper = ENV["ZOOKEEPER_PEERS"] || "127.0.0.1:2181"
+    @cluster = Kazoo.connect(zookeeper)
+    @cg = Kazoo::Consumergroup.new(@cluster, 'test.kazoo')
+    @cg.create
+  end
+
+  def teardown
+    cg = Kazoo::Consumergroup.new(@cluster, 'test.kazoo')
+    cg.destroy if cg.exists?
+
+    @cluster.close
+  end
+
+  def test_create_and_destroy_consumergroup
+    cg = Kazoo::Consumergroup.new(@cluster, 'test.kazoo.2')
+    refute cg.exists?
+
+    cg.create
+    assert cg.exists?
+
+    cg.destroy
+    refute cg.exists?
+  end
+
+  def test_retrieve_and_commit_offsets
+    topic = Kazoo::Topic.new(@cluster, 'test.1')
+    partition = Kazoo::Partition.new(topic, 0)
+
+    assert_nil @cg.retrieve_offset(partition)
+
+    @cg.commit_offset(partition, 1234)
+
+    assert_equal 1234 + 1, @cg.retrieve_offset(partition)
+
+    @cg.reset_all_offsets
+    assert_nil @cg.retrieve_offset(partition)
+  end
+
+  def test_retrieve_all_offsets
+    topic1 = Kazoo::Topic.new(@cluster, 'test.1')
+    partition10 = Kazoo::Partition.new(topic1, 0)
+
+    topic4 = Kazoo::Topic.new(@cluster, 'test.4')
+    partition40 = Kazoo::Partition.new(topic4, 0)
+    partition41 = Kazoo::Partition.new(topic4, 1)
+    partition42 = Kazoo::Partition.new(topic4, 2)
+    partition43 = Kazoo::Partition.new(topic4, 3)
+
+    assert_equal Hash.new, @cg.retrieve_all_offsets
+
+    @cg.commit_offset(partition10, 10)
+    @cg.commit_offset(partition40, 40)
+    @cg.commit_offset(partition41, 41)
+    @cg.commit_offset(partition42, 42)
+    @cg.commit_offset(partition43, 43)
+
+    offsets = @cg.retrieve_all_offsets
+
+    assert_equal 5, offsets.length
+    assert_equal 11, offsets[partition10]
+    assert_equal 41, offsets[partition40]
+    assert_equal 42, offsets[partition41]
+    assert_equal 43, offsets[partition42]
+    assert_equal 44, offsets[partition43]
+
+    @cg.reset_all_offsets
+    assert_equal Hash.new, @cg.retrieve_all_offsets
+  end
+
+  def test_watch_instances
+    topic = Kazoo::Topic.new(@cluster, 'test.1')
+
+    instance1 = @cg.instantiate
+    instance1.register([topic])
+    instance2 = @cg.instantiate
+    instance2.register([topic])
+
+    t = Thread.current
+    instances, cb = @cg.watch_instances { t.run if t.status == 'sleep' }
+    assert_equal Set[instance1, instance2], Set.new(instances)
+
+    Thread.new { instance2.deregister }
+
+    Thread.stop unless cb.completed?
+
+    assert_equal Set[instance1], Set.new(@cg.instances)
+    instance1.deregister
+  end
+end
data/test/functional/functional_topic_management_test.rb ADDED
@@ -0,0 +1,22 @@
+require 'test_helper'
+
+class FunctionalTopicManagementTest < Minitest::Test
+  def setup
+    zookeeper = ENV["ZOOKEEPER_PEERS"] || "127.0.0.1:2181"
+    @cluster = Kazoo.connect(zookeeper)
+  end
+
+  def test_create_and_delete_topic
+    topic = @cluster.create_topic('test.kazoo', partitions: 8, replication_factor: 1)
+
+    assert @cluster.topics.key?(topic.name)
+    assert topic.partitions.all? { |partition| @cluster.brokers.values.include?(partition.leader) }
+    assert_equal 8, topic.partitions.length
+
+    topic.destroy
+    @cluster.reset_metadata
+
+    refute topic.exists?
+    refute @cluster.topics.key?(topic.name)
+  end
+end
data/test/partition_test.rb CHANGED
@@ -37,4 +37,10 @@ class PartitionTest < Minitest::Test
     assert p1 != Kazoo::Partition.new(@cluster.topics['test.4'], 0)
     assert_equal p1.hash, p2.hash
   end
+
+  def test_validate
+    partition = Kazoo::Partition.new(@cluster.topics['test.1'], 1, replicas: [@cluster.brokers[1], @cluster.brokers[1]])
+    refute partition.valid?
+    assert_raises(Kazoo::ValidationError) { partition.validate }
+  end
 end
data/test/topic_test.rb CHANGED
@@ -50,4 +50,45 @@ class TopicTest < Minitest::Test
     assert t1 != Kazoo::Topic.new(@cluster, 'test.2')
     assert_equal t1.hash, t2.hash
   end
+
+  def test_validate
+    t = Kazoo::Topic.new(@cluster, "normal")
+    t.partitions = [t.partition(0, replicas: [@cluster.brokers[1]])]
+    assert t.valid?
+
+    t = Kazoo::Topic.new(@cluster, "invalid/character")
+    t.partitions = [t.partition(0, replicas: [@cluster.brokers[1]])]
+    refute t.valid?
+
+    t = Kazoo::Topic.new(@cluster, "..")
+    t.partitions = [t.partition(0, replicas: [@cluster.brokers[1]])]
+    refute t.valid?
+
+    t = Kazoo::Topic.new(@cluster, "l#{'o' * 253}ng")
+    t.partitions = [t.partition(0, replicas: [@cluster.brokers[1]])]
+    refute t.valid?
+
+    t = Kazoo::Topic.new(@cluster, "normal")
+    t.partitions = [t.partition(0, replicas: [])]
+    refute t.valid?
+  end
+
+  def test_sequentially_assign_partitions
+    topic = Kazoo::Topic.new(@cluster, 'test.new')
+
+    assert_raises(ArgumentError) { topic.send(:sequentially_assign_partitions, 4, 100) }
+
+    topic.send(:sequentially_assign_partitions, 4, 3)
+
+    assert_equal 4, topic.partitions.length
+    assert_equal 3, topic.replication_factor
+    assert topic.partitions.all? { |p| p.replicas.length == 3 }
+    assert topic.valid?
+  end
+
+  def test_partitions_as_json
+    assignment = @cluster.topics['test.1'].send(:partitions_as_json)
+    assert_equal 1, assignment.length
+    assert_equal [1,2], assignment[0]
+  end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: kazoo-ruby
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.2.0
 platform: ruby
 authors:
 - Willem van Bergen
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2015-08-13 00:00:00.000000000 Z
+date: 2015-08-28 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: thor
@@ -106,6 +106,7 @@ files:
 - .travis.yml
 - Gemfile
 - LICENSE.txt
+- Makefile
 - README.md
 - Rakefile
 - bin/kazoo
@@ -120,6 +121,8 @@ files:
 - lib/kazoo/version.rb
 - test/broker_test.rb
 - test/cluster_test.rb
+- test/functional/functional_consumergroup_test.rb
+- test/functional/functional_topic_management_test.rb
 - test/partition_test.rb
 - test/test_helper.rb
 - test/topic_test.rb
@@ -150,6 +153,8 @@ summary: Library to access and manipulate Kafka metadata in Zookeeper
 test_files:
 - test/broker_test.rb
 - test/cluster_test.rb
+- test/functional/functional_consumergroup_test.rb
+- test/functional/functional_topic_management_test.rb
 - test/partition_test.rb
 - test/test_helper.rb
 - test/topic_test.rb