ktl 1.0.0-java
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/LICENSE.txt +12 -0
- data/README.md +106 -0
- data/bin/ktl +8 -0
- data/lib/ext/kafka.rb +145 -0
- data/lib/ext/thor.rb +14 -0
- data/lib/ktl.rb +51 -0
- data/lib/ktl/cli.rb +11 -0
- data/lib/ktl/cluster.rb +108 -0
- data/lib/ktl/cluster_stats_task.rb +41 -0
- data/lib/ktl/command.rb +33 -0
- data/lib/ktl/decommission_plan.rb +68 -0
- data/lib/ktl/migration_plan.rb +27 -0
- data/lib/ktl/reassigner.rb +133 -0
- data/lib/ktl/reassignment_progress.rb +57 -0
- data/lib/ktl/reassignment_task.rb +46 -0
- data/lib/ktl/shell_formatter.rb +32 -0
- data/lib/ktl/shuffle_plan.rb +145 -0
- data/lib/ktl/topic.rb +123 -0
- data/lib/ktl/version.rb +5 -0
- data/lib/ktl/zookeeper_client.rb +111 -0
- metadata +101 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA1:
|
3
|
+
metadata.gz: 57f0bf86bb51e0401585eaec3993a4fb971d00c6
|
4
|
+
data.tar.gz: 7270677f6a1e5d7550dc35cdd51b351ac293e4bd
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: 75986093b1f59323ff894d9400b2921a7e66a3da5a60acc4b2639a24118011fa52a7e6143cde031b751c6a68f9cb865c0bd0a43ac80589ab960b75706b8f02d6
|
7
|
+
data.tar.gz: 0ed21803a5f5b99cd7809a63307625cb27f85a970adef1e39be894da140d41c675e85e8243a14e57ac108a90cfc10cba5c25cb80b10382548bdf899759b3b803
|
data/LICENSE.txt
ADDED
@@ -0,0 +1,12 @@
|
|
1
|
+
Copyright (c) 2015, Burt AB
|
2
|
+
All rights reserved.
|
3
|
+
|
4
|
+
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
5
|
+
|
6
|
+
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
7
|
+
|
8
|
+
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
9
|
+
|
10
|
+
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
11
|
+
|
12
|
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
data/README.md
ADDED
@@ -0,0 +1,106 @@
|
|
1
|
+
# ktl
|
2
|
+
|
3
|
+
`ktl`, short for "Kafka Control Tool", is a command-line tool that attempts to
|
4
|
+
make the management of Kafka clusters that host a lot of topics and partitions
|
5
|
+
easier.
|
6
|
+
|
7
|
+
## Motivation
|
8
|
+
|
9
|
+
The main motivation for creating `ktl` was to automate, as much as possible, the
|
10
|
+
process of moving partition leadership between brokers when adding, removing or
|
11
|
+
replacing brokers, as (unfortunately) the tools bundled with Kafka are rather
|
12
|
+
manual and quite cumbersome to use when one hosts a lot (> 1000) of topics /
|
13
|
+
partitions.
|
14
|
+
|
15
|
+
I highly recommend that you read the [official documentation](https://kafka.apache.org/documentation.html#operations)
|
16
|
+
regarding operating Kafka before reading further, as quite a lot of the
|
17
|
+
terminology is explained in the documentation and not necessarily explained in
|
18
|
+
any great depth in this README but merely mentioned.
|
19
|
+
|
20
|
+
## Usage
|
21
|
+
|
22
|
+
`ktl` supports a couple of different commands for common tasks such as
|
23
|
+
performing preferred replica elections, shuffling partition leadership, checking
|
24
|
+
consumer lag, creating topics, and so on.
|
25
|
+
Most of the commands support the use of regular expressions for filtering
|
26
|
+
which topics will be acted upon.
|
27
|
+
For example it's possible to perform preferred replica elections or reassign
|
28
|
+
partitions for a subset of topics.
|
29
|
+
|
30
|
+
```shell
|
31
|
+
$ ktl --help
|
32
|
+
Commands:
|
33
|
+
ktl cluster SUBCOMMAND ...ARGS # Commands for managing cluster(s)
|
34
|
+
ktl consumer SUBCOMMAND ...ARGS # Commands for managing consumers
|
35
|
+
ktl help [COMMAND] # Describe available commands or one specific command
|
36
|
+
ktl topic SUBCOMMAND ...ARGS # Commands for managing topics
|
37
|
+
```
|
38
|
+
|
39
|
+
### Partition reassignments
|
40
|
+
|
41
|
+
A "**partition reassignment**" (or just "reassignment" for short) is basically a
|
42
|
+
JSON document that describes which brokers that should be leader and/or replica
|
43
|
+
for certain partitions.
|
44
|
+
There are currently three subcommands of the `cluster` command that will generate
|
45
|
+
reassignments: `migrate-broker`, `shuffle` and `decommission-broker`.
|
46
|
+
|
47
|
+
When starting a new reassignment, `ktl` will write the reassignment JSON to a
|
48
|
+
`reassign` znode under a `/ktl` prefix in ZooKeeper, so that it's possible to
|
49
|
+
track the progress of a reassignment.
|
50
|
+
|
51
|
+
The `shuffle` subcommand can either perform a random reassignment of partitions,
|
52
|
+
or it can use [rendezvous hashing](http://en.wikipedia.org/wiki/Rendezvous_hashing),
|
53
|
+
which will minimize the number of partitions that have to move between replicas
|
54
|
+
when adding or removing brokers. There is also rack aware rendezvous hashing,
|
55
|
+
which will ensure that data is properly replicated across racks.
|
56
|
+
|
57
|
+
To start a random reassignment of the replica assignment of partitions matching
|
58
|
+
`^test.*`, but leaving the rest alone:
|
59
|
+
```shell
|
60
|
+
$ ktl cluster shuffle '^test.*' -z localhost:2181/test
|
61
|
+
```
|
62
|
+
To do the same thing but using rendezvous hashing:
|
63
|
+
```shell
|
64
|
+
$ ktl cluster shuffle '^test.*' -R -z localhost:2181/test
|
65
|
+
```
|
66
|
+
To do the same thing using rack aware rendezvous hashing:
|
67
|
+
```shell
|
68
|
+
$ ktl cluster shuffle '^test.*' -a -z localhost:2181/test
|
69
|
+
```
|
70
|
+
|
71
|
+
#### Dealing with ZooKeeper's znode limit
|
72
|
+
|
73
|
+
If the resulting reassignment JSON is greater than 1 MB (which is usually the
|
74
|
+
hard limit for a single znode), `ktl` will split the reassignment into smaller
|
75
|
+
slices and store the "overflow" in ZooKeeper under an `overflow` znode (in
|
76
|
+
properly-sized slices) so that it's available for further invocations.
|
77
|
+
|
78
|
+
The previously mentioned "reassignment" commands also take an optional `-l /
|
79
|
+
--limit` parameter to limit the number of partitions that will be included in
|
80
|
+
the reassignment, as it can be beneficial to perform a reassignment in steps
|
81
|
+
rather than all at once, as moving replicas for a lot of partitions will slow
|
82
|
+
down your producers and consumers.
|
83
|
+
The default is however to reassign as many partitions as possible.
|
84
|
+
|
85
|
+
Note that `ktl` will not wait for the reassignment to finish, but to proceed
|
86
|
+
with the next slice of the reassignment is just a matter of running the same
|
87
|
+
command again and it'll prompt you whether to continue where you left off
|
88
|
+
or generate a new reassignment.
|
89
|
+
|
90
|
+
### Managing topics
|
91
|
+
|
92
|
+
While the main motivation for `ktl` was to deal with partition reassignments,
|
93
|
+
there are also some commands for managing topics, most of which merely wraps the
|
94
|
+
`kafka-topics.sh` tool that is bundled with Kafka, but with a slightly different
|
95
|
+
interface.
|
96
|
+
|
97
|
+
The most notable difference is the `create` subcommand that has the ability to create
|
98
|
+
a consistent hashing plan and assign it to the new topic during creation.
|
99
|
+
|
100
|
+
```shell
|
101
|
+
$ ktl topic create 'some.topic' --partitions 3 --replication_factor 3 --rack_aware_allocation -z localhost:2181/test
|
102
|
+
```
|
103
|
+
|
104
|
+
## Copyright
|
105
|
+
|
106
|
+
© 2015 Burt AB, see LICENSE.txt (BSD 3-Clause).
|
data/bin/ktl
ADDED
data/lib/ext/kafka.rb
ADDED
@@ -0,0 +1,145 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
|
3
|
+
require 'kafka-jars'
|
4
|
+
|
5
|
+
# Bridges the org.apache.log4j Java package into Ruby and configures logging
# at load time: installs log4j's basic (console) configuration and raises the
# root logger threshold to ERROR, so Kafka's verbose internal logging does
# not drown out ktl's own output.
module Log4j
  include_package 'org.apache.log4j'
  java_import 'org.apache.log4j.Logger'

  # Side effects happen when this file is required, before any Kafka
  # class gets a chance to log.
  BasicConfigurator.configure
  org.apache.log4j.Logger.root_logger.set_level(Level::ERROR)
end
|
12
|
+
|
13
|
+
# Thin Ruby namespace around the I0Itec ZkClient Java library that Kafka's
# admin tooling uses to talk to ZooKeeper.
module ZkClient
  java_import 'org.I0Itec.zkclient.ZkClient'

  # Exposes org.I0Itec.zkclient.exception.* (e.g. ZkNodeExistsException)
  # as ZkClient::Exception::* for rescue clauses elsewhere in ktl.
  module Exception
    include_package 'org.I0Itec.zkclient.exception'
  end
end
|
20
|
+
|
21
|
+
# Ruby-side namespace for the Scala runtime classes that Kafka's Scala API
# accepts and returns, plus a few Ruby-friendly conveniences.
module Scala
  java_import 'scala.Console'
  java_import 'scala.Tuple2'
  java_import 'scala.Option'

  # Reopens the imported scala.Tuple2 proxy class to give its elements
  # Ruby-style accessors (Scala names them _1 and _2).
  class Tuple2
    alias_method :first, :_1
    alias_method :last, :_2

    # Returns both elements as a two-element Ruby Array.
    def elements
      [first, last]
    end
  end
  # Shorthand alias used throughout ktl.
  Tuple = Tuple2

  # scala.collection.* wrappers, split by mutability the same way the
  # Scala package tree is.
  module Collection
    include_package 'scala.collection'

    module Mutable
      include_package 'scala.collection.mutable'
    end

    module Immutable
      include_package 'scala.collection.immutable'
    end
  end
end
|
48
|
+
|
49
|
+
# Adapts a Scala collection (anything responding to #foreach) to Ruby's
# Enumerable protocol, so callers get #map, #select, #to_a, etc. for free.
class ScalaEnumerable
  include Enumerable

  # @param underlying an object exposing #foreach (e.g. a scala.collection.*)
  def initialize(underlying)
    @delegate = underlying
  end

  # Yields every element of the wrapped collection, satisfying the single
  # method Enumerable requires.
  def each(&block)
    @delegate.foreach(&block)
  end
end
|
60
|
+
|
61
|
+
module Kafka
|
62
|
+
  # Wrappers around kafka.utils.* helpers.
  module Utils
    include_package 'kafka.utils'

    # Builds a ZkClient configured with Kafka's ZKStringSerializer so data
    # written by ktl is readable by Kafka's own tools.
    # timeout (ms) is used for both the session and the connection timeout.
    def self.new_zk_client(zk_connect, timeout=30_000)
      ::ZkClient::ZkClient.new(zk_connect, timeout, timeout, ZKStringSerializer)
    end

    # Looks up the partitions of a single topic; wraps the topic in a Scala
    # immutable List because get_partitions_for_topics takes a collection.
    # Returns the Scala value unwrapped twice: once from the result map,
    # once from the Scala Option.
    def self.get_partitions_for_topic(zk, topic)
      topics = Scala::Collection::Immutable::List.from_array([topic].to_java)
      partitions = zk.get_partitions_for_topics(topics)
      partitions.get(topic).get
    end

    # Marks a topic for deletion by creating the admin delete-topic znode;
    # the broker controller performs the actual deletion asynchronously.
    # DefaultAcls(false) requests the non-secure (open) ACL set.
    def self.delete_topic(zk, topic)
      acl = Kafka::Utils::ZkUtils::DefaultAcls(false)
      zk.create_persistent_path(ZkUtils.get_delete_topic_path(topic), '', acl)
    end
  end
|
80
|
+
|
81
|
+
  # kafka.api.* request/response classes.
  module Api
    include_package 'kafka.api'
  end

  # kafka.cluster.* classes (Broker et al).
  module Cluster
    include_package 'kafka.cluster'
  end
|
88
|
+
|
89
|
+
  # Wrappers around kafka.admin.* commands and utilities.
  module Admin
    include_package 'kafka.admin'

    TopicCommandOptions = TopicCommand::TopicCommandOptions

    # Converts a Ruby options hash into the argv-style flag list that
    # kafka.admin.TopicCommand expects, e.g.
    #   {replication_factor: 3, config: {'retention.ms' => '100'}}
    #   => ['--replication-factor', 3, '--config', 'retention.ms=100']
    # Hash values become repeated key=value flags, Array values become
    # repeated flags, and nil scalar values are dropped (#compact).
    def self.to_topic_options(hash)
      options = hash.flat_map do |key, value|
        kafka_key = '--' + key.to_s.gsub('_', '-')
        if value.is_a?(Hash)
          value.map { |k, v| [kafka_key, [k, v].join('=')] }
        elsif value.is_a?(Array)
          value.map { |v| [kafka_key, v] }
        else
          [kafka_key, value].compact
        end
      end
      TopicCommandOptions.new(options.flatten)
    end

    # Writes the preferred-replica election data for the given partitions to
    # ZooKeeper; the controller picks it up and performs the election.
    def self.preferred_replica(zk_client, topics_partitions)
      PreferredReplicaLeaderElectionCommand.write_preferred_replica_election_data(zk_client, topics_partitions)
    end

    # Delegates to AdminUtils.assignReplicasToBrokers, coercing the numeric
    # arguments to Java ints; returns a Ruby Enumerable over the Scala result.
    def self.assign_replicas_to_brokers(brokers, partitions, repl_factor, index=-1, partition=-1)
      assignment = AdminUtils.assign_replicas_to_brokers(brokers, partitions.to_java(:int), repl_factor.to_java(:int), index.to_java(:int), partition.to_java(:int))
      ScalaEnumerable.new(assignment)
    end

    # Fetches broker metadata (id and, when available, rack) for the given
    # broker ids. The RackAwareMode Scala singletons (Enforced/Safe) expose
    # no Java-visible constructor, so their MODULE$ field is read via
    # reflection through the JRuby class loader.
    # force_rack: with Enforced mode Kafka fails if any broker lacks rack
    # information; Safe mode tolerates it.
    def self.get_broker_metadatas(zk_client, brokers, force_rack = true)
      rack_aware = if force_rack
        JRuby.runtime.jruby_class_loader.load_class('kafka.admin.RackAwareMode$Enforced$').get_declared_field('MODULE$').get(nil)
      else
        JRuby.runtime.jruby_class_loader.load_class('kafka.admin.RackAwareMode$Safe$').get_declared_field('MODULE$').get(nil)
      end
      broker_metadatas = Kafka::Admin::AdminUtils.get_broker_metadatas(
        zk_client.utils,
        rack_aware,
        Scala::Option[Scala::Collection::JavaConversions.as_scala_iterable(brokers).to_list]
      )
      Scala::Collection::JavaConversions.seq_as_java_list(broker_metadatas).to_a
    end
  end
|
131
|
+
|
132
|
+
  # org.apache.kafka.common.protocol.SecurityProtocol (PLAINTEXT, SSL, ...).
  module Protocol
    java_import 'org.apache.kafka.common.protocol.SecurityProtocol'
  end

  # kafka.common.* classes (TopicAndPartition et al).
  module Common
    include_package 'kafka.common'
  end

  # Convenience alias at the Kafka namespace root, used throughout ktl.
  TopicAndPartition = Common::TopicAndPartition

  # kafka.tools.* console tools.
  module Tools
    include_package 'kafka.tools'
  end
|
145
|
+
end
|
data/lib/ext/thor.rb
ADDED
data/lib/ktl.rb
ADDED
@@ -0,0 +1,51 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
|
3
|
+
# Choose the slf4j backend before any library loads slf4j; respect a value
# already set by an embedding application, defaulting to log4j12 otherwise.
$SLF4J_BACKEND = 'log4j12' unless defined?($SLF4J_BACKEND)
|
6
|
+
|
7
|
+
require 'thor'
|
8
|
+
require 'json'
|
9
|
+
require 'logger'
|
10
|
+
require 'ext/kafka'
|
11
|
+
require 'ext/thor'
|
12
|
+
|
13
|
+
module Ktl
|
14
|
+
  # Base error class for all ktl-specific failures.
  KtlError = Class.new(StandardError)
  # Presumably raised when too few brokers remain to satisfy a reassignment
  # plan — confirm against the plan classes that raise it.
  InsufficientBrokersRemainingError = Class.new(KtlError)

  # Scala CanBuildFrom instance for immutable Scala Lists, needed when
  # transforming Scala collections from the Ruby side.
  CanBuildFrom = Scala::Collection::Immutable::List.can_build_from

  # java.util.concurrent.* classes under a Ruby namespace.
  module JavaConcurrent
    include_package 'java.util.concurrent'
  end
|
22
|
+
|
23
|
+
class NullLogger
|
24
|
+
def close(*); end
|
25
|
+
def debug(*); end
|
26
|
+
def debug?; false end
|
27
|
+
def error(*); end
|
28
|
+
def error?; false end
|
29
|
+
def fatal(*); end
|
30
|
+
def fatal?; false end
|
31
|
+
def info(*); end
|
32
|
+
def info?; false end
|
33
|
+
def unknown(*); end
|
34
|
+
def warn(*); end
|
35
|
+
def warn?; false end
|
36
|
+
end
|
37
|
+
end
|
38
|
+
|
39
|
+
require 'ktl/command'
|
40
|
+
require 'ktl/cluster'
|
41
|
+
require 'ktl/cluster_stats_task'
|
42
|
+
require 'ktl/decommission_plan'
|
43
|
+
require 'ktl/migration_plan'
|
44
|
+
require 'ktl/reassigner'
|
45
|
+
require 'ktl/reassignment_progress'
|
46
|
+
require 'ktl/reassignment_task'
|
47
|
+
require 'ktl/shuffle_plan'
|
48
|
+
require 'ktl/shell_formatter'
|
49
|
+
require 'ktl/topic'
|
50
|
+
require 'ktl/cli'
|
51
|
+
require 'ktl/zookeeper_client'
|
data/lib/ktl/cli.rb
ADDED
@@ -0,0 +1,11 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
|
3
|
+
module Ktl
|
4
|
+
  # Top-level Thor CLI: dispatches to the Cluster and Topic subcommand
  # groups defined in ktl/cluster.rb and ktl/topic.rb.
  class Cli < Thor
    desc 'cluster SUBCOMMAND ...ARGS', 'Commands for managing a cluster'
    subcommand 'cluster', Cluster

    desc 'topic SUBCOMMAND ...ARGS', 'Commands for managing topics'
    subcommand 'topic', Topic
  end
|
11
|
+
end
|
data/lib/ktl/cluster.rb
ADDED
@@ -0,0 +1,108 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
|
3
|
+
module Ktl
|
4
|
+
class Cluster < Command
|
5
|
+
    desc 'stats', 'Show statistics about cluster'
    option :zookeeper, aliases: %w[-z], required: true, desc: 'ZooKeeper URI'
    # Prints statistics for the cluster reachable via -z by delegating to
    # ClusterStatsTask (see cluster_stats_task.rb).
    def stats
      with_zk_client do |zk_client|
        task = ClusterStatsTask.new(zk_client, shell)
        task.execute
      end
    end
|
13
|
+
|
14
|
+
    desc 'preferred-replica [REGEXP]', 'Perform preferred replica leader elections'
    option :zookeeper, aliases: %w[-z], required: true, desc: 'ZooKeeper URI'
    # Triggers a preferred-replica leader election for every partition whose
    # topic name matches REGEXP (all topics by default).
    def preferred_replica(regexp='.*')
      with_zk_client do |zk_client|
        regexp = Regexp.new(regexp)
        # all_partitions yields Kafka TopicAndPartition objects; filter and
        # to_set keep the collection in the form preferred_replica expects.
        partitions = zk_client.all_partitions
        partitions = partitions.filter { |tp| !!tp.topic.match(regexp) }.to_set
        if partitions.size > 0
          logger.info 'performing preferred replica leader election on %d partitions' % partitions.size
          Kafka::Admin.preferred_replica(zk_client.raw_client, partitions)
        else
          logger.info 'no topics matched %s' % regexp.inspect
        end
      end
    end
|
29
|
+
|
30
|
+
    desc 'migrate-broker', 'Migrate partitions from one broker to another'
    option :from, aliases: %w[-f], type: :numeric, required: true, desc: 'Broker ID of old leader'
    option :to, aliases: %w[-t], type: :numeric, required: true, desc: 'Broker ID of new leader'
    option :zookeeper, aliases: %w[-z], required: true, desc: 'ZooKeeper URI'
    option :limit, aliases: %w[-l], type: :numeric, desc: 'Max number of partitions to reassign at a time'
    # Builds a MigrationPlan moving replicas from broker --from to broker
    # --to and hands it to a Reassigner, optionally capped by --limit.
    def migrate_broker
      with_zk_client do |zk_client|
        old_leader, new_leader = options.values_at(:from, :to)
        plan = MigrationPlan.new(zk_client, old_leader, new_leader)
        reassigner = Reassigner.new(zk_client, limit: options.limit, logger: logger)
        execute_reassignment(reassigner, plan)
      end
    end
|
43
|
+
|
44
|
+
    desc 'shuffle [REGEXP]', 'Shuffle leaders and replicas for partitions'
    option :brokers, type: :array, desc: 'Broker IDs'
    option :blacklist, type: :array, desc: 'Broker IDs to exclude'
    option :rendezvous, aliases: %w[-R], type: :boolean, desc: 'Whether to use Rendezvous-hashing based shuffle'
    option :rack_aware, aliases: %w[-a], type: :boolean, desc: 'Whether to use Rack aware + Rendezvous-hashing based shuffle'
    option :replication_factor, aliases: %w[-r], type: :numeric, desc: 'Replication factor to use'
    option :limit, aliases: %w[-l], type: :numeric, desc: 'Max number of partitions to reassign at a time'
    option :zookeeper, aliases: %w[-z], required: true, desc: 'ZooKeeper URI'
    option :verbose, aliases: %w[-v], desc: 'Verbose output'
    option :dryrun, aliases: %w[-d], desc: 'Output reassignment plan without executing'
    # Generates and (unless --dryrun) executes a replica reassignment plan
    # for all topics matching REGEXP. Strategy precedence: --rack-aware
    # wins over --rendezvous, which wins over plain random shuffling.
    def shuffle(regexp='.*')
      with_zk_client do |zk_client|
        plan_factory = if options.rack_aware
          RackAwareShufflePlan
        elsif options.rendezvous
          RendezvousShufflePlan
        else
          ShufflePlan
        end
        plan = plan_factory.new(zk_client, {
          filter: Regexp.new(regexp),
          brokers: options.brokers,
          blacklist: options.blacklist,
          replication_factor: options.replication_factor,
          logger: logger,
          # with --dryrun the plan is logged instead of executed
          log_plan: options.dryrun,
        })
        reassigner = Reassigner.new(zk_client, limit: options.limit, logger: logger, log_assignments: options.verbose)
        execute_reassignment(reassigner, plan, options.dryrun)
      end
    end
|
75
|
+
|
76
|
+
desc 'decommission-broker BROKER_ID', 'Decommission a broker'
|
77
|
+
option :limit, aliases: %w[-l], type: :numeric, desc: 'Max number of partitions to reassign at a time'
|
78
|
+
option :rendezvous, aliases: %w[-R], type: :boolean, desc: 'Whether to use Rendezvous-hashing'
|
79
|
+
option :zookeeper, aliases: %w[-z], required: true, desc: 'ZooKeeper URI'
|
80
|
+
def decommission_broker(broker_id)
|
81
|
+
with_zk_client do |zk_client|
|
82
|
+
if options.rendezvous?
|
83
|
+
plan = RendezvousShufflePlan.new(zk_client, blacklist: [broker_id.to_i])
|
84
|
+
else
|
85
|
+
plan = DecommissionPlan.new(zk_client, broker_id.to_i)
|
86
|
+
end
|
87
|
+
reassigner = Reassigner.new(zk_client, limit: options.limit, logger: logger)
|
88
|
+
execute_reassignment(reassigner, plan)
|
89
|
+
end
|
90
|
+
end
|
91
|
+
|
92
|
+
    desc 'reassignment-progress', 'Show progress of latest reassignment command'
    option :verbose, aliases: %w[-v], desc: 'Verbose output'
    option :zookeeper, aliases: %w[-z], required: true, desc: 'ZooKeeper URI'
    # Displays how far the reassignment tracked under ktl's znodes has come
    # (see reassignment_progress.rb).
    def reassignment_progress
      with_zk_client do |zk_client|
        progress = ReassignmentProgress.new(zk_client, options.merge(logger: logger))
        progress.display(shell)
      end
    end
|
101
|
+
|
102
|
+
    private

    # Shared tail of every reassignment command; when dryrun is truthy the
    # plan is only shown, not written to ZooKeeper.
    def execute_reassignment(reassigner, plan, dryrun = false)
      ReassignmentTask.new(reassigner, plan, shell, logger: logger).execute(dryrun)
    end
|
107
|
+
end
|
108
|
+
end
|