ktl 1.0.0-java → 1.1.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 57f0bf86bb51e0401585eaec3993a4fb971d00c6
-  data.tar.gz: 7270677f6a1e5d7550dc35cdd51b351ac293e4bd
+  metadata.gz: 350c93c4fed5af08036c7e9e867df6f7c54cf26a
+  data.tar.gz: b3b963caa62735ca426a2d46919fbd09a350d67b
 SHA512:
-  metadata.gz: 75986093b1f59323ff894d9400b2921a7e66a3da5a60acc4b2639a24118011fa52a7e6143cde031b751c6a68f9cb865c0bd0a43ac80589ab960b75706b8f02d6
-  data.tar.gz: 0ed21803a5f5b99cd7809a63307625cb27f85a970adef1e39be894da140d41c675e85e8243a14e57ac108a90cfc10cba5c25cb80b10382548bdf899759b3b803
+  metadata.gz: b4b1da24ea11e84cca3fbbf678fecd01cbfb98d955ced80d44927935df43702a29ebc477e880dc9ffa0369c46f3235e6dd3ffa2e7cb51a903a8a89124b8b9ae6
+  data.tar.gz: 7d24168d5cde8623362a4e9873eb1b33e118c2e15c07975e7c6abbebc23a52911ef8878152cae2052d30b2c8ba36c2603afec39b52c32f0997a620f4f8511998
@@ -12,6 +12,9 @@ end
 
 module ZkClient
   java_import 'org.I0Itec.zkclient.ZkClient'
+  java_import 'org.I0Itec.zkclient.IZkStateListener'
+  java_import 'org.I0Itec.zkclient.IZkDataListener'
+  java_import 'org.I0Itec.zkclient.IZkChildListener'
 
   module Exception
     include_package 'org.I0Itec.zkclient.exception'
@@ -127,6 +130,23 @@ module Kafka
       )
       Scala::Collection::JavaConversions.seq_as_java_list(broker_metadatas).to_a
     end
+
+    def self.get_broker_rack(zk_client, broker_id)
+      broker_metadata = Kafka::Admin.get_broker_metadatas(zk_client, [broker_id]).first
+      if broker_metadata
+        rack = broker_metadata.rack
+        unless rack.defined?
+          raise "Broker #{broker_metadata.id} is missing rack information, unable to create rack aware shuffle plan."
+        end
+        rack.get
+      end
+    rescue Java::KafkaAdmin::AdminOperationException => e
+      if e.message.include? '--disable-rack-aware'
+        raise "Not all brokers have rack information. Unable to create rack aware shuffle plan."
+      else
+        raise e
+      end
+    end
   end
 
   module Protocol
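
Note (outside the diff): the new Kafka::Admin.get_broker_rack helper centralizes the rack lookup that RackAwareShufflePlan previously performed inline (see the shuffle_plan change further down). A minimal sketch of how it behaves, assuming zk_client is the same ZooKeeper client handle that Kafka::Admin.get_broker_metadatas already accepts:

    # Hypothetical sketch: look up the rack of broker 0.
    # Returns the rack string, returns nil if the broker does not exist, and raises
    # if the broker (or, per the rescue clause, only some brokers) lacks rack information.
    rack = Kafka::Admin.get_broker_rack(zk_client, 0)
    puts "broker 0 is in rack #{rack}" if rack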
data/lib/ktl.rb CHANGED
@@ -42,6 +42,7 @@ require 'ktl/cluster_stats_task'
 require 'ktl/decommission_plan'
 require 'ktl/migration_plan'
 require 'ktl/reassigner'
+require 'ktl/continous_reassigner'
 require 'ktl/reassignment_progress'
 require 'ktl/reassignment_task'
 require 'ktl/shuffle_plan'
@@ -28,16 +28,21 @@ module Ktl
     end
 
     desc 'migrate-broker', 'Migrate partitions from one broker to another'
-    option :from, aliases: %w[-f], type: :numeric, required: true, desc: 'Broker ID of old leader'
-    option :to, aliases: %w[-t], type: :numeric, required: true, desc: 'Broker ID of new leader'
+    option :from, aliases: %w[-f], type: :array, required: true, desc: 'Broker IDs to migrate away from'
+    option :to, aliases: %w[-t], type: :array, required: true, desc: 'Broker IDs to migrate to'
     option :zookeeper, aliases: %w[-z], required: true, desc: 'ZooKeeper URI'
     option :limit, aliases: %w[-l], type: :numeric, desc: 'Max number of partitions to reassign at a time'
+    option :verbose, aliases: %w[-v], desc: 'Verbose output'
+    option :dryrun, aliases: %w[-d], desc: 'Output reassignment plan without executing'
+    option :wait, aliases: %w[-w], type: :boolean, desc: 'Wait for all reassignments to finish'
+    option :delay, type: :numeric, desc: 'Delay in seconds between continous reassignment iterations, default 5s'
+    option :multi_step_migration, type: :boolean, default: true, desc: 'Perform migration in multiple steps, mirroring partitions to new brokers before removing the old'
     def migrate_broker
       with_zk_client do |zk_client|
-        old_leader, new_leader = options.values_at(:from, :to)
-        plan = MigrationPlan.new(zk_client, old_leader, new_leader)
-        reassigner = Reassigner.new(zk_client, limit: options.limit, logger: logger)
-        execute_reassignment(reassigner, plan)
+        old_brokers, new_brokers = options.values_at(:from, :to)
+        plan = MigrationPlan.new(zk_client, old_brokers.map(&:to_i), new_brokers.map(&:to_i), log_plan: options.verbose, logger: logger)
+        reassigner = create_reassigner(zk_client, options)
+        execute_reassignment(reassigner, plan, options)
       end
     end
 
@@ -51,6 +56,9 @@ module Ktl
     option :zookeeper, aliases: %w[-z], required: true, desc: 'ZooKeeper URI'
     option :verbose, aliases: %w[-v], desc: 'Verbose output'
     option :dryrun, aliases: %w[-d], desc: 'Output reassignment plan without executing'
+    option :wait, aliases: %w[-w], type: :boolean, desc: 'Wait for all reassignments to finish'
+    option :delay, type: :numeric, desc: 'Delay in seconds between continous reassignment iterations, default 5s'
+    option :multi_step_migration, type: :boolean, default: true, desc: 'Perform migration in multiple steps, mirroring partitions to new brokers before removing the old'
     def shuffle(regexp='.*')
       with_zk_client do |zk_client|
         plan_factory = if options.rack_aware
@@ -68,8 +76,8 @@ module Ktl
           logger: logger,
           log_plan: options.dryrun,
         })
-        reassigner = Reassigner.new(zk_client, limit: options.limit, logger: logger, log_assignments: options.verbose)
-        execute_reassignment(reassigner, plan, options.dryrun)
+        reassigner = create_reassigner(zk_client, options)
+        execute_reassignment(reassigner, plan, options)
       end
     end
 
@@ -77,6 +85,10 @@ module Ktl
     option :limit, aliases: %w[-l], type: :numeric, desc: 'Max number of partitions to reassign at a time'
     option :rendezvous, aliases: %w[-R], type: :boolean, desc: 'Whether to use Rendezvous-hashing'
     option :zookeeper, aliases: %w[-z], required: true, desc: 'ZooKeeper URI'
+    option :verbose, aliases: %w[-v], desc: 'Verbose output'
+    option :dryrun, aliases: %w[-d], desc: 'Output reassignment plan without executing'
+    option :wait, aliases: %w[-w], type: :boolean, desc: 'Wait for all reassignments to finish'
+    option :delay, type: :numeric, desc: 'Delay in seconds between continous reassignment iterations, default 5s'
     def decommission_broker(broker_id)
       with_zk_client do |zk_client|
         if options.rendezvous?
@@ -84,8 +96,8 @@ module Ktl
         else
           plan = DecommissionPlan.new(zk_client, broker_id.to_i)
         end
-        reassigner = Reassigner.new(zk_client, limit: options.limit, logger: logger)
-        execute_reassignment(reassigner, plan)
+        reassigner = create_reassigner(zk_client, options)
+        execute_reassignment(reassigner, plan, options)
       end
     end
 
@@ -101,8 +113,16 @@ module Ktl
 
     private
 
-    def execute_reassignment(reassigner, plan, dryrun = false)
-      ReassignmentTask.new(reassigner, plan, shell, logger: logger).execute(dryrun)
+    def execute_reassignment(reassigner, plan, options)
+      ReassignmentTask.new(reassigner, plan, shell, logger: logger).execute(options.dryrun)
+    end
+
+    def create_reassigner(zk_client, options)
+      if options.wait?
+        ContinousReassigner.new(zk_client, limit: options.limit, logger: logger, log_assignments: options.verbose, delay: options.delay, shell: shell, multi_step_migration: options.multi_step_migration)
+      else
+        Reassigner.new(zk_client, limit: options.limit, logger: logger, log_assignments: options.verbose, multi_step_migration: options.multi_step_migration)
+      end
     end
   end
 end
@@ -19,7 +19,7 @@ module Ktl
       brokers.foreach do |broker|
         leader_for = ownership[broker.id]
         share = leader_for.fdiv(partitions.size.to_f) * 100
-        @shell.say ' - %d leader for %d partitions (%.2f %%)' % [broker.id, leader_for, share]
+        @shell.say ' - %s leader for %d partitions (%.2f %%)' % [broker.to_s, leader_for, share]
       end
     end
 
@@ -0,0 +1,53 @@
+# encoding: utf-8
+
+module Ktl
+  class ContinousReassigner < Reassigner
+    include ZkClient::IZkDataListener
+
+    def initialize(zk_client, options={})
+      super(zk_client, options)
+      @latch = JavaConcurrent::CountDownLatch.new(1)
+      @sleeper = options[:sleeper] || java.lang.Thread
+      @delay = options[:delay] || 5
+      @shell = options[:shell]
+    end
+
+    def execute(reassignment)
+      Signal.trap('SIGINT', proc { @logger.info 'Exiting due to Ctrl-C'; @latch.count_down })
+      @zk_client.watch_data(zk_utils.class.reassign_partitions_path, self)
+      if reassignment_in_progress?
+        @logger.info 'reassignment already in progress, watching for changes...'
+        progress = ReassignmentProgress.new(@zk_client, logger: @logger, verbose: true)
+        progress.display(@shell)
+      else
+        reassign(reassignment)
+      end
+      @latch.await
+    end
+
+    def handle_data_change(path, data)
+      parsed_data = JSON.parse(data)
+      if (partitions = parsed_data['partitions'])
+        partitions = partitions.map { |r| r.values_at('topic', 'partition').join(':') }
+        @logger.debug sprintf('%d partitions left to reassign (%p)', partitions.size, partitions.size <= 5 ? partitions : '...')
+      else
+        @logger.info sprintf('Data without `partitions` key: %p', parsed_data)
+      end
+    rescue => e
+      @logger.error sprintf('Bad data: %p', data)
+    end
+
+    def handle_data_deleted(path)
+      reassignment = load_overflow
+      if reassignment.empty?
+        @zk_client.unsubscribe_data(zk_utils.class.reassign_partitions_path, self)
+        delete_previous_state
+        @latch.count_down
+      else
+        @logger.info sprintf('Waiting %ds before next assignment', @delay)
+        @sleeper.sleep(@delay * 1000)
+        reassign(reassignment)
+      end
+    end
+  end
+end
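
Note (outside the diff): ContinousReassigner subscribes itself as a ZooKeeper data listener on the reassignment znode, re-submits the next batch from the overflow state whenever that znode is deleted, and blocks on a CountDownLatch until nothing remains. A usage sketch, assuming zk_client is a connected Ktl::ZookeeperClient and reassignment is a Scala map of topic-partitions to replica lists such as MigrationPlan#generate returns; the option values are illustrative, not gem defaults (the built-in delay default is 5 seconds):

    require 'logger'
    require 'thor'

    # Hypothetical sketch: run a reassignment and block until it has fully drained.
    reassigner = Ktl::ContinousReassigner.new(
      zk_client,
      limit: 10,                      # at most 10 partitions in flight per iteration
      delay: 10,                      # seconds to sleep between iterations
      shell: Thor::Shell::Basic.new,  # used to display progress if a reassignment is already running
      logger: Logger.new($stderr)
    )
    reassigner.execute(reassignment)  # returns once the znode is gone and the overflow is empty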
@@ -2,10 +2,22 @@
 
 module Ktl
   class MigrationPlan
-    def initialize(zk_client, old_leader, new_leader)
+    def initialize(zk_client, from_brokers, to_brokers, options = {})
       @zk_client = zk_client
-      @old_leader = old_leader.to_java
-      @new_leader = new_leader.to_java
+      @from_brokers = from_brokers
+      @to_brokers = to_brokers
+      if @from_brokers.length != @to_brokers.length
+        raise ArgumentError, "Both brokers lists must be of equal length. From: #{@from_brokers}, To: #{@to_brokers}"
+      elsif !(@from_brokers & @to_brokers).empty?
+        raise ArgumentError, "Broker lists must be mutually exclusive. From: #{@from_brokers}, To: #{@to_brokers}"
+      end
+      from_racks = from_brokers.map {|broker_id| Kafka::Admin.get_broker_rack(zk_client, broker_id)}
+      to_racks = to_brokers.map {|broker_id| Kafka::Admin.get_broker_rack(zk_client, broker_id)}
+      if from_racks != to_racks && from_racks.compact.any?
+        raise ArgumentError, "Both broker lists must have the same rack setup. From: #{from_racks}, To: #{to_racks}"
+      end
+      @logger = options[:logger] || NullLogger.new
+      @log_plan = !!options[:log_plan]
     end
 
     def generate
@@ -15,9 +27,16 @@ module Ktl
       assignments.each do |item|
         topic_partition = item.first
         replicas = item.last
-        if replicas.contains?(@old_leader)
-          index = replicas.index_of(@old_leader)
-          new_replicas = replicas.updated(index, @new_leader, CanBuildFrom)
+        new_replicas = replicas
+        @from_brokers.each_with_index do |from_broker, index|
+          to_broker = @to_brokers[index]
+          if new_replicas.contains?(from_broker)
+            replacement_index = new_replicas.index_of(from_broker)
+            new_replicas = new_replicas.updated(replacement_index, to_broker, CanBuildFrom)
+          end
+        end
+        if replicas != new_replicas
+          @logger.debug "Moving #{topic_partition.topic},#{topic_partition.partition} from #{replicas} to #{new_replicas}" if @log_plan
           plan += Scala::Tuple.new(topic_partition, new_replicas)
         end
       end
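
Note (outside the diff): MigrationPlan now takes two broker lists of equal length and replaces each occurrence of a source broker with the broker at the same position in the target list, rejecting overlapping lists and mismatched rack layouts up front. A usage sketch, assuming zk_client is a connected Ktl::ZookeeperClient:

    require 'logger'

    # Hypothetical sketch: move everything hosted on brokers 0 and 1 onto brokers 3 and 4.
    # Raises ArgumentError if the lists differ in length, share a broker, or
    # (when racks are defined) do not have the same rack setup.
    plan = Ktl::MigrationPlan.new(zk_client, [0, 1], [3, 4], log_plan: true, logger: Logger.new($stderr))
    reassignment = plan.generate  # Scala map of topic-partition => new replica list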
@@ -2,6 +2,9 @@
 
 module Ktl
   class Reassigner
+
+    attr_reader :limit
+
     def initialize(zk_client, options={})
       @zk_client = zk_client
       @limit = options[:limit]
@@ -9,6 +12,7 @@ module Ktl
       @state_path = '/ktl/reassign'
       @logger = options[:logger] || NullLogger.new
       @log_assignments = !!options[:log_assignments]
+      @multi_step_migration = options[:multi_step_migration]
     end
 
     def reassignment_in_progress?
@@ -31,29 +35,72 @@ module Ktl
         data = parse_reassignment_json(overflow_json)
         overflow = overflow.send('++', data)
       end
-      delete_previous_overflow
       overflow
+    rescue ZkClient::Exception::ZkNoNodeException
+      Scala::Collection::Map.empty
     end
 
     def execute(reassignment)
+      reassign(reassignment)
+    end
+
+    private
+
+    JSON_MAX_SIZE = 1024**2
+
+    def reassign(reassignment)
+      if (limit)
+        @logger.info 'reassigning %d of %d partitions' % [limit, reassignment.size]
+      else
+        @logger.info 'reassigning %d partitions' % reassignment.size
+      end
+
       reassignments = split(reassignment, @limit)
-      actual_reassignment = reassignments.shift
-      if @log_assignments
-        Scala::Collection::JavaConversions.as_java_iterable(actual_reassignment).each do |pr|
-          topic_and_partition, replicas = pr.elements
+      reassignment_candidates = reassignments.shift
+      actual_reassignment = Scala::Collection::Map.empty
+      next_step_assignments = Scala::Collection::Map.empty
+      Scala::Collection::JavaConversions.as_java_iterable(reassignment_candidates).each do |pr|
+        topic_and_partition, replicas = pr.elements
+        if @multi_step_migration && step1_replicas = is_two_step_operation(topic_and_partition, replicas)
+          if step1_replicas.uniq != step1_replicas
+            raise "Multiple replicas on the same broker, this should not happen... #{step1_replicas}"
+          end
+          step1_replicas = Scala::Collection::JavaConversions.as_scala_iterable(step1_replicas)
+          next_step_assignments += pr
+          actual_reassignment += Scala::Tuple.new(topic_and_partition, step1_replicas)
+          brokers = Scala::Collection::JavaConversions.as_java_iterable(step1_replicas).to_a
+          eventual_brokers = Scala::Collection::JavaConversions.as_java_iterable(replicas).to_a
+          @logger.debug "Mirroring #{topic_and_partition.topic},#{topic_and_partition.partition} to #{brokers.join(',')} for eventual transition to #{eventual_brokers.join(',')}" if @log_assignments
+        else
+          actual_reassignment += pr
          brokers = Scala::Collection::JavaConversions.as_java_iterable(replicas).to_a
-          @logger.info "Assigning #{topic_and_partition.topic},#{topic_and_partition.partition} to #{brokers.join(',')}"
+          @logger.debug "Assigning #{topic_and_partition.topic},#{topic_and_partition.partition} to #{brokers.join(',')}" if @log_assignments
         end
       end
       json = reassignment_json(actual_reassignment)
       @zk_client.reassign_partitions(json)
-      manage_overflow(reassignments)
+      manage_overflow(split(next_step_assignments, nil) + reassignments)
       manage_progress_state(actual_reassignment)
     end
 
-    private
+    def is_two_step_operation(topic_and_partition, final_replicas)
+      replicas = Scala::Collection::JavaConversions.as_java_iterable(final_replicas).to_a.uniq
+      topic_list = Scala::Collection::JavaConversions.as_scala_iterable([topic_and_partition.topic])
+      assignments = ScalaEnumerable.new(@zk_client.replica_assignment_for_topics(topic_list))
+      assignments.each do |item|
+        item_topic_partition = item.first
+        if item_topic_partition.partition == topic_and_partition.partition
+          item_replicas = Scala::Collection::JavaConversions.as_java_iterable(item.last).to_a.uniq
+          diff_replicas = replicas - item_replicas
+          unless diff_replicas.empty?
+            transition_replicas = item_replicas + diff_replicas
+            return transition_replicas
+          end
+        end
+      end
 
-    JSON_MAX_SIZE = 1024**2
+      false
+    end
 
     def manage_progress_state(reassignment)
       delete_previous_state
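
Note (outside the diff): with multi_step_migration enabled, a partition whose target replica set includes brokers it is not yet on is first expanded to the union of its current and target replicas (the "mirroring" step), and the final shrink to the target set is parked in the overflow znodes for a later iteration. A plain-Ruby illustration of the set arithmetic performed by is_two_step_operation, using hypothetical broker IDs in place of the Scala sequences above:

    current_replicas = [1, 2]  # where the partition lives today
    target_replicas  = [3, 2]  # where the plan wants it to end up
    step_one = current_replicas + (target_replicas - current_replicas)
    # => [1, 2, 3]  -- written to ZooKeeper first, so broker 3 can catch up while broker 1 still serves
    # The final [3, 2] assignment is queued via manage_overflow and applied on a later pass.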
@@ -73,6 +120,9 @@ module Ktl
         @zk_client.delete_znode(overflow_path(index))
       end
     rescue ZkClient::Exception::ZkNoNodeException
+      # no-op
+    rescue => e
+      puts e.backtrace.join($/)
     end
 
     def manage_overflow(reassignments)
@@ -11,7 +11,12 @@ module Ktl
 
     def execute(dryrun = false)
       if @reassigner.reassignment_in_progress?
-        @logger.warn 'reassignment already in progress, exiting'
+        if @reassigner.is_a?(ContinousReassigner)
+          reassignment = @reassigner.load_overflow
+          @reassigner.execute(reassignment)
+        else
+          @logger.warn 'reassignment already in progress, exiting'
+        end
       else
         if use_overflow?
           @logger.info 'loading overflow data'
@@ -21,7 +26,6 @@ module Ktl
           reassignment = @plan.generate
         end
         if reassignment.size > 0
-          @logger.info 'reassigning %d partitions' % reassignment.size
           if dryrun
             @logger.info 'dryrun detected, skipping reassignment'
           else
@@ -43,4 +47,3 @@
     end
   end
 end
-
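
Note (outside the diff): the ReassignmentTask change above means that when a reassignment is already in progress and the reassigner is a ContinousReassigner, the task re-attaches to it through load_overflow instead of warning and exiting. A usage sketch, assuming reassigner, plan and logger were built as in the earlier examples:

    require 'thor'

    # Hypothetical sketch: resumes an interrupted continuous reassignment if one is in flight,
    # otherwise generates the plan and starts a new one.
    task = Ktl::ReassignmentTask.new(reassigner, plan, Thor::Shell::Basic.new, logger: logger)
    task.execute(false)  # pass true for a dry run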
@@ -55,9 +55,11 @@ module Ktl
     end
 
     def assign_replicas_to_brokers(topic, brokers, partition_count, replica_count)
-      broker_metadatas = brokers.map { |x| Kafka::Admin::BrokerMetadata.new(x.to_java(:int), Scala::Option[nil]) }
-      broker_metadatas = Scala::Collection::JavaConversions.as_scala_iterable(broker_metadatas).to_seq
-      Kafka::Admin.assign_replicas_to_brokers(broker_metadatas, partition_count, replica_count)
+      @broker_metadatas ||= begin
+        broker_metadatas = Kafka::Admin.get_broker_metadatas(@zk_client, brokers)
+        Scala::Collection::JavaConversions.as_scala_iterable(broker_metadatas).to_seq
+      end
+      Kafka::Admin.assign_replicas_to_brokers(@broker_metadatas, partition_count, replica_count)
     rescue Kafka::Admin::AdminOperationException => e
       raise ArgumentError, sprintf('%s (%s)', e.message, e.class.name), e.backtrace
     end
@@ -125,21 +127,7 @@ module Ktl
     private
 
     def rack_for(broker_id)
-      unless @rack_mappings[broker_id]
-        broker_metadata = Kafka::Admin.get_broker_metadatas(@zk_client, [broker_id]).first
-        rack = broker_metadata.rack
-        unless rack.isDefined
-          raise "Broker #{broker_metadata.id} is missing rack information, unable to create rack aware shuffle plan."
-        end
-        @rack_mappings[broker_id] = rack.get
-      end
-      @rack_mappings[broker_id]
-    rescue Java::KafkaAdmin::AdminOperationException => e
-      if e.message.match '--disable-rack-aware'
-        raise "Not all brokers have rack information. Unable to create rack aware shuffle plan."
-      else
-        raise e
-      end
+      @rack_mappings[broker_id] ||= Kafka::Admin.get_broker_rack(@zk_client, broker_id)
     end
   end
 end
@@ -1,5 +1,5 @@
 # encoding: utf-8
 
 module Ktl
-  VERSION = '1.0.0'.freeze
+  VERSION = '1.1.0'.freeze
 end
@@ -84,6 +84,22 @@ module Ktl
       @utils.path_exists(path)
     end
 
+    def watch_state(path, listener)
+      zk_client.subscribe_state_changes(path, listener)
+    end
+
+    def watch_data(path, listener)
+      zk_client.subscribe_data_changes(path, listener)
+    end
+
+    def watch_child(path, listener)
+      zk_client.subscribe_child_changes(path, listener)
+    end
+
+    def unsubscribe_data(path, listener)
+      zk_client.unsubscribe_data_changes(path, listener)
+    end
+
     private
 
     CONCURRENCY = 8
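
Note (outside the diff): the new watch_*/unsubscribe_data helpers expose the underlying org.I0Itec ZkClient subscription calls, which is how ContinousReassigner follows the reassignment znode. A sketch, assuming zk_client is a connected Ktl::ZookeeperClient and listener is an object that includes ZkClient::IZkDataListener and implements handle_data_change/handle_data_deleted; the literal path shown is an assumption standing in for zk_utils.class.reassign_partitions_path:

    # Hypothetical sketch: follow Kafka's partition-reassignment znode.
    path = '/admin/reassign_partitions'
    zk_client.watch_data(path, listener)        # delegates to subscribe_data_changes
    # ... handle_data_change / handle_data_deleted fire as the znode changes ...
    zk_client.unsubscribe_data(path, listener)  # delegates to unsubscribe_data_changes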
@@ -107,5 +123,9 @@ module Ktl
         acc.send('++', v)
       end
     end
+
+    def zk_client
+      @zk_client ||= @utils.class.create_zk_client(@uri, 5_000, 5_000)
+    end
   end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ktl
 version: !ruby/object:Gem::Version
-  version: 1.0.0
+  version: 1.1.0
 platform: java
 authors:
 - Burt Platform Team
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-02-15 00:00:00.000000000 Z
+date: 2017-08-28 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement
@@ -64,6 +64,7 @@ files:
 - lib/ktl/cluster.rb
 - lib/ktl/cluster_stats_task.rb
 - lib/ktl/command.rb
+- lib/ktl/continous_reassigner.rb
 - lib/ktl/decommission_plan.rb
 - lib/ktl/migration_plan.rb
 - lib/ktl/reassigner.rb