cloudflock 0.6.1 → 0.7.0
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +15 -0
- data/bin/cloudflock +7 -1
- data/bin/cloudflock-files +2 -14
- data/bin/cloudflock-profile +3 -15
- data/bin/cloudflock-servers +3 -22
- data/bin/cloudflock.default +3 -22
- data/lib/cloudflock/app/common/cleanup/unix.rb +23 -0
- data/lib/cloudflock/app/common/cleanup.rb +107 -0
- data/lib/cloudflock/app/common/exclusions/unix/centos.rb +18 -0
- data/lib/cloudflock/app/common/exclusions/unix/redhat.rb +18 -0
- data/lib/cloudflock/app/common/exclusions/unix.rb +58 -0
- data/lib/cloudflock/app/common/exclusions.rb +57 -0
- data/lib/cloudflock/app/common/platform_action.rb +59 -0
- data/lib/cloudflock/app/common/rackspace.rb +63 -0
- data/lib/cloudflock/app/common/servers.rb +673 -0
- data/lib/cloudflock/app/files-migrate.rb +246 -0
- data/lib/cloudflock/app/server-migrate.rb +327 -0
- data/lib/cloudflock/app/server-profile.rb +130 -0
- data/lib/cloudflock/app.rb +87 -0
- data/lib/cloudflock/error.rb +6 -19
- data/lib/cloudflock/errstr.rb +31 -0
- data/lib/cloudflock/remote/files.rb +82 -22
- data/lib/cloudflock/remote/ssh.rb +234 -278
- data/lib/cloudflock/target/servers/platform.rb +92 -115
- data/lib/cloudflock/target/servers/profile.rb +331 -340
- data/lib/cloudflock/task/server-profile.rb +651 -0
- data/lib/cloudflock.rb +6 -8
- metadata +49 -68
- data/lib/cloudflock/interface/cli/app/common/servers.rb +0 -128
- data/lib/cloudflock/interface/cli/app/files.rb +0 -179
- data/lib/cloudflock/interface/cli/app/servers/migrate.rb +0 -491
- data/lib/cloudflock/interface/cli/app/servers/profile.rb +0 -88
- data/lib/cloudflock/interface/cli/app/servers.rb +0 -2
- data/lib/cloudflock/interface/cli/console.rb +0 -213
- data/lib/cloudflock/interface/cli/opts/servers.rb +0 -20
- data/lib/cloudflock/interface/cli/opts.rb +0 -87
- data/lib/cloudflock/interface/cli.rb +0 -15
- data/lib/cloudflock/target/servers/data/exceptions/base.txt +0 -44
- data/lib/cloudflock/target/servers/data/exceptions/platform/amazon.txt +0 -10
- data/lib/cloudflock/target/servers/data/exceptions/platform/centos.txt +0 -7
- data/lib/cloudflock/target/servers/data/exceptions/platform/debian.txt +0 -0
- data/lib/cloudflock/target/servers/data/exceptions/platform/redhat.txt +0 -7
- data/lib/cloudflock/target/servers/data/exceptions/platform/suse.txt +0 -1
- data/lib/cloudflock/target/servers/data/post-migration/chroot/base.txt +0 -1
- data/lib/cloudflock/target/servers/data/post-migration/chroot/platform/amazon.txt +0 -19
- data/lib/cloudflock/target/servers/data/post-migration/pre/base.txt +0 -3
- data/lib/cloudflock/target/servers/data/post-migration/pre/platform/amazon.txt +0 -4
- data/lib/cloudflock/target/servers/migrate.rb +0 -466
- data/lib/cloudflock/target/servers/platform/v1.rb +0 -97
- data/lib/cloudflock/target/servers/platform/v2.rb +0 -93
- data/lib/cloudflock/target/servers.rb +0 -5
- data/lib/cloudflock/version.rb +0 -3
data/lib/cloudflock/app/common/servers.rb
@@ -0,0 +1,673 @@
+require 'socket'
+require 'console-glitter'
+require 'cloudflock/app'
+require 'cloudflock/remote/ssh'
+require 'cloudflock/app/common/rackspace'
+require 'cloudflock/app/common/exclusions'
+require 'cloudflock/app/common/cleanup'
+
+module CloudFlock; module App
+  # Public: The Common module provides common methods for CLI interaction
+  # pertaining to interaction with remote (Unix) servers and the Rackspace API.
+  module Common
+    include Rackspace
+    include ConsoleGlitter
+    include CloudFlock::App
+    include CloudFlock::Remote
+
+    # Path to the base directory in which any CloudFlock files will be stored.
+    DATA_DIR = '/root/.cloudflock'
+
+    # Path to the file in which paths excluded from being migrated are stored.
+    EXCLUSIONS = "#{DATA_DIR}/migration_exclusions"
+
+    # Path to the private key to be generated for migration.
+    PRIVATE_KEY = "#{DATA_DIR}/migration_id_rsa"
+
+    # Path to the public key corresponding to PRIVATE_KEY.
+    PUBLIC_KEY = "#{PRIVATE_KEY}.pub"
+
+    # Path to the default path for root partition of the destination host to be
+    # mounted.
+    MOUNT_POINT = '/mnt/migration_target'
+
+    # Commonly used arguments to ssh.
+    SSH_ARGUMENTS = CloudFlock::Remote::SSH::SSH_ARGUMENTS
+
+    # Public: Collect information about the source server to be migrated.
+    #
+    # host - Hash containing any options which may pertain to the host.
+    #
+    # Returns a Hash containing information pertinent to logging in to a host.
+    def define_source(host)
+      define_host(host, 'Source')
+    end
+
+    # Public: Collect information about the destination server to which data
+    # will be migrated.
+    #
+    # host - Hash containing any options which may pertain to the host.
+    #
+    # Returns a Hash containing information pertinent to logging in to a host.
+    def define_destination(host)
+      host.select! { |key| /dest_/.match(key) }
+      host = host.reduce({}) do |c, e|
+        key = e[0].to_s
+        c[key.gsub(/dest_/, '').to_sym] = e[1]
+        c
+      end
+      define_host(host, 'Destination')
+    end
+
+    # Public: Collect information about a named server to be migrated.
+    #
+    # opts - Hash containing any applicable options mappings for the server in
+    #        question.
+    # name - String containing the name/description for the host.
+    #
+    # Returns a Hash containing information pertinent to logging in to a host.
+    def define_host(host, name)
+      host = host.dup
+      check_option(host, :hostname, "#{name} host")
+      check_option(host, :port, "#{name} SSH port", default_answer: '22')
+      check_option(host, :username, "#{name} username", default_answer: 'root')
+      check_option(host, :password, "#{name} password",
+                   default_answer: '', allow_empty: true)
+
+      key_path = File.join(Dir.home, '.ssh', 'id_rsa')
+      key_path = '' unless File.exists?(key_path)
+      check_option(host, :ssh_key, "#{name} SSH Key",
+                   default_answer: key_path, allow_empty: true)
+
+      # Using sudo is only applicable if the user isn't root
+      host[:sudo] = false if host[:username] == 'root'
+      check_option(host, :sudo, 'Use sudo? (Y/N)', default_answer: 'Y')
+
+      # If non-root and using su, the root password is needed
+      if host[:username] == 'root' || host[:sudo]
+        host[:root_password] = host[:password]
+      else
+        check_option(host, :root_password, 'Password for root')
+      end
+
+      host
+    end
+
+    # Public: Have the user select from a list of available images to provision
+    # a new host.
+    #
+    # api         - Authenticated Fog API instance.
+    # profile     - Profile of the source host.
+    # constrained - Whether the list should be constrained to flavors which
+    #               appear to be appropriate. (default: true)
+    #
+    # Returns a String.
+    def define_compute_image(api, profile, constrained = true)
+      image_list = filter_compute_images(api, profile, constrained)
+      if image_list.length == 1
+        puts "Suggested image: #{UI.blue { image_list.first[:name] }}"
+        if UI.prompt_yn('Use this image? (Y/N)', default_answer: 'Y')
+          return image_list.first[:id]
+        end
+      elsif image_list.length > 1
+        puts generate_selection_table(image_list, constrained)
+
+        image = UI.prompt('Image to provision', valid_answers: [/^\d+$/, 'A'])
+        return image_list[image.to_i][:id] unless /A/.match(image)
+      end
+
+      define_compute_image(api, profile, false)
+    end
+
+    # Public: Filter available images to those expected to be appropriate for
+    # a given amount of resource usage.
+    #
+    # api         - Authenticated Fog API instance.
+    # profile     - Profile of the source host.
+    # constrained - Whether the list should be constrained to images which
+    #               appear to be appropriate.
+    #
+    # Returns an Array of Hashes mapping :name to the image name and :id to the
+    # image's internal id.
+    def filter_compute_images(api, profile, constrained)
+      image_list = api.images.to_a
+      if constrained
+        cpe = profile.cpe
+        search = [cpe.vendor, cpe.version]
+        search.map! { |s| Regexp.new(s, Regexp::IGNORECASE) }
+
+        image_list.select! do |image|
+          search.reduce(true) { |c,e| e.match(image.name) && c }
+        end
+      end
+      image_list.map! { |image| { name: image.name, id: image.id } }
+    rescue Excon::Errors::Timeout
+      retry if retry_prompt('Unable to fetch a list of available images.')
+      raise
+    end
+
+    # Public: Have the user select from a list of available flavors to
+    # provision a new host.
+    #
+    # api         - Authenticated Fog API instance.
+    # profile     - Profile of the source host.
+    # constrained - Whether the list should be constrained to flavors which
+    #               appear to be appropriate. (default: true)
+    #
+    # Returns a String.
+    def define_compute_flavor(api, profile, constrained = true)
+      flavor_list = filter_compute_flavors(api, profile, constrained)
+
+      puts "Suggested flavor: #{UI.blue { flavor_list.first[:name] }}"
+      if UI.prompt_yn('Use this flavor? (Y/N)', default_answer: 'Y')
+        return flavor_list.first[:id]
+      end
+
+      puts generate_selection_table(flavor_list, constrained)
+      flavor = UI.prompt('Flavor to provision', valid_answers: [/^\d+$/, 'A'])
+      return flavor_list[flavor.to_i][:id] unless /A/.match(flavor)
+
+      define_compute_flavor(api, profile, false)
+    end
+
+    # Public: Filter available flavors to those expected to be appropriate for
+    # a given amount of resource usage.
+    #
+    # api         - Authenticated Fog API instance.
+    # profile     - Profile of the source host.
+    # constrained - Whether the list should be constrained to flavors which
+    #               appear to be appropriate.
+    #
+    # Returns an Array of Hashes mapping :name to the flavor name and :id to
+    # the flavor's internal id.
+    def filter_compute_flavors(api, profile, constrained)
+      flavor_list = api.flavors.to_a
+      if constrained
+        hdd = profile.select_entries(/Storage/, /Usage/)
+        ram = profile.select_entries(/Memory/, /Used/)
+        hdd = hdd.first.to_i
+        ram = ram.first.to_i
+
+        flavor_list.select! { |flavor| flavor.disk > hdd && flavor.ram > ram }
+      end
+      flavor_list.map! { |flavor| { name: flavor.name, id: flavor.id } }
+    end
+
+    # Public: Prompt user for the name of a new host to be created, presenting
+    # the hostname of the source host as a default option.
+    #
+    # profile - Profile of the source host.
+    #
+    # Returns a String.
+    def define_compute_name(profile)
+      name = profile.select_entries(/System/, 'Hostname').join
+
+      new_name = UI.prompt("Name", default_answer: name, allow_empty: false)
+      new_name.gsub(/[^a-zA-Z0-9_-]/, '-')
+    end
+
+    # Public: Create a printable table with options to be presented to a user.
+    #
+    # options     - Array of Hashes containing columns to be desplayed, with
+    #               the following keys:
+    #               :selection_id - ID for the user to select the option.
+    #               :name         - String containing the option's name.
+    # constrained - Whether the table is constrained (and a "View All" option
+    #               is appropriate).
+    #
+    # Returns a String.
+    def generate_selection_table(options, constrained)
+      options = options.each_with_index.map do |option, index|
+        { selection_id: index.to_s, name: option[:name] }
+      end
+      options << { selection_id: 'A', name: 'View All' } if constrained
+      labels = { selection_id: 'ID', name: 'Name' }
+      UI.build_grid(options, labels)
+    end
+
+    # Public: Create a new compute instance via API.
+    #
+    # api          - Authenticated Fog API instance.
+    # managed      - Whether the instance is expected to be managed (if
+    #                Rackspace public cloud).
+    # compute_spec - Hash containing parameters to pass via the API call.
+    #
+    # Returns a Hash with information necessary to log in to the new host.
+    def provision_compute(api, managed, compute_spec)
+      host = api.servers.create(compute_spec)
+      provision_wait(host, compute_spec[:name])
+      managed_wait(host) if managed
+      rescue_compute(host)
+
+      { username: 'root', port: '22' }.merge(get_host_details(host))
+    rescue Fog::Errors::TimeoutError, Excon::Errors::Timeout
+      retry if retry_prompt('Provisioning failed.')
+      raise
+    end
+
+    # Public: Wait for a Rackspace Cloud instance to be provisioned.
+    #
+    # host - Fog::Compute instance.
+    # name - String containing the name of the server.
+    #
+    # Returns nothing.
+    def provision_wait(host, name)
+      UI.spinner("Waiting for #{name} to provision") do
+        host.wait_for { ready? }
+      end
+    rescue Fog::Errors::TimeoutError, Excon::Errors::Timeout
+      error = UI.red { 'Provisioning is taking an unusually long time.' }
+
+      retry if UI.prompt_yn("#{error} Continue waiting? (Y/N)",
+                            default_answer: 'Y')
+      raise
+    end
+
+    # Public: Wait for a Rackspace Cloud instance with Managed service level to
+    # finish post-provisioning automation.
+    #
+    # host - Fog::Compute instance.
+    #
+    # Returns nothing.
+    def managed_wait(host)
+      finished = '/tmp/rs_managed_cloud_automation_complete'
+      connect = { username: 'root', port: '22' }.merge(get_host_details(host))
+
+      ssh = ssh_connect(connect)
+      UI.spinner('Waiting for managed cloud automation to complete') do
+        ssh.as_root("while [ ! -f #{finished} ]; do sleep 5; done", 3600)
+      end
+    end
+
+    # Public: Get details for a Fog::Compute instance.
+    #
+    # host - Fog::Compute instance.
+    #
+    # Returns a Hash containing the host's address and root password.
+    def get_host_details(host)
+      { hostname: host.ipv4_address,
+        password: host.password,
+        root_password: host.password }
+    end
+
+    # Public: Bring a host into Rescue mode.
+    #
+    # host - Fog::Compute instance.
+    #
+    # Returns nothing.
+    def rescue_compute(host)
+      host.rescue
+      begin
+        UI.spinner("Waiting for Rescue Mode (password: #{host.password})") do
+          host.wait_for { state == 'RESCUE' }
+        end
+      rescue Fog::Errors::TimeoutError
+        retry if retry_prompt('Timeout exceeded waiting for the host.')
+
+        host.destroy
+        raise
+      end
+    rescue Excon::Errors::Timeout
+      retry if retry_prompt('API timed out waiting for server status update.')
+    end
+
+    # Public: Connect to a host via SSH, automatically retrying a set number of
+    # times, and prompting whether to continue trying beyond that.
+    #
+    # host     - Hash containing information about the host. Defaults are
+    #            defined in the CloudFlock::Remote::SSH Class.
+    # attempts - Number of times to retry connecting before alerting the user
+    #            to failures and asking whether to continue. (Default: 5)
+    #
+    # Returns an SSH Object.
+    def ssh_connect(host, attempts = 5)
+      attempt = 0
+
+      UI.spinner("Logging in to #{host[:hostname]}") do
+        begin
+          SSH.new(host)
+        rescue Net::SSH::Disconnect
+          sleep 10
+          attempt += 1
+          retry if attempt < 5
+        end
+      end
+    rescue Net::SSH::Disconnect
+      retry if retry_prompt('Unable to establish a connection.')
+      raise
+    end
+
+    # Public: Perform the final preperatory steps necessary as well as the
+    # migration.
+    #
+    # source_shell - SSH object logged in to the source host.
+    # dest_shell   - SSH object logged in to the destination host.
+    # exclusions   - String containing the exclusions list for the migration.
+    #
+    # Returns a String containing the host's new ssh public key.
+    def migrate_server(source_shell, dest_shell, exclusions)
+      pubkey = UI.spinner('Generating a keypair for the source environment') do
+        generate_keypair(source_shell)
+      end
+
+      UI.spinner('Preparing the destination environment') do
+        setup_destination(dest_shell, pubkey)
+      end
+
+      rsync = UI.spinner('Preparing the source environment') do
+        location = setup_source(source_shell, exclusions)
+        if location.empty?
+          location = transfer_rsync(source_shell, dest_shell)
+        end
+
+        location
+      end
+
+      dest_address = UI.spinner('Checking for ServiceNet') do
+        determine_target_address(source_shell, dest_shell)
+      end
+
+      rsync = "#{rsync} -azP -e 'ssh #{SSH_ARGUMENTS} -i #{PRIVATE_KEY}' " +
+              "--exclude-from='#{EXCLUSIONS}' / #{dest_address}:#{MOUNT_POINT}"
+
+      UI.spinner('Performing rsync migration') do
+        2.times do
+          # TODO: this dies in exceptional cases
+          source_shell.as_root(rsync, 7200)
+          source_shell.as_root("sed -i 's/\/var\/log//g' #{EXCLUSIONS}")
+        end
+      end
+    end
+
+    # Public: Create a temporary ssh key to be used for passwordless access to
+    # the destination host.
+    #
+    # shell - SSH object logged in to the source host.
+    #
+    # Returns a String containing the host's new ssh public key.
+    def generate_keypair(shell)
+      shell.as_root("mkdir #{DATA_DIR}")
+      shell.as_root("ssh-keygen -b 4096 -q -t rsa -f #{PRIVATE_KEY} -P ''")
+      shell.as_root("cat #{PUBLIC_KEY}")
+    end
+
+    # Public: Prepare the source host for migration by populating the
+    # exclusions list in the file located at EXCLUSIONS and determining the
+    # location of rsync on the system.
+    #
+    # shell      - SSH object logged in to the source host.
+    # exclusions - String containing the exclusions list for the source host.
+    #
+    # Returns a String containing path to rsync on the host if present.
+    def setup_source(shell, exclusions)
+      shell.as_root("cat <<EOF> #{EXCLUSIONS}\n#{exclusions}\nEOF")
+      shell.as_root('which rsync 2>/dev/null')
+    end
+
+    # Public: Transfer rsync from the destination host to the source host to
+    # facilitate the migration.
+    #
+    # source_shell - SSH object logged in to the source host.
+    # dest_shell   - SSH object logged in to the source host.
+    #
+    # Raises NoRsyncAvailable if rsync doesn't exist on the destination host.
+    #
+    # Returns a String.
+    def transfer_rsync(source_shell, dest_shell)
+      host = dest_shell.hostname
+      location = dest_shell.as_root('which rsync')
+      raise(NoRsyncAvailable, Errstr::NO_RSYNC) if location.empty?
+
+      scp = "scp #{SSH_ARGUMENTS} -i #{PRIVATE_KEY} #{host}:#{location} " +
+            "#{DATA_DIR}/"
+
+      source_shell.as_root(scp)
+      "#{DATA_DIR}/rsync"
+    end
+
+    # Public: Prepare the destination host for migration by verifying that
+    # rsync is present, mounting the primary disk to /mnt/migration_target,
+    # installing a temporary ssh public key for root, and backing up the
+    # original passwd, shadow and group files.
+    #
+    # shell  - SSH object logged in to the destination host.
+    # pubkey - String containing the text of the ssh public key to install for
+    #          root.
+    #
+    # Returns nothing.
+    def setup_destination(shell, pubkey)
+      preserve_files = ['passwd', 'shadow', 'group']
+      path = "#{MOUNT_POINT}/etc"
+
+      # TODO: Dynamic mountpoint/block device support
+      # Presently mount point and block device are hard-coded. This will be
+      # changed in a future release.
+      shell.as_root("mkdir -p #{MOUNT_POINT}")
+      shell.as_root("mount -o acl /dev/xvdb1 #{MOUNT_POINT}")
+
+      preserve_files.each do |file|
+        original = "#{path}/#{file}"
+        backup = "#{original}.migration"
+        shell.as_root("[ -f #{backup} ] || /bin/cp -a #{original} #{backup}")
+      end
+
+      # TODO: Better distro support
+      # Only Debian- and RedHat-based Unix hosts support automatic rsync
+      # installation at this time. This will be fixed in a future release.
+      unless /rsync error/.match(shell.as_root('rsync'))
+        package_manager = shell.as_root('which {yum,apt-get} 2>/dev/null')
+        raise NoRsyncAvailable if package_manager.empty?
+        shell.as_root("#{package_manager} install rsync -y", 300)
+      end
+
+      ssh_key = "mkdir $HOME/.ssh; chmod 0700 $HOME/.ssh; printf " +
+                "'#{pubkey}\\n' >> $HOME/.ssh/authorized_keys"
+      shell.as_root(ssh_key)
+    end
+
+    # Public: Determine what address should be used when connecting from source
+    # to destination for the purpose of a migration. Prefer RFC1918 networks.
+    #
+    # source_shell - SSH object logged in to the source host.
+    # dest_shell   - SSH object logged in to the destination host.
+    #
+    # Returns a String containing the appropriate address.
+    def determine_target_address(source_shell, dest_shell)
+      hostname = dest_shell.hostname
+      host_key = check_hostkey(dest_shell, '127.0.0.1')
+
+      ips = dest_shell.query('ifconfig')
+      ips = ips.lines.select { |line| /inet[^6]/.match(line) }
+
+      ips.map! { |line| line.strip.split(/\s+/)[1] }
+      ips.map! { |ip| ip.gsub(/[^0-9\.]/, '') }
+
+      ips.select! { |ip| check_hostkey(source_shell, ip) == host_key }
+      return ips.last if ips.any?
+
+      hostname
+    end
+
+    # Public: Determine the ssh hostkey visible to a given host from an IP.
+    #
+    # shell - SSH object logged in to a host.
+    # ip    - String containing an ip (or hostname) for which to get a key.
+    #
+    # Returns a String containing the key given by the host, or false if none
+    # given.
+    def check_hostkey(shell, ip)
+      ssh_cmd = 'ssh -o UserKnownHostsFile=/dev/null ' \
+                '-o NumberOfPasswordPrompts=0'
+
+      hostkey = shell.query("#{ssh_cmd} #{ip}", 15, true)
+      key = hostkey.lines.select { |line| /fingerprint is/.match(line) }
+      key.first.to_s.strip.gsub(/.*fingerprint is /, '').gsub(/\.$/, '')
+    end
+
+    # Public: Perform post-migration cleanup tasks.
+    #
+    # shell   - SSH object logged in to the target host.
+    # profile - CPE describing the platform in question.
+    #
+    # Returns nothing.
+    def cleanup_destination(shell, profile)
+      UI.spinner('Performing post-migration cleanup') do
+        cleanup = Cleanup.new(profile)
+        chroot_command = "chroot #{MOUNT_POINT} /bin/sh -C " \
+                         "#{DATA_DIR}/chroot.sh"
+
+        restore_rackspace_users(cleanup)
+
+        shell.as_root("mkdir #{DATA_DIR}")
+        shell.as_root("cat <<EOF> #{DATA_DIR}/pre.sh\n#{cleanup.pre_s}\nEOF")
+        shell.as_root("cat <<EOF> #{DATA_DIR}/post.sh\n#{cleanup.post_s}\nEOF")
+
+        chroot = "cat <<EOF> #{MOUNT_POINT}#{DATA_DIR}/chroot.sh\n" \
+                 "#{cleanup.chroot_s}\nEOF"
+        shell.as_root("mkdir -p #{MOUNT_POINT}#{DATA_DIR}")
+        shell.as_root(chroot)
+
+        shell.as_root("/bin/sh #{DATA_DIR}/pre.sh", 0)
+        shell.as_root(chroot_command, 0)
+        shell.as_root("/bin/sh #{DATA_DIR}/post.sh", 0)
+
+        cleanup_rackspace_server(shell)
+      end
+    end
+
+    # Public: Restore any users added by Rackspace automation in order to
+    # maintain access on hosts which are meant to be managed by Rackspace.
+    #
+    # cleanup - Cleanup object to which to add chroot tasks.
+    #
+    # Returns nothing.
+    def restore_rackspace_users(cleanup)
+      ['rack', 'rackconnect'].each do |user|
+        check_user = "grep '^#{user}:' /etc/passwd.migration"
+        cleanup.chroot_step("#{check_user} \&\& useradd #{user}")
+      end
+    end
+
+    # Public: Restore any users added by Rackspace automation in order to
+    # maintain access on hosts which are meant to be managed by Rackspace.
+    #
+    # shell - SSH object logged in to the target host.
+    #
+    # Returns nothing.
+    def cleanup_rackspace_server(shell)
+      if restore_user(shell, 'rackconnect')
+        sudoers = "rackconnect ALL=(ALL) NOPASSWD: ALL\n" \
+                  "Defaults:rackconnect !requiretty"
+        sudoers_command = "cat <<EOF >> #{MOUNT_POINT}/etc/sudoers\n\n" \
+                          "#{sudoers}\nEOF"
+
+        shell.as_root(sudoers_command)
+      end
+
+      if restore_user(shell, 'rack')
+        sudoers = "rack ALL=(ALL) NOPASSWD: ALL"
+        sudoers_command = "cat <<EOF >> #{MOUNT_POINT}/etc/sudoers\n\n" \
+                          "#{sudoers}\nEOF"
+        shell.as_root(sudoers_command)
+      end
+    end
+
+    # Public: If a given user had previously existed, create that user in the
+    # current environment and copy password hash from a backup copy of
+    # /etc/shadow.
+    #
+    # shell - SSH instance logged in to the target host.
+    # user  - String containing the username to restore.
+    #
+    # Returns true if the user was successfully restored, false otherwise.
+    def restore_user(shell, user)
+      passwd_path = "#{MOUNT_POINT}/etc/passwd"
+      shadow_path = "#{MOUNT_POINT}/etc/shadow"
+
+      present = ['passwd', 'shadow'].map do |file|
+        path = "#{MOUNT_POINT}/etc/#{file}.migration"
+        shell.as_root("grep '^#{user}:' #{path}")
+      end
+      return false if present.include?('')
+
+      passwd, shadow = present
+      uid, gid = passwd.split(/:/)[2..3]
+
+      steps = ["chown -R #{uid}.#{gid} #{MOUNT_POINT}/home/#{user}",
+               "sed -i '/^#{user}:.*$/d' #{shadow_path}",
+               "printf '#{shadow}\\n' >> #{shadow_path}"]
+      steps.each { |step| shell.as_root(step) }
+
+      true
+    end
+
+    # Public: For each IP detected on the source host, perform IP remediation
+    # on the destination host post-migration. Allow the list of IPs and the
+    # list of directories to target to be overridden by the user.
+    #
+    # shell   - SSH object logged in to the target host.
+    # profile - Profile containing IPs gathered from the source host.
+    #
+    # Returns nothing.
+    def configure_ips(shell, profile)
+      destination_profile = CloudFlock::Task::ServerProfile.new(shell)
+      source_ips = profile.select_entries(/IP Usage/, /./)
+      destination_ips = destination_profile.select_entries(/IP Usage/, /./)
+      target_directories = ['/etc']
+
+      puts "Detected IPs on the source: #{source_ips.join(', ')} "
+      if UI.prompt_yn('Edit IP list? (Y/N)', default_answer: 'N')
+        source_ips = edit_ip_list(source_ips)
+      end
+
+      puts 'By default only config files under /etc will be remediated. '
+      if UI.prompt_yn('Edit remediation targets? (Y/N)', default_answer: 'N')
+        target_directories = edit_directory_list(target_directories)
+      end
+
+      puts "Detected IPs on the destination: #{destination_ips.join(', ')}"
+      source_ips.each do |ip|
+        appropriate = destination_ips.select do |dest_ip|
+          Addrinfo.ip(ip).ipv4_private? == Addrinfo.ip(dest_ip).ipv4_private?
+        end
+        suggested = appropriate.first || destination_ips.first
+        remediate_ip(shell, ip, suggested, target_directories)
+      end
+    end
+
+    # Public: Perform post-migration IP remediation in configuration files for
+    # a given IP.
+    #
+    # shell              - SSH object logged in to the target host.
+    # source_ip          - String containing the IP to replace.
+    # default_ip         - String containing an IP to suggest as the default
+    #                      replacement.
+    # target_directories - Array containing Strings of directories to target
+    #                      for IP remediation.
+    #
+    # Returns nothing.
+    def remediate_ip(shell, source_ip, default_ip, target_directories)
+      replace = UI.prompt("Replacement for #{source_ip}",
+                          allow_empty: true, default_answer: default_ip).strip
+      return if replace.empty? || target_directories.empty?
+
+      sed = "sed -i 's/#{source_ip}/#{replace}/g' {} \\;"
+      UI.spinner("Remediating IP: #{source_ip}") do
+        target_directories.each do |dir|
+          shell.as_root("find #{MOUNT_POINT}#{dir} -type f -exec #{sed}", 7200)
+        end
+      end
+    end
+
+    # Public: Display a failure message to the user and prompt whether to
+    # retry.
+    #
+    # message - String containing a failure message.
+    #
+    # Returns true or false indicating whether the user wishes to retry.
+    def retry_prompt(message)
+      error = UI.red { "#{message} Try again? (Y/N)" }
+      UI.prompt_yn(error, default_answer: 'Y')
+    end
+  end
+end; end
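
For orientation, the sketch below is not part of the gem; it is a minimal, hypothetical driver showing how the Common mixin's workflow methods in the new servers.rb might be chained together. It assumes the destination host already exists and is reachable over SSH; the option keys, hostnames, require paths, and exclusion paths shown are illustrative only.

#!/usr/bin/env ruby
# Hypothetical driver script (not shipped with cloudflock) sketching how the
# Common mixin's methods compose into a migration run.
require 'cloudflock/app/common/servers'
require 'cloudflock/task/server-profile'

class MigrationSketch
  include CloudFlock::App::Common

  def run(options)
    # Prompt for any missing source details; dest_* keys are split off and
    # prompted for separately by define_destination.
    source_host = define_source(options)
    dest_host   = define_destination(options)

    source_shell = ssh_connect(source_host)
    dest_shell   = ssh_connect(dest_host)

    # Profile the source so later steps know its platform (CPE) and IPs.
    profile = CloudFlock::Task::ServerProfile.new(source_shell)

    # migrate_server expects the exclusions as a newline-delimited String;
    # these paths are placeholders.
    exclusions = "/proc\n/sys\n/dev\n/tmp"

    migrate_server(source_shell, dest_shell, exclusions)
    cleanup_destination(dest_shell, profile.cpe)  # documented as taking a CPE
    configure_ips(dest_shell, profile)
  end
end

# Illustrative invocation; real values would come from CLI options.
MigrationSketch.new.run(hostname: 'source.example.com', username: 'root',
                        dest_hostname: '203.0.113.10', dest_username: 'root')

When the destination does not yet exist, the provisioning helpers in the same module (define_compute_image, define_compute_flavor, define_compute_name, provision_compute) would supply the dest_* details from an authenticated Fog API instance before the ssh_connect step.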