CloudyScripts 1.8.29 → 1.8.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/Rakefile CHANGED
@@ -12,7 +12,7 @@ require 'rake/testtask'
12
12
 
13
13
  spec = Gem::Specification.new do |s|
14
14
  s.name = 'CloudyScripts'
15
- s.version = '1.8.29'
15
+ s.version = '1.8.30'
16
16
  s.has_rdoc = true
17
17
  s.extra_rdoc_files = ['README.rdoc', 'LICENSE']
18
18
  s.summary = 'Scripts to facilitate programming for infrastructure clouds.'
@@ -34,7 +34,7 @@ class RemoteCommandHandler
34
34
  # * keyfile: path of the keyfile to be used for authentication
35
35
  def connect_with_keyfile(ip, user_name, keyfile, timeout = 30)
36
36
  @use_sudo = false
37
- @ssh_session = Net::SSH.start(ip, user_name, {:keys => [keyfile], :timeout => timeout})
37
+ @ssh_session = Net::SSH.start(ip, user_name, :keys => [keyfile], :timeout => timeout, :verbose => :warn)
38
38
  @use_sudo = true unless user_name.strip == 'root'
39
39
  end
40
40
 
@@ -45,7 +45,7 @@ class RemoteCommandHandler
45
45
  # * key_data: key_data to be used for authentication
46
46
  def connect(ip, user, key_data, timeout = 30)
47
47
  @use_sudo = false
48
- @ssh_session = Net::SSH.start(ip, user, {:key_data => [key_data], :timeout => timeout})
48
+ @ssh_session = Net::SSH.start(ip, user, :key_data => [key_data], :timeout => timeout, :verbose => :warn)
49
49
  @use_sudo = true unless user.strip == 'root'
50
50
  end
51
51
 
@@ -63,7 +63,27 @@ class RemoteCommandHandler
63
63
  def retrieve_os()
64
64
  get_output("uname -r").strip
65
65
  end
66
-
66
+
67
+ # Get root device node (parsed from /etc/mtab)
68
+ def get_root_device()
69
+ get_output("cat /etc/mtab | grep -E '[[:blank:]]+\/[[:blank:]]+' | cut -d ' ' -f 1").strip
70
+ end
71
+
72
+ # Get root partition label
73
+ def get_root_label(root_device)
74
+ get_output("e2label #{root_device}").strip
75
+ end
76
+
77
+ # Set root partition label
78
+ def set_root_label(root_device, label)
79
+ remote_execute("e2label #{root_device} #{label}", nil, false)
80
+ end
81
+
82
+ # Get filesystem type
83
+ def get_root_fs_type()
84
+ get_output("cat /etc/mtab | grep -E '[[:blank:]]+\/[[:blank:]]+' | cut -d ' ' -f 3").strip
85
+ end
86
+
67
87
  # Installs the software package specified.
68
88
  def install(software_package)
69
89
  e = "yum -yq install #{software_package}"
@@ -87,7 +107,8 @@ class RemoteCommandHandler
87
107
 
88
108
  def create_filesystem(fs_type, volume)
89
109
  e = "mkfs -t #{fs_type} #{volume}"
90
- remote_execute(e, "y") #TODO: quiet mode?
110
+ #remote_execute(e, "y") #TODO: quiet mode?
111
+ remote_execute(e, nil, false)
91
112
  end
92
113
 
93
114
  def mkdir(path)
@@ -148,19 +169,49 @@ class RemoteCommandHandler
148
169
  end
149
170
  e = "rsync -avHx #{exclude} #{source_path} #{dest_path}"
150
171
  @logger.debug "going to execute #{e}"
151
- remote_exec_helper(e, nil, nil, false) #TODO: handle output in stderr?
172
+ remote_exec_helper(e, nil, nil, true) #TODO: handle output in stderr?
152
173
  end
153
174
 
154
- # Copy directory via an ssh-tunnel.
155
- def remote_rsync(keyfile, source_path, dest_ip, dest_path)
175
+ # Rsync directory via an ssh-tunnel.
176
+ def remote_rsync_old(keyfile, source_path, dest_ip, dest_path)
156
177
  e = "rsync -rlpgoDzq -e "+'"'+"ssh -o stricthostkeychecking=no -i #{keyfile}"+'"'+" #{source_path} root@#{dest_ip}:#{dest_path}"
157
178
  @logger.debug "going to execute #{e}"
158
179
  remote_exec_helper(e, nil, nil, false) #TODO: handle output in stderr?
159
180
  end
160
181
 
182
+ # Disable 'Defaults requiretty' option in sudoers file
183
+ def disable_sudoers_requiretty()
184
+ e = "sed -r -e \'s/^(Defaults[[:blank:]]+requiretty)$/# \\1/\' -i /etc/sudoers"
185
+ @logger.debug "going to execute '#{e}'"
186
+ status = remote_exec_helper(e, nil, nil, true)
187
+ if status != true
188
+ raise Exception.new("disabling 'requiretty' from sudoers failed with status: #{status}")
189
+ end
190
+ end
191
+
192
+ # Enable 'Defaults requiretty' option in sudoers file
193
+ def enable_sudoers_requiretty()
194
+ e = "sed -r -e \'s/^#[[:blank:]]*(Defaults[[:blank:]]+requiretty)$/\\1/\' -i /etc/sudoers"
195
+ @logger.debug "going to execute '#{e}'"
196
+ status = remote_exec_helper(e, nil, nil, true)
197
+ if status != true
198
+ raise Exception.new("enabling 'requiretty' from sudoers failed with status: #{status}")
199
+ end
200
+ end
201
+
202
+ def remote_rsync(keyfile, source_path, dest_ip, dest_user, dest_path)
203
+ e = "rsync -rlpgoDzq --rsh 'ssh -o stricthostkeychecking=no -i #{keyfile}' --rsync-path='sudo rsync'"+
204
+ " #{source_path} #{dest_user}@#{dest_ip}:#{dest_path}"
205
+ @logger.debug "going to execute #{e}"
206
+ status = remote_exec_helper(e, nil, nil, true) #TODO: handle output in stderr?
207
+ if status != true
208
+ raise Exception.new("rsync bewteen source and target servers failed with status: #{status}")
209
+ end
210
+ end
211
+
161
212
  # Copy directory via an ssh-tunnel.
162
- def scp(keyfile, source_path, dest_ip, dest_path)
163
- e = "scp -Cpqr -o stricthostkeychecking=no -i #{keyfile} #{source_path} root@#{dest_ip}:#{dest_path}"
213
+ def scp(keyfile, source_path, dest_ip, dest_user, dest_path)
214
+ e = "scp -Cpqr -o stricthostkeychecking=no -i #{keyfile} #{source_path} #{dest_user}@#{dest_ip}:#{dest_path}"
164
215
  @logger.debug "going to execute #{e}"
165
216
  remote_exec_helper(e, nil, nil, false) #TODO: handle output in stderr?
166
217
  end
@@ -195,7 +246,6 @@ class RemoteCommandHandler
195
246
  # When #raise_exception is set, an exception will be raised instead of
196
247
  # returning false.
197
248
  def remote_execute(exec_string, push_data = nil, raise_exception = false)
198
- #XXX: command line: echo -e 'y' | mkfs -t ext3 /dev/sdf
199
249
  exec_string = "echo #{push_data} >tmp.txt; #{exec_string} <tmp.txt; rm -f tmp.txt" unless push_data == nil
200
250
  stdout = []
201
251
  stderr = []
@@ -251,9 +301,18 @@ class RemoteCommandHandler
251
301
  result = true
252
302
  sudo = (@use_sudo ? "sudo " : "")
253
303
  the_channel = @ssh_session.open_channel do |channel|
304
+ if sudo
305
+ channel.request_pty do |ch, success|
306
+ if success
307
+ @logger.debug("pty successfully obtained")
308
+ else
309
+ @logger.debug("could not obtain pty")
310
+ end
311
+ end
312
+ end
254
313
  channel.exec("#{sudo}#{exec_string}") do |ch, success|
255
314
  if success
256
- @logger.debug("RemoteCommandHandler: starts executing #{sudo}#{exec_string}") if debug
315
+ @logger.debug("RemoteCommandHandler: starts executing '#{sudo}#{exec_string}'") if debug
257
316
  ch.on_data() do |ch, data|
258
317
  stdout << data unless data == nil || stdout == nil
259
318
  end
@@ -112,6 +112,7 @@ module StateTransitionHelper
112
112
  image_props = ec2_handler.describe_images(:image_id => ami_id)
113
113
  architecture = image_props['imagesSet']['item'][0]['architecture']
114
114
  instance_type = "m1.small"
115
+ #instance_type = "t1.micro"
115
116
  if architecture != "i386"
116
117
  instance_type = "m1.large"
117
118
  end
@@ -455,11 +456,41 @@ module StateTransitionHelper
455
456
  post_message("filesystem system successfully created")
456
457
  end
457
458
 
459
+ # Create a file-system on a given machine (assumes to be connected already).
460
+ # Input Parameters:
461
+ # * dns_name => IP used
462
+ # * device => device to be used for file-system creation (e.g. /dev/sdj)
463
+ # * type => filesystem type (ext2, ext3, ext4)
464
+ # * label => add a label to the partition
465
+ def create_labeled_fs(dns_name, device, type, label)
466
+ post_message("going to create filesystem on #{dns_name} to #{device}...")
467
+ @logger.debug "create filesystem of type '#{type}' (default is ext3) on '#{dns_name}' to '#{device}'"
468
+ fs_type = "ext3"
469
+ if !type.nil? && !type.empty?
470
+ fs_type = type
471
+ end
472
+ @logger.debug "create '#{fs_type}' filesystem on device '#{device}'"
473
+ status = remote_handler().create_filesystem(fs_type, device)
474
+ if status == false
475
+ raise Exception.new("failed to create #{type} filesystem on #{device} device on #{dns_name}")
476
+ end
477
+ post_message("filesystem system successfully created")
478
+ if !label.nil? && !label.empty?
479
+ post_message("going to add label #{label} for device #{device}...")
480
+ @logger.debug "add label '#{label}' to device '#{device}'"
481
+ if remote_handler().set_root_label(device, label)
482
+ post_message("label #{label} added to device #{device}")
483
+ else
484
+ raise Exception.new("failed to add label #{label} to device #{device}")
485
+ end
486
+ end
487
+ end
488
+
458
489
  # Create a file-system on a given machine (assumes to be connected already).
459
490
  # Input Parameters:
460
491
  # * mount_point => directory to be mounted on the device
461
492
  # * device => device used for mounting
462
- def mount_fs(mount_point, device)
493
+ def mount_fs_old(mount_point, device)
463
494
  post_message("going to mount #{device} on #{mount_point}...")
464
495
  @logger.debug "mount #{device} on #{mount_point}"
465
496
  if !remote_handler.file_exists?(mount_point)
@@ -482,6 +513,52 @@ module StateTransitionHelper
482
513
  post_message("mount successful")
483
514
  end
484
515
 
516
+ def mount_fs(mount_point, device)
517
+ post_message("going to mount #{device} on #{mount_point}...")
518
+ @logger.debug "mount #{device} on #{mount_point}"
519
+ if !remote_handler.file_exists?(mount_point)
520
+ post_message("creating mount point #{mount_point}...")
521
+ @logger.debug "creating mount point #{mount_point}"
522
+ remote_handler().mkdir(mount_point)
523
+ end
524
+ #XXX: detect new kernels that have /dev/xvdX device nodes instead of /dev/sdX
525
+ if device =~ /\/dev\/sd[a-z]/
526
+ if !remote_handler().file_exists?(device)
527
+ post_message("'#{device}' device node not found, checking for new kernel support...")
528
+ @logger.debug "'#{device}' device node not found, checking for new kernel support"
529
+ new_device = device.gsub('sd', 'xvd')
530
+ if remote_handler().file_exists?(new_device)
531
+ post_message("'#{new_device}' device node found")
532
+ @logger.debug "'#{new_device}' device node found"
533
+ device = new_device
534
+ end
535
+ end
536
+ #elsif device =~/\/dev\/xvd[a-z]/
537
+ end
538
+
539
+ done = false
540
+ timeout = 120
541
+ while timeout > 0
542
+ res = remote_handler().mount(device, mount_point)
543
+ if remote_handler().drive_mounted?(mount_point)
544
+ done = true
545
+ timeout = 0
546
+ end
547
+ sleep(5)
548
+ timeout -= 5
549
+ end
550
+ msg = ""
551
+ if !done
552
+ msg = "Failed to mount device '#{device}' to '#{mount_point}"
553
+ @logger.error "#{msg}"
554
+ raise Exception.new("device #{device} not mounted")
555
+ else
556
+ msg = "device #{device} successfully mounted"
557
+ @logger.info "#{msg}"
558
+ end
559
+ post_message("#{msg}")
560
+ end
561
+
485
562
  # Unmount a drive
486
563
  # Input Parameters:
487
564
  # * mount_point => directory to be unmounted
@@ -496,6 +573,39 @@ module StateTransitionHelper
496
573
  post_message("device unmounted")
497
574
  end
498
575
 
576
+ # Get root partition label
577
+ def get_root_partition_label()
578
+ post_message("Retrieving '/' root partition label if any...")
579
+ @logger.debug "get root partition label"
580
+ # get root device and then its label
581
+ root_device = remote_handler().get_root_device()
582
+ @logger.debug "Found '#{root_device}' as root device"
583
+ label = remote_handler().get_root_label(root_device)
584
+ @logger.debug "Found label '#{label}'"
585
+ if label.nil? || label.empty?
586
+ post_message("'/' root partition has no label specified")
587
+ else
588
+ post_message("'/' root partition label '#{label}' for root device node '#{root_device}'")
589
+ end
590
+ return label
591
+ end
592
+
593
+ # get root filesystem type
594
+ def get_root_partition_fs_type()
595
+ post_message("Retrieving '/' root partition filesystem type...")
596
+ @logger.debug "get root partition filesystel type"
597
+ # get root device and then its fs type
598
+ root_fs_type = remote_handler().get_root_fs_type()
599
+ @logger.debug "Found '#{root_fs_type}' as root filesystem type"
600
+ if root_fs_type.nil? || root_fs_type.empty?
601
+ raise Exception.new("Failed to retrieve filesystem type for '/' root partition")
602
+ else
603
+ post_message("'/' root partition contains an #{root_fs_type} filesystem")
604
+ end
605
+ return root_fs_type
606
+
607
+ end
608
+
499
609
  # Copy all files of a running linux distribution via rsync to a mounted directory
500
610
  # Input Parameters:
501
611
  # * destination_path => where to copy to
@@ -541,7 +651,7 @@ module StateTransitionHelper
541
651
  post_message("EBS volume successfully zipped")
542
652
  end
543
653
 
544
- def remote_copy(user_name, keyname, source_dir, dest_machine, dest_dir)
654
+ def remote_copy_old(user_name, keyname, source_dir, dest_machine, dest_dir)
545
655
  post_message("going to remote copy all files from volume. This may take some time...")
546
656
  key_path_candidates = ["/#{user_name}/.ssh/", "/home/#{user_name}/.ssh/"]
547
657
  key_path_candidates.each() {|key_path|
@@ -560,6 +670,39 @@ module StateTransitionHelper
560
670
  post_message("remote copy operation done")
561
671
  end
562
672
 
673
+ def disable_ssh_tty(host)
674
+ post_message("going to disable SSH tty on #{host}...")
675
+ @logger.debug "disable SSH tty on "
676
+ remote_handler().disable_sudoers_requiretty()
677
+ post_message("SSH tty disabled")
678
+ end
679
+
680
+ def enable_ssh_tty(host)
681
+ post_message("going to enable SSH tty on #{host}...")
682
+ @logger.debug "enable SSH tty on"
683
+ remote_handler().enable_sudoers_requiretty()
684
+ post_message("SSH tty enabled")
685
+ end
686
+
687
+ def remote_copy(user_name, keyname, source_dir, dest_machine, dest_user, dest_dir)
688
+ post_message("going to remote copy all files from volume. This may take some time...")
689
+ key_path_candidates = ["/#{user_name}/.ssh/", "/home/#{user_name}/.ssh/"]
690
+ key_path_candidates.each() {|key_path|
691
+ key_file = "#{key_path}#{keyname}.pem"
692
+ if remote_handler().file_exists?(key_path)
693
+ if remote_handler().tools_installed?("rsync")
694
+ @logger.debug "use rsync command on #{key_file}"
695
+ remote_handler().remote_rsync(key_file, source_dir, dest_machine, dest_user, dest_dir)
696
+ else
697
+ @logger.debug "use scp command #{key_file}"
698
+ remote_handler().scp(key_file, source_dir, dest_machine, dest_user, dest_dir)
699
+ end
700
+ break
701
+ end
702
+ }
703
+ post_message("remote copy operation done")
704
+ end
705
+
563
706
  def upload_file(ip, user, key_data, file, target_file)
564
707
  post_message("going to upload #{file} to #{user}@#{ip}:#{target_file}")
565
708
  remote_handler().upload(ip, user, key_data, file, target_file)
@@ -582,6 +725,122 @@ module StateTransitionHelper
582
725
  end
583
726
  end
584
727
 
728
+ # Mapping AmazonKernel Image IDs
729
+ # From documentation: http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/index.html?UserProvidedkernels.html
730
+ # * US-East-1
731
+ # aki-4c7d9525 ec2-public-images/pv-grub-hd00-V1.01-i386.gz.manifest.xml
732
+ # aki-4e7d9527 ec2-public-images/pv-grub-hd00-V1.01-x86_64.gz.manifest.xml
733
+ # aki-407d9529 ec2-public-images/pv-grub-hd0-V1.01-i386.gz.manifest.xml
734
+ # aki-427d952b ec2-public-images/pv-grub-hd0-V1.01-x86_64.gz.manifest.xml
735
+ # aki-525ea73b ec2-public-images/pv-grub-hd00_1.02-i386.gz.manifest.xml
736
+ # aki-8e5ea7e7 ec2-public-images/pv-grub-hd00_1.02-x86_64.gz.manifest.xml
737
+ # aki-805ea7e9 ec2-public-images/pv-grub-hd0_1.02-i386.gz.manifest.xml
738
+ # aki-825ea7eb ec2-public-images/pv-grub-hd0_1.02-x86_64.gz.manifest.xml
739
+ # * US-West-1
740
+ # aki-9da0f1d8 ec2-public-images-us-west-1/pv-grub-hd00-V1.01-i386.gz.manifest.xml
741
+ # aki-9fa0f1da ec2-public-images-us-west-1/pv-grub-hd00-V1.01-x86_64.gz.manifest.xml
742
+ # aki-99a0f1dc ec2-public-images-us-west-1/pv-grub-hd0-V1.01-i386.gz.manifest.xml
743
+ # aki-9ba0f1de ec2-public-images-us-west-1/pv-grub-hd0-V1.01-x86_64.gz.manifest.xml
744
+ # aki-87396bc2 ec2-public-images-us-west-1/pv-grub-hd00_1.02-i386.gz.manifest.xml
745
+ # aki-81396bc4 ec2-public-images-us-west-1/pv-grub-hd00_1.02-x86_64.gz.manifest.xml
746
+ # aki-83396bc6 ec2-public-images-us-west-1/pv-grub-hd0_1.02-i386.gz.manifest.xml
747
+ # aki-8d396bc8 ec2-public-images-us-west-1/pv-grub-hd0_1.02-x86_64.gz.manifest.xml
748
+ # * EU-West-1
749
+ # aki-47eec433 ec2-public-images-eu/pv-grub-hd00-V1.01-i386.gz.manifest.xml
750
+ # aki-41eec435 ec2-public-images-eu/pv-grub-hd00-V1.01-x86_64.gz.manifest.xml
751
+ # aki-4deec439 ec2-public-images-eu/pv-grub-hd0-V1.01-i386.gz.manifest.xml
752
+ # aki-4feec43b ec2-public-images-eu/pv-grub-hd0-V1.01-x86_64.gz.manifest.xml
753
+ # aki-8a6657fe ec2-public-images-eu/pv-grub-hd00_1.02-i386.gz.manifest.xml
754
+ # aki-60695814 ec2-public-images-eu/pv-grub-hd00_1.02-x86_64.gz.manifest.xml
755
+ # aki-64695810 ec2-public-images-eu/pv-grub-hd0_1.02-i386.gz.manifest.xml
756
+ # aki-62695816 ec2-public-images-eu/pv-grub-hd0_1.02-x86_64.gz.manifest.xml
757
+ # * AP-SouthEast-1
758
+ # aki-6fd5aa3d ec2-public-images-ap-southeast-1/pv-grub-hd00-V1.01-i386.gz.manifest.xml
759
+ # aki-6dd5aa3f ec2-public-images-ap-southeast-1/pv-grub-hd00-V1.01-x86_64.gz.manifest.xml
760
+ # aki-13d5aa41 ec2-public-images-ap-southeast-1/pv-grub-hd0-V1.01-i386.gz.manifest.xml
761
+ # aki-11d5aa43 ec2-public-images-ap-southeast-1/pv-grub-hd0-V1.01-x86_64.gz.manifest.xml
762
+ # aki-a0225af2 ec2-public-images-ap-southeast-1/pv-grub-hd00_1.02-i386.gz.manifest.xml
763
+ # aki-a6225af4 ec2-public-images-ap-southeast-1/pv-grub-hd00_1.02-x86_64.gz.manifest.xml
764
+ # aki-a4225af6 ec2-public-images-ap-southeast-1/pv-grub-hd0_1.02-i386.gz.manifest.xml
765
+ # aki-aa225af8 ec2-public-images-ap-southeast-1/pv-grub-hd0_1.02-x86_64.gz.manifest.xml
766
+ # * AP-NorthEast-1
767
+ # aki-d209a2d3 ec2-public-images-ap-northeast-1/pv-grub-hd0-V1.01-i386.gz.manifest.xml
768
+ # aki-d409a2d5 ec2-public-images-ap-northeast-1/pv-grub-hd0-V1.01-x86_64.gz.manifest.xml
769
+ # aki-d609a2d7 ec2-public-images-ap-northeast-1/pv-grub-hd00-V1.01-i386.gz.manifest.xml
770
+ # aki-d809a2d9 ec2-public-images-ap-northeast-1/pv-grub-hd00-V1.01-x86_64.gz.manifest.xml
771
+ # aki-e85df7e9 ec2-public-images-ap-northeast-1/pv-grub-hd00_1.02-i386.gz.manifest.xml
772
+ # aki-ea5df7eb ec2-public-images-ap-northeast-1/pv-grub-hd00_1.02-x86_64.gz.manifest.xml
773
+ # aki-ec5df7ed ec2-public-images-ap-northeast-1/pv-grub-hd0_1.02-i386.gz.manifest.xml
774
+ # aki-ee5df7ef ec2-public-images-ap-northeast-1/pv-grub-hd0_1.02-x86_64.gz.manifest.xml
775
+ def get_aws_kernel_image_aki(source_region, source_aki, target_region)
776
+ map = { 'us-east-1' => {'aki-4c7d9525' => 'pv-grub-hd00-V1.01-i386',
777
+ 'aki-4e7d9527' => 'pv-grub-hd00-V1.01-x86_64',
778
+ 'aki-407d9529' => 'pv-grub-hd0-V1.01-i386',
779
+ 'aki-427d952b' => 'pv-grub-hd0-V1.01-x86_64',
780
+ 'aki-525ea73b' => 'pv-grub-hd00_1.02-i386',
781
+ 'aki-8e5ea7e7' => 'pv-grub-hd00_1.02-x86_64',
782
+ 'aki-805ea7e9' => 'pv-grub-hd0_1.02-i386',
783
+ 'aki-825ea7eb' => 'pv-grub-hd0_1.02-x86_64'
784
+ },
785
+ 'us-west-1' => {'aki-9da0f1d8' => 'pv-grub-hd00-V1.01-i386',
786
+ 'aki-9fa0f1da' => 'pv-grub-hd00-V1.01-x86_64',
787
+ 'aki-99a0f1dc' => 'pv-grub-hd0-V1.01-i386',
788
+ 'aki-9ba0f1de' => 'pv-grub-hd0-V1.01-x86_64',
789
+ 'aki-87396bc2' => 'pv-grub-hd00_1.02-i386',
790
+ 'aki-81396bc4' => 'pv-grub-hd00_1.02-x86_64',
791
+ 'aki-83396bc6' => 'pv-grub-hd0_1.02-i386',
792
+ 'aki-8d396bc8' => 'pv-grub-hd0_1.02-x86_64'
793
+ },
794
+ 'eu-west-1' => {'aki-47eec433' => 'pv-grub-hd00-V1.01-i386',
795
+ 'aki-41eec435' => 'pv-grub-hd00-V1.01-x86_64',
796
+ 'aki-4deec439' => 'pv-grub-hd0-V1.01-i386',
797
+ 'aki-4feec43b' => 'pv-grub-hd0-V1.01-x86_64',
798
+ 'aki-8a6657fe' => 'pv-grub-hd00_1.02-i386',
799
+ 'aki-60695814' => 'pv-grub-hd00_1.02-x86_64',
800
+ 'aki-64695810' => 'pv-grub-hd0_1.02-i386',
801
+ 'aki-62695816' => 'pv-grub-hd0_1.02-x86_64'
802
+ },
803
+ 'ap-southeast-1' => {'aki-6fd5aa3d' => 'pv-grub-hd00-V1.01-i386',
804
+ 'aki-6dd5aa3f' => 'pv-grub-hd00-V1.01-x86_64',
805
+ 'aki-13d5aa41' => 'pv-grub-hd0-V1.01-i386',
806
+ 'aki-11d5aa43' => 'pv-grub-hd0-V1.01-x86_64',
807
+ 'aki-a0225af2' => 'pv-grub-hd00_1.02-i386',
808
+ 'aki-a6225af4' => 'pv-grub-hd00_1.02-x86_64',
809
+ 'aki-a4225af6' => 'pv-grub-hd0_1.02-i386',
810
+ 'aki-aa225af8' => 'pv-grub-hd0_1.02-x86_64'
811
+ },
812
+ 'ap-northeast-1' => {'aki-d209a2d3' => 'pv-grub-hd00-V1.01-i386',
813
+ 'aki-d409a2d5' => 'pv-grub-hd00-V1.01-x86_64',
814
+ 'aki-d609a2d7' => 'pv-grub-hd0-V1.01-i386',
815
+ 'aki-d809a2d9' => 'pv-grub-hd0-V1.01-x86_64',
816
+ 'aki-e85df7e9' => 'pv-grub-hd00_1.02-i386',
817
+ 'aki-ea5df7eb' => 'pv-grub-hd00_1.02-x86_64',
818
+ 'aki-ec5df7ed' => 'pv-grub-hd0_1.02-i386',
819
+ 'aki-ee5df7ef' => 'pv-grub-hd0_1.02-x86_64'
820
+ }
821
+ }
822
+ target_aki = ''
823
+ if map[source_region] == nil
824
+ Exception.new("source region not supported")
825
+ elsif map[target_region] == nil
826
+ Exception.new("target region not supported")
827
+ else
828
+ if map[source_region][source_aki] == nil
829
+ Exception.new("aki not found in source region")
830
+ else
831
+ pv_grub_info = map[source_region][source_aki]
832
+ map[target_region].each() {|key, value|
833
+ if pv_grub_info.eql?(value)
834
+ target_aki = key
835
+ break
836
+ end
837
+ }
838
+ end
839
+ end
840
+ return target_aki
841
+ end
842
+
843
+
585
844
  #setting/retrieving handlers
586
845
 
587
846
  def remote_handler()
@@ -112,7 +112,10 @@ class Ami2EbsConversion < Ec2Script
112
112
  @context[:ssh_keyfile], @context[:ssh_keydata],
113
113
  @context[:connect_trials], @context[:connect_interval]
114
114
  )
115
- create_fs(@context[:dns_name], @context[:temp_device_name])
115
+ # get root partition label and filesystem type
116
+ @context[:label] = get_root_partition_label()
117
+ @context[:fs_type] = get_root_partition_fs_type()
118
+ create_labeled_fs(@context[:dns_name], device, @context[:fs_type], @context[:label])
116
119
  FileSystemCreated.new(@context)
117
120
  end
118
121
  end
@@ -120,7 +123,6 @@ class Ami2EbsConversion < Ec2Script
120
123
  # File system created. Mount it.
121
124
  class FileSystemCreated < Ami2EbsConversionState
122
125
  def enter
123
- #@context[:mount_dir] = "/mnt/tmp_#{@context[:volume_id]}"
124
126
  @context[:mount_dir] = "/ebs_#{@context[:volume_id]}"
125
127
  mount_fs(@context[:mount_dir], @context[:temp_device_name])
126
128
  FileSystemMounted.new(@context)
@@ -118,8 +118,11 @@ class CopyAmi < Ec2Script
118
118
  device = @context[:temp_device_name]
119
119
  mount_point = "/mnt/tmp_#{@context[:source_volume_id]}"
120
120
  attach_volume(@context[:source_volume_id], @context[:source_instance_id], device)
121
- connect(@context[:source_dns_name], @context[:source_ssh_username], nil, @context[:source_ssh_keydata])
121
+ connect(@context[:source_dns_name], @context[:source_ssh_username], nil, @context[:source_ssh_keydata])
122
122
  mount_fs(mount_point, device)
123
+ # get root partition label and filesystem type
124
+ @context[:label] = get_root_partition_label()
125
+ @context[:fs_type] = get_root_partition_fs_type()
123
126
  disconnect()
124
127
  SourceVolumeReadyState.new(@context)
125
128
  end
@@ -151,7 +154,7 @@ class CopyAmi < Ec2Script
151
154
  mount_point = "/mnt/tmp_#{@context[:target_volume_id]}"
152
155
  attach_volume(@context[:target_volume_id], @context[:target_instance_id], device)
153
156
  connect(@context[:target_dns_name], @context[:target_ssh_username], nil, @context[:target_ssh_keydata])
154
- create_fs(@context[:target_dns_name], device)
157
+ create_labeled_fs(@context[:target_dns_name], device, @context[:fs_type], @context[:label])
155
158
  mount_fs(mount_point, device)
156
159
  disconnect()
157
160
  TargetVolumeReadyState.new(@context)
@@ -177,10 +180,19 @@ class CopyAmi < Ec2Script
177
180
  # Now we can copy.
178
181
  class KeyInPlaceState < CopyAmiState
179
182
  def enter()
183
+ connect(@context[:target_dns_name], @context[:target_ssh_username], nil, @context[:target_ssh_keydata])
184
+ disable_ssh_tty(@context[:target_dns_name])
185
+ disconnect()
186
+ #
180
187
  connect(@context[:source_dns_name], @context[:source_ssh_username], nil, @context[:source_ssh_keydata])
181
188
  source_dir = "/mnt/tmp_#{@context[:source_volume_id]}/"
182
189
  dest_dir = "/mnt/tmp_#{@context[:target_volume_id]}"
183
- remote_copy(@context[:source_ssh_username], @context[:target_key_name], source_dir, @context[:target_dns_name], dest_dir)
190
+ remote_copy(@context[:source_ssh_username], @context[:target_key_name], source_dir,
191
+ @context[:target_dns_name], @context[:target_ssh_username], dest_dir)
192
+ disconnect()
193
+ #
194
+ connect(@context[:target_dns_name], @context[:target_ssh_username], nil, @context[:target_ssh_keydata])
195
+ enable_ssh_tty(@context[:target_dns_name])
184
196
  disconnect()
185
197
  DataCopiedState.new(@context)
186
198
  end
@@ -200,8 +212,14 @@ class CopyAmi < Ec2Script
200
212
  class TargetSnapshotCreatedState < CopyAmiState
201
213
  def enter()
202
214
  remote_region()
215
+ # Get Amazon Kernel Image ID
216
+ aki = get_aws_kernel_image_aki(@context[:ec2_api_handler].server.split('.')[1], @context[:kernel_id],
217
+ @context[:target_ec2_handler].server.split('.')[1])
218
+ #@context[:result][:image_id] = register_snapshot(@context[:new_snapshot_id], @context[:name],
219
+ # @context[:root_device_name], @context[:description], nil,
220
+ # nil, @context[:architecture])
203
221
  @context[:result][:image_id] = register_snapshot(@context[:new_snapshot_id], @context[:name],
204
- @context[:root_device_name], @context[:description], nil,
222
+ @context[:root_device_name], @context[:description], aki,
205
223
  nil, @context[:architecture])
206
224
  AmiRegisteredState.new(@context)
207
225
  end
@@ -96,6 +96,9 @@ class CopySnapshot< Ec2Script
96
96
  attach_volume(@context[:source_volume_id], @context[:source_instance_id], device)
97
97
  connect(@context[:source_dns_name], @context[:source_ssh_username], nil, @context[:source_ssh_keydata])
98
98
  mount_fs(mount_point, device)
99
+ # get root partition label and filesystem type
100
+ @context[:label] = get_root_partition_label()
101
+ @context[:fs_type] = get_root_partition_fs_type()
99
102
  disconnect()
100
103
  SourceVolumeReadyState.new(@context)
101
104
  end
@@ -127,7 +130,7 @@ class CopySnapshot< Ec2Script
127
130
  mount_point = "/mnt/tmp_#{@context[:target_volume_id]}"
128
131
  attach_volume(@context[:target_volume_id], @context[:target_instance_id], device)
129
132
  connect(@context[:target_dns_name], @context[:target_ssh_username], nil, @context[:target_ssh_keydata])
130
- create_fs(@context[:target_dns_name], device)
133
+ create_labeled_fs(@context[:target_dns_name], device, @context[:fs_type], @context[:label])
131
134
  mount_fs(mount_point, device)
132
135
  disconnect()
133
136
  TargetVolumeReadyState.new(@context)
@@ -153,10 +156,19 @@ class CopySnapshot< Ec2Script
153
156
  # Now we can copy.
154
157
  class KeyInPlaceState < CopySnapshotState
155
158
  def enter()
159
+ connect(@context[:target_dns_name], @context[:target_ssh_username], nil, @context[:target_ssh_keydata])
160
+ disable_ssh_tty(@context[:target_dns_name])
161
+ disconnect()
162
+ #
156
163
  connect(@context[:source_dns_name], @context[:source_ssh_username], nil, @context[:source_ssh_keydata])
157
164
  source_dir = "/mnt/tmp_#{@context[:source_volume_id]}/"
158
165
  dest_dir = "/mnt/tmp_#{@context[:target_volume_id]}"
159
- remote_copy(@context[:source_ssh_username], @context[:target_key_name], source_dir, @context[:target_dns_name], dest_dir)
166
+ remote_copy(@context[:source_ssh_username], @context[:target_key_name], source_dir,
167
+ @context[:target_dns_name], @context[:target_ssh_username], dest_dir)
168
+ disconnect()
169
+ #
170
+ connect(@context[:target_dns_name], @context[:target_ssh_username], nil, @context[:target_ssh_keydata])
171
+ enable_ssh_tty(@context[:target_dns_name])
160
172
  disconnect()
161
173
  DataCopiedState.new(@context)
162
174
  end
@@ -204,4 +216,4 @@ end
204
216
  #perform an rsynch
205
217
  #sync -PHAXaz --rsh "ssh -i /home/${src_user}/.ssh/id_${dst_keypair}" --rsync-path "sudo rsync" ${src_dir}/ ${dst_user}@${dst_public_fqdn}:${dst_dir}/
206
218
  #create a snapshot of the volume
207
- #clean-up everything
219
+ #clean-up everything
metadata CHANGED
@@ -1,13 +1,13 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: CloudyScripts
3
3
  version: !ruby/object:Gem::Version
4
- hash: 13
4
+ hash: 11
5
5
  prerelease: false
6
6
  segments:
7
7
  - 1
8
8
  - 8
9
- - 29
10
- version: 1.8.29
9
+ - 30
10
+ version: 1.8.30
11
11
  platform: ruby
12
12
  authors:
13
13
  - Matthias Jung
@@ -15,7 +15,7 @@ autorequire:
15
15
  bindir: bin
16
16
  cert_chain: []
17
17
 
18
- date: 2011-07-07 00:00:00 +00:00
18
+ date: 2011-07-19 00:00:00 +00:00
19
19
  default_executable:
20
20
  dependencies:
21
21
  - !ruby/object:Gem::Dependency