CloudyScripts 2.14.62 → 2.14.63

This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
data/Rakefile CHANGED
@@ -12,16 +12,17 @@ require 'rake/testtask'
 
  spec = Gem::Specification.new do |s|
  s.name = 'CloudyScripts'
- #s.version = '2.14.54' #<number cloud-stacks supported>.<number cloud-scripts>.<counting releases>
- s.version = '2.14.62' #<number cloud-stacks supported>.<number cloud-scripts>.<counting releases>
+ s.version = '2.14.63' #<number cloud-stacks supported>.<number cloud-scripts>.<counting releases>
  s.has_rdoc = true
  s.extra_rdoc_files = ['README.rdoc', 'LICENSE']
  s.summary = 'Scripts to facilitate programming for infrastructure clouds.'
  s.description = s.summary
  s.homepage = "http://elastic-security.com"
- s.rubyforge_project = "cloudyscripts"
- s.author = 'Matthias Jung'
- s.email = 'matthias.jung@gmail.com'
+ s.rubyforge_project = "CloudyScripts"
+ #s.author = 'Matthias Jung'
+ s.authors = ['Matthias Jung', 'Frederic Donnat']
+ #s.email = 'matthias.jung@gmail.com'
+ s.email = ['matthias.jung@gmail.com', 'fred@secludit.com']
  # s.executables = ['your_executable_here']
  s.files = %w(LICENSE README.rdoc Rakefile) + Dir.glob("{bin,lib,spec}/**/*")
  s.require_path = "lib"
@@ -130,7 +130,7 @@ class Ec2Helper
  end
  return amis['imagesSet']['item'][0][prop.to_s]
  rescue
- raise Exception.new("image #{ami_id} not found")
+ raise Exception.new("image #{ami_id} not found")
  end
  end
 
@@ -146,7 +146,7 @@ class Ec2Helper
  end
  return amis['imagesSet']['item'][0]['blockDeviceMapping']['item'][0]['ebs'][prop.to_s]
  rescue
- raise Exception.new("image #{ami_id} not found")
+ raise Exception.new("image #{ami_id} not found")
  end
  end
 
@@ -264,4 +264,10 @@ class Ec2Helper
  instance_info['instancesSet']['item'][0][prop.to_s]
  end
 
+ # Check if keypair exists
+ def check_keypair(keypair_name)
+ puts "check if '#{keypair_name}' keypair exists"
+ @ec2_api.describe_keypairs(:key_name => keypair_name)
+ end
+
  end
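
The new Ec2Helper#check_keypair simply issues describe_keypairs and lets the underlying AWS error propagate when the key pair does not exist; the scripts further down catch that error and re-raise it with region context. A minimal sketch of that calling pattern, using a stub in place of the real AWS handler (FakeEc2Api and its behaviour are illustrative assumptions, not part of the gem):

    # Stub standing in for the amazon-ec2 handler, so the sketch runs offline.
    class FakeEc2Api
      def initialize(known_keys)
        @known_keys = known_keys
      end

      # Mimics the behaviour check_keypair relies on: raise when the key pair is unknown.
      def describe_keypairs(options)
        unless @known_keys.include?(options[:key_name])
          raise "key pair '#{options[:key_name]}' not found"
        end
        { 'keySet' => { 'item' => [{ 'keyName' => options[:key_name] }] } }
      end
    end

    ec2_api = FakeEc2Api.new(['my-keypair'])
    begin
      ec2_api.describe_keypairs(:key_name => 'missing-keypair')
    rescue Exception => e
      puts "key pair validation failed: #{e}"  # the scripts re-raise this with "source region: ..." etc.
    end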
data/lib/help/helper.rb CHANGED
@@ -35,3 +35,12 @@ def check_aws_desc(str)
  return false
  end
  end
+
+ # Check SSH key for connecting AWS instance
+ # Constraints: A RSA Private Key
+ def check_ssh_key(key)
+ priv_key = OpenSSL::PKey::RSA.new(key)
+ if !priv_key.private?()
+ raise Exception.new("Invalid SSH Key")
+ end
+ end
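
check_ssh_key accepts only RSA private key material: OpenSSL must parse the PEM and report it as a private key, otherwise the scripts abort before touching AWS. A self-contained sketch of the same check (the throwaway key is generated locally, purely for illustration):

    require 'openssl'

    # Same idea as check_ssh_key: parse the PEM and require a private RSA key.
    def private_rsa_key?(pem)
      OpenSSL::PKey::RSA.new(pem).private?
    rescue OpenSSL::PKey::RSAError
      false
    end

    key = OpenSSL::PKey::RSA.new(2048)             # throwaway key for the example
    puts private_rsa_key?(key.to_pem)              # => true  (private key PEM)
    puts private_rsa_key?(key.public_key.to_pem)   # => false (public part only)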
@@ -69,6 +69,11 @@ class RemoteCommandHandler
  get_output("uname -r").strip
  end
 
+ # return the result of mount
+ def mount_output()
+ get_output("mount").strip
+ end
+
  # Return all the partitions of a device
  def get_device_partition(device)
  get_output("ls #{device}*").strip
@@ -79,16 +84,18 @@ class RemoteCommandHandler
  get_output("sfdisk -d #{device}")
  end
 
- # GSet the partition table of a device
+ # Set the partition table of a device
  def set_partition_table(device, partition_table)
  push_data = "\"" + partition_table.gsub(/\/dev\/(s|xv)d[a-z]/, "#{device}") + "\""
  remote_execute("sfdisk -f #{device}", push_data, nil)
  end
 
  # Get root partition label
+ # NB: add '/dev' detection in order to avoid entries containing 'rootfs'
  def get_root_device()
  #get_output("cat /etc/mtab | grep -E '[[:blank:]]+\/[[:blank:]]+' | cut -d ' ' -f 1").strip
- get_output("mount | grep -E '[[:blank:]]+\/[[:blank:]]+' | cut -d ' ' -f 1").strip
+ #get_output("mount | grep -E '[[:blank:]]+\/[[:blank:]]+' | cut -d ' ' -f 1").strip
+ get_output("mount | grep -E '[[:blank:]]+\/[[:blank:]]+' | grep -E '\/dev' | cut -d ' ' -f 1").strip
  end
 
  # Get the device of a specific partition
@@ -385,9 +392,9 @@ class RemoteCommandHandler
  if sudo
  channel.request_pty do |ch, success|
  if success
- @logger.debug("pty successfully obtained")
+ @logger.debug("pty successfully obtained") if debug
  else
- @logger.debug("could not obtain pty")
+ @logger.debug("could not obtain pty") if debug
  end
  end
  end
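
get_root_device now pipes the mount output through an extra grep -E '/dev' so that pseudo entries such as rootfs no longer shadow the real root block device, and the new mount_output helper lets callers log the raw listing first. A plain-Ruby illustration of the filter on sample mount output (the sample lines are made up for the example):

    # Sample `mount` output as it might look on an EC2 instance (illustrative).
    mount_output = "rootfs / rootfs rw 0 0\n" \
                   "/dev/xvda1 / ext3 rw,noatime 0 0\n" \
                   "proc /proc proc rw 0 0\n"

    # Equivalent of: mount | grep -E '[[:blank:]]+/[[:blank:]]+' | grep -E '/dev' | cut -d ' ' -f 1
    root_device = mount_output.lines
                              .select { |line| line =~ /\s+\/\s+/ }      # mounted on '/'
                              .select { |line| line.include?('/dev') }   # drop 'rootfs' & co.
                              .map    { |line| line.split(' ').first }
                              .first
    puts root_device  # => "/dev/xvda1"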
@@ -39,11 +39,11 @@ module StateTransitionHelper
  # Returns:
  # * OS of the connected machine
  def connect(dns_name, user_name, ssh_keyfile = nil, ssh_keydata = nil,
- trials = 10, wait_between_trials = 20)
+ trials = 10, wait_between_trials = 30)
  post_message("connecting '#{user_name}' to #{dns_name}...")
  connected = false
  last_connection_problem = ""
- remaining_trials = trials-1
+ remaining_trials = trials
  while !connected && remaining_trials > 0
  remaining_trials -= 1
  if ssh_keyfile != nil
@@ -409,8 +409,8 @@ module StateTransitionHelper
  done = true
  timeout = 0
  end
- sleep(5)
- timeout -= 5
+ sleep(10)
+ timeout -= 10
  end
  msg = ""
  if !done
@@ -444,8 +444,8 @@ module StateTransitionHelper
  done = true
  timeout = 0
  end
- sleep(5)
- timeout -= 5
+ sleep(10)
+ timeout -= 10
  end
  msg = ""
  if !done
@@ -563,23 +563,26 @@ module StateTransitionHelper
  return image_id
  end
 
+ # Create a Security group with specific rules
+ # NB: - if the Security Group already exists add the required rules if not present
+ # - do not try to delete the Security Group because this will disallow multi-tasks and parallelization
  def create_security_group_with_rules(name, desc, rules)
  post_message("going to create '#{name}' Security Group...")
  @logger.debug "create Security Group (name: #{name}, desc: #{desc})"
  begin
- res = ec2_handler().describe_security_groups(:group_name => name)
- if res['securityGroupInfo']['item'].size > 0
- @logger.warn "'#{name}' Security Group found. Another Security Group already exists with the same name. Deleting it first."
- res = ec2_handler().delete_security_group(:group_name => name)
- end
- rescue AWS::InvalidGroupNotFound => e
- @logger.debug "'#{name}' Security Group not found."
+ res = ec2_handler().create_security_group(:group_name => name, :group_description => desc)
+ rescue AWS::InvalidGroupDuplicate => e
+ @logger.warn "'#{name}' Security Group already exists: #{e.to_s}"
+ ensure
+ rules.each(){ |rule|
+ begin
+ ec2_handler().authorize_security_group_ingress(:group_name => name,
+ :ip_protocol => rule[:ip_protocol], :from_port => rule[:from_port], :to_port => rule[:to_port], :cidr_ip => rule[:cidr_ip])
+ rescue AWS::InvalidPermissionDuplicate => e
+ @logger.warn "'#{rule[:proto].upcase()} (#{rule[:from_port]}-#{rule[:to_port]})' rule already exists: #{e.to_s}"
+ end
+ }
  end
- res = ec2_handler().create_security_group(:group_name => name, :group_description => desc)
- rules.each(){ |rule|
- ec2_handler().authorize_security_group_ingress(:group_name => name,
- :ip_protocol => rule[:ip_protocol], :from_port => rule[:from_port], :to_port => rule[:to_port], :cidr_ip => rule[:cidr_ip])
- }
  return true
  end
 
@@ -605,9 +608,9 @@ module StateTransitionHelper
  @logger.debug "create filesystem on #{dns_name} to #{device}"
  status = remote_handler().create_filesystem("ext3", device)
  if status == false
- raise Exception.new("failed to create ext3 filesystem on #{device} device on #{dns_name}")
+ raise Exception.new("Failed to create ext3 filesystem on #{device} device on #{dns_name}")
  end
- post_message("filesystem system successfully created")
+ post_message("filesystem system successfully created on device #{device}")
  end
 
  # Create a file-system on a given machine (assumes to be connected already).
@@ -626,7 +629,7 @@ module StateTransitionHelper
  @logger.debug "create '#{fs_type}' filesystem on device '#{device}'"
  status = remote_handler().create_filesystem(fs_type, device)
  if status == false
- raise Exception.new("failed to create #{type} filesystem on #{device} device on #{dns_name}")
+ raise Exception.new("Failed to create #{type} filesystem on #{device} device on #{dns_name}")
  end
  post_message("#{fs_type} filesystem system successfully created on device #{device}")
  if !label.nil? && !label.empty?
@@ -635,7 +638,7 @@ module StateTransitionHelper
  if remote_handler().set_device_label_ext(device, label, fs_type)
  post_message("label #{label} added to device #{device}")
  else
- raise Exception.new("failed to add label #{label} to device #{device}")
+ raise Exception.new("Failed to add label #{label} to device #{device}")
  end
  end
  end
@@ -810,6 +813,9 @@ module StateTransitionHelper
  # Get root partition
  def get_root_device_name()
  post_message("Retrieving '/' root device name...")
+ @logger.debug "issuing very first mount..."
+ mount_output = remote_handler().mount_output()
+ @logger.debug "mount output:\n#{mount_output}"
  @logger.debug "get root device name"
  root_device = remote_handler().get_root_device()
  @logger.debug "Found '#{root_device}' as root device"
@@ -994,7 +1000,7 @@ module StateTransitionHelper
  post_message("going to zip the EBS volume")
  stderr = remote_handler().zip(source_dir, zip_file_dest+"/"+zip_file_name)
  if stderr.size > 0
- @logger.info("zip operation generated error and might not be complete. output: #{stderr.join("\n")}")
+ @logger.warn("zip operation generated error and might not be complete. output: #{stderr.join("\n")}")
  post_message("zip operation generated error and might not be complete. output: #{stderr.join("\n")}")
  end
  post_message("EBS volume successfully zipped")
@@ -4,6 +4,7 @@ require "help/remote_command_handler"
  #require "help/dm_crypt_helper"
  require "help/ec2_helper"
  require "AWS"
+ require "help/helper"
 
  # Creates a bootable EBS storage from an existing AMI.
  #
@@ -31,15 +32,23 @@ class Ami2EbsConversion < Ec2Script
  end
 
  def check_input_parameters()
- if @input_params[:security_group_name] == nil
- @input_params[:security_group_name] = "default"
- end
  if @input_params[:ami_id] == nil && !(@input_params[:ami_id] =~ /^ami-.*$/)
  raise Exception.new("Invalid AMI ID specified: #{@input_params[:ami_id]}")
  end
  ec2_helper = Ec2Helper.new(@input_params[:ec2_api_handler])
+ # AWS Security Group
+ if @input_params[:security_group_name] == nil
+ @input_params[:security_group_name] = "default"
+ end
  if !ec2_helper.check_open_port(@input_params[:security_group_name], 22)
- raise Exception.new("Port 22 must be opened for security group #{@input_params[:security_group_name]} to connect via SSH")
+ post_message("'#{@input_params[:security_group_name]}' Security Group not opened port 22 for connect via SSH in source region")
+ @input_params[:security_group_name] = nil
+ end
+ # AWS KeyPair
+ if @input_params[:key_name] == nil || @input_params[:key_name].empty?()
+ raise Exception.new("No KeyPair name specified")
+ else
+ ec2_helper.check_keypair(@input_params[:key_name])
  end
  if @input_params[:name] == nil
  @input_params[:name] = "Boot EBS (for AMI #{@input_params[:ami_id]}) at #{Time.now.strftime('%d/%m/%Y %H.%M.%S')}"
@@ -53,9 +62,16 @@ class Ami2EbsConversion < Ec2Script
  if @input_params[:root_device_name] == nil
  @input_params[:root_device_name] = "/dev/sda1"
  end
+ # SSH Parameters
  if @input_params[:ssh_username] == nil
  @input_params[:ssh_username] = "root"
  end
+ if @input_params[:ssh_keydata] == nil
+ raise Exception.new("No Private Key for source region")
+ else
+ post_message("Checking SSH key for source region...")
+ check_ssh_key(@input_params[:ssh_keydata])
+ end
  if @input_params[:connect_trials] == nil
  @input_params[:connect_trials] = 6
  end
@@ -84,6 +100,14 @@ class Ami2EbsConversion < Ec2Script
  class InitialState < Ami2EbsConversionState
  def enter
  puts "DEBUG: params: #{@context[:ami_id]}, #{@context[:key_name]}, #{@context[:security_group_name]}"
+ #XXX: create a CloudyScripts Security Group with TCP port 22 publicly opened
+ if @context[:security_group_name] == nil
+ @context[:security_group_name] = Ec2Script::CS_SEC_GRP_NAME
+ create_security_group_with_rules(@context[:security_group_name], Ec2Script::CS_SEC_GRP_DESC,
+ [{:ip_protocol => "tcp", :from_port => 22, :to_port => 22, :cidr_ip => "0.0.0.0/0"}])
+ post_message("'#{@context[:security_group_name]}' Security Group created with TCP port 22 publicly opened.")
+ end
+
  @context[:instance_id], @context[:dns_name], @context[:availability_zone],
  @context[:kernel_id], @context[:ramdisk_id], @context[:architecture] =
  launch_instance(@context[:ami_id], @context[:key_name], @context[:security_group_name])
@@ -102,7 +126,7 @@ class Ami2EbsConversion < Ec2Script
  # Storage created. Attach it.
  class StorageCreated < Ami2EbsConversionState
  def enter
- attach_volume(@context[:volume_id], @context[:instance_id], @context[:temp_device_name])
+ attach_volume(@context[:volume_id], @context[:instance_id], @context[:temp_device_name], Ec2Script::CS_AWS_TIMEOUT)
  StorageAttached.new(@context)
  end
  end
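
Ami2EbsConversion now refuses to run without a key pair name and a matching RSA private key, and when port 22 is closed on the requested security group it clears the parameter so that InitialState creates the CloudyScripts group instead. An illustrative parameter hash built from the keys visible in this diff (every value is a placeholder; a real run needs a connected AWS handler and real credentials):

    ec2_api = nil  # e.g. an amazon-ec2 handler in a real run; left nil in this sketch
    params = {
      :ec2_api_handler     => ec2_api,
      :ami_id              => 'ami-12345678',                        # placeholder AMI to convert
      :key_name            => 'my-keypair',                          # must exist (check_keypair)
      :ssh_username        => 'root',                                # default when omitted
      :ssh_keydata         => '-----BEGIN RSA PRIVATE KEY-----...',  # validated by check_ssh_key
      :security_group_name => nil,                                   # nil/closed port 22 => CloudyScripts group
      :name                => 'Boot EBS (for AMI ami-12345678)',
      :description         => 'Created by CloudyScripts - Ami2EbsConversion'
    }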
@@ -40,7 +40,8 @@ class CopyAmi < Ec2Script
  end
 
  def check_input_parameters()
- if @input_params[:ami_id] == nil && !(@input_params[:ami_id] =~ /^ami-.*$/)
+ post_message("Checking parameters...")
+ if @input_params[:ami_id] == nil || !(@input_params[:ami_id] =~ /^ami-.*$/)
  raise Exception.new("Invalid AMI ID specified: #{@input_params[:ami_id]}")
  end
  ec2_helper = Ec2Helper.new(@input_params[:ec2_api_handler])
@@ -48,6 +49,29 @@ class CopyAmi < Ec2Script
  raise Exception.new("must be an EBS type image")
  end
  local_ec2_helper = ec2_helper
+ remote_ec2_helper = Ec2Helper.new(@input_params[:target_ec2_handler])
+ # AWS KeyPair
+ if @input_params[:source_key_name] == nil || @input_params[:source_key_name].empty?()
+ raise Exception.new("No KeyPair name specified for source region")
+ else
+ begin
+ local_ec2_helper.check_keypair(@input_params[:source_key_name])
+ rescue Exception => e
+ post_message("'#{@input_params[:source_key_name]}' Key pair not found in source region")
+ raise Exception.new("source region: #{e.to_s}")
+ end
+ end
+ if @input_params[:target_key_name] == nil || @input_params[:target_key_name].empty?()
+ raise Exception.new("No KeyPair name specified for target region")
+ else
+ begin
+ remote_ec2_helper.check_keypair(@input_params[:target_key_name])
+ rescue Exception => e
+ post_message("'#{@input_params[:target_key_name]}' Key pair not found in target region")
+ raise Exception.new("target region: #{e.to_s}")
+ end
+ end
+ # AWS SecurityGroup
  if @input_params[:source_security_group] == nil
  @input_params[:source_security_group] = "default"
  end
@@ -57,7 +81,6 @@ class CopyAmi < Ec2Script
  else
  post_message("'#{@input_params[:source_security_group]}' Security Group opened port 22 for connect via SSH in source region")
  end
- remote_ec2_helper = Ec2Helper.new(@input_params[:target_ec2_handler])
  if @input_params[:target_security_group] == nil
  @input_params[:target_security_group] = "default"
  end
@@ -67,18 +90,41 @@ class CopyAmi < Ec2Script
  else
  post_message("'#{@input_params[:target_security_group]}' Security Group opened port 22 for connect via SSH in target region")
  end
+ # Device to use
  if @input_params[:root_device_name] == nil
  @input_params[:root_device_name] = "/dev/sda1"
  end
  if @input_params[:temp_device_name] == nil
  @input_params[:temp_device_name] = "/dev/sdj"
  end
+ # SSH Parameters
  if @input_params[:source_ssh_username] == nil
  @input_params[:source_ssh_username] = "root"
  end
  if @input_params[:target_ssh_username] == nil
  @input_params[:target_ssh_username] = "root"
  end
+ if @input_params[:source_ssh_keydata] == nil
+ raise Exception.new("No Private Key for source region")
+ else
+ begin
+ check_ssh_key(@input_params[:source_ssh_keydata])
+ rescue Exception => e
+ post_message("not a Private Key: #{e.to_s}")
+ raise Exception.new("Invalid Private Key for source region: #{e.to_s}")
+ end
+ end
+ if @input_params[:target_ssh_keydata] == nil
+ raise Exception.new("No Private Key for target region")
+ else
+ begin
+ check_ssh_key(@input_params[:target_ssh_keydata])
+ rescue Exception => e
+ post_message("not a Private Key: #{e.to_s}")
+ raise Exception.new("Invalid Private Key for target region: #{e.to_s}")
+ end
+ end
+ # AWS Name and Description
  if @input_params[:description] == nil || !check_aws_desc(@input_params[:description])
  @input_params[:description] = "Created by CloudyScripts - #{self.class.name}"
  end
@@ -150,7 +196,8 @@ class CopyAmi < Ec2Script
  @context[:source_availability_zone])
  device = @context[:temp_device_name]
  mount_point = "/mnt/tmp_#{@context[:source_volume_id]}"
- attach_volume(@context[:source_volume_id], @context[:source_instance_id], device)
+ #XXX: attach volume after root partition detection
+ #attach_volume(@context[:source_volume_id], @context[:source_instance_id], device)
  connect(@context[:source_dns_name], @context[:source_ssh_username], nil, @context[:source_ssh_keydata])
  # detect if there is a shift for device mapping (between AWS and the operating system of the system)
  root_device_name = get_root_device_name()
@@ -167,6 +214,9 @@ class CopyAmi < Ec2Script
  aws_device_letter.succ!
  end
 
+ # attach volume
+ attach_volume(@context[:source_volume_id], @context[:source_instance_id], device, Ec2Script::CS_AWS_TIMEOUT)
+
  device = "/dev/sd#{aws_device_letter}"
  # detect root partition vs root volume: simply check if we have several /dev/sdx* entries
  parts_count = get_partition_count(device)
@@ -229,7 +279,7 @@ class CopyAmi < Ec2Script
  @context[:target_volume_id] = create_volume(@context[:target_availability_zone], volume_size)
  device = @context[:temp_device_name]
  mount_point = "/mnt/tmp_#{@context[:target_volume_id]}"
- attach_volume(@context[:target_volume_id], @context[:target_instance_id], device)
+ attach_volume(@context[:target_volume_id], @context[:target_instance_id], device, Ec2Script::CS_AWS_TIMEOUT)
  connect(@context[:target_dns_name], @context[:target_ssh_username], nil, @context[:target_ssh_keydata])
  # check if we need to create a partition table
  if !(@context[:partition_table] == nil)
@@ -332,6 +382,7 @@ class CopyAmi < Ec2Script
  # in the both regions.
  class AmiRegisteredState < CopyAmiState
  def enter()
+ post_message("Cleaning Source and Target Regions...")
  error = []
  local_region()
  begin
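
The reshuffled AmiSnapshotCreatedState resolves the AWS-versus-kernel device letter shift (for example /dev/sda1 reported by AWS but exposed as /dev/xvde1 by the kernel) before attaching the temporary volume, and shifts the temporary device name by the same amount. A self-contained sketch of that letter arithmetic, mirroring the gsub/succ! logic in the diff (the device names are examples only):

    # Keep only the letter: strip "/dev/", the sd/xvd prefix and any partition number.
    def device_letter(device)
      device.split('/')[2].gsub('sd', '').gsub('xvd', '').gsub(/[0-9]/, '')
    end

    aws_root    = '/dev/sda1'   # root device as reported by AWS
    os_root     = '/dev/xvde1'  # root device as seen by the kernel (shifted mapping)
    temp_device = '/dev/sdj'    # AWS name requested for the temporary volume

    aws_letter        = device_letter(aws_root)     # "a"
    os_letter         = device_letter(os_root)      # "e"
    aws_device_letter = device_letter(temp_device)  # "j"

    # Walk both letters forward until they match; the temp device shifts by the same offset.
    until aws_letter.eql?(os_letter)
      aws_letter.succ!
      aws_device_letter.succ!
    end
    puts "/dev/sd#{aws_device_letter}"  # => "/dev/sdn": OS-side name for the attached /dev/sdj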
@@ -0,0 +1,449 @@
+ require "help/script_execution_state"
+ require "scripts/ec2/ec2_script"
+ require "help/remote_command_handler"
+ require "help/dm_crypt_helper"
+ require "help/ec2_helper"
+ require "AWS"
+ require "help/helper"
+
+
+ # Copy a given snapshot to another region
+ # * start up instance in source-region, create a snapshot from the mounted EBS
+ # * then create volume from snapshot, attach volume, and mount it
+ # * start up instance in destination-region, create empty volume of same size, attach volume, and mount it
+ # * copy the destination key to the source instance
+ # * perform an rsync
+ # rsync -PHAXaz --rsh "ssh -i /home/${src_user}/.ssh/id_${dst_keypair}" --rsync-path "sudo rsync" ${src_dir}/ ${dst_user}@${dst_public_fqdn}:${dst_dir}/
+ # * create a snapshot of the volume
+ # * register the snapshot as AMI
+ # * clean-up everything
+
+ class CopyAmiTest < Ec2Script
+ # context information needed
+ # * the EC2 credentials (see #Ec2Script)
+ # * ami_id => the ID of the AMI to be copied in another region
+ # * target_ec2_handler => The EC2 handler connected to the region where the snapshot is being copied to
+ # * source_ssh_username => The username for ssh for source-instance (default = root)
+ # * source_key_name => Key name of the instance that manages the snapshot-volume in the source region
+ # * source_ssh_key_data => Key information for the security group that starts the AMI [if not set, use ssh_key_files]
+ # * source_ssh_key_files => Key information for the security group that starts the AMI
+ # * target_ssh_username => The username for ssh for target-instance (default = root)
+ # * target_key_name => Key name of the instance that manages the snapshot-volume in the target region
+ # * target_ssh_key_data => Key information for the security group that starts the AMI [if not set, use ssh_key_files]
+ # * target_ssh_key_files => Key information for the security group that starts the AMI
+ # * target_ami_id => ID of the AMI to start in the target region
+ # * name => name of new AMI to be created
+ # * description => description of new AMI to be created
+
+ def initialize(input_params)
+ super(input_params)
+ end
+
+ def check_input_parameters()
+ post_message("Checking parameters...")
+ if @input_params[:ami_id] == nil || !(@input_params[:ami_id] =~ /^ami-.*$/)
+ raise Exception.new("Invalid AMI ID specified: #{@input_params[:ami_id]}")
+ end
+ ec2_helper = Ec2Helper.new(@input_params[:ec2_api_handler])
+ if ec2_helper.ami_prop(@input_params[:ami_id], 'rootDeviceType') != "ebs"
+ raise Exception.new("must be an EBS type image")
+ end
+ local_ec2_helper = ec2_helper
+ remote_ec2_helper = Ec2Helper.new(@input_params[:target_ec2_handler])
+ # AWS KeyPair
+ if @input_params[:source_key_name] == nil || @input_params[:source_key_name].empty?()
+ raise Exception.new("No KeyPair name specified for source region")
+ else
+ begin
+ local_ec2_helper.check_keypair(@input_params[:source_key_name])
+ rescue Exception => e
+ post_message("'#{@input_params[:source_key_name]}' Key pair not found in source region")
+ raise Exception.new("source region: #{e.to_s}")
+ end
+ end
+ if @input_params[:target_key_name] == nil || @input_params[:target_key_name].empty?()
+ raise Exception.new("No KeyPair name specified for target region")
+ else
+ begin
+ remote_ec2_helper.check_keypair(@input_params[:target_key_name])
+ rescue Exception => e
+ post_message("'#{@input_params[:target_key_name]}' Key pair not found in target region")
+ raise Exception.new("target region: #{e.to_s}")
+ end
+ end
+ # AWS SecurityGroup
+ if @input_params[:source_security_group] == nil
+ @input_params[:source_security_group] = "default"
+ end
+ if !local_ec2_helper.check_open_port(@input_params[:source_security_group], 22)
+ post_message("'#{@input_params[:source_security_group]}' Security Group not opened port 22 for connect via SSH in source region")
+ @input_params[:source_security_group] = nil
+ else
+ post_message("'#{@input_params[:source_security_group]}' Security Group opened port 22 for connect via SSH in source region")
+ end
+ if @input_params[:target_security_group] == nil
+ @input_params[:target_security_group] = "default"
+ end
+ if !remote_ec2_helper.check_open_port(@input_params[:target_security_group], 22)
+ post_message("'#{@input_params[:target_security_group]}' Security Group not opened port 22 for connect via SSH in target region")
+ @input_params[:target_security_group] = nil
+ else
+ post_message("'#{@input_params[:target_security_group]}' Security Group opened port 22 for connect via SSH in target region")
+ end
+ # Device to use
+ if @input_params[:root_device_name] == nil
+ @input_params[:root_device_name] = "/dev/sda1"
+ end
+ if @input_params[:temp_device_name] == nil
+ @input_params[:temp_device_name] = "/dev/sdj"
+ end
+ # SSH Parameters
+ if @input_params[:source_ssh_username] == nil
+ @input_params[:source_ssh_username] = "root"
+ end
+ if @input_params[:target_ssh_username] == nil
+ @input_params[:target_ssh_username] = "root"
+ end
+ if @input_params[:source_ssh_keydata] == nil
+ raise Exception.new("No Private Key for source region")
+ else
+ begin
+ check_ssh_key(@input_params[:source_ssh_keydata])
+ rescue Exception => e
+ post_message("not a Private Key: #{e.to_s}")
+ raise Exception.new("Invalid Private Key for source region: #{e.to_s}")
+ end
+ end
+ if @input_params[:target_ssh_keydata] == nil
+ raise Exception.new("No Private Key for target region")
+ else
+ begin
+ check_ssh_key(@input_params[:target_ssh_keydata])
+ rescue Exception => e
+ post_message("not a Private Key: #{e.to_s}")
+ raise Exception.new("Invalid Private Key for target region: #{e.to_s}")
+ end
+ end
+ # AWS Name and Description
+ if @input_params[:description] == nil || !check_aws_desc(@input_params[:description])
+ @input_params[:description] = "Created by CloudyScripts - #{self.class.name}"
+ end
+ if @input_params[:name] == nil || !check_aws_name(@input_params[:name])
+ @input_params[:name] = "Created_by_CloudyScripts/#{self.class.name}_from_#{@input_params[:ami_id]}"
+ end
+ end
+
+ # Load the initial state for the script.
+ # Abstract method to be implemented by extending classes.
+ def load_initial_state()
+ CopyAmiTestState.load_state(@input_params)
+ end
+
+ private
+
+ # Here begins the state machine implementation
+ class CopyAmiTestState < ScriptExecutionState
+
+ def self.load_state(context)
+ InitialState.new(context)
+ end
+
+ def local_region
+ self.ec2_handler=(@context[:ec2_api_handler])
+ end
+
+ def remote_region
+ self.ec2_handler=(@context[:target_ec2_handler])
+ end
+ end
+
+ # Initial state: start up AMI in source region
+ class InitialState < CopyAmiTestState
+ def enter()
+ local_region()
+ #XXX: create a CloudyScripts Security Group with TCP port 22 publicly opened
+ if @context[:source_security_group] == nil
+ @context[:source_security_group] = Ec2Script::CS_SEC_GRP_NAME
+ create_security_group_with_rules(@context[:source_security_group], Ec2Script::CS_SEC_GRP_DESC,
+ [{:ip_protocol => "tcp", :from_port => 22, :to_port => 22, :cidr_ip => "0.0.0.0/0"}])
+ post_message("'#{@context[:source_security_group]}' Security Group created with TCP port 22 publicly opened.")
+ end
+
+ @context[:source_instance_id], @context[:source_dns_name], @context[:source_availability_zone],
+ @context[:kernel_id], @context[:ramdisk_id], @context[:architecture], @context[:root_device_name] =
+ launch_instance(@context[:ami_id], @context[:source_key_name], @context[:source_security_group])
+ ec2_helper = Ec2Helper.new(@context[:ec2_api_handler])
+ @context[:ebs_volume_id] = ec2_helper.get_attached_volumes(@context[:source_instance_id])[0]['volumeId'] #TODO: what when more root devices?
+
+ SourceInstanceLaunchedState.new(@context)
+ end
+ end
+
+ # Source is started. Create a snapshot on the volume that is linked to the instance.
+ class SourceInstanceLaunchedState < CopyAmiTestState
+ def enter()
+ @context[:snapshot_id] = create_snapshot(@context[:ebs_volume_id],
+ "Created by CloudyScripts - #{self.get_superclass_name()} from #{@context[:ebs_volume_id]}")
+
+ AmiSnapshotCreatedState.new(@context)
+ end
+ end
+
+ # Snapshot is created from the AMI. Create a volume from the snapshot, attach and mount the volume as second device.
+ class AmiSnapshotCreatedState < CopyAmiTestState
+ def enter()
+ @context[:source_volume_id] = create_volume_from_snapshot(@context[:snapshot_id],
+ @context[:source_availability_zone])
+ device = @context[:temp_device_name]
+ mount_point = "/mnt/tmp_#{@context[:source_volume_id]}"
+ #XXX: attach volume after root partition detection
+ attach_volume(@context[:source_volume_id], @context[:source_instance_id], device)
+ connect(@context[:source_dns_name], @context[:source_ssh_username], nil, @context[:source_ssh_keydata])
+ # detect if there is a shift for device mapping (between AWS and the operating system of the system)
+ root_device_name = get_root_device_name()
+ # detect letters
+ aws_root_device = @context[:root_device_name]
+ aws_letter = aws_root_device.split('/')[2].gsub('sd', '').gsub('xvd', '').gsub(/[0-9]/, '')
+ os_letter = root_device_name.split('/')[2].gsub('sd', '').gsub('xvd', '').gsub(/[0-9]/, '')
+ aws_device_letter = device.split('/')[2].gsub('sd', '').gsub('xvd', '').gsub(/[0-9]/, '')
+ if !aws_letter.eql?(os_letter)
+ post_message("Detected specific kernel with shift between AWS and Kernel OS for device naming: '#{aws_root_device}' vs '#{root_device_name}'")
+ end
+ while !aws_letter.eql?(os_letter)
+ aws_letter.succ!
+ aws_device_letter.succ!
+ end
+
+ # attach volume
+ #attach_volume(@context[:source_volume_id], @context[:source_instance_id], device)
+
+ device = "/dev/sd#{aws_device_letter}"
+ # detect root partition vs root volume: simply check if we have several /dev/sdx* entries
+ parts_count = get_partition_count(device)
+ if parts_count >= 2
+ # retrieve partition table, in order to restore it in the target region
+ post_message("Detected specific volume with a valid partition table on device '#{device}'...")
+ partition_table = get_partition_table(device)
+ @context[:partition_table] = partition_table
+ #XXX: HANDLE at a LOWER LEVEL
+ # update partition table with device
+ # s/device/@context[:temp_device_name]/ on partition table
+ #@context[:partition_table] = partition_table.gsub("#{device}", "#{@context[:temp_device_name]}")
+ # retrieve the root partition number
+ os_nb = root_device_name.split('/')[2].gsub('sd', '').gsub('xvd', '').gsub(/[a-z]/, '')
+ device = device + os_nb
+ @context[:root_partition_nb] = os_nb
+ post_message("Using root partition: '#{device}'...")
+ end
+ post_message("Using AWS name '#{@context[:temp_device_name]}' and OS name '#{device}'")
+ mount_fs(mount_point, device)
+ # get root partition label and filesystem type
+ #@context[:label] = get_root_partition_label()
+ #@context[:fs_type] = get_root_partition_fs_type()
+ @context[:fs_type], @context[:label] = get_root_partition_fs_type_and_label()
+ disconnect()
+
+ #XXX: go to clean up state
+ #SourceVolumeReadyState.new(@context)
+ AmiRegisteredState.new(@context)
+ end
+ end
+
+ # Source is ready. Now start instance in the target region
+ class SourceVolumeReadyState < CopyAmiTestState
+ def enter()
+ remote_region()
+ #XXX: create a CloudyScripts Security Group with TCP port 22 publicly opened
+ if @context[:target_security_group] == nil
+ @context[:target_security_group] = Ec2Script::CS_SEC_GRP_NAME
+ create_security_group_with_rules(@context[:target_security_group], Ec2Script::CS_SEC_GRP_DESC,
+ [{:ip_protocol => "tcp", :from_port => 22, :to_port => 22, :cidr_ip => "0.0.0.0/0"}])
+ post_message("'#{@context[:target_security_group]}' Security Group created with TCP port 22 publicly opened.")
+ end
+
+ result = launch_instance(@context[:target_ami_id], @context[:target_key_name], @context[:target_security_group])
+ @context[:target_instance_id] = result.first
+ @context[:target_dns_name] = result[1]
+ @context[:target_availability_zone] = result[2]
+
+ TargetInstanceLaunchedState.new(@context)
+ end
+ end
+
+ # Destination instance is started. Now configure storage.
+ class TargetInstanceLaunchedState < CopyAmiTestState
+ def enter()
+ local_region()
+ ec2_helper = Ec2Helper.new(@context[:ec2_api_handler])
+ volume_size = ec2_helper.snapshot_prop(@context[:snapshot_id], :volumeSize).to_i
+ #
+ remote_region()
+ @context[:target_volume_id] = create_volume(@context[:target_availability_zone], volume_size)
+ device = @context[:temp_device_name]
+ mount_point = "/mnt/tmp_#{@context[:target_volume_id]}"
+ attach_volume(@context[:target_volume_id], @context[:target_instance_id], device)
+ connect(@context[:target_dns_name], @context[:target_ssh_username], nil, @context[:target_ssh_keydata])
+ # check if we need to create a partition table
+ if !(@context[:partition_table] == nil)
+ post_message("Creating a partition table on device '#{device}'...")
+ set_partition_table(device, @context[:partition_table])
+ #XXX: HANDLE at a LOWER LEVEL
+ # before adding partition table, adjust device name
+ #set_partition_table(device, @context[:partition_table].gsub(/\/dev\/(s|xv)d[a-z]/, "#{@context[:temp_device_name]}"))
+ # adjust partition to mount
+ device = device + @context[:root_partition_nb]
+ end
+ # make root partition
+ create_labeled_fs(@context[:target_dns_name], device, @context[:fs_type], @context[:label])
+ mount_fs(mount_point, device)
+ disconnect()
+
+ TargetVolumeReadyState.new(@context)
+ end
+ end
+
+ # Storages are ready. Only thing missing: the key of the target region
+ # must be available on the instance in the source region to be able to perform
+ # a remote copy.
+ class TargetVolumeReadyState < CopyAmiTestState
+ def enter()
+ post_message("upload key of target-instance to source-instance...")
+ path_candidates = ["/#{@context[:source_ssh_username]}/.ssh/",
+ "/home/#{@context[:source_ssh_username]}/.ssh/"]
+ key_path = determine_file(@context[:source_dns_name], @context[:source_ssh_username], @context[:source_ssh_keydata], path_candidates)
+ #XXX: fix the problem of key name with white space
+ #upload_file(@context[:source_dns_name], @context[:source_ssh_username], @context[:source_ssh_keydata],
+ # @context[:target_ssh_keyfile], "#{key_path}#{@context[:target_key_name]}.pem")
+ upload_file(@context[:source_dns_name], @context[:source_ssh_username], @context[:source_ssh_keydata],
+ @context[:target_ssh_keyfile], "#{key_path}#{@context[:target_key_name].gsub(/\s+/, '_')}.pem")
+ post_message("credentials are in place to connect source and target.")
+
+ KeyInPlaceState.new(@context)
+ end
+ end
+
+ # Now we can copy.
+ class KeyInPlaceState < CopyAmiTestState
+ def enter()
+ connect(@context[:target_dns_name], @context[:target_ssh_username], nil, @context[:target_ssh_keydata])
+ disable_ssh_tty(@context[:target_dns_name])
+ disconnect()
+ #
+ connect(@context[:source_dns_name], @context[:source_ssh_username], nil, @context[:source_ssh_keydata])
+ source_dir = "/mnt/tmp_#{@context[:source_volume_id]}/"
+ dest_dir = "/mnt/tmp_#{@context[:target_volume_id]}"
+ #XXX: fix the problem of key name with white space
+ #remote_copy(@context[:source_ssh_username], @context[:target_key_name], source_dir,
+ # @context[:target_dns_name], @context[:target_ssh_username], dest_dir)
+ remote_copy(@context[:source_ssh_username], @context[:target_key_name].gsub(/\s+/, '_'), source_dir,
+ @context[:target_dns_name], @context[:target_ssh_username], dest_dir)
+ disconnect()
+ #
+ connect(@context[:target_dns_name], @context[:target_ssh_username], nil, @context[:target_ssh_keydata])
+ enable_ssh_tty(@context[:target_dns_name])
+ unmount_fs(dest_dir)
+ disconnect()
+
+ DataCopiedState.new(@context)
+ end
+ end
+
+ # Data of snapshot now copied to the new volume. Create a snapshot of the
+ # new volume.
+ class DataCopiedState < CopyAmiTestState
+ def enter()
+ remote_region()
+ @context[:new_snapshot_id] = create_snapshot(@context[:target_volume_id],
+ "Created by CloudyScripts - #{self.get_superclass_name()} from #{@context[:target_volume_id]}")
+
+ TargetSnapshotCreatedState.new(@context)
+ end
+ end
+
+ # Snapshot Operation done. Now this snapshot must be registered as AMI
+ class TargetSnapshotCreatedState < CopyAmiTestState
+ def enter()
+ remote_region()
+ # Get Amazon Kernel Image ID
+ aki = get_aws_kernel_image_aki(@context[:source_availability_zone], @context[:kernel_id],
+ @context[:target_availability_zone])
+ device = @context[:root_device_name]
+ if !(@context[:partition_table] == nil)
+ device.gsub!(/[0-9]/, '')
+ post_message("Using BlockDevice for snapshot registration rather than RootDevice '#{device}' due to a valid partition table on device...")
+ end
+ @context[:result][:image_id] = register_snapshot(@context[:new_snapshot_id], @context[:name],
+ device, @context[:description], aki, nil, @context[:architecture])
+
+ AmiRegisteredState.new(@context)
+ end
+ end
+
+ # AMI is registered. Now only cleanup is missing, i.e. shut down instances and
+ # remove the volumes that were created. Start with cleaning the resources
+ # in the both regions.
+ class AmiRegisteredState < CopyAmiTestState
+ def enter()
+ post_message("Cleaning Source and Target Regions...")
+ error = []
+ local_region()
+ begin
+ shut_down_instance(@context[:source_instance_id])
+ rescue Exception => e
+ error << e
+ post_message("Unable to shutdown instance '#{@context[:source_instance_id]}' in source region: #{e.to_s}")
+ end
+ begin
+ delete_volume(@context[:source_volume_id])
+ rescue Exception => e
+ error << e
+ post_message("Unable to delete volume '#{@context[:source_volume_id]}' in source region: #{e.to_s}")
+ end
+ begin
+ delete_snapshot(@context[:snapshot_id])
+ rescue Exception => e
+ error << e
+ post_message("Unable to delete snapshot '#{@context[:snapshot_id]}' in source region: #{e.to_s}")
+ end
+ #XXX: delete Security Group according to its name
+ if @context[:source_security_group].eql?(Ec2Script::CS_SEC_GRP_NAME)
+ begin
+ delete_security_group(@context[:source_security_group])
+ rescue Exception => e
+ error << e
+ post_message("Unable to delete Security Group '#{@context[:source_security_group]}' in source region: #{e.to_s}")
+ end
+ end
+ #
+ remote_region()
+ begin
+ shut_down_instance(@context[:target_instance_id])
+ rescue Exception => e
+ error << e
+ post_message("Unable to shutdown instance '#{@context[:target_instance_id]}' in target region: #{e.to_s}")
+ end
+ begin
+ delete_volume(@context[:target_volume_id])
+ rescue Exception => e
+ error << e
+ post_message("Unable to delete volume '#{@context[:target_volume_id]}' in target region: #{e.to_s}")
+ end
+ #XXX: delete Security Group according to its name
+ if @context[:target_security_group].eql?(Ec2Script::CS_SEC_GRP_NAME)
+ begin
+ delete_security_group(@context[:target_security_group])
+ rescue Exception => e
+ error << e
+ post_message("Unable to delete Security Group '#{@context[:target_security_group]}' in target region: #{e.to_s}")
+ end
+ end
+
+ if error.size() > 0
+ raise Exception.new("Cleanup error(s)")
+ end
+
+ Done.new(@context)
+ end
+ end
+
+ end
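
The copy step sketched in the header comment of CopyAmiTest ultimately shells out to rsync between the two mounted volumes, using the target key that was uploaded to the source instance. A small illustration of how that documented command line can be assembled in Ruby (host name, key name and mount points are placeholders; the real command is built inside the remote_copy helper, which is not part of this diff):

    src_user        = 'root'
    dst_user        = 'root'
    dst_keypair     = 'target-keypair'  # whitespace in key names is replaced by '_' before upload
    dst_public_fqdn = 'ec2-198-51-100-10.compute-1.amazonaws.com'
    src_dir         = '/mnt/tmp_vol-1111aaaa'
    dst_dir         = '/mnt/tmp_vol-2222bbbb'

    # Mirrors the command documented at the top of the new file.
    cmd = "rsync -PHAXaz --rsh \"ssh -i /home/#{src_user}/.ssh/id_#{dst_keypair}\" " \
          "--rsync-path \"sudo rsync\" #{src_dir}/ #{dst_user}@#{dst_public_fqdn}:#{dst_dir}/"
    puts cmd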