beaker 0.0.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +15 -0
- data/.gitignore +17 -0
- data/.rspec +2 -0
- data/.simplecov +14 -0
- data/DOCUMENTING.md +167 -0
- data/Gemfile +3 -0
- data/LICENSE +17 -0
- data/README.md +332 -0
- data/Rakefile +121 -0
- data/beaker.gemspec +42 -0
- data/beaker.rb +10 -0
- data/bin/beaker +9 -0
- data/lib/beaker.rb +36 -0
- data/lib/beaker/answers.rb +29 -0
- data/lib/beaker/answers/version28.rb +104 -0
- data/lib/beaker/answers/version30.rb +194 -0
- data/lib/beaker/cli.rb +113 -0
- data/lib/beaker/command.rb +241 -0
- data/lib/beaker/command_factory.rb +21 -0
- data/lib/beaker/dsl.rb +85 -0
- data/lib/beaker/dsl/assertions.rb +87 -0
- data/lib/beaker/dsl/helpers.rb +625 -0
- data/lib/beaker/dsl/install_utils.rb +299 -0
- data/lib/beaker/dsl/outcomes.rb +99 -0
- data/lib/beaker/dsl/roles.rb +97 -0
- data/lib/beaker/dsl/structure.rb +63 -0
- data/lib/beaker/dsl/wrappers.rb +100 -0
- data/lib/beaker/host.rb +193 -0
- data/lib/beaker/host/aix.rb +15 -0
- data/lib/beaker/host/aix/file.rb +16 -0
- data/lib/beaker/host/aix/group.rb +35 -0
- data/lib/beaker/host/aix/user.rb +32 -0
- data/lib/beaker/host/unix.rb +54 -0
- data/lib/beaker/host/unix/exec.rb +15 -0
- data/lib/beaker/host/unix/file.rb +16 -0
- data/lib/beaker/host/unix/group.rb +40 -0
- data/lib/beaker/host/unix/pkg.rb +22 -0
- data/lib/beaker/host/unix/user.rb +32 -0
- data/lib/beaker/host/windows.rb +44 -0
- data/lib/beaker/host/windows/exec.rb +18 -0
- data/lib/beaker/host/windows/file.rb +15 -0
- data/lib/beaker/host/windows/group.rb +36 -0
- data/lib/beaker/host/windows/pkg.rb +26 -0
- data/lib/beaker/host/windows/user.rb +32 -0
- data/lib/beaker/hypervisor.rb +37 -0
- data/lib/beaker/hypervisor/aixer.rb +52 -0
- data/lib/beaker/hypervisor/blimper.rb +123 -0
- data/lib/beaker/hypervisor/fusion.rb +56 -0
- data/lib/beaker/hypervisor/solaris.rb +65 -0
- data/lib/beaker/hypervisor/vagrant.rb +118 -0
- data/lib/beaker/hypervisor/vcloud.rb +175 -0
- data/lib/beaker/hypervisor/vsphere.rb +80 -0
- data/lib/beaker/hypervisor/vsphere_helper.rb +200 -0
- data/lib/beaker/logger.rb +167 -0
- data/lib/beaker/network_manager.rb +73 -0
- data/lib/beaker/options_parsing.rb +323 -0
- data/lib/beaker/result.rb +55 -0
- data/lib/beaker/shared.rb +15 -0
- data/lib/beaker/shared/error_handler.rb +17 -0
- data/lib/beaker/shared/host_handler.rb +46 -0
- data/lib/beaker/shared/repetition.rb +28 -0
- data/lib/beaker/ssh_connection.rb +198 -0
- data/lib/beaker/test_case.rb +225 -0
- data/lib/beaker/test_config.rb +148 -0
- data/lib/beaker/test_suite.rb +288 -0
- data/lib/beaker/utils.rb +7 -0
- data/lib/beaker/utils/ntp_control.rb +42 -0
- data/lib/beaker/utils/repo_control.rb +92 -0
- data/lib/beaker/utils/setup_helper.rb +77 -0
- data/lib/beaker/utils/validator.rb +27 -0
- data/spec/beaker/command_spec.rb +94 -0
- data/spec/beaker/dsl/assertions_spec.rb +104 -0
- data/spec/beaker/dsl/helpers_spec.rb +230 -0
- data/spec/beaker/dsl/install_utils_spec.rb +70 -0
- data/spec/beaker/dsl/outcomes_spec.rb +43 -0
- data/spec/beaker/dsl/roles_spec.rb +86 -0
- data/spec/beaker/dsl/structure_spec.rb +60 -0
- data/spec/beaker/dsl/wrappers_spec.rb +52 -0
- data/spec/beaker/host_spec.rb +95 -0
- data/spec/beaker/logger_spec.rb +117 -0
- data/spec/beaker/options_parsing_spec.rb +37 -0
- data/spec/beaker/puppet_command_spec.rb +128 -0
- data/spec/beaker/ssh_connection_spec.rb +39 -0
- data/spec/beaker/test_case_spec.rb +6 -0
- data/spec/beaker/test_suite_spec.rb +44 -0
- data/spec/mocks_and_helpers.rb +34 -0
- data/spec/spec_helper.rb +15 -0
- metadata +359 -0
@@ -0,0 +1,123 @@
|
|
1
|
+
module Beaker
  # EC2 hypervisor backed by the Blimpy gem: provisions one AWS instance
  # ("ship") per host, using AMI definitions from config/image_templates/ec2.yaml,
  # then wires up hostnames and /etc/hosts entries across the fleet.
  class Blimper < Beaker::Hypervisor

    # Determine which TCP ports must be opened for a host based on its roles.
    #
    # @param host [Host] host whose 'roles' list is inspected
    # @return [Array<Integer>] ports to open: 22 always; 8080/8081 for
    #   'database', 8140 for 'master', 443 for 'dashboard'
    def amiports(host)
      roles = host['roles']
      ports = [22]

      if roles.include? 'database'
        ports << 8080
        ports << 8081
      end

      if roles.include? 'master'
        ports << 8140
      end

      if roles.include? 'dashboard'
        ports << 443
      end

      ports
    end

    # Provision the given hosts on EC2 via Blimpy and configure networking.
    #
    # @param blimpy_hosts [Array<Host>] hosts to provision
    # @param options [Hash] beaker options; must contain :logger
    # @param config [Hash] parsed test config; its 'CONFIG' section is duped
    # @raise [RuntimeError] if Blimpy cannot be loaded, a host has no
    #   snapshot configured, or no image id matches the requested snapshot
    def initialize(blimpy_hosts, options, config)
      @options = options
      @config = config['CONFIG'].dup
      @logger = options[:logger]
      @blimpy_hosts = blimpy_hosts
      require 'rubygems' unless defined?(Gem)
      require 'yaml' unless defined?(YAML)
      # Needed for the bounded fleet.destroy below; the bare Kernel#timeout
      # form is deprecated, so use the Timeout module explicitly.
      require 'timeout' unless defined?(Timeout)
      begin
        require 'blimpy'
      rescue LoadError
        raise "Unable to load Blimpy, please ensure its installed"
      end
      ami_spec = YAML.load_file('config/image_templates/ec2.yaml')["AMI"]

      # NOTE: block param renamed from `fleet` — it previously shadowed the
      # local `fleet` this expression assigns.
      fleet = Blimpy.fleet do |flotilla|
        @blimpy_hosts.each do |host|
          amitype = host['vmname'] || host['platform']
          amisize = host['amisize'] || 'm1.small'
          # use snapshot provided for this host
          image_type = host['snapshot']
          unless image_type
            raise "No snapshot/image_type provided for blimpy provisioning"
          end
          ami = ami_spec[amitype]
          flotilla.add(:aws) do |ship|
            ship.name = host.name
            ship.ports = amiports(host)
            ship.image_id = ami[:image][image_type.to_sym]
            unless ship.image_id
              raise "No image_id found for host #{ship.name} (#{amitype}:#{amisize}) using snapshot/image_type #{image_type}"
            end
            ship.flavor = amisize
            ship.region = ami[:region]
            ship.username = 'root'
          end
          @logger.debug "Added #{host.name} (#{amitype}:#{amisize}) using snapshot/image_type #{image_type} to blimpy fleet"
        end
      end

      # Attempt to start the fleet, we wrap it with some error handling that deals
      # with generic Fog errors and retrying in case these errors are transient.
      fleet_retries = 0
      begin
        fleet.start
      rescue Fog::Errors::Error => ex
        fleet_retries += 1
        if fleet_retries <= 3
          sleep_time = rand(10) + 10
          @logger.notify("Calling fleet.destroy, sleeping #{sleep_time} seconds and retrying fleet.start due to Fog::Errors::Error (#{ex.message}), retry attempt #{fleet_retries}.")
          begin
            # Best-effort teardown before retrying; bounded so a hung
            # destroy cannot stall the retry loop.
            Timeout.timeout(30) do
              fleet.destroy
            end
          rescue StandardError
            # Deliberately ignored: the retry path copes with a dirty fleet.
          end
          sleep sleep_time
          retry
        else
          @logger.error("Retried Fog #{fleet_retries} times, giving up and throwing the exception")
          raise ex
        end
      end

      # Configure our nodes to match the blimp fleet
      # Also generate hosts entries for the fleet, since we're iterating
      etc_hosts = "127.0.0.1\tlocalhost localhost.localdomain\n"
      fleet.ships.each do |ship|
        ship.wait_for_sshd
        name = ship.name
        # find + renamed block param replaces select{|host|...}[0], which
        # shadowed the outer `host` variable.
        host = @blimpy_hosts.find { |bh| bh.name == name }
        host['ip'] = ship.dns
        host.exec(Command.new("hostname #{name}"))
        ip = get_ip(host)
        domain = get_domain_name(host)
        etc_hosts << "#{ip}\t#{name}\t#{name}.#{domain}\n"
      end

      # Send our hosts information to the nodes
      @blimpy_hosts.each do |host|
        set_etc_hosts(host, etc_hosts)
      end

    end

    # Build a fleet naming every ship we created and destroy it.
    def cleanup
      fleet = Blimpy.fleet do |flotilla|
        @blimpy_hosts.each do |host|
          flotilla.add(:aws) do |ship|
            ship.name = host.name
          end
        end
      end

      @logger.notify "Destroying Blimpy boxes"
      fleet.destroy
    end

  end
end
|
@@ -0,0 +1,56 @@
|
|
1
|
+
module Beaker
  # VMware Fusion hypervisor: reverts each host's VM to a named snapshot
  # via the fission gem, then boots it again headless.
  class Fusion < Beaker::Hypervisor

    # Revert every host to its configured snapshot and start it back up.
    #
    # @param fusion_hosts [Array<Host>] hosts to revert
    # @param options [Hash] beaker options; must contain :logger
    # @param config [Hash] parsed test config; its 'CONFIG' section is duped
    # @raise [RuntimeError] when fission is absent, a VM cannot be found,
    #   no snapshot is configured, or the named snapshot does not exist
    def initialize(fusion_hosts, options, config)
      require 'rubygems' unless defined?(Gem)
      begin
        require 'fission'
      rescue LoadError
        raise "Unable to load fission, please ensure it is installed!"
      end
      @logger = options[:logger]
      @options = options
      @config = config['CONFIG'].dup
      @fusion_hosts = fusion_hosts

      # Log every VM fission knows about, to ease debugging bad vmname values.
      available = Fission::VM.all.data.map { |vm| vm.name }.sort.join(", ")
      @logger.notify "Available VM names: #{available}"

      @fusion_hosts.each do |host|
        vm_name = host["vmname"] || host.name
        vm = Fission::VM.new vm_name
        raise "Could not find VM '#{vm_name}' for #{host.name}!" unless vm.exists?

        available_snapshots = vm.snapshots.data.sort.join(", ")
        @logger.notify "Available snapshots for #{host.name}: #{available_snapshots}"
        snap_name = host["snapshot"]
        raise "No snapshot specified for #{host.name}" unless snap_name
        raise "Could not find snapshot '#{snap_name}' for host #{host.name}!" unless vm.snapshots.data.include? snap_name

        @logger.notify "Reverting #{host.name} to snapshot '#{snap_name}'"
        started_at = Time.now
        vm.revert_to_snapshot snap_name
        # Reverting powers the VM down; poll until fission reports it stopped.
        sleep 1 while vm.running?.data
        elapsed = Time.now - started_at
        @logger.notify "Spent %.2f seconds reverting" % elapsed

        @logger.notify "Resuming #{host.name}"
        started_at = Time.now
        vm.start :headless => true
        # Poll until the VM is reported running again.
        sleep 1 until vm.running?.data
        elapsed = Time.now - started_at
        @logger.notify "Spent %.2f seconds resuming VM" % elapsed
      end
    end

    # Fusion VMs are snapshot-based; there is nothing to tear down.
    def cleanup
      @logger.notify "No cleanup for fusion boxes"
    end

  end
end
|
@@ -0,0 +1,65 @@
|
|
1
|
+
module Beaker
  # Solaris zone hypervisor: SSHes to a hypervisor box described in ~/.fog,
  # rolls each zone's ZFS datasets back to a named snapshot, then boots it.
  class Solaris < Beaker::Hypervisor

    # Revert and boot each Solaris zone listed in solaris_hosts.
    #
    # Connection details come from ~/.fog under the :default key:
    # :solaris_hypervisor_server, :solaris_hypervisor_vmpath,
    # :solaris_hypervisor_snappaths, plus optional username and keyfile.
    #
    # @param solaris_hosts [Array<Host>] zones to revert
    # @param options [Hash] beaker options; must contain :logger
    # @param config [Hash] parsed test config; its 'CONFIG' section is duped
    # @raise [RuntimeError] when ~/.fog is missing or unreadable
    def initialize(solaris_hosts, options, config)
      @options = options
      @config = config['CONFIG'].dup
      @logger = options[:logger]
      @solaris_hosts = solaris_hosts
      # YAML.load_file below needs this; the other hypervisors guard it too.
      require 'yaml' unless defined?(YAML)
      fog_file = nil
      # File.exist? replaces the deprecated File.exists?
      if File.exist?( File.join(ENV['HOME'], '.fog') )
        fog_file = YAML.load_file( File.join(ENV['HOME'], '.fog') )
      end
      raise "Cant load ~/.fog config" unless fog_file

      hypername = fog_file[:default][:solaris_hypervisor_server]
      vmpath    = fog_file[:default][:solaris_hypervisor_vmpath]
      snappaths = fog_file[:default][:solaris_hypervisor_snappaths]

      # Minimal host config describing the hypervisor box so we can SSH in.
      hyperconf = {
        'HOSTS'  => {
          hypername => { 'platform' => 'solaris-11-sparc' }
        },
        'CONFIG' => {
          'user' => fog_file[:default][:solaris_hypervisor_username] || ENV['USER'],
          'ssh'  => {
            :keys => fog_file[:default][:solaris_hypervisor_keyfile] || "#{ENV['HOME']}/.ssh/id_rsa"
          }
        }
      }

      hyperconfig = Beaker::TestConfig.new( hyperconf, @options )

      @logger.notify "Connecting to hypervisor at #{hypername}"
      hypervisor = Beaker::Host.create( hypername, @options, hyperconfig )

      @solaris_hosts.each do |host|
        vm_name = host['vmname'] || host.name
        # use the snapshot provided for this host
        snapshot = host['snapshot']

        @logger.notify "Reverting #{vm_name} to snapshot #{snapshot}"
        start = Time.now
        # zfs rollback -R destroys any snapshots newer than the target; -f forces unmounts
        hypervisor.exec(Command.new("sudo /sbin/zfs rollback -Rf #{vmpath}/#{vm_name}@#{snapshot}"))
        snappaths.each do |spath|
          @logger.notify "Reverting #{vm_name}/#{spath} to snapshot #{snapshot}"
          hypervisor.exec(Command.new("sudo /sbin/zfs rollback -Rf #{vmpath}/#{vm_name}/#{spath}@#{snapshot}"))
        end
        time = Time.now - start
        @logger.notify "Spent %.2f seconds reverting" % time

        @logger.notify "Booting #{vm_name}"
        start = Time.now
        hypervisor.exec(Command.new("sudo /sbin/zoneadm -z #{vm_name} boot"))
        @logger.notify "Spent %.2f seconds booting #{vm_name}" % (Time.now - start)
      end
      hypervisor.close
    end

    # Zones are snapshot-based; there is nothing to tear down.
    def cleanup
      @logger.notify "No cleanup for solaris boxes"
    end

  end
end
|
@@ -0,0 +1,118 @@
|
|
1
|
+
module Beaker
  # Vagrant hypervisor: generates a Vagrantfile for the given hosts, brings
  # the boxes up with the vagrant CLI, and rewrites each host's ssh config
  # so beaker can log in as root.
  class Vagrant < Beaker::Hypervisor

    # Return a random mac address
    #
    # @return [String] a random mac address, prefixed with VirtualBox's
    #   08:00:27 vendor bytes
    def randmac
      "080027" + (1..3).map{"%0.2X"%rand(256)}.join
    end

    # Random octet as a string in 1..253.
    def rand_chunk
      (1 + rand(253)).to_s #don't want a 0 or a 255
    end

    # Random 192.168.x.y private address.
    def randip
      "192.168.#{rand_chunk}.#{rand_chunk}"
    end

    # Write a Vagrantfile describing every host into the current directory.
    #
    # @param hosts [Array<Host>] hosts to describe; a host's 'ip' is
    #   defaulted to a random private address if unset
    def make_vfile hosts
      #HACK HACK HACK - add checks here to ensure that we have box + box_url
      #generate the VagrantFile
      vagrant_file = "Vagrant.configure(\"2\") do |c|\n"
      hosts.each do |host|
        host['ip'] ||= randip #use the existing ip, otherwise default to a random ip
        vagrant_file << " c.vm.define '#{host.name}' do |v|\n"
        vagrant_file << " v.vm.hostname = '#{host.name}'\n"
        vagrant_file << " v.vm.box = '#{host['box']}'\n"
        vagrant_file << " v.vm.box_url = '#{host['box_url']}'\n" unless host['box_url'].nil?
        vagrant_file << " v.vm.base_mac = '#{randmac}'\n"
        vagrant_file << " v.vm.network :private_network, ip: \"#{host['ip'].to_s}\"\n"
        vagrant_file << " end\n"
        @logger.debug "created Vagrantfile for VagrantHost #{host.name}"
      end
      vagrant_file << " c.vm.provider :virtualbox do |vb|\n"
      vagrant_file << " vb.customize [\"modifyvm\", :id, \"--memory\", \"1024\"]\n"
      vagrant_file << " end\n"
      vagrant_file << "end\n"
      # Block form guarantees the handle is closed even if write raises
      # (the previous open/write/close sequence leaked on error).
      File.open("Vagrantfile", 'w') do |f|
        f.write(vagrant_file)
      end
    end

    # Build one /etc/hosts blob covering all hosts and push it to each of
    # them, so the boxes can resolve one another by name.
    def hack_etc_hosts hosts
      etc_hosts = "127.0.0.1\tlocalhost localhost.localdomain\n"
      hosts.each do |host|
        etc_hosts << "#{host['ip'].to_s}\t#{host.name}\n"
      end
      hosts.each do |host|
        set_etc_hosts(host, etc_hosts)
      end
    end

    # Make it possible to log in as root by copying the ssh dir to the
    # privileged account (Administrator on windows, root elsewhere).
    def copy_ssh_to_root host
      @logger.debug "Give root a copy of vagrant's keys"
      if host['platform'] =~ /windows/
        host.exec(Command.new('sudo su -c "cp -r .ssh /home/Administrator/."'))
      else
        host.exec(Command.new('sudo su -c "cp -r .ssh /root/."'))
      end
    end

    # Capture `vagrant ssh-config` for a host into a tempfile — patching in
    # the host's IP and the desired user — and point the host's ssh options
    # at that file.
    #
    # @param host [Host] host to reconfigure
    # @param user [String] account to connect as (e.g. 'vagrant' or root)
    def set_ssh_config host, user
      f = Tempfile.new("#{host.name}")
      ssh_config = `vagrant ssh-config #{host.name}`
      #replace hostname with ip
      # Regexp.escape so a hostname containing regex metacharacters
      # (dots, dashes in character-class positions) matches literally.
      ssh_config = ssh_config.gsub(/#{Regexp.escape(host.name)}/, host['ip'])
      #set the user
      ssh_config = ssh_config.gsub(/User vagrant/, "User #{user}")
      f.write(ssh_config)
      f.rewind
      host['ssh'] = {:config => f.path()}
      host['user'] = user
      # Keep a reference so cleanup can close it (and GC can't unlink early).
      @temp_files << f
    end

    # Bring up all vagrant boxes and configure them for beaker.
    #
    # @param vagrant_hosts [Array<Host>] hosts to manage
    # @param options [Hash] beaker options; must contain :logger
    # @param config [Hash] parsed test config; its 'CONFIG' section is duped
    def initialize(vagrant_hosts, options, config)
      require 'tempfile'
      @options = options
      @config = config['CONFIG'].dup
      @logger = options[:logger]
      @temp_files = []
      @vagrant_hosts = vagrant_hosts

      make_vfile @vagrant_hosts

      #stop anything currently running, that way vagrant up will re-do networking on existing boxes
      system("vagrant halt")
      system("vagrant up")

      @logger.debug "configure vagrant boxes (set ssh-config, switch to root user, hack etc/hosts)"
      @vagrant_hosts.each do |host|
        default_user = host['user']

        set_ssh_config host, 'vagrant'

        copy_ssh_to_root host
        #shut down connection, will reconnect on next exec
        host.close

        set_ssh_config host, default_user

      end

      hack_etc_hosts @vagrant_hosts
    end

    # Close per-host ssh-config tempfiles and destroy all vagrant boxes.
    def cleanup
      @logger.debug "removing temporary ssh-config files per-vagrant box"
      @temp_files.each do |f|
        f.close()
      end
      @logger.notify "Destroying vagrant boxes"
      system("vagrant destroy --force")
    end

  end
end
|
@@ -0,0 +1,175 @@
|
|
1
|
+
module Beaker
  # vCloud hypervisor: clones one VM per host from a vSphere template
  # (via RbVmomi through VsphereHelper), waits for guest tools + DNS,
  # and destroys the clones on cleanup.
  class Vcloud < Beaker::Hypervisor

    # Deploy each host as a clone of its configured 'template'.
    #
    # @param vcloud_hosts [Array<Host>] hosts to provision; each gains a
    #   randomized 'vmhostname'
    # @param options [Hash] beaker options; must contain :logger
    # @param config [Hash] parsed test config; its 'CONFIG' section must
    #   provide 'datastore', 'resourcepool' and 'folder'
    # @raise [RuntimeError] on missing config keys, unknown template, or
    #   registration/DNS timeout
    def initialize(vcloud_hosts, options, config)
      @options = options
      @config = config['CONFIG'].dup
      @logger = options[:logger]
      @vcloud_hosts = vcloud_hosts
      require 'yaml' unless defined?(YAML)

      raise 'You must specify a datastore for vCloud instances!' unless @config['datastore']
      raise 'You must specify a resource pool for vCloud instances!' unless @config['resourcepool']
      raise 'You must specify a folder for vCloud instances!' unless @config['folder']

      # Credentials come from VsphereHelper.load_config (presumably ~/.fog;
      # confirm against vsphere_helper.rb).
      vsphere_credentials = VsphereHelper.load_config

      @logger.notify "Connecting to vSphere at #{vsphere_credentials[:server]}" +
        " with credentials for #{vsphere_credentials[:user]}"

      vsphere_helper = VsphereHelper.new( vsphere_credentials )
      # NOTE(review): vsphere_vms is assigned but never used below.
      vsphere_vms = {}

      # Poll budget shared by the registration and DNS wait loops below;
      # doubled when a customization spec is in play.
      attempts = 10

      start = Time.now
      @vcloud_hosts.each_with_index do |h, i|
        # Generate a randomized hostname
        # (letter first, then 14 alphanumerics)
        o = [('a'..'z'),('0'..'9')].map{|r| r.to_a}.flatten
        h['vmhostname'] = o[rand(25)] + (0...14).map{o[rand(o.length)]}.join

        # A template of the form "folder/name" is split: the folder path is
        # searched explicitly, and h['template'] is reduced to the basename.
        if h['template'] =~ /\//
          templatefolders = h['template'].split('/')
          h['template'] = templatefolders.pop
        end

        @logger.notify "Deploying #{h['vmhostname']} (#{h.name}) to #{@config['folder']} from template '#{h['template']}'"

        vm = {}

        # templatefolders is block-local, so it is nil here whenever the
        # regex above did not match for THIS host — the two lookup paths
        # cannot leak between iterations.
        if templatefolders
          vm[h['template']] = vsphere_helper.find_folder(templatefolders.join('/')).find(h['template'])
        else
          vm = vsphere_helper.find_vms(h['template'])
        end

        if vm.length == 0
          raise "Unable to find template '#{h['template']}'!"
        end

        # Add VM annotation
        configSpec = RbVmomi::VIM.VirtualMachineConfigSpec(
          :annotation =>
            'Base template: ' + h['template'] + "\n" +
            'Creation time: ' + Time.now.strftime("%Y-%m-%d %H:%M") + "\n\n" +
            'CI build link: ' + ( ENV['BUILD_URL'] || 'Deployed independently of CI' )
        )

        # Are we using a customization spec?
        customizationSpec = vsphere_helper.find_customization( h['template'] )

        if customizationSpec
          # Print a logger message if using a customization spec
          @logger.notify "Found customization spec for '#{h['template']}', will apply after boot"

          # Using a customization spec takes longer, set a longer timeout
          attempts = attempts * 2
        end

        # Put the VM in the specified folder and resource pool
        relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec(
          :datastore    => vsphere_helper.find_datastore(@config['datastore']),
          :pool         => vsphere_helper.find_pool(@config['resourcepool']),
          :diskMoveType => :moveChildMostDiskBacking
        )

        # Create a clone spec
        spec = RbVmomi::VIM.VirtualMachineCloneSpec(
          :config        => configSpec,
          :location      => relocateSpec,
          :customization => customizationSpec,
          :powerOn       => true,
          :template      => false
        )

        # Deploy from specified template.
        # Only the final clone is awaited, so earlier clones proceed in
        # parallel and this loop blocks just once at the end.
        if (@vcloud_hosts.length == 1) or (i == @vcloud_hosts.length - 1)
          vm[h['template']].CloneVM_Task( :folder => vsphere_helper.find_folder(@config['folder']), :name => h['vmhostname'], :spec => spec ).wait_for_completion
        else
          vm[h['template']].CloneVM_Task( :folder => vsphere_helper.find_folder(@config['folder']), :name => h['vmhostname'], :spec => spec )
        end
      end
      @logger.notify 'Spent %.2f seconds deploying VMs' % (Time.now - start)

      # Wait for guest tools + an IP on every VM; waits grow as a Fibonacci
      # sequence via (last_wait, wait) = wait, last_wait + wait.
      start = Time.now
      @vcloud_hosts.each_with_index do |h, i|
        @logger.notify "Booting #{h['vmhostname']} (#{h.name}) and waiting for it to register with vSphere"
        try = 1
        last_wait = 0
        wait = 1
        until
          vsphere_helper.find_vms(h['vmhostname'])[h['vmhostname']].summary.guest.toolsRunningStatus == 'guestToolsRunning' and
          vsphere_helper.find_vms(h['vmhostname'])[h['vmhostname']].summary.guest.ipAddress != nil
          if try <= attempts
            sleep wait
            (last_wait, wait) = wait, last_wait + wait
            try += 1
          else
            # NOTE(review): message reports only the last interval, not the
            # total time slept.
            raise "vSphere registration failed after #{wait} seconds"
          end
        end
      end
      @logger.notify "Spent %.2f seconds booting and waiting for vSphere registration" % (Time.now - start)

      # Same backoff scheme, this time until the random hostname resolves.
      start = Time.now
      @vcloud_hosts.each_with_index do |h, i|
        @logger.notify "Waiting for #{h['vmhostname']} DNS resolution"
        try = 1
        last_wait = 0
        wait = 3

        begin
          Socket.getaddrinfo(h['vmhostname'], nil)
        rescue
          if try <= attempts
            sleep wait
            (last_wait, wait) = wait, last_wait + wait
            try += 1

            retry
          else
            raise "DNS resolution failed after #{wait} seconds"
          end
        end
      end
      @logger.notify "Spent %.2f seconds waiting for DNS resolution" % (Time.now - start)

      vsphere_helper.close
    end

    # Power off (if needed) and destroy every VM this run created.
    # VMs whose hosts never got a 'vmhostname' (failed provisioning) are
    # skipped with a warning.
    def cleanup
      @logger.notify "Destroying vCloud boxes"
      vsphere_credentials = VsphereHelper.load_config

      @logger.notify "Connecting to vSphere at #{vsphere_credentials[:server]}" +
        " with credentials for #{vsphere_credentials[:user]}"

      vsphere_helper = VsphereHelper.new( vsphere_credentials )

      vm_names = @vcloud_hosts.map {|h| h['vmhostname'] }.compact
      if @vcloud_hosts.length != vm_names.length
        @logger.warn "Some hosts did not have vmhostname set correctly! This likely means VM provisioning was not successful"
      end
      vms = vsphere_helper.find_vms vm_names
      vm_names.each do |name|
        unless vm = vms[name]
          raise "Couldn't find VM #{name} in vSphere!"
        end

        # A powered-on VM must be halted before Destroy_Task will succeed.
        if vm.runtime.powerState == 'poweredOn'
          @logger.notify "Shutting down #{vm.name}"
          start = Time.now
          vm.PowerOffVM_Task.wait_for_completion
          @logger.notify "Spent %.2f seconds halting #{vm.name}" % (Time.now - start)
        end

        start = Time.now
        # Fire-and-forget: destruction is not awaited.
        vm.Destroy_Task
        @logger.notify "Spent %.2f seconds destroying #{vm.name}" % (Time.now - start)
      end

      vsphere_helper.close
    end

  end
end
|