vm_shepherd 0.0.1 → 0.1.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/Gemfile +0 -2
- data/LICENSE.txt +22 -0
- data/README.md +15 -2
- data/ci/run_specs.sh +6 -1
- data/lib/vm_shepherd.rb +12 -0
- data/lib/vm_shepherd/{ami_manager.rb → aws_manager.rb} +1 -3
- data/lib/vm_shepherd/openstack_manager.rb +102 -0
- data/lib/vm_shepherd/shepherd.rb +74 -81
- data/lib/vm_shepherd/vcloud_manager.rb +170 -0
- data/lib/vm_shepherd/version.rb +1 -1
- data/lib/vm_shepherd/vsphere_manager.rb +293 -0
- data/spec/fixtures/shepherd/openstack.yml +19 -0
- data/spec/support/patched_fog.rb +20 -0
- data/spec/vm_shepherd/{ami_manager_spec.rb → aws_manager_spec.rb} +9 -10
- data/spec/vm_shepherd/openstack_manager_spec.rb +237 -0
- data/spec/vm_shepherd/shepherd_spec.rb +115 -50
- data/spec/vm_shepherd/vcloud_manager_spec.rb +364 -0
- data/spec/vm_shepherd/vsphere_manager_spec.rb +16 -0
- data/vm_shepherd.gemspec +1 -1
- metadata +31 -32
- data/lib/vm_shepherd/ova_manager/base.rb +0 -31
- data/lib/vm_shepherd/ova_manager/deployer.rb +0 -202
- data/lib/vm_shepherd/ova_manager/destroyer.rb +0 -29
- data/lib/vm_shepherd/ova_manager/open_monkey_patch.rb +0 -14
- data/lib/vm_shepherd/vapp_manager/deployer.rb +0 -151
- data/lib/vm_shepherd/vapp_manager/destroyer.rb +0 -46
- data/spec/vm_shepherd/ova_manager/base_spec.rb +0 -56
- data/spec/vm_shepherd/ova_manager/deployer_spec.rb +0 -134
- data/spec/vm_shepherd/ova_manager/destroyer_spec.rb +0 -42
- data/spec/vm_shepherd/vapp_manager/deployer_spec.rb +0 -287
- data/spec/vm_shepherd/vapp_manager/destroyer_spec.rb +0 -104
data/lib/vm_shepherd/version.rb
CHANGED
@@ -0,0 +1,293 @@
|
|
1
|
+
require 'fileutils'
require 'logger'
require 'timeout'
require 'tmpdir'

require 'nokogiri'
require 'rbvmomi'
|
3
|
+
|
4
|
+
module VmShepherd
  # Deploys an OVA-packaged VM onto vSphere (upload OVF as a template, clone,
  # configure vApp properties, power on) and destroys previously deployed VMs,
  # via the rbvmomi API. All inventory lookups (datacenter, cluster, datastore,
  # network, resource pool) are resolved lazily per call.
  class VsphereManager
    TEMPLATE_PREFIX = 'tpl'.freeze

    # host            - vCenter/ESXi hostname
    # username        - vSphere login name
    # password        - vSphere password
    # datacenter_name - inventory path of the target datacenter
    def initialize(host, username, password, datacenter_name)
      @host = host
      @username = username
      @password = password
      @datacenter_name = datacenter_name
      @logger = Logger.new(STDERR)
    end

    # Untars the OVA at ova_path, uploads its OVF as a template, clones a VM
    # from that template, applies the vApp IP configuration from vm_config,
    # and powers the VM on.
    #
    # Raises ArgumentError when vsphere_config[:folder] is not a valid folder
    # name; fails early if something already answers on the configured IP.
    def deploy(ova_path, vm_config, vsphere_config)
      raise ArgumentError unless folder_name_is_valid?(vsphere_config[:folder])

      ova_path = File.expand_path(ova_path.strip)
      ensure_no_running_vm(vm_config)

      tmp_dir = untar_vbox_ova(ova_path)
      ovf_file_path = ovf_file_path_from_dir(tmp_dir)

      template = deploy_ovf_template(ovf_file_path, vsphere_config)
      vm = create_vm_from_template(template, vsphere_config)

      reconfigure_vm(vm, vm_config)
      power_on_vm(vm)
    ensure
      # Remove the whole extraction directory, not just the .ovf file: the
      # previous version leaked the remaining untarred OVA artifacts
      # (disk images, manifest) in the temp dir.
      FileUtils.remove_entry_secure(tmp_dir, force: true) unless tmp_dir.nil?
    end

    # Powers off and deletes every VM under folder_name, removes the folder,
    # then recreates it empty so a subsequent deploy has a clean target.
    def destroy(folder_name)
      fail("#{folder_name.inspect} is not a valid folder name") unless folder_name_is_valid?(folder_name)

      delete_folder_and_vms(folder_name)

      fail("#{folder_name.inspect} already exists") unless datacenter.vmFolder.traverse(folder_name).nil?

      datacenter.vmFolder.traverse(folder_name, RbVmomi::VIM::Folder, true)
    end

    private

    attr_reader :host, :username, :password, :datacenter_name, :logger

    # Power off all VMs in the folder, then destroy the folder itself.
    # Re-raises vSphere faults after logging them.
    def delete_folder_and_vms(folder_name)
      return unless (folder = datacenter.vmFolder.traverse(folder_name))

      find_vms(folder).each { |vm| power_off(vm) }

      logger.info("BEGIN folder.destroy_task folder=#{folder_name}")
      folder.Destroy_Task.wait_for_completion
      logger.info("END folder.destroy_task folder=#{folder_name}")
    rescue RbVmomi::Fault => e
      # Logger#info only accepts a single message argument; the old
      # two-argument call raised ArgumentError and masked the real fault.
      logger.info("ERROR folder.destroy_task folder=#{folder_name} error=#{e.inspect}")
      raise
    end

    # Recursively collects every VirtualMachine beneath folder.
    def find_vms(folder)
      vms = folder.childEntity.grep(RbVmomi::VIM::VirtualMachine)
      vms << folder.childEntity.grep(RbVmomi::VIM::Folder).map { |child| find_vms(child) }
      vms.flatten
    end

    # Attempts power-off up to twice: the first attempt can race with a
    # concurrent power-state change ('InvalidPowerState'), which is retried;
    # any other error propagates.
    def power_off(vm)
      2.times do
        break if vm.runtime.powerState == 'poweredOff'

        begin
          logger.info("BEGIN vm.power_off_task vm=#{vm.name}, power_state=#{vm.runtime.powerState}")
          vm.PowerOffVM_Task.wait_for_completion
          logger.info("END vm.power_off_task vm=#{vm.name}")
        rescue StandardError => e
          logger.info("ERROR vm.power_off_task vm=#{vm.name}")
          raise unless e.message.start_with?('InvalidPowerState')
        end
      end
    end

    # Slash-separated path of 1-80 character word/dash segments; a trailing
    # slash is tolerated. Returns MatchData (truthy) or nil.
    def folder_name_is_valid?(folder_name)
      /\A([\w-]{1,80}\/)*[\w-]{1,80}\/?\z/.match(folder_name)
    end

    # Fails fast when a VM already answers on the target IP/port, to avoid
    # clobbering a live deployment.
    def ensure_no_running_vm(ova_config)
      logger.info('--- Running: Checking for existing VM')
      ip = ova_config[:external_ip] || ova_config[:ip]
      port = ova_config[:external_port] || 443
      fail("VM exists at #{ip}") if system("nc -z -w 5 #{ip} #{port}")
    end

    # Extracts the OVA tarball into a fresh temp dir; returns the dir path.
    def untar_vbox_ova(ova_path)
      logger.info("--- Running: Untarring #{ova_path}")
      Dir.mktmpdir.tap do |dir|
        system_or_exit("cd #{dir} && tar xfv '#{ova_path}'")
      end
    end

    def ovf_file_path_from_dir(dir)
      Dir["#{dir}/*.ovf"].first || fail('Failed to find ovf')
    end

    # Uploads the OVF, adds a delta-disk layer (so clones are thin), and marks
    # the uploaded VM as a template. Returns the template.
    def deploy_ovf_template(ovf_file_path, vsphere_config)
      template_name = [TEMPLATE_PREFIX, Time.new.strftime('%F-%H-%M'), cluster(vsphere_config).name].join('-')
      logger.info("BEGIN deploy_ovf ovf_file=#{ovf_file_path} template_name=#{template_name}")
      connection.serviceContent.ovfManager.deployOVF(
        uri: ovf_file_path,
        vmName: template_name,
        vmFolder: target_folder(vsphere_config),
        host: find_deploy_host(vsphere_config),
        resourcePool: resource_pool(vsphere_config),
        datastore: datastore(vsphere_config),
        networkMappings: create_network_mappings(ovf_file_path, vsphere_config),
        propertyMappings: {},
      ).tap do |ovf_template|
        ovf_template.add_delta_disk_layer_on_all_disks
        ovf_template.MarkAsTemplate
      end
    end

    # Picks a random connected, non-maintenance host in the cluster that can
    # see the destination datastore.
    def find_deploy_host(vsphere_config)
      property_collector = connection.serviceContent.propertyCollector

      hosts = cluster(vsphere_config).host
      host_properties_by_host =
        property_collector.collectMultiple(
          hosts,
          'datastore',
          'runtime.connectionState',
          'runtime.inMaintenanceMode',
          'name',
        )

      hosts.shuffle.find do |host|
        (host_properties_by_host[host]['runtime.connectionState'] == 'connected') && # connected
          host_properties_by_host[host]['datastore'].member?(datastore(vsphere_config)) && # must have the destination datastore
          !host_properties_by_host[host]['runtime.inMaintenanceMode'] # not be in maintenance mode
      end || fail('ERROR finding host to upload OVF to')
    end

    # Maps every network declared in the OVF onto the single configured
    # vSphere network.
    def create_network_mappings(ovf_file_path, vsphere_config)
      ovf = Nokogiri::XML(File.read(ovf_file_path))
      ovf.remove_namespaces!
      networks = ovf.xpath('//NetworkSection/Network').map { |x| x['name'] }
      Hash[networks.map { |ovf_network| [ovf_network, network(vsphere_config)] }]
    end

    # Clones a powered-off VM from the template and returns the new VM.
    def create_vm_from_template(template, vsphere_config)
      logger.info("BEGIN clone_vm_task template=#{template.name}")
      vm = template.CloneVM_Task(
        folder: target_folder(vsphere_config),
        name: "#{template.name}-vm",
        spec: {
          location: {
            pool: resource_pool(vsphere_config),
            datastore: datastore(vsphere_config),
            diskMoveType: :moveChildMostDiskBacking,
          },
          powerOn: false,
          template: false,
          config: {numCPUs: 2, memoryMB: 2048},
        }
      ).wait_for_completion
      logger.info("END clone_vm_task template=#{template.name}")
      # Return the cloned VM explicitly: previously the method returned the
      # result of logger.info (true), so deploy crashed in reconfigure_vm.
      vm
    end

    def reconfigure_vm(vm, vm_config)
      virtual_machine_config_spec = create_virtual_machine_config_spec(vm_config)
      logger.info("BEGIN reconfigure_vm_task virtual_machine_config_spec=#{virtual_machine_config_spec.inspect}")
      vm.ReconfigVM_Task(spec: virtual_machine_config_spec).wait_for_completion
      logger.info("END reconfigure_vm_task virtual_machine_config_spec=#{virtual_machine_config_spec.inspect}")
    end

    # Builds the VirtualMachineConfigSpec whose vAppConfig carries the
    # IP/password properties delivered to the guest via guestInfo transport.
    def create_virtual_machine_config_spec(vm_config)
      logger.info('BEGIN VmConfigSpec creation')
      vm_config_spec = RbVmomi::VIM::VmConfigSpec.new
      vm_config_spec.ovfEnvironmentTransport = ['com.vmware.guestInfo']
      vm_config_spec.property = create_vapp_property_specs(vm_config)
      logger.info("END VmConfigSpec creation: #{vm_config_spec.inspect}")

      logger.info('BEGIN VirtualMachineConfigSpec creation')
      virtual_machine_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec.new
      virtual_machine_config_spec.vAppConfig = vm_config_spec
      logger.info("END VirtualMachineConfigSpec creation #{virtual_machine_config_spec.inspect}")
      virtual_machine_config_spec
    end

    def create_vapp_property_specs(vm_config)
      ip_configuration = {
        'ip0' => vm_config[:ip],
        'netmask0' => vm_config[:netmask],
        'gateway' => vm_config[:gateway],
        'DNS' => vm_config[:dns],
        'ntp_servers' => vm_config[:ntp_servers],
      }

      vapp_property_specs = []

      logger.info("BEGIN VAppPropertySpec creation configuration=#{ip_configuration.inspect}")
      # IP Configuration key order must match OVF template property order
      ip_configuration.each_with_index do |(key, value), i|
        vapp_property_specs << RbVmomi::VIM::VAppPropertySpec.new.tap do |spec|
          spec.operation = 'edit'
          spec.info = RbVmomi::VIM::VAppPropertyInfo.new.tap do |p|
            p.key = i
            p.label = key
            p.value = value
          end
        end
      end

      # The admin password occupies the property slot after the IP settings.
      vapp_property_specs << RbVmomi::VIM::VAppPropertySpec.new.tap do |spec|
        spec.operation = 'edit'
        spec.info = RbVmomi::VIM::VAppPropertyInfo.new.tap do |p|
          p.key = ip_configuration.length
          p.label = 'admin_password'
          p.value = vm_config[:vm_password]
        end
      end
      logger.info("END VAppPropertySpec creation vapp_property_specs=#{vapp_property_specs.inspect}")
      vapp_property_specs
    end

    # Powers the VM on, then polls (30s interval, 7 minute cap) until the
    # guest tools report an IP address.
    def power_on_vm(vm)
      logger.info('BEGIN power_on_vm_task')
      vm.PowerOnVM_Task.wait_for_completion
      logger.info('END power_on_vm_task')

      Timeout.timeout(7*60) do
        until vm.guest_ip
          logger.info('BEGIN polling for VM IP address')
          sleep 30
        end
        logger.info("END polling for VM IP address #{vm.guest_ip.inspect}")
      end
    end

    # NOTE(review): opens a fresh (insecure-SSL) connection on every call;
    # callers invoke this repeatedly via datacenter/cluster/etc. lookups.
    def connection
      RbVmomi::VIM.connect(
        host: host,
        user: username,
        password: password,
        ssl: true,
        insecure: true,
      )
    end

    def datacenter
      connection.searchIndex.FindByInventoryPath(inventoryPath: datacenter_name).tap do |dc|
        fail("ERROR finding datacenter #{datacenter_name.inspect}") unless dc.is_a?(RbVmomi::VIM::Datacenter)
      end
    end

    # Finds (creating if necessary) the destination folder for VMs/templates.
    def target_folder(vsphere_config)
      datacenter.vmFolder.traverse(vsphere_config[:folder], RbVmomi::VIM::Folder, true)
    end

    def cluster(vsphere_config)
      datacenter.find_compute_resource(vsphere_config[:cluster]) ||
        fail("ERROR finding cluster #{vsphere_config[:cluster].inspect}")
    end

    def network(vsphere_config)
      datacenter.networkFolder.traverse(vsphere_config[:network]) ||
        fail("ERROR finding network #{vsphere_config[:network].inspect}")
    end

    def resource_pool(vsphere_config)
      find_resource_pool(cluster(vsphere_config), vsphere_config[:resource_pool]) ||
        fail("ERROR finding resource_pool #{vsphere_config[:resource_pool].inspect}")
    end

    def datastore(vsphere_config)
      datacenter.find_datastore(vsphere_config[:datastore]) ||
        fail("ERROR finding datastore #{vsphere_config[:datastore].inspect}")
    end

    # Named pool within the cluster, or the cluster's root pool when no name
    # was configured.
    def find_resource_pool(cluster, resource_pool_name)
      if resource_pool_name
        cluster.resourcePool.resourcePool.find { |rp| rp.name == resource_pool_name }
      else
        cluster.resourcePool
      end
    end

    def system_or_exit(command)
      logger.info("BEGIN running #{command.inspect}")
      system(command) || fail("ERROR running #{command.inspect}")
      logger.info("END running #{command.inspect}")
    end
  end
end
|
@@ -0,0 +1,19 @@
|
|
1
|
+
# Spec fixture: an OpenStack shepherd configuration as consumed by the specs.
iaas_type: openstack
vm_deployer:
  # Fog/OpenStack connection credentials.
  creds:
    auth_url: 'http://example.com/version/tokens'
    username: 'username'
    api_key: 'api-key'
    tenant: 'tenant'
  # Properties of the VM instance to boot.
  vm:
    name: 'some-vm-name'
    flavor_parameters:
      # Minimum disk (GB) the chosen flavor must provide.
      min_disk_size: 150
    network_name: 'some-network'
    key_name: 'some-key'
    security_group_names:
      - 'security-group-A'
      - 'security-group-B'
      - 'security-group-C'
    public_ip: 198.11.195.5
    private_ip: 192.168.100.100
|
@@ -0,0 +1,20 @@
|
|
1
|
+
require 'fog/openstack/requests/image/create_image'
|
2
|
+
require 'fog/openstack/requests/image/delete_image'
|
3
|
+
require 'fog/openstack/requests/image/list_public_images_detailed'
|
4
|
+
|
5
|
+
# Spec helper that swaps Fog's OpenStack image mock for a patched variant.
# Include it in an RSpec example group: the before-hook stubs the constants
# for the duration of each example.
module PatchedFog
  def self.included(example_group)
    example_group.before do
      stub_const('::Fog::Image::OpenStack::Mock', PatchedFog::ImageMock)
      stub_const('::Fog::Time', ::Time)
    end
  end

  # Fog's stock mock forgets to prune deleted images from its backing store;
  # this subclass keeps the mock's image state accurate for the specs.
  class ImageMock < Fog::Image::OpenStack::Mock
    def delete_image(image_id)
      self.data[:images].delete(image_id)
      super(image_id)
    end
  end
end
|
@@ -1,7 +1,7 @@
|
|
1
|
-
require 'vm_shepherd/
|
1
|
+
require 'vm_shepherd/aws_manager'
|
2
2
|
|
3
3
|
module VmShepherd
|
4
|
-
RSpec.describe
|
4
|
+
RSpec.describe AwsManager do
|
5
5
|
let(:access_key) { 'access-key' }
|
6
6
|
let(:secret_key) { 'secret-key' }
|
7
7
|
let(:ami_id) { 'ami-deadbeef' }
|
@@ -21,7 +21,7 @@ module VmShepherd
|
|
21
21
|
}
|
22
22
|
end
|
23
23
|
|
24
|
-
subject(:ami_manager) {
|
24
|
+
subject(:ami_manager) { AwsManager.new(aws_options) }
|
25
25
|
|
26
26
|
before do
|
27
27
|
expect(AWS).to receive(:config).with(
|
@@ -48,7 +48,6 @@ module VmShepherd
|
|
48
48
|
key_name: 'ssh-key-name',
|
49
49
|
security_group_ids: ['security-group-id'],
|
50
50
|
subnet: aws_options.fetch(:public_subnet_id),
|
51
|
-
private_ip_address: AmiManager::OPS_MANAGER_PRIVATE_IP,
|
52
51
|
instance_type: 'm3.medium').and_return(instance)
|
53
52
|
|
54
53
|
ami_manager.deploy(ami_file_path)
|
@@ -64,9 +63,9 @@ module VmShepherd
|
|
64
63
|
|
65
64
|
it 'stops retrying after 60 times' do
|
66
65
|
expect(instances).to receive(:create).and_raise(AWS::EC2::Errors::InvalidIPAddress::InUse).
|
67
|
-
exactly(
|
66
|
+
exactly(AwsManager::RETRY_LIMIT).times
|
68
67
|
|
69
|
-
expect { ami_manager.deploy(ami_file_path) }.to raise_error(
|
68
|
+
expect { ami_manager.deploy(ami_file_path) }.to raise_error(AwsManager::RetryLimitExceeded)
|
70
69
|
end
|
71
70
|
end
|
72
71
|
|
@@ -78,7 +77,7 @@ module VmShepherd
|
|
78
77
|
|
79
78
|
it 'handles API endpoints not knowing (right away) about the instance created' do
|
80
79
|
expect(instance).to receive(:status).and_raise(AWS::EC2::Errors::InvalidInstanceID::NotFound).
|
81
|
-
exactly(
|
80
|
+
exactly(AwsManager::RETRY_LIMIT - 1).times
|
82
81
|
expect(instance).to receive(:status).and_return(:running).once
|
83
82
|
|
84
83
|
ami_manager.deploy(ami_file_path)
|
@@ -86,9 +85,9 @@ module VmShepherd
|
|
86
85
|
|
87
86
|
it 'stops retrying after 60 times' do
|
88
87
|
expect(instance).to receive(:status).and_return(:pending).
|
89
|
-
exactly(
|
88
|
+
exactly(AwsManager::RETRY_LIMIT).times
|
90
89
|
|
91
|
-
expect { ami_manager.deploy(ami_file_path) }.to raise_error(
|
90
|
+
expect { ami_manager.deploy(ami_file_path) }.to raise_error(AwsManager::RetryLimitExceeded)
|
92
91
|
end
|
93
92
|
|
94
93
|
it 'attaches the elastic IP' do
|
@@ -131,7 +130,7 @@ module VmShepherd
|
|
131
130
|
let(:instances) { [instance1, instance2, persistent_instance] }
|
132
131
|
|
133
132
|
context 'when the do not terminate tag is present' do
|
134
|
-
let(:persist_tag) { {
|
133
|
+
let(:persist_tag) { { AwsManager::DO_NOT_TERMINATE_TAG_KEY => 'any value' } }
|
135
134
|
it 'does not attempt to terminate this instance' do
|
136
135
|
expect(instance1).to receive(:terminate)
|
137
136
|
expect(instance2).to receive(:terminate)
|
@@ -0,0 +1,237 @@
|
|
1
|
+
require 'vm_shepherd/openstack_manager'
|
2
|
+
require 'support/patched_fog'
|
3
|
+
|
4
|
+
module VmShepherd
  RSpec.describe OpenstackManager do
    include PatchedFog

    # Fog connection credentials passed to OpenstackManager.new.
    let(:openstack_options) do
      {
        auth_url: 'http://example.com/version/tokens',
        username: 'username',
        api_key: 'api-key',
        tenant: 'tenant',
      }
    end
    # Per-VM deployment options handed to #deploy / #destroy.
    let(:openstack_vm_options) do
      {
        name: 'some-vm-name',
        min_disk_size: 150,
        network_name: 'Public',
        key_name: 'some-key',
        security_group_names: [
          'security-group-A',
          'security-group-B',
          'security-group-C',
        ],
        public_ip: '192.168.27.129', # magic IP recognized by Fog::Mock
        private_ip: '192.168.27.100',
      }
    end

    subject(:openstack_vm_manager) { OpenstackManager.new(openstack_options) }

    describe '#service' do
      it 'creates a Fog::Compute connection' do
        expect(Fog::Compute).to receive(:new).with(
          {
            provider: 'openstack',
            openstack_auth_url: openstack_options[:auth_url],
            openstack_username: openstack_options[:username],
            openstack_tenant: openstack_options[:tenant],
            openstack_api_key: openstack_options[:api_key],
          }
        )
        openstack_vm_manager.service
      end
    end

    describe '#image_service' do
      it 'creates a Fog::Image connection' do
        expect(Fog::Image).to receive(:new).with(
          {
            provider: 'openstack',
            openstack_auth_url: openstack_options[:auth_url],
            openstack_username: openstack_options[:username],
            openstack_tenant: openstack_options[:tenant],
            openstack_api_key: openstack_options[:api_key],
            openstack_endpoint_type: 'publicURL',
          }
        )
        openstack_vm_manager.image_service
      end
    end

    describe '#network_service' do
      it 'creates a Fog::Network connection' do
        expect(Fog::Network).to receive(:new).with(
          {
            provider: 'openstack',
            openstack_auth_url: openstack_options[:auth_url],
            openstack_username: openstack_options[:username],
            openstack_tenant: openstack_options[:tenant],
            openstack_api_key: openstack_options[:api_key],
            openstack_endpoint_type: 'publicURL',
          }
        )
        openstack_vm_manager.network_service
      end
    end

    describe '#deploy' do
      let(:path) { 'path/to/qcow2/file' }
      let(:file_size) { 42 }

      let(:compute_service) { openstack_vm_manager.service }
      let(:image_service) { openstack_vm_manager.image_service }
      let(:network_service) { openstack_vm_manager.network_service }

      # Memoized collections so the same mock objects can be stubbed below
      # and inspected after deploy runs.
      let(:servers) { compute_service.servers }
      let(:addresses) { compute_service.addresses }
      let(:instance) { servers.find { |server| server.name == openstack_vm_options[:name] } }

      before do
        allow(File).to receive(:size).with(path).and_return(file_size)
        allow(openstack_vm_manager).to receive(:say)

        # Run everything against Fog's in-memory mock backend.
        Fog.mock!
        Fog::Mock.reset
        Fog::Mock.delay = 0

        allow(compute_service).to receive(:servers).and_return(servers)
        allow(compute_service).to receive(:addresses).and_return(addresses)
      end

      it 'uploads the image' do
        file_size = 2
        expect(File).to receive(:size).with(path).and_return(file_size)

        openstack_vm_manager.deploy(path, openstack_vm_options)

        uploaded_image = image_service.images.find { |image| image.name == openstack_vm_options[:name] }
        expect(uploaded_image).to be
        expect(uploaded_image.size).to eq(file_size)
      end

      context 'when launching an instance' do
        it 'launches an image instance' do
          openstack_vm_manager.deploy(path, openstack_vm_options)

          expect(instance).to be
        end

        it 'uses the correct flavor for the instance' do
          openstack_vm_manager.deploy(path, openstack_vm_options)

          instance_flavor = compute_service.flavors.find { |flavor| flavor.id == instance.flavor['id'] }
          expect(instance_flavor.disk).to be >= 150
        end

        it 'uses the previously uploaded image' do
          openstack_vm_manager.deploy(path, openstack_vm_options)

          instance_image = image_service.images.get instance.image['id']
          expect(instance_image.name).to eq(openstack_vm_options[:name])
        end

        it 'assigns the correct key_name to the instance' do
          expect(servers).to receive(:create).with(
            hash_including(:key_name => openstack_vm_options[:key_name])
          ).and_call_original

          openstack_vm_manager.deploy(path, openstack_vm_options)
        end

        it 'assigns the correct security groups' do
          expect(servers).to receive(:create).with(
            hash_including(:security_groups => openstack_vm_options[:security_group_names])
          ).and_call_original

          openstack_vm_manager.deploy(path, openstack_vm_options)
        end

        it 'assigns the correct private network information' do
          assigned_network = network_service.networks.find { |network| network.name == openstack_vm_options[:network_name] }
          expect(servers).to receive(:create).with(
            hash_including(:nics => [
              { net_id: assigned_network.id, v4_fixed_ip: openstack_vm_options[:private_ip]}
            ]
            )
          ).and_call_original

          openstack_vm_manager.deploy(path, openstack_vm_options)
        end
      end

      it 'waits for the server to be ready' do
        openstack_vm_manager.deploy(path, openstack_vm_options)
        expect(instance.state).to eq('ACTIVE')
      end

      it 'assigns an IP to the instance' do
        openstack_vm_manager.deploy(path, openstack_vm_options)
        ip = addresses.find { |address| address.ip == openstack_vm_options[:public_ip] }

        expect(ip.instance_id).to eq(instance.id)
      end
    end

    describe '#destroy' do
      let(:path) { 'path/to/qcow2/file' }
      let(:file_size) { 42 }

      let(:compute_service) { openstack_vm_manager.service }
      let(:image_service) { openstack_vm_manager.image_service }
      let(:network_service) { openstack_vm_manager.network_service }

      let(:servers) { compute_service.servers }
      let(:addresses) { compute_service.addresses }
      let(:images) { image_service.images }
      let(:image) { images.find { |image| image.name == openstack_vm_options[:name] } }
      let(:instance) { servers.find { |server| server.name == openstack_vm_options[:name] } }

      before do
        allow(File).to receive(:size).with(path).and_return(file_size)
        allow(openstack_vm_manager).to receive(:say)

        Fog.mock!
        Fog::Mock.reset
        Fog::Mock.delay = 0

        allow(compute_service).to receive(:servers).and_return(servers)
        allow(compute_service).to receive(:addresses).and_return(addresses)
        allow(image_service).to receive(:images).and_return(images)

        # Deploy first so there is a server + image to destroy.
        openstack_vm_manager.deploy(path, openstack_vm_options)
      end

      it 'calls destroy on the correct instance' do
        # change matcher built before the action: reload and look the
        # server up again, expecting it to become nil.
        destroy_correct_server = change do
          servers.reload
          servers.find { |server| server.name == openstack_vm_options[:name] }
        end.to(nil)

        expect { openstack_vm_manager.destroy(openstack_vm_options) }.to(destroy_correct_server)
      end

      it 'calls destroy on the correct image' do
        destroy_correct_image = change do
          images.reload
          images.find { |image| image.name == openstack_vm_options[:name] }
        end.to(nil)

        expect { openstack_vm_manager.destroy(openstack_vm_options) }.to(destroy_correct_image)
      end

      context 'when the server does not exist' do
        before do
          allow(servers).to receive(:get).and_return(nil)
        end

        it 'returns without error' do
          expect { openstack_vm_manager.destroy(openstack_vm_options) }.not_to raise_error
        end
      end
    end
  end
end
|