bosh_vsphere_cpi 1.2865.0 → 1.2881.0
- checksums.yaml +4 -4
- data/bin/vsphere_cpi +0 -16
- data/bin/vsphere_cpi_console +2 -9
- data/lib/cloud/vsphere/client.rb +38 -41
- data/lib/cloud/vsphere/cloud.rb +120 -273
- data/lib/cloud/vsphere/config.rb +0 -9
- data/lib/cloud/vsphere/disk_provider.rb +91 -0
- data/lib/cloud/vsphere/file_provider.rb +3 -1
- data/lib/cloud/vsphere/fixed_cluster_placer.rb +18 -7
- data/lib/cloud/vsphere/resources/cluster.rb +40 -81
- data/lib/cloud/vsphere/resources/datacenter.rb +37 -10
- data/lib/cloud/vsphere/resources/datastore.rb +21 -10
- data/lib/cloud/vsphere/resources/disk/disk_config.rb +5 -5
- data/lib/cloud/vsphere/resources/disk/ephemeral_disk.rb +3 -3
- data/lib/cloud/vsphere/resources/disk.rb +19 -0
- data/lib/cloud/vsphere/resources/resource_pool.rb +18 -13
- data/lib/cloud/vsphere/resources/scorer.rb +40 -97
- data/lib/cloud/vsphere/resources/vm.rb +185 -0
- data/lib/cloud/vsphere/resources.rb +106 -161
- data/lib/cloud/vsphere/version.rb +1 -1
- data/lib/cloud/vsphere/vm_creator.rb +45 -46
- data/lib/cloud/vsphere/vm_creator_builder.rb +2 -1
- data/lib/cloud/vsphere/vm_provider.rb +16 -0
- data/lib/cloud/vsphere.rb +1 -1
- metadata +94 -38
- data/db/migrations/20120123235022_initial.rb +0 -24
- data/db/migrations/20121204174707_add_uuid_to_disks.rb +0 -14
- data/lib/cloud/vsphere/models/disk.rb +0 -11
- data/lib/cloud/vsphere/resources/disk/persistent_disk.rb +0 -110
data/lib/cloud/vsphere/cloud.rb
CHANGED
@@ -14,23 +14,22 @@ require 'cloud/vsphere/resources/cluster'
 require 'cloud/vsphere/resources/datacenter'
 require 'cloud/vsphere/resources/datastore'
 require 'cloud/vsphere/resources/folder'
-require 'cloud/vsphere/resources/
+require 'cloud/vsphere/resources/vm'
 require 'cloud/vsphere/resources/resource_pool'
 require 'cloud/vsphere/resources/scorer'
 require 'cloud/vsphere/resources/util'
-require 'cloud/vsphere/models/disk'
 require 'cloud/vsphere/path_finder'
 require 'cloud/vsphere/vm_creator_builder'
+require 'cloud/vsphere/disk_provider'
+require 'cloud/vsphere/vm_provider'
 require 'cloud/vsphere/fixed_cluster_placer'
 
 module VSphereCloud
-
   class Cloud < Bosh::Cloud
     include VimSdk
     include RetryBlock
 
-    class TimeoutException < StandardError;
-    end
+    class TimeoutException < StandardError; end
 
     attr_accessor :client
 
@@ -40,8 +39,9 @@ module VSphereCloud
       @logger = config.logger
       @client = config.client
       @cloud_searcher = CloudSearcher.new(@client.service_content, @logger)
+      @datacenter = Resources::Datacenter.new(config)
 
-      @resources = Resources.new(config)
+      @resources = Resources.new(@datacenter, config)
       @file_provider = FileProvider.new(config.rest_client, config.vcenter_host)
       @agent_env = AgentEnv.new(client, @file_provider, @cloud_searcher)
 
@@ -69,25 +69,17 @@ module VSphereCloud
     end
 
     def has_vm?(vm_cid)
-
+      vm_provider.find(vm_cid)
       true
     rescue Bosh::Clouds::VMNotFound
       false
     end
 
-    def has_disk?(
-
-
-
-
-      # If the path is not set it means that disk was only created in
-      # CPI database and attach disk was not called or failed.
-      # We consider that disk is missing only if CPI desired state
-      # is to be present but it actually missing in infrastructure.
-      return true unless disk.path
-      return false unless disk.datacenter
-
-      @client.has_disk?(disk.path, disk.datacenter)
+    def has_disk?(disk_cid)
+      disk_provider.find(disk_cid)
+      true
+    rescue Bosh::Clouds::DiskNotFound
+      false
     end
 
     def create_stemcell(image, _)
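
Both existence checks now go through lookup objects instead of querying vCenter, or the CPI's own disk records, inline. VMProvider is one of the files added in this release (data/lib/cloud/vsphere/vm_provider.rb, +16 lines) and its source is not shown on this page, so the following is only a reconstruction: the constructor arguments come from the vm_provider factory method added near the end of this file, the Bosh::Clouds::VMNotFound error is the one rescued above, and the inventory-path lookup mirrors the get_vm_by_cid helper this release deletes further down.

    # Reconstruction, not the shipped implementation (see vm_provider.rb in the gem).
    module VSphereCloud
      class VMProvider
        def initialize(datacenter, client, logger)
          @datacenter = datacenter
          @client = client
          @logger = logger
        end

        # Same lookup the removed get_vm_by_cid performed, but scoped to the one
        # configured datacenter and wrapped in the new Resources::VM object.
        def find(vm_cid)
          vm_mob = @client.find_by_inventory_path(
            [@datacenter.name, 'vm', @datacenter.vm_folder.path_components, vm_cid]
          )
          raise Bosh::Clouds::VMNotFound, "VM `#{vm_cid}' not found" if vm_mob.nil?

          Resources::VM.new(vm_cid, vm_mob, @client, @logger)
        end
      end
    end

Whatever the provider internals, the CPI-level contract is unchanged: a missing VM or disk surfaces as a typed not-found error that has_vm? and has_disk? translate into false.
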
@@ -102,11 +94,12 @@ module VSphereCloud
         raise 'Missing OVF' if ovf_file.nil?
         ovf_file = File.join(temp_dir, ovf_file)
 
-        name = "sc-#{
+        name = "sc-#{SecureRandom.uuid}"
         @logger.info("Generated name: #{name}")
 
         stemcell_size = File.size(image) / (1024 * 1024)
-        cluster
+        cluster = @resources.pick_cluster_for_vm(0, stemcell_size, [])
+        datastore = @resources.pick_ephemeral_datastore(cluster, stemcell_size)
         @logger.info("Deploying to: #{cluster.mob} / #{datastore.mob}")
 
         import_spec_result = import_ovf(name, ovf_file, cluster.resource_pool.mob, datastore.mob)
@@ -148,19 +141,17 @@ module VSphereCloud
     def delete_stemcell(stemcell)
       with_thread_name("delete_stemcell(#{stemcell})") do
         Bosh::ThreadPool.new(max_threads: 32, logger: @logger).wrap do |pool|
-          @
-
-
-
-
-
-
-
-
-
-
-            @logger.info("Deleted: #{template_name}")
-          end
+          @logger.info("Looking for stemcell replicas in: #{@datacenter.name}")
+          templates = @cloud_searcher.get_property(@datacenter.template_folder.mob, Vim::Folder, 'childEntity', ensure_all: true)
+          template_properties = @cloud_searcher.get_properties(templates, Vim::VirtualMachine, ['name'])
+          template_properties.each_value do |properties|
+            template_name = properties['name'].gsub('%2f', '/')
+            if template_name.split('/').first.strip == stemcell
+              @logger.info("Found: #{template_name}")
+              pool.process do
+                @logger.info("Deleting: #{template_name}")
+                client.delete_vm(properties[:obj])
+                @logger.info("Deleted: #{template_name}")
               end
             end
           end
@@ -168,24 +159,8 @@ module VSphereCloud
       end
     end
 
-    def disk_spec(persistent_disks)
-      disks = []
-      if persistent_disks
-        persistent_disks.each do |disk_cid|
-          disk = Models::Disk.first(uuid: disk_cid)
-          disks << {
-            size: disk.size,
-            dc_name: disk.datacenter,
-            ds_name: disk.datastore
-          }
-        end
-      end
-      disks
-    end
-
     def stemcell_vm(name)
-
-      client.find_by_inventory_path([dc.name, 'vm', dc.template_folder.path_components, name])
+      client.find_by_inventory_path([@datacenter.name, 'vm', @datacenter.template_folder.path_components, name])
     end
 
     def create_vm(agent_id, stemcell, cloud_properties, networks, disk_locality = nil, environment = nil)
@@ -198,7 +173,8 @@ module VSphereCloud
           @logger,
           self,
           @agent_env,
-          @file_provider
+          @file_provider,
+          disk_provider
         ).create(agent_id, stemcell, networks, disk_locality, environment)
       end
     end
@@ -207,39 +183,10 @@ module VSphereCloud
       with_thread_name("delete_vm(#{vm_cid})") do
         @logger.info("Deleting vm: #{vm_cid}")
 
-        vm =
-
-        properties =
-          @cloud_searcher.get_properties(
-            vm,
-            Vim::VirtualMachine,
-            ['runtime.powerState', 'runtime.question', 'config.hardware.device', 'name'],
-            ensure: ['config.hardware.device']
-          )
-
-        retry_block do
-          question = properties['runtime.question']
-          if question
-            choices = question.choice
-            @logger.info("VM is blocked on a question: #{question.text}, " +
-              "providing default answer: #{choices.choice_info[choices.default_index].label}")
-            client.answer_vm(vm, question.id, choices.choice_info[choices.default_index].key)
-            power_state = @cloud_searcher.get_property(vm, Vim::VirtualMachine, 'runtime.powerState')
-          else
-            power_state = properties['runtime.powerState']
-          end
-
-          if power_state != Vim::VirtualMachine::PowerState::POWERED_OFF
-            @logger.info("Powering off vm: #{vm_cid}")
-            client.power_off_vm(vm)
-          end
-        end
-
-        # Detach any persistent disks in case they were not detached from the instance
-        devices = properties['config.hardware.device']
-        persistent_disks = devices.select { |device| device.kind_of?(Vim::Vm::Device::VirtualDisk) &&
-          device.backing.disk_mode == Vim::Vm::Device::VirtualDiskOption::DiskMode::INDEPENDENT_PERSISTENT }
+        vm = vm_provider.find(vm_cid)
+        vm.power_off
 
+        persistent_disks = vm.persistent_disks
         unless persistent_disks.empty?
           @logger.info("Found #{persistent_disks.size} persistent disk(s)")
           config = Vim::Vm::ConfigSpec.new
@@ -248,41 +195,38 @@ module VSphereCloud
            @logger.info("Detaching: #{virtual_disk.backing.file_name}")
            config.device_change << create_delete_device_spec(virtual_disk)
          end
-          retry_block { client.reconfig_vm(vm, config) }
+          retry_block { client.reconfig_vm(vm.mob, config) }
          @logger.info("Detached #{persistent_disks.size} persistent disk(s)")
        end
 
        # Delete env.iso and VM specific files managed by the director
-        retry_block
-          cdrom = devices.find { |device| device.kind_of?(Vim::Vm::Device::VirtualCdrom) }
-          @agent_env.clean_env(vm) if cdrom
-        end
+        retry_block { @agent_env.clean_env(vm.mob) } if vm.cdrom
 
-
+        vm.delete
        @logger.info("Deleted vm: #{vm_cid}")
      end
    end
 
    def reboot_vm(vm_cid)
      with_thread_name("reboot_vm(#{vm_cid})") do
-        vm =
-        datacenter = client.find_parent(vm, Vim::Datacenter)
-        power_state = @cloud_searcher.get_property(vm, Vim::VirtualMachine, 'runtime.powerState')
+        vm = vm_provider.find(vm_cid)
 
        @logger.info("Reboot vm = #{vm_cid}")
-
-
+
+        unless vm.powered_on?
+          @logger.info("VM not in POWERED_ON state. Current state : #{vm.power_state}")
        end
+
        begin
-          vm.
+          vm.reboot
        rescue => e
          @logger.error("Soft reboot failed #{e} -#{e.backtrace.join("\n")}")
          @logger.info('Try hard reboot')
+
          # if we fail to perform a soft-reboot we force a hard-reboot
-
-
-
-          retry_block { client.power_on_vm(datacenter, vm) }
+          retry_block { vm.power_off } if vm.powered_on?
+
+          retry_block { vm.power_on }
        end
      end
    end
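
delete_vm and reboot_vm now delegate all power handling to the new Resources::VM wrapper (power_off, power_on, reboot, powered_on?, power_state, plus persistent_disks, cdrom and delete). That class lives in data/lib/cloud/vsphere/resources/vm.rb (+185 lines) and is not part of this page; the sketch below is an assumed reconstruction of just the power helpers, pieced together from the calls above and from the vCenter operations the removed inline code used to perform.

    # Assumed shape of the power helpers; the shipped class is larger.
    module VSphereCloud
      module Resources
        class VM
          include VimSdk

          def initialize(cid, mob, client, logger)
            @cid = cid
            @mob = mob
            @client = client
            @logger = logger
          end

          attr_reader :cid, :mob

          def power_state
            # the removed inline code read 'runtime.powerState' through a CloudSearcher
            CloudSearcher.new(@client.service_content, @logger).get_property(@mob, Vim::VirtualMachine, 'runtime.powerState')
          end

          def powered_on?
            power_state == Vim::VirtualMachine::PowerState::POWERED_ON
          end

          def power_off
            @client.power_off_vm(@mob)
          end

          def power_on
            # power-on needs the owning datacenter, which the old code resolved with find_parent
            @client.power_on_vm(@client.find_parent(@mob, Vim::Datacenter), @mob)
          end

          def reboot
            # soft reboot through VMware Tools; reboot_vm above falls back to a power cycle on failure
            @mob.reboot_guest
          end
        end
      end
    end
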
@@ -303,11 +247,11 @@ module VSphereCloud
         name_to_key_id[name] = field.key
       end
 
-      vm =
+      vm = vm_provider.find(vm_cid)
 
       metadata.each do |name, value|
         value = '' if value.nil? # value is required
-        fields_manager.set_field(vm, name_to_key_id[name], value)
+        fields_manager.set_field(vm.mob, name_to_key_id[name], value)
       end
     rescue SoapError => e
       if e.fault.kind_of?(Vim::Fault::NoPermission)
@@ -322,33 +266,13 @@ module VSphereCloud
 
     def configure_networks(vm_cid, networks)
       with_thread_name("configure_networks(#{vm_cid}, ...)") do
-        vm =
-
-        @logger.debug('Waiting for the VM to shutdown')
-        begin
-          begin
-            vm.shutdown_guest
-          rescue => e
-            @logger.debug("Ignoring possible race condition when a VM has powered off by the time we ask it to shutdown: #{e.inspect}")
-          end
-
-          wait_until_off(vm, 60)
-        rescue TimeoutException
-          @logger.debug('The guest did not shutdown in time, requesting it to power off')
-          client.power_off_vm(vm)
-        end
+        vm = vm_provider.find(vm_cid)
+        vm.shutdown
 
         @logger.info("Configuring: #{vm_cid} to use the following network settings: #{networks.pretty_inspect}")
-        vm = get_vm_by_cid(vm_cid)
-        devices = @cloud_searcher.get_property(vm, Vim::VirtualMachine, 'config.hardware.device', ensure_all: true)
-        datacenter = client.find_parent(vm, Vim::Datacenter)
-        datacenter_name = config.datacenter_name
-        pci_controller = devices.find { |device| device.kind_of?(Vim::Vm::Device::VirtualPCIController) }
-
         config = Vim::Vm::ConfigSpec.new
         config.device_change = []
-        nics
-        nics.each do |nic|
+        vm.nics.each do |nic|
           nic_config = create_delete_device_spec(nic)
           config.device_change << nic_config
         end
@@ -356,80 +280,54 @@ module VSphereCloud
         dvs_index = {}
         networks.each_value do |network|
           v_network_name = network['cloud_properties']['name']
-          network_mob = client.find_by_inventory_path([
-          nic_config = create_nic_config_spec(v_network_name, network_mob, pci_controller.key, dvs_index)
+          network_mob = client.find_by_inventory_path([@datacenter.name, 'network', v_network_name])
+          nic_config = create_nic_config_spec(v_network_name, network_mob, vm.pci_controller.key, dvs_index)
           config.device_change << nic_config
         end
 
-        fix_device_unit_numbers(
+        vm.fix_device_unit_numbers(config.device_change)
         @logger.debug('Reconfiguring the networks')
-        @client.reconfig_vm(vm, config)
+        @client.reconfig_vm(vm.mob, config)
 
-        env = @agent_env.get_current_env(vm,
+        env = @agent_env.get_current_env(vm.mob, @datacenter.name)
         @logger.debug("Reading current agent env: #{env.pretty_inspect}")
 
-        devices = @cloud_searcher.get_property(vm, Vim::VirtualMachine, 'config.hardware.device', ensure_all: true)
+        devices = @cloud_searcher.get_property(vm.mob, Vim::VirtualMachine, 'config.hardware.device', ensure_all: true)
         env['networks'] = generate_network_env(devices, networks, dvs_index)
 
         @logger.debug("Updating agent env to: #{env.pretty_inspect}")
-        location = get_vm_location(vm, datacenter:
-        @agent_env.set_env(vm, location, env)
+        location = get_vm_location(vm.mob, datacenter: @datacenter.name)
+        @agent_env.set_env(vm.mob, location, env)
 
         @logger.debug('Powering the VM back on')
-
-      end
-    end
-
-    def get_vm_host_info(vm_ref)
-      vm_properties = @cloud_searcher.get_properties(vm_ref, Vim::VirtualMachine, 'runtime')
-      vm_runtime = vm_properties['runtime']
-
-      properties = @cloud_searcher.get_properties(vm_runtime.host, Vim::HostSystem, ['datastore', 'parent'], ensure_all: true)
-
-      # Get the cluster that the vm's host belongs to.
-      cluster = @cloud_searcher.get_properties(properties['parent'], Vim::ClusterComputeResource, 'name')
-
-      # Get the datastores that are accessible to the vm's host.
-      datastores_accessible = []
-      properties['datastore'].each do |store|
-        ds = @cloud_searcher.get_properties(store, Vim::Datastore, 'info', ensure_all: true)
-        datastores_accessible << ds['info'].name
+        vm.power_on
       end
-
-      { 'cluster' => cluster['name'], 'datastores' => datastores_accessible }
     end
 
     def attach_disk(vm_cid, disk_cid)
       with_thread_name("attach_disk(#{vm_cid}, #{disk_cid})") do
         @logger.info("Attaching disk: #{disk_cid} on vm: #{vm_cid}")
 
-        vm =
-
-        datacenter_name = config.datacenter_name
-
-        vm_properties = @cloud_searcher.get_properties(vm, Vim::VirtualMachine, 'config.hardware.device', ensure_all: true)
-        host_info = get_vm_host_info(vm)
-
-        devices = vm_properties['config.hardware.device']
-        system_disk = devices.find { |device| device.kind_of?(Vim::Vm::Device::VirtualDisk) }
+        vm = vm_provider.find(vm_cid)
 
-
-
+        cluster = @datacenter.clusters[vm.cluster]
+        disk = disk_provider.find_and_move(disk_cid, cluster, @datacenter.name, vm.accessible_datastores)
+        disk_config_spec = disk.attach_spec(vm.system_disk.controller_key)
 
         vm_config = Vim::Vm::ConfigSpec.new
         vm_config.device_change = []
         vm_config.device_change << disk_config_spec
-        fix_device_unit_numbers(
+        vm.fix_device_unit_numbers(vm_config.device_change)
 
-        env = @agent_env.get_current_env(vm,
+        env = @agent_env.get_current_env(vm.mob, @datacenter.name)
         @logger.info("Reading current agent env: #{env.pretty_inspect}")
         env['disks']['persistent'][disk_cid] = disk_config_spec.device.unit_number.to_s
         @logger.info("Updating agent env to: #{env.pretty_inspect}")
 
-        location = get_vm_location(vm, datacenter:
-        @agent_env.set_env(vm, location, env)
+        location = get_vm_location(vm.mob, datacenter: @datacenter.name)
+        @agent_env.set_env(vm.mob, location, env)
         @logger.info('Attaching disk')
-        client.reconfig_vm(vm, vm_config)
+        client.reconfig_vm(vm.mob, vm_config)
         @logger.info('Finished attaching disk')
       end
     end
@@ -437,97 +335,64 @@ module VSphereCloud
     def detach_disk(vm_cid, disk_cid)
       with_thread_name("detach_disk(#{vm_cid}, #{disk_cid})") do
         @logger.info("Detaching disk: #{disk_cid} from vm: #{vm_cid}")
-        disk =
-
-
-        vm = get_vm_by_cid(vm_cid)
+        disk = disk_provider.find(disk_cid)
+        vm = vm_provider.find(vm_cid)
+        vm_mob = vm.mob
 
-        location = get_vm_location(
-        env = @agent_env.get_current_env(
+        location = get_vm_location(vm_mob)
+        env = @agent_env.get_current_env(vm_mob, location[:datacenter])
         @logger.info("Reading current agent env: #{env.pretty_inspect}")
-        if env['disks']['persistent'][disk.
-          env['disks']['persistent'].delete(disk.
+        if env['disks']['persistent'][disk.cid]
+          env['disks']['persistent'].delete(disk.cid)
           @logger.info("Updating agent env to: #{env.pretty_inspect}")
 
-          @agent_env.set_env(
+          @agent_env.set_env(vm_mob, location, env)
         end
 
-
-        virtual_disk =
-
-          device.kind_of?(Vim::Vm::Device::VirtualDisk) && device.backing.file_name.end_with?("/#{disk_cid}.vmdk")
-        end
-        raise Bosh::Clouds::DiskNotAttached.new(true), "Disk (#{disk_cid}) is not attached to VM (#{vm_cid})" if virtual_disk.nil?
+        vm.reload
+        virtual_disk = vm.disk_by_cid(disk.cid)
+        raise Bosh::Clouds::DiskNotAttached.new(true), "Disk (#{disk.cid}) is not attached to VM (#{vm.cid})" if virtual_disk.nil?
 
         config = Vim::Vm::ConfigSpec.new
         config.device_change = []
         config.device_change << create_delete_device_spec(virtual_disk)
 
         @logger.info('Detaching disk')
-        client.reconfig_vm(
+        client.reconfig_vm(vm_mob, config)
 
         # detach-disk is async and task completion does not necessarily mean
         # that changes have been applied to VC side. Query VC until we confirm
         # that the change has been applied. This is a known issue for vsphere 4.
         # Fixed in vsphere 5.
         5.times do
-
-          virtual_disk =
-            devices.find do |device|
-              device.kind_of?(Vim::Vm::Device::VirtualDisk) &&
-                device.backing.file_name.end_with?("/#{disk_cid}.vmdk")
-            end
+          vm.reload
+          virtual_disk = vm.disk_by_cid(disk.cid)
           break if virtual_disk.nil?
           sleep(1.0)
         end
-        raise "Failed to detach disk: #{
+        raise "Failed to detach disk: #{disk.cid} from vm: #{vm.cid}" unless virtual_disk.nil?
 
         @logger.info('Finished detaching disk')
       end
     end
 
-    def create_disk(
-      with_thread_name("create_disk(#{
-        @logger.info("Creating disk with size: #{
-        disk =
-        disk.uuid = "disk-#{generate_unique_name}"
-        disk.size = size
-        disk.save
+    def create_disk(size_in_mb, cloud_properties, _ = nil)
+      with_thread_name("create_disk(#{size_in_mb}, _)") do
+        @logger.info("Creating disk with size: #{size_in_mb}")
+        disk = disk_provider.create(size_in_mb)
         @logger.info("Created disk: #{disk.inspect}")
-        disk.
+        disk.cid
       end
     end
 
     def delete_disk(disk_cid)
       with_thread_name("delete_disk(#{disk_cid})") do
         @logger.info("Deleting disk: #{disk_cid}")
-        disk =
-
-        unless has_disk?(disk_cid)
-          raise Bosh::Clouds::DiskNotFound.new(true), "disk #{disk_cid} not found"
-        end
-
-        datacenter = client.find_by_inventory_path(disk.datacenter)
-        if datacenter.nil?
-          raise Bosh::Clouds::DiskNotFound.new(true), "datacenter for disk #{disk_cid} not found"
-        end
-
-        client.delete_disk(datacenter, disk.path) if disk.path
+        disk = disk_provider.find(disk_cid)
+        client.delete_disk(@datacenter.mob, disk.path)
 
-
-          @logger.info('Finished deleting disk')
-        else
-          raise "Could not find disk: #{disk_cid}"
-        end
-      end
-    end
-
-    def get_vm_by_cid(vm_cid)
-      @resources.datacenters.each_value do |datacenter|
-        vm = client.find_by_inventory_path([datacenter.name, 'vm', datacenter.vm_folder.path_components, vm_cid])
-        return vm unless vm.nil?
+        @logger.info('Finished deleting disk')
       end
-      raise Bosh::Clouds::VMNotFound, "VM `#{vm_cid}' not found"
     end
 
     def replicate_stemcell(cluster, datastore, stemcell)
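
Persistent disks also lose their local bookkeeping in this release: the CPI's Models::Disk records, the two data/db migrations behind them and the disk_spec helper are all deleted, and every disk operation above goes through the new DiskProvider (data/lib/cloud/vsphere/disk_provider.rb, +91 lines, likewise not shown here). Collecting the call sites from this diff gives the surface cloud.rb now relies on; the listing below is illustrative only, the signatures are inferred from those call sites rather than from the new class itself, and the bare locals stand in for the corresponding state inside Cloud.

    # DiskProvider as exercised by cloud.rb in this diff (inferred from call sites).
    disk = disk_provider.create(size_in_mb)        # create_disk: returns an object exposing #cid and #path
    disk = disk_provider.find(disk_cid)            # has_disk? / delete_disk: raises Bosh::Clouds::DiskNotFound
    disk = disk_provider.find_and_move(            # attach_disk: relocates the disk onto a datastore
      disk_cid,                                    # that the target VM's host can actually reach
      cluster,
      @datacenter.name,
      vm.accessible_datastores
    )
    disk_config_spec = disk.attach_spec(vm.system_disk.controller_key)
    client.delete_disk(@datacenter.mob, disk.path) # delete_disk now talks to vCenter directly
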
def replicate_stemcell(cluster, datastore, stemcell)
|
@@ -695,14 +560,6 @@ module VSphereCloud
|
|
695
560
|
vm.clone(folder, name, clone_spec)
|
696
561
|
end
|
697
562
|
|
698
|
-
def generate_unique_name
|
699
|
-
SecureRandom.uuid
|
700
|
-
end
|
701
|
-
|
702
|
-
def create_disk_config_spec(datastore, file_name, controller_key, space, options = {})
|
703
|
-
|
704
|
-
end
|
705
|
-
|
706
563
|
def create_nic_config_spec(v_network_name, network, controller_key, dvs_index)
|
707
564
|
raise "Can't find network: #{v_network_name}" if network.nil?
|
708
565
|
if network.class == Vim::Dvs::DistributedVirtualPortgroup
|
@@ -749,25 +606,6 @@ module VSphereCloud
|
|
749
606
|
device_config_spec
|
750
607
|
end
|
751
608
|
|
752
|
-
def fix_device_unit_numbers(devices, device_changes)
|
753
|
-
controllers_available_unit_numbers = Hash.new { |h,k| h[k] = (0..15).to_a }
|
754
|
-
devices.each do |device|
|
755
|
-
if device.controller_key
|
756
|
-
available_unit_numbers = controllers_available_unit_numbers[device.controller_key]
|
757
|
-
available_unit_numbers.delete(device.unit_number)
|
758
|
-
end
|
759
|
-
end
|
760
|
-
|
761
|
-
device_changes.each do |device_change|
|
762
|
-
device = device_change.device
|
763
|
-
if device.controller_key && device.unit_number.nil?
|
764
|
-
available_unit_numbers = controllers_available_unit_numbers[device.controller_key]
|
765
|
-
raise "No available unit numbers for device: #{device.inspect}" if available_unit_numbers.empty?
|
766
|
-
device.unit_number = available_unit_numbers.shift
|
767
|
-
end
|
768
|
-
end
|
769
|
-
end
|
770
|
-
|
771
609
|
def import_ovf(name, ovf, resource_pool, datastore)
|
772
610
|
import_spec_params = Vim::OvfManager::CreateImportSpecParams.new
|
773
611
|
import_spec_params.entity_name = name
|
@@ -834,34 +672,44 @@ module VSphereCloud
|
|
834
672
|
info.entity
|
835
673
|
end
|
836
674
|
|
837
|
-
|
838
|
-
started = Time.now
|
839
|
-
loop do
|
840
|
-
power_state = @cloud_searcher.get_property(vm, Vim::VirtualMachine, 'runtime.powerState')
|
841
|
-
break if power_state == Vim::VirtualMachine::PowerState::POWERED_OFF
|
842
|
-
raise TimeoutException if Time.now - started > timeout
|
843
|
-
sleep(1.0)
|
844
|
-
end
|
845
|
-
end
|
846
|
-
|
675
|
+
# This method is used by micro bosh deployment cleaner
|
847
676
|
def get_vms
|
848
677
|
subfolders = []
|
849
678
|
with_thread_name("get_vms") do
|
850
|
-
@
|
851
|
-
|
852
|
-
|
853
|
-
|
854
|
-
|
855
|
-
|
679
|
+
@logger.info("Looking for VMs in: #{@datacenter.name} - #{@datacenter.master_vm_folder.path}")
|
680
|
+
subfolders += @datacenter.master_vm_folder.mob.child_entity
|
681
|
+
@logger.info("Looking for Stemcells in: #{@datacenter.name} - #{@datacenter.master_template_folder.path}")
|
682
|
+
subfolders += @datacenter.master_template_folder.mob.child_entity
|
683
|
+
end
|
684
|
+
mobs = subfolders.map { |folder| folder.child_entity }.flatten
|
685
|
+
mobs.map do |mob|
|
686
|
+
VSphereCloud::Resources::VM.new(mob.name, mob, @client, @logger)
|
856
687
|
end
|
857
|
-
|
858
|
-
subfolders.map { |folder| folder.child_entity }.flatten
|
859
688
|
end
|
860
689
|
|
861
690
|
def ping
|
862
691
|
"pong"
|
863
692
|
end
|
864
693
|
|
694
|
+
def vm_provider
|
695
|
+
VMProvider.new(
|
696
|
+
@datacenter,
|
697
|
+
@client,
|
698
|
+
@logger
|
699
|
+
)
|
700
|
+
end
|
701
|
+
|
702
|
+
def disk_provider
|
703
|
+
DiskProvider.new(
|
704
|
+
@client.service_content.virtual_disk_manager,
|
705
|
+
@datacenter,
|
706
|
+
@resources,
|
707
|
+
@config.datacenter_disk_path,
|
708
|
+
@client,
|
709
|
+
@logger
|
710
|
+
)
|
711
|
+
end
|
712
|
+
|
865
713
|
private
|
866
714
|
|
867
715
|
def choose_placer(cloud_properties)
|
@@ -880,8 +728,7 @@ module VSphereCloud
|
|
880
728
|
end
|
881
729
|
|
882
730
|
def find_cluster(cluster_name)
|
883
|
-
datacenter
|
884
|
-
datacenter.clusters[cluster_name]
|
731
|
+
@datacenter.clusters[cluster_name]
|
885
732
|
end
|
886
733
|
|
887
734
|
attr_reader :config
|
data/lib/cloud/vsphere/config.rb
CHANGED
@@ -74,10 +74,6 @@ module VSphereCloud
       config.fetch('mem_overcommit_ratio', @default_overcommit_ratio)
     end
 
-    def copy_disks
-      !!config['copy_disks']
-    end
-
     def agent
       config['agent']
     end
@@ -122,10 +118,6 @@ module VSphereCloud
       @cluster_objs ||= cluster_objs
     end
 
-    def datacenter_allow_mixed_datastores
-      !!vcenter_datacenter['allow_mixed_datastores']
-    end
-
     def datacenter_use_sub_folder
       datacenter_clusters.any? { |_, cluster| cluster.resource_pool } ||
         !!vcenter_datacenter['use_sub_folder']
@@ -155,7 +147,6 @@ module VSphereCloud
           optional('cpi_log') => enum(String, Object),
           optional('soap_log') => enum(String, Object),
           optional('mem_overcommit_ratio') => Numeric,
-          optional('copy_disks') => bool,
           'vcenters' => [{
             'host' => String,
             'user' => String,