opennebula 6.10.3 → 6.99.85.pre
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/cloud/CloudClient.rb +3 -3
- data/lib/models/role.rb +349 -823
- data/lib/models/service.rb +156 -80
- data/lib/models/vmrole.rb +703 -0
- data/lib/models/vrrole.rb +284 -0
- data/lib/models.rb +3 -1
- data/lib/opennebula/acl.rb +1 -1
- data/lib/opennebula/acl_pool.rb +1 -1
- data/lib/opennebula/backupjob.rb +1 -1
- data/lib/opennebula/backupjob_pool.rb +1 -1
- data/lib/opennebula/client.rb +1 -1
- data/lib/opennebula/cluster.rb +45 -2
- data/lib/opennebula/cluster_pool.rb +1 -1
- data/lib/opennebula/datastore.rb +1 -1
- data/lib/opennebula/datastore_pool.rb +1 -1
- data/lib/opennebula/document.rb +1 -1
- data/lib/opennebula/document_json.rb +1 -1
- data/lib/opennebula/document_pool.rb +1 -1
- data/lib/opennebula/document_pool_json.rb +1 -1
- data/lib/opennebula/error.rb +1 -1
- data/lib/opennebula/flow/grammar.rb +1 -1
- data/lib/opennebula/flow/service_pool.rb +1 -1
- data/lib/opennebula/flow/service_template.rb +353 -97
- data/lib/opennebula/flow/service_template_ext.rb +3 -3
- data/lib/opennebula/flow/service_template_pool.rb +1 -1
- data/lib/opennebula/flow/validator.rb +458 -410
- data/lib/opennebula/flow.rb +1 -1
- data/lib/opennebula/group.rb +1 -1
- data/lib/opennebula/group_pool.rb +1 -1
- data/lib/opennebula/hook.rb +1 -1
- data/lib/opennebula/hook_log.rb +1 -1
- data/lib/opennebula/hook_pool.rb +1 -1
- data/lib/opennebula/host.rb +1 -60
- data/lib/opennebula/host_pool.rb +1 -1
- data/lib/opennebula/image.rb +1 -1
- data/lib/opennebula/image_pool.rb +1 -1
- data/lib/opennebula/ldap_auth.rb +1 -1
- data/lib/opennebula/ldap_auth_spec.rb +1 -1
- data/lib/opennebula/lockable_ext.rb +1 -1
- data/lib/opennebula/marketplace.rb +1 -1
- data/lib/opennebula/marketplace_pool.rb +1 -1
- data/lib/opennebula/marketplaceapp.rb +1 -1
- data/lib/opennebula/marketplaceapp_ext.rb +14 -211
- data/lib/opennebula/marketplaceapp_pool.rb +1 -1
- data/lib/opennebula/oneflow_client.rb +11 -9
- data/lib/opennebula/pool.rb +1 -1
- data/lib/opennebula/pool_element.rb +1 -1
- data/lib/opennebula/security_group.rb +1 -1
- data/lib/opennebula/security_group_pool.rb +1 -1
- data/lib/opennebula/server_cipher_auth.rb +1 -1
- data/lib/opennebula/server_x509_auth.rb +1 -1
- data/lib/opennebula/ssh_auth.rb +1 -1
- data/lib/opennebula/system.rb +1 -1
- data/lib/opennebula/template.rb +1 -1
- data/lib/opennebula/template_ext.rb +1 -1
- data/lib/opennebula/template_pool.rb +1 -1
- data/lib/opennebula/user.rb +1 -1
- data/lib/opennebula/user_pool.rb +1 -1
- data/lib/opennebula/utils.rb +2 -2
- data/lib/opennebula/vdc.rb +1 -1
- data/lib/opennebula/vdc_pool.rb +1 -1
- data/lib/opennebula/virtual_machine.rb +3 -12
- data/lib/opennebula/virtual_machine_ext.rb +2 -31
- data/lib/opennebula/virtual_machine_pool.rb +1 -1
- data/lib/opennebula/virtual_network.rb +1 -1
- data/lib/opennebula/virtual_network_pool.rb +1 -1
- data/lib/opennebula/virtual_router.rb +1 -1
- data/lib/opennebula/virtual_router_pool.rb +1 -1
- data/lib/opennebula/vm_group.rb +1 -1
- data/lib/opennebula/vm_group_pool.rb +1 -1
- data/lib/opennebula/vntemplate.rb +1 -1
- data/lib/opennebula/vntemplate_pool.rb +1 -1
- data/lib/opennebula/wait_ext.rb +1 -1
- data/lib/opennebula/x509_auth.rb +1 -1
- data/lib/opennebula/xml_element.rb +2 -2
- data/lib/opennebula/xml_pool.rb +1 -1
- data/lib/opennebula/xml_utils.rb +1 -1
- data/lib/opennebula/zone.rb +1 -1
- data/lib/opennebula/zone_pool.rb +1 -1
- data/lib/opennebula.rb +2 -2
- metadata +6 -67
- data/lib/ActionManager.rb +0 -280
- data/lib/CommandManager.rb +0 -328
- data/lib/DriverExecHelper.rb +0 -213
- data/lib/HostSyncManager.rb +0 -111
- data/lib/OpenNebulaDriver.rb +0 -223
- data/lib/VirtualMachineDriver.rb +0 -404
- data/lib/datacenter.rb +0 -1319
- data/lib/datastore.rb +0 -1049
- data/lib/distributed_firewall.rb +0 -293
- data/lib/file_helper.rb +0 -374
- data/lib/host.rb +0 -1518
- data/lib/logical_port.rb +0 -50
- data/lib/logical_switch.rb +0 -77
- data/lib/memoize.rb +0 -74
- data/lib/network.rb +0 -705
- data/lib/nsx_client.rb +0 -157
- data/lib/nsx_component.rb +0 -28
- data/lib/nsx_constants.rb +0 -162
- data/lib/nsx_driver.rb +0 -91
- data/lib/nsx_error.rb +0 -77
- data/lib/nsx_rule.rb +0 -206
- data/lib/nsxt_client.rb +0 -189
- data/lib/nsxt_dfw.rb +0 -196
- data/lib/nsxt_logical_port.rb +0 -94
- data/lib/nsxt_rule.rb +0 -188
- data/lib/nsxt_tz.rb +0 -38
- data/lib/nsxv_client.rb +0 -189
- data/lib/nsxv_dfw.rb +0 -202
- data/lib/nsxv_logical_port.rb +0 -107
- data/lib/nsxv_rule.rb +0 -172
- data/lib/nsxv_tz.rb +0 -41
- data/lib/opaque_network.rb +0 -134
- data/lib/rest_client.rb +0 -191
- data/lib/scripts_common.rb +0 -176
- data/lib/transport_zone.rb +0 -43
- data/lib/vcenter_driver.rb +0 -152
- data/lib/vcenter_importer.rb +0 -626
- data/lib/vi_client.rb +0 -273
- data/lib/vi_helper.rb +0 -328
- data/lib/virtual_machine.rb +0 -3574
- data/lib/virtual_wire.rb +0 -158
- data/lib/vm_device.rb +0 -80
- data/lib/vm_disk.rb +0 -202
- data/lib/vm_folder.rb +0 -69
- data/lib/vm_helper.rb +0 -30
- data/lib/vm_monitor.rb +0 -305
- data/lib/vm_nic.rb +0 -70
- data/lib/vm_template.rb +0 -2112
- data/lib/vmm_importer.rb +0 -165
data/lib/virtual_machine.rb
DELETED
@@ -1,3574 +0,0 @@
|
|
1
|
-
# -------------------------------------------------------------------------- #
|
2
|
-
# Copyright 2002-2024, OpenNebula Project, OpenNebula Systems #
|
3
|
-
# #
|
4
|
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
5
|
-
# not use this file except in compliance with the License. You may obtain #
|
6
|
-
# a copy of the License at #
|
7
|
-
# #
|
8
|
-
# http://www.apache.org/licenses/LICENSE-2.0 #
|
9
|
-
# #
|
10
|
-
# Unless required by applicable law or agreed to in writing, software #
|
11
|
-
# distributed under the License is distributed on an "AS IS" BASIS, #
|
12
|
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
13
|
-
# See the License for the specific language governing permissions and #
|
14
|
-
# limitations under the License. #
|
15
|
-
#--------------------------------------------------------------------------- #
|
16
|
-
module VCenterDriver
|
17
|
-
|
18
|
-
ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION)
|
19
|
-
|
20
|
-
if !ONE_LOCATION
|
21
|
-
unless defined?(RUBY_LIB_LOCATION)
|
22
|
-
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
|
23
|
-
end
|
24
|
-
unless defined?(GEMS_LOCATION)
|
25
|
-
GEMS_LOCATION = '/usr/share/one/gems'
|
26
|
-
end
|
27
|
-
else
|
28
|
-
unless defined?(RUBY_LIB_LOCATION)
|
29
|
-
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
|
30
|
-
end
|
31
|
-
unless defined?(GEMS_LOCATION)
|
32
|
-
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
|
33
|
-
end
|
34
|
-
end
|
35
|
-
|
36
|
-
# rubocop: disable all
|
37
|
-
# %%RUBYGEMS_SETUP_BEGIN%%
|
38
|
-
if File.directory?(GEMS_LOCATION)
|
39
|
-
real_gems_path = File.realpath(GEMS_LOCATION)
|
40
|
-
if !defined?(Gem) || Gem.path != [real_gems_path]
|
41
|
-
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
|
42
|
-
|
43
|
-
# Suppress warnings from Rubygems
|
44
|
-
# https://github.com/OpenNebula/one/issues/5379
|
45
|
-
begin
|
46
|
-
verb = $VERBOSE
|
47
|
-
$VERBOSE = nil
|
48
|
-
require 'rubygems'
|
49
|
-
Gem.use_paths(real_gems_path)
|
50
|
-
ensure
|
51
|
-
$VERBOSE = verb
|
52
|
-
end
|
53
|
-
end
|
54
|
-
end
|
55
|
-
# %%RUBYGEMS_SETUP_END%%
|
56
|
-
# rubocop: enable all
|
57
|
-
|
58
|
-
$LOAD_PATH << RUBY_LIB_LOCATION
|
59
|
-
|
60
|
-
require 'vm_device'
|
61
|
-
require 'vm_helper'
|
62
|
-
require 'vm_monitor'
|
63
|
-
|
64
|
-
############################################################################
|
65
|
-
# Class VirtualMachine
|
66
|
-
############################################################################
|
67
|
-
class VirtualMachine < VCenterDriver::Template
|
68
|
-
|
69
|
-
# Supported access to VirtualMachineDevice classes:
|
70
|
-
# Example:
|
71
|
-
# Disk
|
72
|
-
# VirtualMachineDevice::Disk
|
73
|
-
# VCenterDriver::VirtualMachine::Disk
|
74
|
-
include VirtualMachineDevice
|
75
|
-
include VirtualMachineHelper
|
76
|
-
include VirtualMachineMonitor
|
77
|
-
|
78
|
-
########################################################################
|
79
|
-
# Virtual Machine main Class
|
80
|
-
########################################################################
|
81
|
-
|
82
|
-
VM_PREFIX_DEFAULT = 'one-$i-'
|
83
|
-
|
84
|
-
POLL_ATTRIBUTE =
|
85
|
-
OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
|
86
|
-
VM_STATE =
|
87
|
-
OpenNebula::VirtualMachine::Driver::VM_STATE
|
88
|
-
|
89
|
-
DNET_CARD =
|
90
|
-
RbVmomi::VIM::VirtualEthernetCardDistributedVirtualPortBackingInfo
|
91
|
-
NET_CARD =
|
92
|
-
RbVmomi::VIM::VirtualEthernetCardNetworkBackingInfo
|
93
|
-
OPAQUE_CARD =
|
94
|
-
RbVmomi::VIM::VirtualEthernetCardOpaqueNetworkBackingInfo
|
95
|
-
|
96
|
-
VM_SHUTDOWN_TIMEOUT = 600 # 10 minutes til poweroff hard
|
97
|
-
|
98
|
-
attr_accessor :item, :vm_id
|
99
|
-
|
100
|
-
attr_accessor :vm_info
|
101
|
-
|
102
|
-
include Memoize
|
103
|
-
|
104
|
-
def initialize(vi_client, ref, one_id)
|
105
|
-
if ref
|
106
|
-
ref = VCenterDriver::VIHelper.get_deploy_id(ref)
|
107
|
-
@item = RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref)
|
108
|
-
check_item(@item, RbVmomi::VIM::VirtualMachine)
|
109
|
-
end
|
110
|
-
|
111
|
-
super(@item, vi_client)
|
112
|
-
|
113
|
-
@vi_client = vi_client
|
114
|
-
@vm_id = one_id
|
115
|
-
@locking = true
|
116
|
-
@vm_info = nil
|
117
|
-
@disks = {}
|
118
|
-
@nics = { :macs => {} }
|
119
|
-
end
|
120
|
-
|
121
|
-
########################################################################
|
122
|
-
########################################################################
|
123
|
-
|
124
|
-
# Attributes that must be defined when the VM does not exist in vCenter
|
125
|
-
attr_accessor :vi_client
|
126
|
-
|
127
|
-
# these have their own getter (if they aren't set, we can set them
|
128
|
-
# dynamically)
|
129
|
-
attr_writer :one_item
|
130
|
-
attr_writer :host
|
131
|
-
attr_writer :target_ds_ref
|
132
|
-
|
133
|
-
########################################################################
|
134
|
-
########################################################################
|
135
|
-
|
136
|
-
# The OpenNebula VM
|
137
|
-
# @return OpenNebula::VirtualMachine or XMLElement
|
138
|
-
def one_item
|
139
|
-
unless @one_item
|
140
|
-
|
141
|
-
if @vm_id == -1
|
142
|
-
raise 'VCenterDriver::Virtualmachine: '\
|
143
|
-
'OpenNebula ID is mandatory for this vm!'
|
144
|
-
end
|
145
|
-
|
146
|
-
@one_item =
|
147
|
-
VIHelper
|
148
|
-
.one_item(
|
149
|
-
OpenNebula::VirtualMachine,
|
150
|
-
@vm_id
|
151
|
-
)
|
152
|
-
end
|
153
|
-
|
154
|
-
@one_item
|
155
|
-
end
|
156
|
-
|
157
|
-
# set the vmware item directly to the vm
|
158
|
-
def item_update(item)
|
159
|
-
@item = item
|
160
|
-
end
|
161
|
-
|
162
|
-
def disk_real_path(disk, disk_id)
|
163
|
-
volatile = disk['TYPE'] == 'fs'
|
164
|
-
|
165
|
-
if volatile
|
166
|
-
dir = disk['VCENTER_DS_VOLATILE_DIR'] || 'one-volatile'
|
167
|
-
img_path = "#{dir}/#{@vm_id}/one-#{@vm_id}-#{disk_id}.vmdk"
|
168
|
-
else
|
169
|
-
source = disk['SOURCE'].gsub('%20', ' ')
|
170
|
-
folder = File.dirname(source)
|
171
|
-
ext = File.extname(source)
|
172
|
-
file = File.basename(source, ext)
|
173
|
-
|
174
|
-
img_path = "#{folder}/#{file}-#{@vm_id}-#{disk_id}#{ext}"
|
175
|
-
end
|
176
|
-
|
177
|
-
img_path
|
178
|
-
end
|
179
|
-
|
180
|
-
# The OpenNebula host
|
181
|
-
# @return OpenNebula::Host or XMLElement
|
182
|
-
def host
|
183
|
-
if @host.nil?
|
184
|
-
if one_item.nil?
|
185
|
-
raise "'one_item' must be previously set to be able to " <<
|
186
|
-
'access the OpenNebula host.'
|
187
|
-
end
|
188
|
-
|
189
|
-
host_id = one_item['HISTORY_RECORDS/HISTORY[last()]/HID']
|
190
|
-
raise 'No valid host_id found.' if host_id.nil?
|
191
|
-
|
192
|
-
@host = VIHelper.one_item(OpenNebula::Host, host_id)
|
193
|
-
end
|
194
|
-
|
195
|
-
@host
|
196
|
-
end
|
197
|
-
|
198
|
-
# Target Datastore VMware reference getter
|
199
|
-
# @return
|
200
|
-
def target_ds_ref
|
201
|
-
if @target_ds_ref.nil?
|
202
|
-
if one_item.nil?
|
203
|
-
raise "'one_item' must be previously set to be able to " <<
|
204
|
-
'access the target Datastore.'
|
205
|
-
end
|
206
|
-
|
207
|
-
target_ds_id = one_item['HISTORY_RECORDS/HISTORY[last()]/DS_ID']
|
208
|
-
raise 'No valid target_ds_id found.' if target_ds_id.nil?
|
209
|
-
|
210
|
-
target_ds =
|
211
|
-
VCenterDriver::VIHelper
|
212
|
-
.one_item(
|
213
|
-
OpenNebula::Datastore,
|
214
|
-
target_ds_id
|
215
|
-
)
|
216
|
-
|
217
|
-
@target_ds_ref = target_ds['TEMPLATE/VCENTER_DS_REF']
|
218
|
-
end
|
219
|
-
|
220
|
-
@target_ds_ref
|
221
|
-
end
|
222
|
-
|
223
|
-
# Get a recommendation from a provided storagepod
|
224
|
-
# Returns the recommended datastore reference
|
225
|
-
def recommended_ds(ds_ref)
|
226
|
-
# Fail if datastore is not a storage pod
|
227
|
-
unless ds_ref.start_with?('group-')
|
228
|
-
raise 'Cannot recommend from a non storagepod reference'
|
229
|
-
end
|
230
|
-
|
231
|
-
# Retrieve information needed to create storage_spec hash
|
232
|
-
storage_manager =
|
233
|
-
vi_client
|
234
|
-
.vim
|
235
|
-
.serviceContent
|
236
|
-
.storageResourceManager
|
237
|
-
vcenter_name = vc_name
|
238
|
-
vc_template =
|
239
|
-
RbVmomi::VIM::VirtualMachine
|
240
|
-
.new(
|
241
|
-
vi_client.vim,
|
242
|
-
get_template_ref
|
243
|
-
)
|
244
|
-
dc = cluster.datacenter
|
245
|
-
vcenter_vm_folder_object = vcenter_folder(vcenter_folder_ref,
|
246
|
-
vc_template, dc)
|
247
|
-
storpod = get_ds(ds_ref)
|
248
|
-
disk_move_type = calculate_disk_move_type(storpod, vc_template,
|
249
|
-
linked_clones)
|
250
|
-
spec_hash = spec_hash_clone(disk_move_type)
|
251
|
-
clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash)
|
252
|
-
|
253
|
-
# Create hash needed to get the recommendation
|
254
|
-
storage_spec = RbVmomi::VIM.StoragePlacementSpec(
|
255
|
-
:type => 'clone',
|
256
|
-
:cloneName => vcenter_name,
|
257
|
-
:folder => vcenter_vm_folder_object,
|
258
|
-
:podSelectionSpec =>
|
259
|
-
RbVmomi::VIM
|
260
|
-
.StorageDrsPodSelectionSpec(
|
261
|
-
:storagePod => storpod
|
262
|
-
),
|
263
|
-
:vm => vc_template,
|
264
|
-
:cloneSpec => clone_spec
|
265
|
-
)
|
266
|
-
|
267
|
-
# Query a storage placement recommendation
|
268
|
-
result = storage_manager
|
269
|
-
.RecommendDatastores(
|
270
|
-
:storageSpec => storage_spec
|
271
|
-
) rescue nil
|
272
|
-
if result.nil?
|
273
|
-
raise 'Could not get placement specification for StoragePod'
|
274
|
-
end
|
275
|
-
|
276
|
-
if !result.respond_to?(:recommendations) ||
|
277
|
-
result.recommendations.empty?
|
278
|
-
raise 'Could not get placement specification for StoragePod'
|
279
|
-
end
|
280
|
-
|
281
|
-
# Return recommended DS reference
|
282
|
-
result.recommendations.first.action.first.destination._ref
|
283
|
-
end
|
284
|
-
|
285
|
-
# Cached cluster
|
286
|
-
# @return ClusterComputeResource
|
287
|
-
def cluster
|
288
|
-
if @cluster.nil?
|
289
|
-
ccr_ref = host['TEMPLATE/VCENTER_CCR_REF']
|
290
|
-
@cluster = ClusterComputeResource.new_from_ref(ccr_ref,
|
291
|
-
vi_client)
|
292
|
-
end
|
293
|
-
|
294
|
-
@cluster
|
295
|
-
end
|
296
|
-
|
297
|
-
########################################################################
|
298
|
-
########################################################################
|
299
|
-
|
300
|
-
# @return Boolean whether the VM exists in vCenter
|
301
|
-
def new?
|
302
|
-
one_item['DEPLOY_ID'].empty?
|
303
|
-
end
|
304
|
-
|
305
|
-
def wild?
|
306
|
-
!(one_item['TEMPLATE/IMPORTED'] &&
|
307
|
-
one_item['TEMPLATE/IMPORTED'] == 'YES').nil?
|
308
|
-
end
|
309
|
-
|
310
|
-
# @return Boolean wheter the vm exists in OpenNebula
|
311
|
-
def one_exist?
|
312
|
-
!@vm_id.nil? && @vm_id != -1
|
313
|
-
end
|
314
|
-
|
315
|
-
# @return String the vm_id stored in vCenter
|
316
|
-
def get_vm_id(vm_pool = nil)
|
317
|
-
if defined?(@vm_id) && @vm_id
|
318
|
-
return @vm_id
|
319
|
-
end
|
320
|
-
|
321
|
-
vm_ref = self['_ref']
|
322
|
-
return unless vm_ref
|
323
|
-
|
324
|
-
vc_uuid = vcenter_instance_uuid
|
325
|
-
|
326
|
-
one_vm =
|
327
|
-
VCenterDriver::VIHelper
|
328
|
-
.find_by_ref(
|
329
|
-
OpenNebula::VirtualMachinePool,
|
330
|
-
'DEPLOY_ID',
|
331
|
-
vm_ref,
|
332
|
-
vc_uuid,
|
333
|
-
vm_pool
|
334
|
-
)
|
335
|
-
return unless one_vm
|
336
|
-
|
337
|
-
@vm_id = one_vm['ID']
|
338
|
-
@vm_id
|
339
|
-
end
|
340
|
-
|
341
|
-
def vcenter_instance_uuid
|
342
|
-
@vi_client.vim.serviceContent.about.instanceUuid
|
343
|
-
end
|
344
|
-
|
345
|
-
def disk_keys
|
346
|
-
unmanaged_keys = {}
|
347
|
-
@item.config.extraConfig.each do |val|
|
348
|
-
u = val[:key].include?('opennebula.disk')
|
349
|
-
m = val[:key].include?('opennebula.mdisk')
|
350
|
-
unmanaged_keys[val[:key]] = val[:value] if u || m
|
351
|
-
end
|
352
|
-
|
353
|
-
unmanaged_keys
|
354
|
-
end
|
355
|
-
|
356
|
-
########################################################################
|
357
|
-
# Getters
|
358
|
-
########################################################################
|
359
|
-
|
360
|
-
# @return RbVmomi::VIM::ResourcePool
|
361
|
-
def resource_pool
|
362
|
-
req_rp = one_item['VCENTER_RESOURCE_POOL'] ||
|
363
|
-
one_item['USER_TEMPLATE/VCENTER_RESOURCE_POOL']
|
364
|
-
|
365
|
-
# Get ref for req_rp
|
366
|
-
rp_list = cluster.get_resource_pool_list
|
367
|
-
req_rp_ref = rp_list.select do |rp|
|
368
|
-
rp[:name].downcase == req_rp.downcase
|
369
|
-
end.first[:ref] rescue nil
|
370
|
-
|
371
|
-
if vi_client.rp_confined?
|
372
|
-
if req_rp_ref && req_rp_ref != vi_client.rp._ref
|
373
|
-
raise 'Available resource pool '\
|
374
|
-
"[#{vi_client.rp.name}] in host"\
|
375
|
-
' does not match requested resource pool'\
|
376
|
-
" [#{req_rp}]"
|
377
|
-
end
|
378
|
-
|
379
|
-
vi_client.rp
|
380
|
-
else
|
381
|
-
if req_rp_ref
|
382
|
-
rps = cluster.resource_pools.select do |r|
|
383
|
-
r._ref == req_rp_ref
|
384
|
-
end
|
385
|
-
|
386
|
-
if rps.empty?
|
387
|
-
raise "No matching resource pool found (#{req_rp})."
|
388
|
-
end
|
389
|
-
|
390
|
-
rps.first
|
391
|
-
else
|
392
|
-
cluster['resourcePool']
|
393
|
-
end
|
394
|
-
end
|
395
|
-
end
|
396
|
-
|
397
|
-
# @return RbVmomi::VIM::Datastore or nil
|
398
|
-
def get_ds(current_ds_ref = nil)
|
399
|
-
if !current_ds_ref
|
400
|
-
current_ds_id =
|
401
|
-
one_item[
|
402
|
-
'HISTORY_RECORDS/HISTORY[last()]/DS_ID'
|
403
|
-
]
|
404
|
-
current_ds = VCenterDriver::VIHelper.one_item(
|
405
|
-
OpenNebula::Datastore, current_ds_id
|
406
|
-
)
|
407
|
-
current_ds_ref = current_ds['TEMPLATE/VCENTER_DS_REF']
|
408
|
-
end
|
409
|
-
|
410
|
-
if current_ds_ref
|
411
|
-
dc = cluster.datacenter
|
412
|
-
|
413
|
-
ds_folder = dc.datastore_folder
|
414
|
-
ds = ds_folder.get(current_ds_ref)
|
415
|
-
ds.item rescue nil
|
416
|
-
|
417
|
-
else
|
418
|
-
nil
|
419
|
-
end
|
420
|
-
end
|
421
|
-
|
422
|
-
# StorageResouceManager reference
|
423
|
-
def storagemanager
|
424
|
-
self['_connection.serviceContent.storageResourceManager']
|
425
|
-
end
|
426
|
-
|
427
|
-
# @return Customization or nil
|
428
|
-
def customization_spec
|
429
|
-
xpath = 'USER_TEMPLATE/VCENTER_CUSTOMIZATION_SPEC'
|
430
|
-
customization_spec = one_item[xpath]
|
431
|
-
|
432
|
-
if customization_spec.nil?
|
433
|
-
return
|
434
|
-
end
|
435
|
-
|
436
|
-
begin
|
437
|
-
custom_spec = vi_client
|
438
|
-
.vim
|
439
|
-
.serviceContent
|
440
|
-
.customizationSpecManager
|
441
|
-
.GetCustomizationSpec(
|
442
|
-
:name => customization_spec
|
443
|
-
)
|
444
|
-
|
445
|
-
unless custom_spec && (spec = custom_spec.spec)
|
446
|
-
raise 'Error getting customization spec'
|
447
|
-
end
|
448
|
-
|
449
|
-
spec
|
450
|
-
rescue StandardError
|
451
|
-
raise "Customization spec '#{customization_spec}' not found"
|
452
|
-
end
|
453
|
-
end
|
454
|
-
|
455
|
-
# @return VCenterDriver::Datastore datastore
|
456
|
-
# where the disk will live under
|
457
|
-
def get_effective_ds(disk)
|
458
|
-
if disk['PERSISTENT'] == 'YES'
|
459
|
-
ds_ref = disk['VCENTER_DS_REF']
|
460
|
-
else
|
461
|
-
ds_ref = target_ds_ref
|
462
|
-
|
463
|
-
if ds_ref.nil?
|
464
|
-
raise 'target_ds_ref must be defined on this object.'
|
465
|
-
end
|
466
|
-
end
|
467
|
-
|
468
|
-
VCenterDriver::Storage.new_from_ref(ds_ref, vi_client)
|
469
|
-
end
|
470
|
-
|
471
|
-
# @return String vcenter name
|
472
|
-
def vc_name
|
473
|
-
vm_prefix = host['TEMPLATE/VM_PREFIX']
|
474
|
-
vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil?
|
475
|
-
|
476
|
-
if !one_item['USER_TEMPLATE/VM_PREFIX'].nil?
|
477
|
-
vm_prefix = one_item['USER_TEMPLATE/VM_PREFIX']
|
478
|
-
end
|
479
|
-
vm_prefix.gsub!('$i', one_item['ID'])
|
480
|
-
|
481
|
-
vm_suffix = ''
|
482
|
-
if !one_item['USER_TEMPLATE/VM_SUFFIX'].nil?
|
483
|
-
vm_suffix = one_item['USER_TEMPLATE/VM_SUFFIX']
|
484
|
-
end
|
485
|
-
vm_suffix.gsub!('$i', one_item['ID'])
|
486
|
-
|
487
|
-
vm_prefix + one_item['NAME'] + vm_suffix
|
488
|
-
end
|
489
|
-
|
490
|
-
# @return vCenter Tags
|
491
|
-
def vcenter_tags
|
492
|
-
one_item.info if one_item.instance_of?(OpenNebula::VirtualMachine)
|
493
|
-
one_item.retrieve_xmlelements('USER_TEMPLATE/VCENTER_TAG')
|
494
|
-
end
|
495
|
-
|
496
|
-
# @return if has vCenter Tags
|
497
|
-
def vcenter_tags?
|
498
|
-
!vcenter_tags.empty?
|
499
|
-
end
|
500
|
-
|
501
|
-
# @return if has cpuHotAddEnabled
|
502
|
-
def cpu_hot_add_enabled?
|
503
|
-
one_item.info if one_item.instance_of?(
|
504
|
-
OpenNebula::VirtualMachine
|
505
|
-
)
|
506
|
-
|
507
|
-
if one_item['USER_TEMPLATE/HOT_RESIZE/CPU_HOT_ADD_ENABLED'].nil?
|
508
|
-
return false
|
509
|
-
end
|
510
|
-
|
511
|
-
one_item[
|
512
|
-
'USER_TEMPLATE/HOT_RESIZE/CPU_HOT_ADD_ENABLED'
|
513
|
-
] == 'YES'
|
514
|
-
end
|
515
|
-
|
516
|
-
# @return if has memoryHotAddEnabled
|
517
|
-
def memory_hot_add_enabled?
|
518
|
-
one_item.info if one_item.instance_of?(
|
519
|
-
OpenNebula::VirtualMachine
|
520
|
-
)
|
521
|
-
|
522
|
-
if one_item['USER_TEMPLATE/HOT_RESIZE/MEMORY_HOT_ADD_ENABLED'].nil?
|
523
|
-
return false
|
524
|
-
end
|
525
|
-
|
526
|
-
one_item[
|
527
|
-
'USER_TEMPLATE/HOT_RESIZE/MEMORY_HOT_ADD_ENABLED'
|
528
|
-
] == 'YES'
|
529
|
-
end
|
530
|
-
|
531
|
-
########################################################################
|
532
|
-
# Create and reconfigure VM related methods
|
533
|
-
########################################################################
|
534
|
-
|
535
|
-
# This function permit get a folder by name if exist
|
536
|
-
# or create it if not exist
|
537
|
-
def find_or_create_folder(folder_root, name)
|
538
|
-
folder_root.childEntity.each do |child|
|
539
|
-
if child.instance_of?(RbVmomi::VIM::Folder) &&
|
540
|
-
child.name == name
|
541
|
-
return child
|
542
|
-
end
|
543
|
-
end
|
544
|
-
|
545
|
-
folder_root.CreateFolder(:name => name)
|
546
|
-
end
|
547
|
-
|
548
|
-
# This function creates a new VM from the
|
549
|
-
# driver_action XML and returns the
|
550
|
-
# VMware ref
|
551
|
-
# @param drv_action XML representing the deploy action
|
552
|
-
# @return String vmware ref
|
553
|
-
def clone_vm(drv_action)
|
554
|
-
vcenter_name = vc_name
|
555
|
-
|
556
|
-
dc = cluster.datacenter
|
557
|
-
|
558
|
-
vcenter_vm_folder = drv_action['USER_TEMPLATE/VCENTER_VM_FOLDER']
|
559
|
-
|
560
|
-
if !vcenter_vm_folder.nil? && !vcenter_vm_folder.empty?
|
561
|
-
vcenter_vm_folder =
|
562
|
-
vcenter_folder_name(vcenter_vm_folder, drv_action)
|
563
|
-
|
564
|
-
vcenter_vm_folder_object =
|
565
|
-
dc.item.find_folder(vcenter_vm_folder)
|
566
|
-
|
567
|
-
if vcenter_vm_folder_object.nil?
|
568
|
-
begin
|
569
|
-
dc.item.vmFolder.CreateFolder(
|
570
|
-
:name => vcenter_vm_folder
|
571
|
-
)
|
572
|
-
rescue StandardError => e
|
573
|
-
error_message = e.message
|
574
|
-
if VCenterDriver::CONFIG[:debug_information]
|
575
|
-
error_message += ' ' + e.backtrace
|
576
|
-
end
|
577
|
-
raise 'Cannot create Folder in vCenter:'\
|
578
|
-
"#{error_message}"
|
579
|
-
end
|
580
|
-
end
|
581
|
-
end
|
582
|
-
|
583
|
-
vcenter_vm_folder = drv_action['USER_TEMPLATE/VCENTER_VM_FOLDER']
|
584
|
-
|
585
|
-
if !vcenter_vm_folder.nil? && !vcenter_vm_folder.empty?
|
586
|
-
vcenter_vm_folder =
|
587
|
-
vcenter_folder_name(vcenter_vm_folder, drv_action)
|
588
|
-
|
589
|
-
vcenter_vm_folder_object =
|
590
|
-
dc.item.find_folder(vcenter_vm_folder)
|
591
|
-
|
592
|
-
if vcenter_vm_folder_object.nil?
|
593
|
-
begin
|
594
|
-
vcenter_vm_folder_list = vcenter_vm_folder.split('/')
|
595
|
-
folder_root = dc.item.vmFolder
|
596
|
-
|
597
|
-
vcenter_vm_folder_list.each do |folder_name|
|
598
|
-
folder_root = find_or_create_folder(
|
599
|
-
folder_root,
|
600
|
-
folder_name
|
601
|
-
)
|
602
|
-
end
|
603
|
-
rescue StandardError => e
|
604
|
-
error_message = e.message
|
605
|
-
if VCenterDriver::CONFIG[:debug_information]
|
606
|
-
error_message += ' ' + e.backtrace
|
607
|
-
end
|
608
|
-
|
609
|
-
raise 'Cannot create Folder in vCenter: '\
|
610
|
-
"#{error_message}"
|
611
|
-
end
|
612
|
-
end
|
613
|
-
end
|
614
|
-
|
615
|
-
vc_template_ref = drv_action['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
|
616
|
-
vc_template = RbVmomi::VIM::VirtualMachine(@vi_client.vim,
|
617
|
-
vc_template_ref)
|
618
|
-
|
619
|
-
ds = get_ds
|
620
|
-
|
621
|
-
asking_for_linked_clones =
|
622
|
-
drv_action[
|
623
|
-
'USER_TEMPLATE/VCENTER_LINKED_CLONES'
|
624
|
-
]
|
625
|
-
disk_move_type = calculate_disk_move_type(ds,
|
626
|
-
vc_template,
|
627
|
-
asking_for_linked_clones)
|
628
|
-
|
629
|
-
spec_hash = spec_hash_clone(disk_move_type)
|
630
|
-
|
631
|
-
clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash)
|
632
|
-
|
633
|
-
vcenter_vm_folder_object =
|
634
|
-
vcenter_folder(
|
635
|
-
vcenter_vm_folder,
|
636
|
-
vc_template,
|
637
|
-
dc
|
638
|
-
)
|
639
|
-
|
640
|
-
if ds.instance_of? RbVmomi::VIM::StoragePod
|
641
|
-
# VM is cloned using Storage Resource Manager for StoragePods
|
642
|
-
begin
|
643
|
-
opts = {
|
644
|
-
:vc_template => vc_template,
|
645
|
-
:vcenter_name => vcenter_name,
|
646
|
-
:clone_spec => clone_spec,
|
647
|
-
:storpod => ds,
|
648
|
-
:vcenter_vm_folder_object => vcenter_vm_folder_object,
|
649
|
-
:dc => dc
|
650
|
-
}
|
651
|
-
|
652
|
-
vm = storagepod_clonevm_task(opts)
|
653
|
-
rescue StandardError => e
|
654
|
-
error =
|
655
|
-
"Cannot clone VM Template to StoragePod: #{e.message}."
|
656
|
-
|
657
|
-
if VCenterDriver::CONFIG[:debug_information]
|
658
|
-
error += "\n\n#{e.backtrace}"
|
659
|
-
end
|
660
|
-
|
661
|
-
raise error
|
662
|
-
end
|
663
|
-
else
|
664
|
-
vm = nil
|
665
|
-
begin
|
666
|
-
vm = vc_template.CloneVM_Task(
|
667
|
-
:folder => vcenter_vm_folder_object,
|
668
|
-
:name => vcenter_name,
|
669
|
-
:spec => clone_spec
|
670
|
-
).wait_for_completion
|
671
|
-
rescue StandardError => e
|
672
|
-
if !e.message.start_with?('DuplicateName')
|
673
|
-
raise "Cannot clone VM Template: #{e.message}"
|
674
|
-
end
|
675
|
-
|
676
|
-
vm_folder = dc.vm_folder
|
677
|
-
vm_folder.fetch!
|
678
|
-
vm = vm_folder.items
|
679
|
-
.select {|_k, v| v.item.name == vcenter_name }
|
680
|
-
.values.first.item rescue nil
|
681
|
-
|
682
|
-
raise "Cannot clone VM Template: #{e.message}" unless vm
|
683
|
-
|
684
|
-
# Detach all persistent disks to
|
685
|
-
# avoid accidental destruction
|
686
|
-
detach_persistent_disks(vm)
|
687
|
-
|
688
|
-
vm.Destroy_Task.wait_for_completion
|
689
|
-
vm = vc_template.CloneVM_Task(
|
690
|
-
:folder => vcenter_vm_folder_object,
|
691
|
-
:name => vcenter_name,
|
692
|
-
:spec => clone_spec
|
693
|
-
).wait_for_completion
|
694
|
-
end
|
695
|
-
end
|
696
|
-
|
697
|
-
# @item is populated
|
698
|
-
@item = vm
|
699
|
-
|
700
|
-
reference_unmanaged_devices(vc_template_ref)
|
701
|
-
|
702
|
-
self['_ref']
|
703
|
-
end
|
704
|
-
|
705
|
-
# This function clone a VM Template to StoragePod
|
706
|
-
# @param opts HASH with all parameters need it to clone
|
707
|
-
# opts = {
|
708
|
-
# :vc_template => vc_template,
|
709
|
-
# :vcenter_name => vcenter_name,
|
710
|
-
# :clone_spec => clone_spec,
|
711
|
-
# :storpod => ds,
|
712
|
-
# :vcenter_vm_folder_object => vcenter_vm_folder_object,
|
713
|
-
# :dc => dc
|
714
|
-
# }
|
715
|
-
# @return vm (VirtualMachine)
|
716
|
-
def storagepod_clonevm_task(opts)
|
717
|
-
vc_template = opts[:vc_template]
|
718
|
-
vcenter_name = opts[:vcenter_name]
|
719
|
-
clone_spec = opts[:clone_spec]
|
720
|
-
storpod = opts[:storpod]
|
721
|
-
vcenter_vm_folder_object = opts[:vcenter_vm_folder_object]
|
722
|
-
dc = opts[:dc]
|
723
|
-
|
724
|
-
storage_manager =
|
725
|
-
vc_template
|
726
|
-
._connection
|
727
|
-
.serviceContent
|
728
|
-
.storageResourceManager
|
729
|
-
|
730
|
-
storage_spec = RbVmomi::VIM.StoragePlacementSpec(
|
731
|
-
:type => 'clone',
|
732
|
-
:cloneName => vcenter_name,
|
733
|
-
:folder => vcenter_vm_folder_object,
|
734
|
-
:podSelectionSpec =>
|
735
|
-
RbVmomi::VIM
|
736
|
-
.StorageDrsPodSelectionSpec(
|
737
|
-
:storagePod => storpod
|
738
|
-
),
|
739
|
-
:vm => vc_template,
|
740
|
-
:cloneSpec => clone_spec
|
741
|
-
)
|
742
|
-
|
743
|
-
# Query a storage placement recommendation
|
744
|
-
result = storage_manager
|
745
|
-
.RecommendDatastores(
|
746
|
-
:storageSpec => storage_spec
|
747
|
-
) rescue nil
|
748
|
-
|
749
|
-
if result.nil?
|
750
|
-
raise 'Could not get placement specification for StoragePod'
|
751
|
-
end
|
752
|
-
|
753
|
-
if !result
|
754
|
-
.respond_to?(
|
755
|
-
:recommendations
|
756
|
-
) || result.recommendations.empty?
|
757
|
-
raise 'Could not get placement specification for StoragePod'
|
758
|
-
end
|
759
|
-
|
760
|
-
# Get recommendation key to be applied
|
761
|
-
key = result.recommendations.first.key ||= ''
|
762
|
-
if key.empty?
|
763
|
-
raise 'Missing Datastore recommendation for StoragePod'
|
764
|
-
end
|
765
|
-
|
766
|
-
begin
|
767
|
-
apply_sr = storage_manager
|
768
|
-
.ApplyStorageDrsRecommendation_Task(:key => [key])
|
769
|
-
.wait_for_completion
|
770
|
-
apply_sr.vm
|
771
|
-
rescue StandardError => e
|
772
|
-
if !e.message.start_with?('DuplicateName')
|
773
|
-
raise 'Cannot clone VM Template: '\
|
774
|
-
"#{e.message}\n#{e.backtrace}"
|
775
|
-
end
|
776
|
-
|
777
|
-
# The VM already exists, try to find the vm
|
778
|
-
vm_folder = dc.vm_folder
|
779
|
-
vm_folder.fetch!
|
780
|
-
vm = vm_folder.items
|
781
|
-
.select {|_k, v| v.item.name == vcenter_name }
|
782
|
-
.values.first.item rescue nil
|
783
|
-
|
784
|
-
if vm
|
785
|
-
|
786
|
-
begin
|
787
|
-
# Detach all persistent disks to
|
788
|
-
# avoid accidental destruction
|
789
|
-
detach_persistent_disks(vm)
|
790
|
-
|
791
|
-
# Destroy the VM with any disks still attached to it
|
792
|
-
vm.Destroy_Task.wait_for_completion
|
793
|
-
|
794
|
-
# Query a storage placement recommendation
|
795
|
-
result =
|
796
|
-
storage_manager
|
797
|
-
.RecommendDatastores(
|
798
|
-
:storageSpec => storage_spec
|
799
|
-
) rescue nil
|
800
|
-
|
801
|
-
if result.nil?
|
802
|
-
raise 'Could not get placement specification '\
|
803
|
-
'for StoragePod'
|
804
|
-
end
|
805
|
-
|
806
|
-
if !result
|
807
|
-
.respond_to?(
|
808
|
-
:recommendations
|
809
|
-
) ||
|
810
|
-
result
|
811
|
-
.recommendations.empty?
|
812
|
-
raise 'Could not get placement '\
|
813
|
-
'specification for StoragePod'
|
814
|
-
end
|
815
|
-
|
816
|
-
# Get recommendation key to be applied
|
817
|
-
key = result.recommendations.first.key ||= ''
|
818
|
-
if key.empty?
|
819
|
-
raise 'Missing Datastore recommendation '\
|
820
|
-
' for StoragePod'
|
821
|
-
end
|
822
|
-
|
823
|
-
apply_sr =
|
824
|
-
storage_manager
|
825
|
-
.ApplyStorageDrsRecommendation_Task(
|
826
|
-
:key => [key]
|
827
|
-
)
|
828
|
-
.wait_for_completion
|
829
|
-
apply_sr.vm
|
830
|
-
rescue StandardError => e
|
831
|
-
raise 'Failure applying recommendation while '\
|
832
|
-
"cloning VM: #{e.message}"
|
833
|
-
end
|
834
|
-
end
|
835
|
-
end
|
836
|
-
end
|
837
|
-
|
838
|
-
# Calculates how to move disk backinggs from the
|
839
|
-
# vCenter VM Template moref
|
840
|
-
def calculate_disk_move_type(ds, vc_template, use_linked_clones)
|
841
|
-
# Default disk move type (Full Clone)
|
842
|
-
disk_move_type = :moveAllDiskBackingsAndDisallowSharing
|
843
|
-
|
844
|
-
if ds.instance_of?(RbVmomi::VIM::Datastore) &&
|
845
|
-
use_linked_clones &&
|
846
|
-
use_linked_clones.downcase == 'yes'
|
847
|
-
|
848
|
-
# Check if all disks in template has delta disks
|
849
|
-
disks = vc_template.config
|
850
|
-
.hardware
|
851
|
-
.device
|
852
|
-
.grep(RbVmomi::VIM::VirtualDisk)
|
853
|
-
|
854
|
-
disks_no_delta = disks.select do |d|
|
855
|
-
d.backing.parent.nil?
|
856
|
-
end
|
857
|
-
|
858
|
-
# Can use linked clones if all disks have delta disks
|
859
|
-
if disks_no_delta.empty?
|
860
|
-
disk_move_type = :moveChildMostDiskBacking
|
861
|
-
end
|
862
|
-
end
|
863
|
-
|
864
|
-
disk_move_type
|
865
|
-
end
|
866
|
-
|
867
|
-
# @return String vcenter folder name
|
868
|
-
def vcenter_folder_name(vm_folder_name, drv_action)
|
869
|
-
uname = drv_action['UNAME']
|
870
|
-
gname = drv_action['GNAME']
|
871
|
-
|
872
|
-
vm_folder_name.gsub!('$uname', uname)
|
873
|
-
vm_folder_name.gsub!('$gname', gname)
|
874
|
-
|
875
|
-
vm_folder_name
|
876
|
-
end
|
877
|
-
|
878
|
-
# Get vcenter folder object from the reference
|
879
|
-
# If folder is not found, the folder of the
|
880
|
-
# vCenter VM Template is returned
|
881
|
-
def vcenter_folder(vcenter_vm_folder, vc_template, dc)
|
882
|
-
vcenter_vm_folder_object = nil
|
883
|
-
|
884
|
-
if !vcenter_vm_folder.nil? && !vcenter_vm_folder.empty?
|
885
|
-
vcenter_vm_folder_object =
|
886
|
-
dc
|
887
|
-
.item
|
888
|
-
.find_folder(
|
889
|
-
vcenter_vm_folder
|
890
|
-
)
|
891
|
-
end
|
892
|
-
|
893
|
-
vcenter_vm_folder_object =
|
894
|
-
vc_template
|
895
|
-
.parent if vcenter_vm_folder_object.nil?
|
896
|
-
vcenter_vm_folder_object
|
897
|
-
end
|
898
|
-
|
899
|
-
# @return clone parameters spec hash
|
900
|
-
def spec_hash_clone(disk_move_type)
|
901
|
-
# Relocate spec
|
902
|
-
relocate_spec_params = {}
|
903
|
-
|
904
|
-
relocate_spec_params[:pool] = resource_pool
|
905
|
-
relocate_spec_params[:diskMoveType] = disk_move_type
|
906
|
-
|
907
|
-
ds = get_ds
|
908
|
-
|
909
|
-
relocate_spec_params[:datastore] =
|
910
|
-
ds if ds.instance_of? RbVmomi::VIM::Datastore
|
911
|
-
|
912
|
-
relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
|
913
|
-
relocate_spec_params
|
914
|
-
)
|
915
|
-
|
916
|
-
# Running flag - prevents spurious poweroff states in the VM
|
917
|
-
running_flag = [{ :key => 'opennebula.vm.running', :value => 'no' }]
|
918
|
-
|
919
|
-
running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
|
920
|
-
{ :extraConfig => running_flag }
|
921
|
-
)
|
922
|
-
|
923
|
-
clone_parameters = {
|
924
|
-
:location => relocate_spec,
|
925
|
-
:powerOn => false,
|
926
|
-
:template => false,
|
927
|
-
:config => running_flag_spec
|
928
|
-
}
|
929
|
-
|
930
|
-
cs = customization_spec
|
931
|
-
clone_parameters[:customization] = cs if cs
|
932
|
-
|
933
|
-
clone_parameters
|
934
|
-
end
|
935
|
-
|
936
|
-
########################################################################
|
937
|
-
# VirtualMachine Resource model methods
|
938
|
-
########################################################################
|
939
|
-
|
940
|
-
#
|
941
|
-
# gets the representation of the nics
|
942
|
-
#
|
943
|
-
# @return [Hash(String => self.Nic)
|
944
|
-
def nics
|
945
|
-
if !@nics[:macs].empty?
|
946
|
-
return @nics.reject {|k| k == :macs }
|
947
|
-
end
|
948
|
-
|
949
|
-
info_nics
|
950
|
-
end
|
951
|
-
|
952
|
-
# gets the representation of the disks
|
953
|
-
#
|
954
|
-
# @return [Hash(String => self.Disk)
|
955
|
-
def disks
|
956
|
-
return @disks unless @disks.empty?
|
957
|
-
|
958
|
-
info_disks
|
959
|
-
end
|
960
|
-
|
961
|
-
# iterate over the nics model
|
962
|
-
#
|
963
|
-
# @param condition[Symbol] selects nics that matches certain condition
|
964
|
-
# see Self.Nic|Resource class to see some methods: :exits?, :one?...
|
965
|
-
#
|
966
|
-
# @return yield the nic
|
967
|
-
def nics_each(condition)
|
968
|
-
res = []
|
969
|
-
nics.each do |_id, nic|
|
970
|
-
next unless nic.method(condition).call
|
971
|
-
|
972
|
-
yield nic if block_given?
|
973
|
-
|
974
|
-
res << nic
|
975
|
-
end
|
976
|
-
|
977
|
-
res
|
978
|
-
end
|
979
|
-
|
980
|
-
# iterate over the disks model
|
981
|
-
#
|
982
|
-
# @param condition[Symbol] selects disks that matches certain condition
|
983
|
-
# see Self.Disk|Resource class to see some methods: :exits?, :one?...
|
984
|
-
#
|
985
|
-
# @return yield the disk
|
986
|
-
def disks_each(condition)
|
987
|
-
res = []
|
988
|
-
disks.each do |_id, disk|
|
989
|
-
next unless disk.method(condition).call
|
990
|
-
|
991
|
-
yield disk if block_given?
|
992
|
-
|
993
|
-
res << disk
|
994
|
-
end
|
995
|
-
|
996
|
-
res
|
997
|
-
end
|
998
|
-
|
999
|
-
def disks_synced?
|
1000
|
-
disks_each(:unsynced?) { return false }
|
1001
|
-
|
1002
|
-
true
|
1003
|
-
end
|
1004
|
-
|
1005
|
-
def template_ref_get
|
1006
|
-
one_item['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
|
1007
|
-
end
|
1008
|
-
|
1009
|
-
def vcenter_folder_ref
|
1010
|
-
one_item['USER_TEMPLATE/VCENTER_VM_FOLDER']
|
1011
|
-
end
|
1012
|
-
|
1013
|
-
# Queries to OpenNebula the machine disks xml representation
|
1014
|
-
def one_disks_list
|
1015
|
-
one_item.info if one_item.instance_of?(OpenNebula::VirtualMachine)
|
1016
|
-
one_item.retrieve_xmlelements('TEMPLATE/DISK')
|
1017
|
-
end
|
1018
|
-
|
1019
|
-
# Queries to OpenNebula the machine nics xml representation
|
1020
|
-
def one_nics_get
|
1021
|
-
one_item.info if one_item.instance_of?(OpenNebula::VirtualMachine)
|
1022
|
-
one_item.retrieve_xmlelements('TEMPLATE/NIC')
|
1023
|
-
end
|
1024
|
-
|
1025
|
-
def linked_clones
|
1026
|
-
one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES']
|
1027
|
-
end
|
1028
|
-
|
1029
|
-
# perform a query to vCenter asking for the OpenNebula disk
|
1030
|
-
#
|
1031
|
-
# @param one_disk [XMLelement] The OpenNebula object
|
1032
|
-
# representation of the disk
|
1033
|
-
# @param keys [Hash (String => String)] Hashmap with
|
1034
|
-
# the unmanaged keys
|
1035
|
-
# @param vc_disks [Array (vcenter_disks)] Array of
|
1036
|
-
# the machine real disks
|
1037
|
-
# See vcenter_disks_get method
|
1038
|
-
#
|
1039
|
-
# @return [vCenter_disk] the proper disk
|
1040
|
-
def query_disk(one_disk, keys, vc_disks)
|
1041
|
-
index = one_disk['DISK_ID']
|
1042
|
-
unmanaged = "opennebula.disk.#{index}"
|
1043
|
-
managed = "opennebula.mdisk.#{index}"
|
1044
|
-
|
1045
|
-
if keys[managed]
|
1046
|
-
key = keys[managed].to_i
|
1047
|
-
elsif keys[unmanaged]
|
1048
|
-
key = keys[unmanaged].to_i
|
1049
|
-
end
|
1050
|
-
|
1051
|
-
if key
|
1052
|
-
query = vc_disks.select {|dev| key == dev[:key] }
|
1053
|
-
else
|
1054
|
-
if snapshots?
|
1055
|
-
error = 'Disk metadata not present and snapshots exist. ' \
|
1056
|
-
'Please remove imported VM with "onevm recover ' \
|
1057
|
-
'--delete-db".'
|
1058
|
-
raise error
|
1059
|
-
end
|
1060
|
-
|
1061
|
-
# Try to find the disk using the path known by OpenNebula
|
1062
|
-
source_path = one_disk['SOURCE']
|
1063
|
-
calculated_path = disk_real_path(one_disk, index)
|
1064
|
-
query = vc_disks.select do |dev|
|
1065
|
-
source_path == dev[:path_wo_ds] ||
|
1066
|
-
calculated_path == dev[:path_wo_ds]
|
1067
|
-
end
|
1068
|
-
end
|
1069
|
-
|
1070
|
-
return if query.size != 1
|
1071
|
-
|
1072
|
-
query.first
|
1073
|
-
end
|
1074
|
-
|
1075
|
-
# perform a query to vCenter asking for the OpenNebula nic
|
1076
|
-
#
|
1077
|
-
# @param vc_disks [String] The mac of the nic
|
1078
|
-
# @param vc_disks [Array (vcenter_nic)] Array of the machine real nics
|
1079
|
-
#
|
1080
|
-
# @return [vCenter_nic] the proper nic
|
1081
|
-
def query_nic(mac, vc_nics)
|
1082
|
-
nic = vc_nics.select {|dev| dev.macAddress == mac }.first
|
1083
|
-
|
1084
|
-
vc_nics.delete(nic) if nic
|
1085
|
-
end
|
1086
|
-
|
1087
|
-
# Refresh VcenterDriver machine nics model, does not perform
|
1088
|
-
# any sync operation!
|
1089
|
-
#
|
1090
|
-
# @return [Hash ("String" => self.Nic)] Model representation of nics
|
1091
|
-
def info_nics
|
1092
|
-
keep_mac_on_imported = false
|
1093
|
-
keep_mac_on_imported = CONFIG[:keep_mac_on_imported] \
|
1094
|
-
unless CONFIG[:keep_mac_on_imported].nil?
|
1095
|
-
|
1096
|
-
@nics = { :macs => {} }
|
1097
|
-
|
1098
|
-
vc_nics = vcenter_nics_list
|
1099
|
-
one_nics = one_nics_get
|
1100
|
-
|
1101
|
-
one_nics.each do |one_nic|
|
1102
|
-
index = one_nic['NIC_ID']
|
1103
|
-
if keep_mac_on_imported && one_nic['MAC_IMPORTED']
|
1104
|
-
mac = one_nic['MAC_IMPORTED']
|
1105
|
-
else
|
1106
|
-
mac = one_nic['MAC']
|
1107
|
-
end
|
1108
|
-
vc_dev = query_nic(mac, vc_nics)
|
1109
|
-
|
1110
|
-
if vc_dev
|
1111
|
-
@nics[index] = Nic.new(index.to_i, one_nic, vc_dev)
|
1112
|
-
@nics[:macs][mac] = index
|
1113
|
-
else
|
1114
|
-
@nics[index] = Nic.one_nic(index.to_i, one_nic)
|
1115
|
-
end
|
1116
|
-
end
|
1117
|
-
|
1118
|
-
vc_nics.each do |d|
|
1119
|
-
backing = d.backing
|
1120
|
-
|
1121
|
-
case backing.class.to_s
|
1122
|
-
when NET_CARD.to_s
|
1123
|
-
key = backing.network._ref
|
1124
|
-
when DNET_CARD.to_s
|
1125
|
-
key = backing.port.portgroupKey
|
1126
|
-
when OPAQUE_CARD.to_s
|
1127
|
-
# Select only Opaque Networks
|
1128
|
-
opaque_networks = @item.network.select do |net|
|
1129
|
-
net.class == RbVmomi::VIM::OpaqueNetwork
|
1130
|
-
end
|
1131
|
-
opaque_network = opaque_networks.find do |opn|
|
1132
|
-
backing.opaqueNetworkId == opn.summary.opaqueNetworkId
|
1133
|
-
end
|
1134
|
-
key = opaque_network._ref
|
1135
|
-
else
|
1136
|
-
raise "Unsupported network card type: #{backing.class}"
|
1137
|
-
end
|
1138
|
-
|
1139
|
-
@nics["#{key}#{d.key}"] = Nic.vc_nic(d)
|
1140
|
-
end
|
1141
|
-
|
1142
|
-
@nics.reject {|k| k == :macs }
|
1143
|
-
end
|
1144
|
-
|
1145
|
-
# Refresh VcenterDriver machine disks model, does not perform any
|
1146
|
-
# sync operation!
|
1147
|
-
#
|
1148
|
-
# @return [Hash ("String" => self.Disk)] Model representation of disks
|
1149
|
-
def info_disks
|
1150
|
-
@disks = {}
|
1151
|
-
|
1152
|
-
keys = disk_keys
|
1153
|
-
vc_disks = vcenter_disks_get
|
1154
|
-
one_disks = one_disks_list
|
1155
|
-
|
1156
|
-
one_disks.each do |one_disk|
|
1157
|
-
index = one_disk['DISK_ID']
|
1158
|
-
|
1159
|
-
disk = query_disk(one_disk, keys, vc_disks)
|
1160
|
-
|
1161
|
-
vc_dev = vc_disks.delete(disk) if disk
|
1162
|
-
|
1163
|
-
if vc_dev
|
1164
|
-
@disks[index] = Disk.new(index.to_i, one_disk, vc_dev)
|
1165
|
-
else
|
1166
|
-
@disks[index] = Disk.one_disk(index.to_i, one_disk)
|
1167
|
-
end
|
1168
|
-
end
|
1169
|
-
|
1170
|
-
vc_disks.each {|d| @disks[d[:path_wo_ds]] = Disk.vc_disk(d) }
|
1171
|
-
|
1172
|
-
@disks
|
1173
|
-
end
|
1174
|
-
|
1175
|
-
# Queries for a certain nic
|
1176
|
-
#
|
1177
|
-
# @param index [String| Integer] the id of the nic or the mac
|
1178
|
-
# @param opts [hash (symbol=>boolean)]
|
1179
|
-
# :sync : allow you to ignore local class memory
|
1180
|
-
def nic(index, opts = {})
|
1181
|
-
index = index.to_s
|
1182
|
-
is_mac = index.match(/^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$/)
|
1183
|
-
|
1184
|
-
if is_mac
|
1185
|
-
mac = index
|
1186
|
-
index = @nics[:macs][mac]
|
1187
|
-
end
|
1188
|
-
|
1189
|
-
return @nics[index] if @nics[index] && opts[:sync].nil?
|
1190
|
-
|
1191
|
-
if is_mac
|
1192
|
-
one_nic =
|
1193
|
-
one_item
|
1194
|
-
.retrieve_xmlelements(
|
1195
|
-
"TEMPLATE/NIC[MAC='#{mac}']"
|
1196
|
-
).first rescue nil
|
1197
|
-
index = one_nic['NIC_ID'] if one_nic
|
1198
|
-
else
|
1199
|
-
one_nic =
|
1200
|
-
one_item
|
1201
|
-
.retrieve_xmlelements(
|
1202
|
-
"TEMPLATE/NIC[NIC_ID='#{index}']"
|
1203
|
-
).first rescue nil
|
1204
|
-
mac = one_nic['MAC'] if one_nic
|
1205
|
-
end
|
1206
|
-
|
1207
|
-
raise "nic #{index} not found" unless one_nic
|
1208
|
-
|
1209
|
-
vc_nics = vcenter_nics_list
|
1210
|
-
vc_nic = query_nic(mac, vc_nics)
|
1211
|
-
|
1212
|
-
if vc_nic
|
1213
|
-
Nic.new(index.to_i, one_nic, vc_nic)
|
1214
|
-
else
|
1215
|
-
Nic.one_nic(index.to_i, one_nic)
|
1216
|
-
end
|
1217
|
-
end
|
1218
|
-
|
1219
|
-
# Queries for a certain disk
|
1220
|
-
#
|
1221
|
-
# @param index [String | Integer] the id of the disk
|
1222
|
-
# @param opts [hash (symbol=>boolean)]
|
1223
|
-
# :sync : allow you to ignore local class memory
|
1224
|
-
def disk(index, opts = {})
|
1225
|
-
index = index.to_s
|
1226
|
-
|
1227
|
-
return @disks[index] if @disks[index] && opts[:sync].nil?
|
1228
|
-
|
1229
|
-
one_disk =
|
1230
|
-
one_item
|
1231
|
-
.retrieve_xmlelements(
|
1232
|
-
"TEMPLATE/DISK[DISK_ID='#{index}']"
|
1233
|
-
).first rescue nil
|
1234
|
-
|
1235
|
-
raise "disk #{index} not found" unless one_disk
|
1236
|
-
|
1237
|
-
opts[:keys].nil? ? keys = disk_keys : keys = opts[:keys]
|
1238
|
-
if opts[:disks].nil?
|
1239
|
-
vc_disks = vcenter_disks_get
|
1240
|
-
else
|
1241
|
-
vc_disks = opts[:disks]
|
1242
|
-
end
|
1243
|
-
vc_disk = query_disk(one_disk, keys, vc_disks)
|
1244
|
-
|
1245
|
-
if vc_disk
|
1246
|
-
Disk.new(index.to_i, one_disk, vc_disk)
|
1247
|
-
else
|
1248
|
-
Disk.one_disk(index.to_i, one_disk)
|
1249
|
-
end
|
1250
|
-
end
|
1251
|
-
|
1252
|
-
# Matches disks from the vCenter VM Template (or VM if it is coming
|
1253
|
-
# from a Wild VM) with the disks represented in OpenNebula VM
|
1254
|
-
# data model (ie, the XML)
|
1255
|
-
def reference_unmanaged_devices(template_ref, execute = true)
|
1256
|
-
device_change = []
|
1257
|
-
spec = {}
|
1258
|
-
|
1259
|
-
# Get unmanaged disks in OpenNebula's VM template
|
1260
|
-
xpath =
|
1261
|
-
'TEMPLATE/DISK[OPENNEBULA_MANAGED="NO" '\
|
1262
|
-
'or OPENNEBULA_MANAGED="no"]'
|
1263
|
-
unmanaged_disks = one_item.retrieve_xmlelements(xpath)
|
1264
|
-
|
1265
|
-
managed = false
|
1266
|
-
extraconfig = reference_disks(
|
1267
|
-
template_ref,
|
1268
|
-
unmanaged_disks,
|
1269
|
-
managed
|
1270
|
-
)
|
1271
|
-
|
1272
|
-
# Add info for existing nics in template in vm xml
|
1273
|
-
xpath =
|
1274
|
-
'TEMPLATE/NIC[OPENNEBULA_MANAGED="NO" '\
|
1275
|
-
'or OPENNEBULA_MANAGED="no"]'
|
1276
|
-
unmanaged_nics = one_item.retrieve_xmlelements(xpath)
|
1277
|
-
|
1278
|
-
# Handle NIC changes (different model and/or set MAC address
|
1279
|
-
# for unmanaged nics
|
1280
|
-
begin
|
1281
|
-
if !unmanaged_nics.empty?
|
1282
|
-
nics = vcenter_nics_list
|
1283
|
-
|
1284
|
-
# iterate over nics array and find nic with ref
|
1285
|
-
# or return nil if not exist
|
1286
|
-
select_net =lambda {|ref|
|
1287
|
-
device = nil
|
1288
|
-
nics.each do |nic|
|
1289
|
-
type = nic.backing.class.to_s
|
1290
|
-
|
1291
|
-
case type
|
1292
|
-
when NET_CARD.to_s
|
1293
|
-
nref = nic.backing.network._ref
|
1294
|
-
when DNET_CARD.to_s
|
1295
|
-
nref = nic.backing.port.portgroupKey
|
1296
|
-
when OPAQUE_CARD.to_s
|
1297
|
-
# Select only Opaque Networks
|
1298
|
-
opaque_networks = @item.network.select do |net|
|
1299
|
-
net.class == RbVmomi::VIM::OpaqueNetwork
|
1300
|
-
end
|
1301
|
-
opaque_network = opaque_networks.find do |opn|
|
1302
|
-
nic.backing.opaqueNetworkId ==
|
1303
|
-
opn.summary.opaqueNetworkId
|
1304
|
-
end
|
1305
|
-
nref = opaque_network._ref
|
1306
|
-
else
|
1307
|
-
raise 'Unsupported network card type: '\
|
1308
|
-
"#{nic.backing.class}"
|
1309
|
-
end
|
1310
|
-
|
1311
|
-
next unless nref == ref
|
1312
|
-
|
1313
|
-
device = nic
|
1314
|
-
break
|
1315
|
-
end
|
1316
|
-
|
1317
|
-
if device
|
1318
|
-
nics.delete(device)
|
1319
|
-
else
|
1320
|
-
nil
|
1321
|
-
end
|
1322
|
-
}
|
1323
|
-
|
1324
|
-
# Go over all unmanaged nics in order to sync
|
1325
|
-
# with vCenter Virtual Machine
|
1326
|
-
unmanaged_nics.each do |unic|
|
1327
|
-
vnic = select_net.call(unic['VCENTER_NET_REF'])
|
1328
|
-
nic_class = vnic.class if vnic
|
1329
|
-
|
1330
|
-
if unic['MODEL']
|
1331
|
-
new_model = Nic.nic_model_class(unic['MODEL'])
|
1332
|
-
end
|
1333
|
-
|
1334
|
-
# if vnic is nil add a new device
|
1335
|
-
if vnic.nil?
|
1336
|
-
device_change << calculate_add_nic_spec(unic)
|
1337
|
-
# delete actual nic and update the new one.
|
1338
|
-
elsif new_model && new_model != nic_class
|
1339
|
-
device_change << {
|
1340
|
-
:device => vnic,
|
1341
|
-
:operation => :remove
|
1342
|
-
}
|
1343
|
-
device_change << calculate_add_nic_spec(
|
1344
|
-
unic,
|
1345
|
-
vnic.unitNumber
|
1346
|
-
)
|
1347
|
-
else
|
1348
|
-
vnic.macAddress = unic['MAC']
|
1349
|
-
device_change << {
|
1350
|
-
:device => vnic,
|
1351
|
-
:operation => :edit
|
1352
|
-
}
|
1353
|
-
end
|
1354
|
-
end
|
1355
|
-
|
1356
|
-
end
|
1357
|
-
rescue StandardError => e
|
1358
|
-
raise 'There is a problem with your vm NICS, '\
|
1359
|
-
'make sure that they are working properly. '\
|
1360
|
-
"Error: #{e.message}"
|
1361
|
-
end
|
1362
|
-
|
1363
|
-
# Save in extraconfig the key for unmanaged disks
|
1364
|
-
if !extraconfig.empty? || !device_change.empty?
|
1365
|
-
spec[:extraConfig] = extraconfig unless extraconfig.empty?
|
1366
|
-
spec[:deviceChange] = device_change unless device_change.empty?
|
1367
|
-
|
1368
|
-
return spec unless execute
|
1369
|
-
|
1370
|
-
@item.ReconfigVM_Task(:spec => spec).wait_for_completion
|
1371
|
-
end
|
1372
|
-
|
1373
|
-
{}
|
1374
|
-
end
|
1375
|
-
|
1376
|
-
def reference_all_disks
|
1377
|
-
# OpenNebula VM disks saved inside .vmx file in vCenter
|
1378
|
-
disks_extraconfig_current = {}
|
1379
|
-
# iterate over all attributes and get the disk information
|
1380
|
-
# keys for disks are prefixed with
|
1381
|
-
# opennebula.disk and opennebula.mdisk
|
1382
|
-
@item.config.extraConfig.each do |elem|
|
1383
|
-
disks_extraconfig_current[elem.key] =
|
1384
|
-
elem.value if elem.key.start_with?('opennebula.disk.')
|
1385
|
-
disks_extraconfig_current[elem.key] =
|
1386
|
-
elem.value if elem.key.start_with?('opennebula.mdisk.')
|
1387
|
-
end
|
1388
|
-
|
1389
|
-
# disks that exist currently in the vCenter Virtual Machine
|
1390
|
-
disks_vcenter_current = []
|
1391
|
-
disks_each(:synced?) do |disk|
|
1392
|
-
begin
|
1393
|
-
if disk.managed?
|
1394
|
-
key_prefix = 'opennebula.mdisk.'
|
1395
|
-
else
|
1396
|
-
key_prefix = 'opennebula.disk.'
|
1397
|
-
end
|
1398
|
-
k = "#{key_prefix}#{disk.id}"
|
1399
|
-
v = disk.key.to_s
|
1400
|
-
|
1401
|
-
disks_vcenter_current << { :key => k, :value => v }
|
1402
|
-
rescue StandardError => _e
|
1403
|
-
next
|
1404
|
-
end
|
1405
|
-
end
|
1406
|
-
|
1407
|
-
update = false
|
1408
|
-
# differences in the number of disks
|
1409
|
-
# between vCenter and OpenNebula VMs
|
1410
|
-
num_disks_difference =
|
1411
|
-
disks_extraconfig_current.keys.count -
|
1412
|
-
disks_vcenter_current.count
|
1413
|
-
|
1414
|
-
# check if disks are same in vCenter and OpenNebula
|
1415
|
-
disks_vcenter_current.each do |item|
|
1416
|
-
# check if vCenter disk have representation in the extraConfig
|
1417
|
-
# but with a different key, then we have to update
|
1418
|
-
first_condition =
|
1419
|
-
disks_extraconfig_current.key? item[:key]
|
1420
|
-
second_condition =
|
1421
|
-
disks_extraconfig_current[item[:key]] == item[:value]
|
1422
|
-
if first_condition && !second_condition
|
1423
|
-
update = true
|
1424
|
-
end
|
1425
|
-
# check if vCenter disk hasn't got
|
1426
|
-
# a representation in the extraConfig
|
1427
|
-
# then we have to update
|
1428
|
-
if !disks_extraconfig_current.key? item[:key]
|
1429
|
-
update = true
|
1430
|
-
end
|
1431
|
-
end
|
1432
|
-
|
1433
|
-
# new configuration for vCenter .vmx file
|
1434
|
-
disks_extraconfig_new = {}
|
1435
|
-
|
1436
|
-
return unless num_disks_difference != 0 || update
|
1437
|
-
|
1438
|
-
# Step 1: remove disks in the current configuration of .vmx
|
1439
|
-
# Avoids having an old disk in the configuration
|
1440
|
-
# that does not really exist
|
1441
|
-
disks_extraconfig_current.keys.each do |key|
|
1442
|
-
disks_extraconfig_new[key] = ''
|
1443
|
-
end
|
1444
|
-
|
1445
|
-
# Step 2: add current vCenter disks to new configuration
|
1446
|
-
disks_vcenter_current.each do |item|
|
1447
|
-
disks_extraconfig_new[item[:key]] = item[:value]
|
1448
|
-
end
|
1449
|
-
|
1450
|
-
# Step 3: create extraconfig_new with the values to update
|
1451
|
-
extraconfig_new = []
|
1452
|
-
disks_extraconfig_new.keys.each do |key|
|
1453
|
-
extraconfig_new <<
|
1454
|
-
{
|
1455
|
-
:key =>
|
1456
|
-
key,
|
1457
|
-
:value =>
|
1458
|
-
disks_extraconfig_new[key]
|
1459
|
-
}
|
1460
|
-
end
|
1461
|
-
|
1462
|
-
# Step 4: update the extraConfig
|
1463
|
-
spec_hash = { :extraConfig => extraconfig_new }
|
1464
|
-
spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
|
1465
|
-
@item.ReconfigVM_Task(:spec => spec).wait_for_completion
|
1466
|
-
end
|
1467
|
-
|
1468
|
-
# Build extraconfig section to reference disks
|
1469
|
-
# by key and avoid problems with changing paths
|
1470
|
-
# (mainly due to snapshots)
|
1471
|
-
# Uses VM Templte if ref available, or the vCenter VM if not
|
1472
|
-
# (latter case is if we are dealing with a Wild VM
|
1473
|
-
def reference_disks(template_ref, disks, managed)
|
1474
|
-
return [] if disks.empty? || instantiated_as_persistent?
|
1475
|
-
|
1476
|
-
extraconfig = []
|
1477
|
-
if managed
|
1478
|
-
key_prefix = 'opennebula.mdisk'
|
1479
|
-
else
|
1480
|
-
key_prefix = 'opennebula.disk'
|
1481
|
-
end
|
1482
|
-
|
1483
|
-
# Get vcenter VM disks to know real path of cloned disk
|
1484
|
-
vcenter_disks = vcenter_disks_get
|
1485
|
-
|
1486
|
-
# Create an array with the paths of the disks in vcenter template
|
1487
|
-
if !template_ref.nil?
|
1488
|
-
template = VCenterDriver::Template.new_from_ref(template_ref,
|
1489
|
-
vi_client)
|
1490
|
-
template_disks = template.vcenter_disks_get
|
1491
|
-
else
|
1492
|
-
# If we are dealing with a Wild VM, we simply use
|
1493
|
-
# what is available in the vCenter VM
|
1494
|
-
template_disks = vcenter_disks_get
|
1495
|
-
end
|
1496
|
-
template_disks_vector = []
|
1497
|
-
template_disks.each do |d|
|
1498
|
-
template_disks_vector << d[:path_wo_ds]
|
1499
|
-
end
|
1500
|
-
|
1501
|
-
# Try to find index of disks in template disks
|
1502
|
-
disks.each do |disk|
|
1503
|
-
disk_source =
|
1504
|
-
VCenterDriver::FileHelper
|
1505
|
-
.unescape_path(
|
1506
|
-
disk['SOURCE']
|
1507
|
-
)
|
1508
|
-
template_disk = template_disks.select do |d|
|
1509
|
-
d[:path_wo_ds] == disk_source
|
1510
|
-
end.first
|
1511
|
-
|
1512
|
-
if template_disk
|
1513
|
-
vcenter_disk = vcenter_disks.select do |d|
|
1514
|
-
d[:key] == template_disk[:key]
|
1515
|
-
end.first
|
1516
|
-
end
|
1517
|
-
|
1518
|
-
unless vcenter_disk
|
1519
|
-
raise "disk with path #{disk_source}"\
|
1520
|
-
'not found in the vCenter VM'
|
1521
|
-
end
|
1522
|
-
|
1523
|
-
reference = {}
|
1524
|
-
reference[:key] = "#{key_prefix}.#{disk['DISK_ID']}"
|
1525
|
-
reference[:value] = (vcenter_disk[:key]).to_s
|
1526
|
-
extraconfig << reference
|
1527
|
-
end
|
1528
|
-
|
1529
|
-
extraconfig
|
1530
|
-
end
|
1531
|
-
|
1532
|
-
# create storagedrs disks
#
# @param device_change_spod [array] add disk spec for every device
#
# @param device_change_spod_ids [object] map from unit ctrl to
#        disk_id
#
# @return extra_config [Array] array with the extra config for vCenter
def create_storagedrs_disks(device_change_spod, device_change_spod_ids)
    sm = storagemanager
    disk_locator = []
    extra_config = []

    device_change_spod.each do |device_spec|
        disk_locator <<
            RbVmomi::VIM
            .PodDiskLocator(
                :diskId => device_spec[
                    :device
                ].key
            )
    end

    spec = {}
    spec[:deviceChange] = device_change_spod

    # Disk locator is required for AddDisk
    vmpod_hash = {}
    vmpod_hash[:storagePod] = get_ds
    vmpod_hash[:disk] = disk_locator
    vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)

    # The storage pod selection requires initialize
    spod_hash = {}
    spod_hash[:initialVmConfig] = [vmpod_config]
    spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
    storage_spec = RbVmomi::VIM.StoragePlacementSpec(
        :type => :reconfigure,
        :podSelectionSpec => spod_select,
        :vm => self['_ref'],
        :configSpec => spec
    )

    # Query a storage placement recommendation
    result = sm
             .RecommendDatastores(
                 :storageSpec => storage_spec
             ) rescue nil

    if result.nil?
        raise 'Could not get placement specification for StoragePod'
    end

    if !result.respond_to?(:recommendations) ||
       result.recommendations.empty?
        raise 'Could not get placement specification for StoragePod'
    end

    # Get recommendation key to be applied
    key = result.recommendations.first.key ||= ''

    if key.empty?
        raise 'Missing Datastore recommendation for StoragePod'
    end

    # Apply recommendation
    sm.ApplyStorageDrsRecommendation_Task(
        :key => [key]
    ).wait_for_completion

    # Set references in opennebula.disk elements
    device_change_spod.each do |device_spec|
        unit_number = device_spec[:device].unitNumber
        controller_key = device_spec[:device].controllerKey
        key = get_vcenter_disk_key(unit_number,
                                   controller_key)
        disk_id =
            device_change_spod_ids[
                "#{controller_key}-#{unit_number}"
            ]
        reference = {}
        reference[:key] = "opennebula.disk.#{disk_id}"
        reference[:value] = key.to_s
        extra_config << reference
    end

    extra_config
end

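# Illustrative sketch (not part of the driver): device_change_spod_ids
# maps "<controllerKey>-<unitNumber>" strings to OpenNebula disk IDs,
# which is how the loop above recovers the DISK_ID for each vCenter
# device. The values here are made up.
spod_ids = { '1000-0' => 0, '1000-1' => 2 }
controller_key = 1000
unit_number = 1
spod_ids["#{controller_key}-#{unit_number}"] # => 2
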
# set the boot order of the machine
#
# @param boot_info [String] boot information stored in
#        the template of the virtual machine. example: disk0, nic0
#
# @return [Array (vCenterbootClass)] An array with the vCenter classes
def boot_order_update(boot_info)
    convert = lambda {|device_str|
        spl = device_str.scan(/^(nic|disk)(\d+$)/).flatten
        raise "#{device_str} is not supported" if spl.empty?

        device = nil
        sync = "sync_#{spl[0]}s"
        (0..1).each do |_i|
            device = send(spl[0], spl[1])
            break if device.exists?

            send(sync)
        end

        device.boot_dev
    }

    boot_order = boot_info.split(',').map {|str| convert.call(str) }

    RbVmomi::VIM.VirtualMachineBootOptions({ :bootOrder => boot_order })
end

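# Minimal sketch (not part of the driver) of the parsing done by the
# convert lambda above: each comma-separated entry must match
# /^(nic|disk)(\d+$)/ and yields the device kind plus its index.
'disk0,nic1'.split(',').map do |s|
    s.scan(/^(nic|disk)(\d+$)/).flatten
end
# => [['disk', '0'], ['nic', '1']]
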
# sync OpenNebula nic model with vCenter
#
# @param option [symbol] if :all is provided
#        the method will try to sync
#        all the nics (detached and not existing ones)
#        otherwise it will only sync
#        the nics that are not existing
#
# @param execute [boolean] indicates
#        if the reconfigure operation is going to
#        be executed
def sync_nics(option = :none, execute = true)
    device_change = []

    if option == :all
        dchange = []

        # detached? condition indicates that
        # the nic exists in OpenNebula but not
        # in vCenter
        nics_each(:detached?) do |nic|
            dchange << {
                :operation => :remove,
                :device => nic.vc_item
            }
        end
        if !dchange.empty?
            dspec_hash = { :deviceChange => dchange }
            dspec = RbVmomi::VIM.VirtualMachineConfigSpec(dspec_hash)
            @item.ReconfigVM_Task(:spec => dspec).wait_for_completion
        end
    end

    # no_exists? condition indicates that
    # the nic does not exist in vCenter
    nics_each(:no_exists?) do |nic|
        device_change << calculate_add_nic_spec(nic.one_item)
    end

    return device_change unless execute

    spec_hash = { :deviceChange => device_change }

    spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
    @item.ReconfigVM_Task(:spec => spec).wait_for_completion

    info_nics
end

# Clear extraconfig tags from a vCenter VM
#
def clear_tags
    keys_to_remove = extra_config_keys

    spec_hash =
        keys_to_remove.map {|key| { :key => key, :value => '' } }

    spec = RbVmomi::VIM.VirtualMachineConfigSpec(
        :extraConfig => spec_hash
    )
    @item.ReconfigVM_Task(:spec => spec).wait_for_completion
end

# Get extraconfig tags from a vCenter VM
#
def extra_config_keys
    keys_to_remove = []
    @item['config.extraConfig'].each do |extraconfig|
        next unless extraconfig.key.include?('opennebula.disk') ||
                    extraconfig.key.include?('opennebula.vm') ||
                    extraconfig.key.downcase.include?('remotedisplay')

        keys_to_remove << extraconfig.key
    end
    keys_to_remove
end

# Get required parameters to use VMware HTML Console SDK
# To be used with the following SDK:
# https://code.vmware.com/web/sdk/2.1.0/html-console
#
def html_console_parameters
    ticket = @item.AcquireTicket(:ticketType => 'webmks')
    { :ticket => ticket.ticket, :host => ticket.host,
      :port => ticket.port }
end

# Synchronize the OpenNebula VM representation with vCenter VM
#
# if the device exists in vCenter and not in OpenNebula : detach
# if the device exists in OpenNebula and not in vCenter : attach
# if the device exists in both : noop
#
def sync(deploy = {})
    extraconfig = []
    device_change = []
    sync_opt = nil

    # Disks are only synced with the :all option when the VM is first
    # created. NOTE: detach actions are implemented through TM (not sync)
    sync_opt = :all if deploy[:new] == true

    disks = sync_disks(sync_opt, false)
    resize_unmanaged_disks

    if deploy[:boot] && !deploy[:boot].empty?
        boot_opts = boot_order_update(deploy[:boot])
    end

    # changes from sync_disks
    device_change += disks[:deviceChange] if disks[:deviceChange]
    extraconfig += disks[:extraConfig] if disks[:extraConfig]

    # get token and context
    extraconfig += extraconfig_context

    # get file_ds
    if (files = one_item['TEMPLATE/CONTEXT/FILES_DS'])
        file_id = 0
        files.split(' ').each do |file|
            extraconfig += extraconfig_file(file, file_id)
            file_id += 1
        end
    end

    # vnc configuration (for config_array hash)
    extraconfig += extraconfig_vnc

    # device_change hash (nics)
    device_change += sync_nics(:all, false)

    # Set CPU, memory and extraconfig
    num_cpus = one_item['TEMPLATE/VCPU'] || 1
    spec_hash = {
        :numCPUs => num_cpus.to_i,
        :memoryMB => one_item['TEMPLATE/MEMORY'],
        :extraConfig => extraconfig,
        :deviceChange => device_change
    }
    num_cores = one_item['TEMPLATE/TOPOLOGY/CORES'] || num_cpus.to_i
    if num_cpus.to_i % num_cores.to_i != 0
        num_cores = num_cpus.to_i
    end
    spec_hash[:numCoresPerSocket] = num_cores.to_i

    spec_hash[:bootOptions] = boot_opts if boot_opts

    spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

    @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    sync_extraconfig_disk(spec_hash)
end

def extraconfig_file(file, id)
    path, name = file.split(':')
    name = name.gsub('\'', '')
    file_content = Base64.encode64(File.read(path))
    file_content.prepend("#{name}\n")

    [
        { :key => "guestinfo.opennebula.file.#{id}",
          :value => file_content }
    ]
end

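# Illustrative sketch (not part of the driver): a FILES_DS entry has
# the form "<path>:'<name>'". The method above strips the quotes and
# prepends the name to the Base64 payload before publishing it as a
# guestinfo key. The path and content below are made up.
require 'base64'

entry = "/var/tmp/payload.sh:'payload.sh'"
path, name = entry.split(':')
name = name.gsub('\'', '')               # => "payload.sh"
file_content = Base64.encode64('echo hello')
file_content.prepend("#{name}\n")
# => value for the guestinfo.opennebula.file.<id> key
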
def extraconfig_context
    one_item.info(true)

    context_text = "# Context variables generated by OpenNebula\n"
    one_item.each('TEMPLATE/CONTEXT/*') do |context_element|
        # next if !context_element.text
        context_text += context_element.name + "='" +
                        context_element.text.gsub("'", "\'") + "'\n"
    end

    # token
    token = File.read(File.join(VAR_LOCATION,
                                'vms',
                                one_item['ID'],
                                'token.txt')).chomp rescue nil

    context_text += "ONEGATE_TOKEN='#{token}'\n" if token

    # context_text
    [
        { :key => 'guestinfo.opennebula.context',
          :value => Base64.encode64(context_text) }
    ]
end

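# Minimal sketch (not part of the driver) of what the guest receives:
# a shell-style KEY='VALUE' context file, Base64-encoded into the
# guestinfo.opennebula.context variable. Values are illustrative.
require 'base64'

context_text = "# Context variables generated by OpenNebula\n" \
               "HOSTNAME='vm01'\n" \
               "ONEGATE_TOKEN='secret'\n"
encoded = Base64.encode64(context_text)
Base64.decode64(encoded) == context_text # => true
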
def extraconfig_vnc
    if one_item['TEMPLATE/GRAPHICS']
        vnc_port = one_item['TEMPLATE/GRAPHICS/PORT'] || ''
        vnc_listen = one_item['TEMPLATE/GRAPHICS/LISTEN'] || '0.0.0.0'
        vnc_keymap = one_item['TEMPLATE/GRAPHICS/KEYMAP']

        conf =
            [
                {
                    :key => 'remotedisplay.vnc.enabled',
                    :value => 'TRUE'
                },
                {
                    :key => 'remotedisplay.vnc.port',
                    :value => vnc_port
                },
                {
                    :key => 'remotedisplay.vnc.ip',
                    :value => vnc_listen
                }
            ]

        conf +=
            [
                {
                    :key => 'remotedisplay.vnc.keymap',
                    :value => vnc_keymap
                }
            ] if vnc_keymap

        conf
    else
        []
    end
end

# Regenerate context when devices are hot plugged (reconfigure)
def regenerate_context
    spec_hash = { :extraConfig => extraconfig_context }
    spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

    begin
        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    rescue StandardError => e
        error = "Cannot generate VM context info: #{e.message}."

        if VCenterDriver::CONFIG[:debug_information]
            error += "\n\n#{e.backtrace}"
        end

        raise error
    end
end

# Returns an array of actions to be included in :deviceChange
def calculate_add_nic_spec(nic, unumber = nil)
    mac = nic['MAC']
    pg_name = nic['BRIDGE']
    default =
        VCenterDriver::VIHelper
        .get_default(
            'VM/TEMPLATE/NIC/MODEL'
        )
    tmodel = one_item['USER_TEMPLATE/NIC_DEFAULT/MODEL']

    # Get the model of the nic: first try the model set
    # inside the nic, then the model defined by the user and,
    # as a last option, the default model of the vCenter Driver
    model = nic['MODEL'] || tmodel || default
    raise 'nic model cannot be empty!' if model == ''

    vnet_ref = nic['VCENTER_NET_REF']
    backing = nil

    # Maximum bitrate for the interface in kilobytes/second
    # for inbound traffic
    limit_in =
        nic['INBOUND_PEAK_BW'] ||
        VCenterDriver::VIHelper.get_default(
            'VM/TEMPLATE/NIC/INBOUND_PEAK_BW'
        )
    # Maximum bitrate for the interface in kilobytes/second
    # for outbound traffic
    limit_out =
        nic['OUTBOUND_PEAK_BW'] ||
        VCenterDriver::VIHelper.get_default(
            'VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW'
        )
    limit = nil

    if limit_in && limit_out
        limit=([limit_in.to_i, limit_out.to_i].min / 1024) * 8
    end

    # Average bitrate for the interface in kilobytes/second
    # for inbound traffic
    rsrv_in =
        nic['INBOUND_AVG_BW'] ||
        VCenterDriver::VIHelper.get_default(
            'VM/TEMPLATE/NIC/INBOUND_AVG_BW'
        )
    # Average bitrate for the interface in kilobytes/second
    # for outbound traffic
    rsrv_out =
        nic['OUTBOUND_AVG_BW'] ||
        VCenterDriver::VIHelper.get_default(
            'VM/TEMPLATE/NIC/OUTBOUND_AVG_BW'
        )
    rsrv = nil

    if rsrv_in || rsrv_out
        rsrv=([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
    end

    # get the network with ref equal to vnet_ref or
    # with name equal to pg_name
    network = self['runtime.host'].network.select do |n|
        n._ref == vnet_ref || n.name == pg_name
    end
    network = network.first

    unless network
        raise "#{pg_name} not found in #{self['runtime.host'].name}"
    end

    # start in one, we want the next available id
    card_num = 1
    @item['config.hardware.device'].each do |dv|
        card_num += 1 if VCenterDriver::Network.nic?(dv)
    end

    nic_card = Nic.nic_model_class(model)

    if network.class == RbVmomi::VIM::Network
        backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
            :deviceName => pg_name,
            :network => network
        )
    elsif network.class == RbVmomi::VIM::DistributedVirtualPortgroup
        port = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
            :switchUuid =>
                network.config.distributedVirtualSwitch.uuid,
            :portgroupKey => network.key
        )
        backing =
            RbVmomi::VIM
            .VirtualEthernetCardDistributedVirtualPortBackingInfo(
                :port => port
            )
    elsif network.class == RbVmomi::VIM::OpaqueNetwork
        backing =
            RbVmomi::VIM
            .VirtualEthernetCardOpaqueNetworkBackingInfo(
                :opaqueNetworkId =>
                    network.summary.opaqueNetworkId,
                :opaqueNetworkType =>
                    'nsx.LogicalSwitch'
            )
    else
        raise 'Unknown network class'
    end

    # grab the last unitNumber to ensure the nic is added at the end
    if !unumber
        @unic = @unic || vcenter_nics_list.map do |d|
            d.unitNumber
        end.max || 0
        unumber = @unic += 1
    else
        @unic = unumber
    end

    card_spec = {
        :key => 0,
        :deviceInfo => {
            :label => 'net' + card_num.to_s,
            :summary => pg_name
        },
        :backing => backing,
        :addressType => mac ? 'manual' : 'generated',
        :macAddress => mac,
        :unitNumber => unumber
    }
    if @vi_client.vim.serviceContent.about.apiVersion.to_f >= 7.0
        card_spec[:key] = -100 - card_num.to_i
    end

    if (limit || rsrv) && (limit > 0)
        ra_spec = {}
        rsrv = limit if rsrv > limit
        # The bandwidth limit for the virtual network adapter. The
        # utilization of the virtual network adapter will not exceed
        # this limit, even if there are available resources. To clear
        # the value of this property and revert it to unset, set the
        # value to "-1" in an update operation. Units in Mbits/sec
        ra_spec[:limit] = limit if limit
        # Amount of network bandwidth that is guaranteed to the virtual
        # network adapter. If utilization is less than reservation, the
        # resource can be used by other virtual network adapters.
        # Reservation is not allowed to exceed the value of limit if
        # limit is set. Units in Mbits/sec
        ra_spec[:reservation] = rsrv if rsrv
        # Network share. The value is used as a relative weight in
        # competing for shared bandwidth, in case of resource contention
        ra_spec[:share] =
            RbVmomi::VIM.SharesInfo(
                {
                    :level => RbVmomi::VIM.SharesLevel('normal'),
                    :shares => 0
                }
            )
        card_spec[:resourceAllocation] =
            RbVmomi::VIM.VirtualEthernetCardResourceAllocation(
                ra_spec
            )
    end

    {
        :operation => :add,
        :device => nic_card.new(card_spec)
    }
end

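# Worked example (not part of the driver) of the unit conversion used
# above: OpenNebula expresses *_PEAK_BW and *_AVG_BW in
# kilobytes/second, while vCenter expects Mbits/sec, hence the
# (KB/s / 1024) * 8 formula.
limit_in  = 131_072 # KB/s, i.e. 128 MB/s
limit_out = 262_144 # KB/s
([limit_in, limit_out].min / 1024) * 8 # => 1024 Mbit/s
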
# Returns an array of actions to be included in :deviceChange
def calculate_add_nic_spec_autogenerate_mac(nic)
    pg_name = nic['BRIDGE']

    default =
        VCenterDriver::VIHelper.get_default(
            'VM/TEMPLATE/NIC/MODEL'
        )
    tmodel = one_item['USER_TEMPLATE/NIC_DEFAULT/MODEL']

    model = nic['MODEL'] || tmodel || default

    vnet_ref = nic['VCENTER_NET_REF']
    backing = nil

    # Maximum bitrate for the interface in kilobytes/second
    # for inbound traffic
    limit_in =
        nic['INBOUND_PEAK_BW'] ||
        VCenterDriver::VIHelper.get_default(
            'VM/TEMPLATE/NIC/INBOUND_PEAK_BW'
        )
    # Maximum bitrate for the interface in kilobytes/second
    # for outbound traffic
    limit_out =
        nic['OUTBOUND_PEAK_BW'] ||
        VCenterDriver::VIHelper.get_default(
            'VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW'
        )
    limit = nil

    if limit_in && limit_out
        limit=([limit_in.to_i, limit_out.to_i].min / 1024) * 8
    end

    # Average bitrate for the interface in kilobytes/second
    # for inbound traffic
    rsrv_in =
        nic['INBOUND_AVG_BW'] ||
        VCenterDriver::VIHelper.get_default(
            'VM/TEMPLATE/NIC/INBOUND_AVG_BW'
        )

    # Average bitrate for the interface in kilobytes/second
    # for outbound traffic
    rsrv_out =
        nic['OUTBOUND_AVG_BW'] ||
        VCenterDriver::VIHelper.get_default(
            'VM/TEMPLATE/NIC/OUTBOUND_AVG_BW'
        )

    rsrv = nil

    if rsrv_in || rsrv_out
        rsrv=([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
    end

    network = self['runtime.host'].network.select do |n|
        n._ref == vnet_ref || n.name == pg_name
    end

    network = network.first

    card_num = 1 # start in one, we want the next available id

    @item['config.hardware.device'].each do |dv|
        card_num += 1 if VCenterDriver::Network.nic?(dv)
    end

    nic_card = Nic.nic_model_class(model)

    if network.class == RbVmomi::VIM::Network
        backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
            :deviceName => pg_name,
            :network => network
        )
    elsif network.class == RbVmomi::VIM::DistributedVirtualPortgroup
        port = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
            :switchUuid =>
                network.config.distributedVirtualSwitch.uuid,
            :portgroupKey => network.key
        )
        backing =
            RbVmomi::VIM
            .VirtualEthernetCardDistributedVirtualPortBackingInfo(
                :port => port
            )
    elsif network.class == RbVmomi::VIM::OpaqueNetwork
        backing =
            RbVmomi::VIM
            .VirtualEthernetCardOpaqueNetworkBackingInfo(
                :opaqueNetworkId => network.summary.opaqueNetworkId,
                :opaqueNetworkType => 'nsx.LogicalSwitch'
            )
    else
        raise 'Unknown network class'
    end

    card_spec = {
        :key => 0,
        :deviceInfo => {
            :label => 'net' + card_num.to_s,
            :summary => pg_name
        },
        :backing => backing,
        :addressType => 'generated'
    }
    if @vi_client.vim.serviceContent.about.apiVersion.to_f >= 7.0
        card_spec[:key] = -100 - card_num.to_i
    end

    if (limit || rsrv) && (limit > 0)
        ra_spec = {}
        rsrv = limit if rsrv > limit
        # The bandwidth limit for the virtual network adapter. The
        # utilization of the virtual network adapter will not exceed
        # this limit, even if there are available resources. To clear
        # the value of this property and revert it to unset, set the
        # value to "-1" in an update operation. Units in Mbits/sec
        ra_spec[:limit] = limit if limit
        # Amount of network bandwidth that is guaranteed to the virtual
        # network adapter. If utilization is less than reservation, the
        # resource can be used by other virtual network adapters.
        # Reservation is not allowed to exceed the value of limit if
        # limit is set. Units in Mbits/sec
        ra_spec[:reservation] = rsrv if rsrv
        # Network share. The value is used as a relative weight in
        # competing for shared bandwidth, in case of resource contention
        ra_spec[:share] =
            RbVmomi::VIM.SharesInfo(
                {
                    :level =>
                        RbVmomi::VIM.SharesLevel(
                            'normal'
                        ),
                    :shares => 0
                }
            )
        card_spec[:resourceAllocation] =
            RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
    end

    {
        :operation => :add,
        :device => nic_card.new(card_spec)
    }
end

# Add NIC to VM
def attach_nic(one_nic)
    spec_hash = {}

    begin
        # A new NIC requires a vcenter spec
        attach_nic_array = []
        attach_nic_array << calculate_add_nic_spec(one_nic)
        spec_hash[:deviceChange] =
            attach_nic_array unless attach_nic_array.empty?

        # Reconfigure VM
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    rescue StandardError => e
        error = "Cannot attach NIC to VM: #{e.message}."

        if VCenterDriver::CONFIG[:debug_information]
            error += "\n\n#{e.backtrace.join("\n")}"
        end

        raise error
    end
end

# Detach NIC from VM
def detach_nic(mac)
    spec_hash = {}

    nic = nic(mac) rescue nil

    return if !nic || nic.no_exists?

    # Remove NIC from VM in the ReconfigVM_Task
    spec_hash[:deviceChange] = [
        :operation => :remove,
        :device => nic.vc_item
    ]
    begin
        @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
    rescue StandardError => e
        error = "Cannot detach NIC from VM: #{e.message}."

        if VCenterDriver::CONFIG[:debug_information]
            error += "\n\n#{e.backtrace.join("\n")}"
        end

        raise error
    end
end

# Detach all nics; useful when removing pg and sw so they're not in use
def detach_all_nics
    spec_hash = {}
    device_change = []

    nics_each(:exists?) do |nic|
        device_change << {
            :operation => :remove,
            :device => nic.vc_item
        }
    end

    return if device_change.empty?

    # Remove NIC from VM in the ReconfigVM_Task
    spec_hash[:deviceChange] = device_change

    begin
        @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
    rescue StandardError => e
        error = "Cannot detach all NICs from VM: #{e.message}."

        if VCenterDriver::CONFIG[:debug_information]
            error += "\n\n#{e.backtrace}"
        end

        raise error
    end
end

# try to get specs for new attached disks
# using disk_each method with :no_exists? condition
def attach_disks_specs
    attach_disk_array = []
    extraconfig = []
    attach_spod_array = []
    attach_spod_disk_info = {}

    pos = { :ide => 0, :scsi => 0 }
    disks_each(:no_exists?) do |disk|
        disk.one_item['TYPE'] == 'CDROM' ? k = :ide : k = :scsi

        if disk.storpod?
            spec = calculate_add_disk_spec(disk.one_item, pos[k])
            attach_spod_array << spec

            controller_key = spec[:device].controllerKey
            unit_number = spec[:device].unitNumber

            unit_ctrl = "#{controller_key}-#{unit_number}"
            attach_spod_disk_info[unit_ctrl] = disk.id
        else
            aspec = calculate_add_disk_spec(disk.one_item, pos[k])
            extra_key = "opennebula.mdisk.#{disk.one_item['DISK_ID']}"
            extra_value = aspec[:device].key.to_s

            attach_disk_array << aspec
            extraconfig << { :key => extra_key, :value => extra_value }
        end

        pos[k]+=1
    end

    { :disks => attach_disk_array,
      :spods => attach_spod_array,
      :spod_info => attach_spod_disk_info,
      :extraconfig => extraconfig }
end

# try to get specs for detached disks
# using disk_each method with :detached? condition
def detach_disks_specs
    detach_disk_array = []
    extra_config = []
    keys = disk_keys.invert

    ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
    disks_each(:detached?) do |d|
        key = d.key.to_s
        source = VCenterDriver::FileHelper.escape_path(d.path)
        persistent =
            VCenterDriver::VIHelper
            .find_persistent_image_by_source(
                source, ipool
            )

        op = { :operation => :remove, :device => d.device }
        if !persistent && d.type != 'CDROM'
            op[:fileOperation] = :destroy
        end
        detach_disk_array << op

        # Remove the opennebula.disk reference, if present, from the
        # vmx and the cache
        extra_config << d.config(:delete) if keys[key]
    end

    [detach_disk_array, extra_config]
end

def different_key?(change_disk, vc_disk)
    change_disk[:device].controllerKey == vc_disk.controllerKey &&
        change_disk[:device].unitNumber == vc_disk.unitNumber &&
        change_disk[:device].key != vc_disk.key
end

def sync_extraconfig_disk(spec_hash)
    return if spec_hash[:deviceChange].empty?

    extraconfig_new = []
    # vCenter mob disks
    vc_disks = @item['config.hardware.device'].select do |vc_device|
        disk?(vc_device)
    end
    return unless vc_disks

    # For each changed disk, compare with vcenter mob disk
    spec_hash[:deviceChange].each_with_index do |_device, index|
        change_disk = spec_hash[:deviceChange][index]
        vc_disks.each do |vc_disk|
            next unless different_key?(change_disk, vc_disk)

            extraconfig_new <<
                {
                    :key =>
                        spec_hash[:extraConfig][index][:key],
                    :value =>
                        vc_disk.key.to_s
                }
        end
    end

    return if extraconfig_new.empty?

    spec_hash = {
        :extraConfig => extraconfig_new
    }
    spec =
        RbVmomi::VIM
        .VirtualMachineConfigSpec(
            spec_hash
        )
    @item.ReconfigVM_Task(
        :spec => spec
    ).wait_for_completion
end

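# Minimal sketch (not part of the driver) of what different_key?
# detects: the same controller/unit slot but a key that vCenter
# re-assigned, so the cached opennebula.disk.* extraconfig entry must
# be refreshed. The Struct below is a stand-in for the real devices.
Dev = Struct.new(:controllerKey, :unitNumber, :key)

planned = { :device => Dev.new(1000, 0, -1) } # key not yet assigned
actual  = Dev.new(1000, 0, 2001)              # key chosen by vCenter

planned[:device].controllerKey == actual.controllerKey &&
    planned[:device].unitNumber == actual.unitNumber &&
    planned[:device].key != actual.key # => true
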
# sync OpenNebula disk model with vCenter
#
# @param option [symbol] if :all is provided the
#        method will try to sync
#        all the disks (detached and not existing ones)
#        otherwise it will only sync the disks that are not existing
#
# @param execute [boolean] indicates if the reconfigure operation
#        is going to
#        be executed
def sync_disks(option = :nil, execute = true)
    info_disks

    spec_hash = {}

    if option == :all
        detach_op = {}
        detach_op[:deviceChange], detach_op[:extraConfig] =
            detach_disks_specs
        perform =
            !detach_op[:deviceChange].empty? ||
            !detach_op[:extraConfig].empty?
        @item
            .ReconfigVM_Task(
                :spec => detach_op
            ).wait_for_completion if perform
    end

    a_specs = attach_disks_specs

    if !a_specs[:spods].empty?
        spec_hash[:extraConfig] =
            create_storagedrs_disks(a_specs[:spods],
                                    a_specs[:spod_info])
    end

    if !a_specs[:disks].empty?
        spec_hash[:deviceChange] = a_specs[:disks]
        spec_hash[:extraConfig] = a_specs[:extraconfig]
    end

    return spec_hash unless execute

    spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
    @item.ReconfigVM_Task(:spec => spec).wait_for_completion
    info_disks
end

# Attach DISK to VM (hotplug)
def attach_disk(disk)
    # Adding a new disk in newer vSphere versions
    # automatically cleans all system snapshots
    # https://github.com/OpenNebula/one/issues/5409
    if snapshots? || one_snapshots?
        error_msg = 'Existing system snapshots, cannot change disks. '
        error_msg << 'Please remove all snapshots and try again'
        raise error_msg
    end

    spec_hash = {}
    device_change = []

    # Extract unmanaged_keys
    unmanaged_keys = disk_keys
    vc_disks = vcenter_disks_get

    # Check if we're dealing with a StoragePod SYSTEM ds
    storpod = disk['VCENTER_DS_REF'].start_with?('group-')

    # Check if the disk being attached is already connected to the VM
    raise 'DISK is already connected to VM' if disk_attached_to_vm(
        disk, unmanaged_keys, vc_disks
    )

    # Generate vCenter spec and reconfigure VM
    add_spec = calculate_add_disk_spec(disk)
    device_change << add_spec
    raise 'Could not generate DISK spec' if device_change.empty?

    extra_key = "opennebula.mdisk.#{disk['DISK_ID']}"
    extra_value = add_spec[:device].key.to_s

    spec_hash[:deviceChange] = device_change
    spec_hash[:extraConfig] =
        [{ :key => extra_key, :value => extra_value }]
    spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)

    begin
        if storpod
            # Ask for StorageDRS recommendation
            # to reconfigure VM (AddDisk)
            sm = storagemanager

            # Disk id is -1 as we don't know
            # what disk id is going to be set
            disk_locator = [RbVmomi::VIM.PodDiskLocator(:diskId => -1)]

            # Disk locator is required for AddDisk
            vmpod_hash = {}
            vmpod_hash[:storagePod] = get_ds
            vmpod_hash[:disk] = disk_locator
            vmpod_config =
                RbVmomi::VIM::VmPodConfigForPlacement(
                    vmpod_hash
                )

            # The storage pod selection requires initialize
            spod_hash = {}
            spod_hash[:initialVmConfig] = [vmpod_config]
            spod_select =
                RbVmomi::VIM::StorageDrsPodSelectionSpec(
                    spod_hash
                )
            storage_spec = RbVmomi::VIM.StoragePlacementSpec(
                :type => :reconfigure,
                :podSelectionSpec => spod_select,
                :vm => self['_ref'],
                :configSpec => spec
            )

            # Query a storage placement recommendation
            result = sm
                     .RecommendDatastores(
                         :storageSpec => storage_spec
                     ) rescue nil

            if result.nil?
                raise 'Could not get placement '\
                      'specification for StoragePod'
            end

            if !result.respond_to?(:recommendations) ||
               result.recommendations.empty?
                raise 'Could not get placement '\
                      'specification for StoragePod'
            end

            # Get recommendation key to be applied
            key = result.recommendations.first.key ||= ''

            if key.empty?
                raise 'Missing Datastore recommendation for StoragePod'
            end

            # Apply recommendation
            sm.ApplyStorageDrsRecommendation_Task(
                :key => [key]
            ).wait_for_completion

            # Add the key for the volatile disk to the
            # unmanaged opennebula.disk.id variables
            unit_number =
                spec_hash[:deviceChange][0][:device]
                .unitNumber
            controller_key =
                spec_hash[:deviceChange][0][:device]
                .controllerKey
            key =
                get_vcenter_disk_key(
                    unit_number,
                    controller_key
                )
            spec_hash = {}
            reference = {}
            reference[:key] =
                "opennebula.disk.#{disk['DISK_ID']}"
            reference[:value] = key.to_s
            spec_hash[:extraConfig] = [reference]
            @item
                .ReconfigVM_Task(
                    :spec => spec_hash
                ).wait_for_completion
        else
            @item
                .ReconfigVM_Task(
                    :spec => spec
                ).wait_for_completion
        end
        # Modify extraConfig if disks have a bad key
        sync_extraconfig_disk(spec_hash)
    rescue StandardError => e
        error = "Cannot attach DISK to VM: #{e.message}."

        if VCenterDriver::CONFIG[:debug_information]
            error += "\n\n#{e.backtrace.join("\n")}"
        end

        raise error
    end
end

# Detach persistent disks to avoid accidental destruction
def detach_persistent_disks(vm)
    spec_hash = {}
    spec_hash[:deviceChange] = []
    ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
    if ipool.respond_to?(:message)
        raise "Could not get OpenNebula ImagePool: #{ipool.message}"
    end

    vm.config.hardware.device.each do |disk|
        next unless disk_or_cdrom?(disk)

        # Let's try to find out if the disk is persistent
        source_unescaped = disk.backing.fileName.sub(
            /^\[(.*?)\] /, ''
        ) rescue next
        source = VCenterDriver::FileHelper.escape_path(source_unescaped)

        persistent = VCenterDriver::VIHelper
                     .find_persistent_image_by_source(
                         source, ipool
                     )

        next unless persistent

        spec_hash[:deviceChange] << {
            :operation => :remove,
            :device => disk
        }
    end

    return if spec_hash[:deviceChange].empty?

    begin
        vm.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
    rescue StandardError => e
        error = "Cannot detach all DISKs from VM: #{e.message}."

        if VCenterDriver::CONFIG[:debug_information]
            error += "\n\n#{e.backtrace}"
        end

        raise error
    end
end

def detach_disk(disk)
    return unless disk.exists?

    if snapshots? || one_snapshots?
        error_message = 'Existing system snapshots, cannot change disks'
        error_message << '. Please remove all snapshots and try again'
        raise error_message
    end

    spec_hash = {}
    spec_hash[:extraConfig] = [disk.config(:delete)]
    spec_hash[:deviceChange] = [{
        :operation => :remove,
        :device => disk.device
    }]

    begin
        @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
    rescue StandardError => e
        error = "Cannot detach DISK from VM: #{e.message}."
        error += "\nProbably an existing VM snapshot includes that disk"

        if VCenterDriver::CONFIG[:debug_information]
            error += "\n\n#{e.backtrace}"
        end

        raise error
    end
end

def destroy_disk(disk)
    one_vm = one_item

    # Check if we can detach and delete the non persistent disk:
    # - VM is terminated
    # - The disk is managed by OpenNebula
    detachable= !(one_vm['LCM_STATE'].to_i == 11 && !disk.managed?)
    detachable &&= disk.exists?

    return unless detachable

    detach_disk(disk)

    # Check if we want to keep the non persistent disk
    keep_non_persistent_disks =
        VCenterDriver::CONFIG[:keep_non_persistent_disks]

    return if keep_non_persistent_disks == true

    disk.destroy
    @disks.delete(disk.id.to_s)
end

# Get vcenter device representing DISK object (hotplug)
def disk_attached_to_vm(disk, unmanaged_keys, vc_disks)
    img_name = ''
    device_found = nil
    disk_id = disk['DISK_ID']
    unmanaged_key = unmanaged_keys["opennebula.disk.#{disk_id}"]

    img_name_escaped = VCenterDriver::FileHelper.get_img_name(
        disk,
        one_item['ID'],
        self['name'],
        instantiated_as_persistent?
    )

    img_name = VCenterDriver::FileHelper.unescape_path(img_name_escaped)

    vc_disks.each do |d|
        key_matches = unmanaged_key && d[:key] == unmanaged_key.to_i
        path_matches = (d[:path_wo_ds] == img_name)

        if key_matches || path_matches
            device_found = d
            break
        end
    end

    device_found
end

def get_key(type)
    @used_keys ||= []

    if type == 'CDROM'
        bound = 'cdrom?'
        key = 3000
    else
        bound = 'disk?'
        key = 2000
    end

    used = @used_keys
    @item.config.hardware.device.each do |dev|
        used << dev.key
        next unless send(bound, dev)

        key = dev.key
    end

    loop do
        break unless used.include?(key)

        key+=1
    end

    @used_keys << key

    key
end

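# Worked example (not part of the driver) of the key allocation above:
# starting from the base key (2000 for disks, 3000 for CDROMs), the
# loop walks past every key already present on the VM and returns the
# first free one. The keys below are made up.
used = [2000, 2001, 2003]
key = 2001                  # last disk key seen on the VM
key += 1 while used.include?(key)
key # => 2002
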
def calculate_add_disk_spec(disk, position = 0)
    img_name_escaped = VCenterDriver::FileHelper.get_img_name(
        disk,
        one_item['ID'],
        self['name'],
        instantiated_as_persistent?
    )

    img_name = VCenterDriver::FileHelper.unescape_path(img_name_escaped)

    type = disk['TYPE']
    size_kb = disk['SIZE'].to_i * 1024

    if type == 'CDROM'
        # CDROM drive will be found in the IMAGE DS
        ds_ref = disk['VCENTER_DS_REF']
        ds = VCenterDriver::Storage.new_from_ref(ds_ref,
                                                 @vi_client)
        ds_name = ds['name']

        # CDROM can only be added when the VM is in poweroff state
        vmdk_backing = RbVmomi::VIM::VirtualCdromIsoBackingInfo(
            :datastore => ds.item,
            :fileName => "[#{ds_name}] #{img_name}"
        )

        if @item['summary.runtime.powerState'] != 'poweredOff'
            raise 'The CDROM image can only be added as an IDE device '\
                  'when the VM is in the powered off state'
        end

        controller, unit_number = find_free_ide_controller(position)

        device = RbVmomi::VIM::VirtualCdrom(
            :backing => vmdk_backing,
            :key => get_key(type),
            :controllerKey => controller.key,
            :unitNumber => unit_number,

            :connectable => RbVmomi::VIM::VirtualDeviceConnectInfo(
                :startConnected => true,
                :connected => true,
                :allowGuestControl => true
            )
        )

        {
            :operation => :add,
            :device => device
        }

    else
        # TYPE is regular disk (not CDROM)
        # disk_adapter
        disk_adapter = disk['VCENTER_ADAPTER_TYPE']
        case disk_adapter
        when 'ide'
            controller, unit_number = find_free_ide_controller(position)
        else
            controller, unit_number = find_free_controller(position)
        end
        storpod = disk['VCENTER_DS_REF'].start_with?('group-')
        if storpod
            vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                :diskMode => 'persistent',
                :fileName => ''
            )
        else
            ds = get_effective_ds(disk)
            if ds.item._ref.start_with?('group-')
                ds_object = item.datastore.first
                ds_name = ds_object.name
            else
                ds_object = ds.item
                ds_name = ds['name']
            end
            vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                :datastore => ds_object,
                :diskMode => 'persistent',
                :fileName => "[#{ds_name}] #{img_name}"
            )
        end

        device = RbVmomi::VIM::VirtualDisk(
            :backing => vmdk_backing,
            :capacityInKB => size_kb,
            :controllerKey => controller.key,
            :key => get_key(type),
            :unitNumber => unit_number
        )

        config = {
            :operation => :add,
            :device => device
        }

        # For StorageDRS vCenter must create the file
        config[:fileOperation] = :create if storpod

        config
    end
end

# Remove the MAC addresses so they cannot be in conflict
# with OpenNebula assigned mac addresses.
# We detach all nics from the VM
def convert_to_template
    detach_all_nics

    # We attach new NICs where the MAC address is assigned by vCenter
    nic_specs = []
    one_nics = one_item.retrieve_xmlelements('TEMPLATE/NIC')
    one_nics.each do |nic|
        next unless nic['OPENNEBULA_MANAGED'] &&
                    nic['OPENNEBULA_MANAGED'].upcase == 'NO'

        nic_specs <<
            calculate_add_nic_spec_autogenerate_mac(
                nic
            )
    end

    # Reconfigure VM to add unmanaged nics
    spec_hash = {}
    spec_hash[:deviceChange] = nic_specs
    spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
    @item.ReconfigVM_Task(:spec => spec).wait_for_completion

    # Convert VM to template in vCenter
    mark_as_template

    # Edit the OpenNebula template
    one_client = OpenNebula::Client.new
    template_id = one_item['TEMPLATE/TEMPLATE_ID']
    new_template = OpenNebula::Template.new_with_id(template_id,
                                                    one_client)
    new_template.info

    # unlock VM Template
    new_template.unlock

    # Update the template reference
    new_template.update("VCENTER_TEMPLATE_REF=#{@item._ref}", true)

    # Add vCenter template name
    new_template.update("VCENTER_TEMPLATE_NAME=#{@item.name}", true)
end

def resize_unmanaged_disks
    spec = { :deviceChange => [] }
    disks_each(:one?) do |d|
        next unless !d.managed? && d.new_size

        spec[:deviceChange] << d.config(:resize)
    end

    return if spec[:deviceChange].empty?

    @item
        .ReconfigVM_Task(
            :spec => spec
        ).wait_for_completion
end

def resize_disk(disk)
    if !disk.exists?
        size = disk.size
        sync_disks
        disk = disk(disk.id)
        disk.change_size(size)
    end

    spec = { :deviceChange => [disk.config(:resize)] }

    @item.ReconfigVM_Task(:spec => spec).wait_for_completion
end

def snapshots?
    clear('rootSnapshot')
    self['rootSnapshot'] && !self['rootSnapshot'].empty?
end

def one_snapshots?
    begin
        !one_item['TEMPLATE/SNAPSHOT'].nil?
    rescue StandardError
        # one_item may not be retrieved if deploy_id hasn't been set
        false
    end
end

def instantiated_as_persistent?
    begin
        !one_item['TEMPLATE/CLONING_TEMPLATE_ID'].nil?
    rescue StandardError
        # one_item may not be retrieved if deploy_id hasn't been set
        false
    end
end

def use_linked_clone?
    one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES'] &&
        one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES']
        .upcase == 'YES'
end

def find_free_ide_controller(_position = 0)
    free_ide_controller = nil
    ide_schema = {}
    devices = @item.config.hardware.device

    devices.each do |dev|
        # Iteration to initialize IDE Controllers
        next unless dev.is_a? RbVmomi::VIM::VirtualIDEController

        if ide_schema[dev.key].nil?
            ide_schema[dev.key] = {}
        end
        ide_schema[dev.key][:device] = dev
        ide_schema[dev.key][:freeUnitNumber] = [0, 1]
    end

    # Iteration to match Disks and Cdroms with their controllers
    devices.each do |dev| # rubocop:disable Style/CombinableLoops
        first_condition = dev.is_a? RbVmomi::VIM::VirtualDisk
        second_condition = dev.is_a? RbVmomi::VIM::VirtualCdrom
        third_condition = ide_schema.key?(dev.controllerKey)

        next unless (first_condition || second_condition) &&
                    third_condition

        ide_schema[dev.controllerKey][:freeUnitNumber]
            .delete(
                dev.unitNumber
            )
    end

    ide_schema.keys.each do |controller|
        unless ide_schema[controller][:freeUnitNumber].empty?
            free_ide_controller = ide_schema[controller]
            break
        end
    end

    if !free_ide_controller
        raise 'There are no free IDE controllers ' +
              'to connect this CDROM device'
    end

    controller = free_ide_controller[:device]
    new_unit_number = free_ide_controller[:freeUnitNumber][0]

    [controller, new_unit_number]
end

def find_free_controller(position = 0)
    free_scsi_controllers = []
    scsi_schema = {}

    used_numbers = []
    available_numbers = []
    devices = @item.config.hardware.device

    devices.each do |dev|
        if dev.is_a? RbVmomi::VIM::VirtualSCSIController
            if scsi_schema[dev.key].nil?
                scsi_schema[dev.key] = {}
            end

            used_numbers << dev.scsiCtlrUnitNumber
            scsi_schema[dev.key][:device] = dev
        end

        next if dev.class != RbVmomi::VIM::VirtualDisk

        used_numbers << dev.unitNumber
    end

    15.times do |scsi_id|
        available_numbers <<
            scsi_id if used_numbers.grep(scsi_id).length <= 0
    end

    scsi_schema.keys.each do |controller|
        free_scsi_controllers <<
            scsi_schema[controller][:device].deviceInfo.label
    end

    if !free_scsi_controllers.empty?
        available_controller_label = free_scsi_controllers[0]
    else
        add_new_scsi(scsi_schema, devices)
        return find_free_controller
    end

    controller = nil

    devices.each do |device|
        if device.deviceInfo.label == available_controller_label
            controller = device
            break
        end
    end

    new_unit_number = available_numbers.sort[position]

    [controller, new_unit_number]
end

def add_new_scsi(scsi_schema, devices)
    controller = nil

    if scsi_schema.keys.length >= 4
        raise 'Cannot add a new controller, maximum is 4.'
    end

    scsi_key = 0
    scsi_number = 0

    if !scsi_schema.keys.empty? && scsi_schema.keys.length < 4
        scsi_key =
            scsi_schema.keys.max + 1
        scsi_number =
            scsi_schema[scsi_schema.keys.max][:device].busNumber + 1
    end

    controller_device = RbVmomi::VIM::VirtualLsiLogicController(
        :key => scsi_key,
        :busNumber => scsi_number,
        :sharedBus => :noSharing
    )

    device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
        :device => controller_device,
        :operation => :add
    )

    vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
        :deviceChange => [device_config_spec]
    )

    @item.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion

    devices.each do |device|
        valid_device =
            device.class == RbVmomi::VIM::VirtualLsiLogicController &&
            device.key == scsi_key
        controller = device.deviceInfo.label if valid_device
    end

    controller
end

# Create a snapshot for the VM
def create_snapshot(snap_id, snap_name)
    memory_dumps = true
    memory_dumps = CONFIG[:memory_dumps] \
        unless CONFIG[:memory_dumps].nil?

    snapshot_hash = {
        :name => snap_id,
        :description => "OpenNebula Snapshot: #{snap_name}",
        :memory => memory_dumps,
        :quiesce => true
    }

    begin
        @item.CreateSnapshot_Task(snapshot_hash).wait_for_completion
    rescue StandardError => e
        error = "Cannot create snapshot for VM: #{e.message}."

        if VCenterDriver::CONFIG[:debug_information]
            error += "\n\n#{e.backtrace.join("\n")}"
        end

        raise error
    end

    snap_id
end

# Revert to a VM snapshot
def revert_snapshot(snap_id)
    snapshot_list = self['snapshot.rootSnapshotList']
    snapshot = find_snapshot_in_list(snapshot_list, snap_id)

    return unless snapshot

    begin
        revert_snapshot_hash = { :_this => snapshot }
        snapshot
            .RevertToSnapshot_Task(
                revert_snapshot_hash
            ).wait_for_completion
    rescue StandardError => e
        error = "Cannot revert snapshot of VM: #{e.message}."

        if VCenterDriver::CONFIG[:debug_information]
            error += "\n\n#{e.backtrace.join("\n")}"
        end

        raise error
    end
end

# Delete VM snapshot
def delete_snapshot(snap_id)
    snapshot_list = self['snapshot.rootSnapshotList']
    snapshot = find_snapshot_in_list(snapshot_list, snap_id)

    return unless snapshot

    begin
        delete_snapshot_hash = {
            :_this => snapshot,
            :removeChildren => false
        }
        snapshot
            .RemoveSnapshot_Task(
                delete_snapshot_hash
            ).wait_for_completion
    rescue StandardError => e
        error = "Cannot delete snapshot of VM: #{e.message}."

        if VCenterDriver::CONFIG[:debug_information]
            error += "\n\n#{e.backtrace.join("\n")}"
        end

        raise error
    end
end

def find_snapshot_in_list(list, snap_id)
    list.each do |i|
        return i.snapshot if i.name == snap_id.to_s

        unless i.childSnapshotList.empty?
            snap = find_snapshot_in_list(i.childSnapshotList, snap_id)
            return snap if snap
        end
    end rescue nil

    nil
end

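# Minimal sketch (not part of the driver) of the recursive lookup above
# on a toy snapshot tree; Node stands in for the real tree items.
Node = Struct.new(:name, :snapshot, :childSnapshotList)

leaf = Node.new('2', :snap_two, [])
root = Node.new('1', :snap_one, [leaf])

find = lambda do |list, id|
    list.each do |i|
        return i.snapshot if i.name == id.to_s

        found = find.call(i.childSnapshotList, id)
        return found if found
    end
    nil
end
find.call([root], 2) # => :snap_two
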
def migrate(config = {})
|
3211
|
-
if config.empty?
|
3212
|
-
raise 'You need at least 1 parameter to perform a migration'
|
3213
|
-
end
|
3214
|
-
|
3215
|
-
begin
|
3216
|
-
# retrieve host from DRS
|
3217
|
-
one_cluster = config[:cluster]
|
3218
|
-
resourcepool = one_cluster.item.resourcePool
|
3219
|
-
datastore = config[:datastore]
|
3220
|
-
|
3221
|
-
if datastore
|
3222
|
-
relocate_spec_params = {
|
3223
|
-
:folder => @item.parent,
|
3224
|
-
:datastore => datastore
|
3225
|
-
}
|
3226
|
-
|
3227
|
-
unless config[:same_host]
|
3228
|
-
relocate_spec_params[:pool] = resourcepool
|
3229
|
-
end
|
3230
|
-
|
3231
|
-
if config[:esx_migration_list].is_a?(String)
|
3232
|
-
if config[:esx_migration_list]==''
|
3233
|
-
relocate_spec_params[:host] =
|
3234
|
-
config[:cluster].item.host.sample
|
3235
|
-
elsif config[:esx_migration_list]!='Selected_by_DRS'
|
3236
|
-
hostnames = config[:esx_migration_list].split(' ')
|
3237
|
-
hostname = hostnames.sample
|
3238
|
-
host_moref = one_cluster.hostname_to_moref(hostname)
|
3239
|
-
relocate_spec_params[:host] = host_moref
|
3240
|
-
end
|
3241
|
-
end
|
3242
|
-
|
3243
|
-
relocate_spec =
|
3244
|
-
RbVmomi::VIM
|
3245
|
-
.VirtualMachineRelocateSpec(
|
3246
|
-
relocate_spec_params
|
3247
|
-
)
|
3248
|
-
@item.RelocateVM_Task(
|
3249
|
-
:spec => relocate_spec,
|
3250
|
-
:priority => 'defaultPriority'
|
3251
|
-
).wait_for_completion
|
3252
|
-
else
|
3253
|
-
migrate_spec_params = {
|
3254
|
-
:priority => 'defaultPriority'
|
3255
|
-
}
|
3256
|
-
|
3257
|
-
unless config[:same_host]
|
3258
|
-
migrate_spec_params[:pool] = resourcepool
|
3259
|
-
end
|
3260
|
-
|
3261
|
-
@item.MigrateVM_Task(
|
3262
|
-
migrate_spec_params
|
3263
|
-
).wait_for_completion
|
3264
|
-
end
|
3265
|
-
rescue StandardError => e
|
3266
|
-
error = "Cannot migrate VM: #{e.message}."
|
3267
|
-
|
3268
|
-
if VCenterDriver::CONFIG[:debug_information]
|
3269
|
-
error += "\n\n#{e.backtrace.join("\n")}"
|
3270
|
-
end
|
3271
|
-
|
3272
|
-
raise error
|
3273
|
-
end
|
3274
|
-
end
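
The removed `migrate` chooses between a relocate task (when a destination datastore is given) and a plain migrate task, driven entirely by the `config` hash. A hedged usage sketch; `vc_cluster` and `ds_ref` are hypothetical stand-ins for objects built elsewhere in the driver:

```ruby
# Sketch: drive the removed #migrate with an explicit config. The keys match
# the ones the method reads; the values here are hypothetical.
config = {
    :cluster            => vc_cluster,        # VCenterDriver cluster wrapper
    :datastore          => ds_ref,            # presence triggers RelocateVM_Task
    :same_host          => false,             # also move to the cluster's pool
    :esx_migration_list => 'Selected_by_DRS'  # let DRS pick the ESX host
}
vc_vm.migrate(config)
```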
-
-        ########################################################################
-        # actions
-        ########################################################################
-
-        def shutdown
-            return if powered_off?
-
-            begin
-                if vm_tools?
-                    @item.ShutdownGuest
-                else
-                    poweroff_hard
-                end
-            rescue RbVmomi::Fault => e
-                error = e.message.split(':').first
-                raise e.message if error != 'InvalidPowerState'
-            end
-            timeout = CONFIG[:vm_poweron_wait_default]
-            wait_timeout(:powered_off?, timeout)
-        end
-
-        def destroy
-            @item.Destroy_Task.wait_for_completion
-        end
-
-        def mark_as_template
-            @item.MarkAsTemplate
-        end
-
-        def mark_as_virtual_machine
-            @item.MarkAsVirtualMachine(
-                :pool => cluster['resourcePool']
-            )
-        end
-
-        def reset
-            @item.ResetVM_Task.wait_for_completion
-        end
-
-        def suspend
-            @item.SuspendVM_Task.wait_for_completion
-        end
-
-        def reboot
-            @item.RebootGuest
-        end
-
-        def poweron(set_running = false)
-            begin
-                @item.PowerOnVM_Task.wait_for_completion
-            rescue RbVmomi::Fault => e
-                error = e.message.split(':').first
-                raise e.message if error != 'InvalidPowerState'
-            end
-            # opennebula.running flag
-            set_running(true, true) if set_running
-
-            timeout = CONFIG[:vm_poweron_wait_default]
-            wait_timeout(:powered_on?, timeout)
-        end
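
Both `poweron` and `shutdown` finish by polling the power state through `wait_timeout`, a helper defined elsewhere in the driver, using the `vm_poweron_wait_default` setting as the deadline. A hedged sketch of what such a poll loop usually looks like; the helper name and interval below are invented for illustration:

```ruby
# Hypothetical re-implementation of a wait_timeout-style poll: run the given
# block until it returns true or the deadline passes.
def wait_until(timeout_seconds, interval = 2)
    deadline = Time.now + timeout_seconds
    until yield
        raise 'Timeout expired' if Time.now > deadline

        sleep interval
    end
end

# e.g. wait_until(300) { vm.runtime.powerState == 'poweredOn' }
```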
-
-        def powered_on?
-            @item.runtime.powerState == 'poweredOn'
-        end
-
-        def powered_off?
-            @item.runtime.powerState == 'poweredOff'
-        end
-
-        def poweroff_hard
-            @item.PowerOffVM_Task.wait_for_completion
-        end
-
-        def remove_all_snapshots(consolidate = true)
-            @item
-                .RemoveAllSnapshots_Task(
-                    { :consolidate => consolidate }
-                ).wait_for_completion
-            info_disks
-        end
-
-        def vm_tools?
-            @item.guest.toolsRunningStatus == 'guestToolsRunning'
-        end
-
-        def set_running(state, execute = true)
-            value = state ? 'yes' : 'no'
-
-            config_array = [
-                { :key => 'opennebula.vm.running', :value => value }
-            ]
-
-            return config_array unless execute
-
-            spec = RbVmomi::VIM.VirtualMachineConfigSpec(
-                { :extraConfig => config_array }
-            )
-
-            @item.ReconfigVM_Task(:spec => spec).wait_for_completion
-        end
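
Note that `set_running` doubles as a builder: with `execute = false` it only returns the `extraConfig` pairs, so a caller can fold them into a larger reconfigure. A usage sketch, assuming the wrapper exposes its RbVmomi object as `item` (as `one_cluster.item` does elsewhere in this diff); the annotation field is a hypothetical extra change:

```ruby
# Sketch: collect the opennebula.vm.running pair without executing, then
# apply it together with another (hypothetical) change in one task.
extra = vc_vm.set_running(true, false)
spec = RbVmomi::VIM.VirtualMachineConfigSpec(
    :extraConfig => extra,
    :annotation  => 'managed by OpenNebula' # hypothetical extra field
)
vc_vm.item.ReconfigVM_Task(:spec => spec).wait_for_completion
```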
-
-        # STATIC MEMBERS, ROUTINES AND CONSTRUCTORS
-        ########################################################################
-
-        def self.get_vm(opts = {})
-            # try to retrieve machine from name
-            if opts[:name]
-                matches = opts[:name].match(/^one-(\d*)(-(.*))?$/)
-                if matches
-                    id = matches[1]
-                    one_vm = VCenterDriver::VIHelper.one_item(
-                        OpenNebula::VirtualMachine, id, false
-                    )
-                end
-            end
-
-            if one_vm.nil?
-                one_vm = VCenterDriver::VIHelper
-                         .find_by_ref(
-                             OpenNebula::VirtualMachinePool,
-                             'DEPLOY_ID',
-                             opts[:ref],
-                             opts[:vc_uuid],
-                             opts[:pool]
-                         )
-            end
-
-            one_vm
-        end
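
`get_vm` first tries the `one-<id>[-<name>]` deploy-name convention, then falls back to a `DEPLOY_ID` lookup in the pool. A quick illustration of the regex used above; the names are hypothetical examples:

```ruby
pattern = /^one-(\d*)(-(.*))?$/
'one-42-web-server'.match(pattern)[1] # => "42"
'one-7'.match(pattern)[1]             # => "7"
'imported-vm'.match(pattern)          # => nil, so the DEPLOY_ID path is used
```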
-
-        # Migrate a VM to another cluster and/or datastore
-        # @params [int] vm_id ID of the VM to be migrated
-        # @params [String] src_host Name of the source cluster
-        # @params [String] dst_host Name of the target cluster
-        # @params [Bool] hot_ds Whether this is a DS migration
-        #         with the VM running or not
-        # @params [int] ds Destination datastore ID
-        def self.migrate_routine(
-            vm_id,
-            src_host,
-            dst_host,
-            hot_ds = false,
-            ds = nil
-        )
-            one_client = OpenNebula::Client.new
-            pool = OpenNebula::HostPool.new(one_client)
-            pool.info
-
-            src_id = pool["/HOST_POOL/HOST[NAME='#{src_host}']/ID"].to_i
-
-            return if src_id == 0
-
-            dst_id = pool["/HOST_POOL/HOST[NAME='#{dst_host}']/ID"].to_i
-
-            # different destination ds
-            if ds
-                ds_pool = OpenNebula::DatastorePool.new(one_client)
-                ds_pool.info
-                vcenter_ds_ref =
-                    "/DATASTORE_POOL/DATASTORE[ID='#{ds}']" +
-                    '/TEMPLATE/VCENTER_DS_REF'
-                datastore = ds_pool[vcenter_ds_ref]
-            end
-
-            vi_client = VCenterDriver::VIClient.new_from_host(src_id)
-
-            # required one objects
-            vm = OpenNebula::VirtualMachine.new_with_id(vm_id, one_client)
-            dst_host = OpenNebula::Host.new_with_id(dst_id, one_client)
-
-            # get info
-            vm.info
-            dst_host.info
-
-            esx_migration_list = dst_host['/HOST/TEMPLATE/ESX_MIGRATION_LIST']
-
-            # required vcenter objects
-            vc_vm = VCenterDriver::VirtualMachine
-                    .new_without_id(
-                        vi_client,
-                        vm['/VM/DEPLOY_ID']
-                    )
-
-            vc_vm.vm_id = vm_id
-
-            ccr_ref = dst_host['/HOST/TEMPLATE/VCENTER_CCR_REF']
-            vc_host = VCenterDriver::ClusterComputeResource.new_from_ref(
-                ccr_ref, vi_client
-            )
-
-            config = { :cluster => vc_host }
-
-            config[:same_host] = src_id == dst_id
-
-            config[:datastore] = datastore if datastore
-            if hot_ds
-                config[:esx_migration_list] =
-                    esx_migration_list if esx_migration_list
-            else
-                config[:esx_migration_list] = 'Selected_by_DRS'
-            end
-
-            vc_vm.reference_all_disks
-            vc_vm.migrate(config)
-
-            vm.replace({ 'VCENTER_CCR_REF' => ccr_ref })
-        end
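
`migrate_routine` wires the OpenNebula side (host and datastore pools) to the vCenter side and then delegates to `#migrate` above. A hedged invocation sketch; all identifiers below are hypothetical:

```ruby
# Sketch: migrate OpenNebula VM 42 between two vCenter-backed hosts, moving
# its disks to datastore 110 with the VM not running (hot_ds = false).
VCenterDriver::VirtualMachine.migrate_routine(
    42,                  # vm_id
    'vcenter-cluster-a', # src_host (OpenNebula host name)
    'vcenter-cluster-b', # dst_host
    false,               # hot_ds
    110                  # ds: destination datastore ID (optional)
)
```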
-
-        # Try to build the vcenterdriver virtualmachine without
-        # any opennebula id or object; this constructor searches
-        # the opennebula pool until it finds a match
-        #
-        # @param vi_client [vi_client] the vcenterdriver client
-        #        that allows the connection
-        # @param ref [String] vcenter ref to the vm
-        # @param opts [Hash] hash that may contain multiple options:
-        #        :vc_uuid: give the vcenter uuid directly
-        #        :name: the vcenter vm name to extract the opennebula id from
-        #
-        # @return [vcenterdriver::vm] the virtual machine
-        def self.new_from_ref(vi_client, ref, name, opts = {})
-            unless opts[:vc_uuid]
-                opts[:vc_uuid] = vi_client.vim.serviceContent.about.instanceUuid
-            end
-
-            opts[:name] = name
-            opts[:ref] = ref
-
-            one_vm = VCenterDriver::VirtualMachine.get_vm(opts)
-
-            new_one(vi_client, ref, one_vm)
-        end
-
-        # build a vcenterdriver virtual machine from a template
-        # this function is used to instantiate vcenter vms
-        #
-        # @param vi_client [vi_client] the vcenterdriver
-        #        client that allows the connection
-        # @param drv_action [xmlelement] driver_action that contains the info
-        # @param id [int] the id of the opennebula virtual machine
-        #
-        # @return [vcenterdriver::vm] the virtual machine
-        def self.new_from_clone(vi_client, drv_action, id)
-            new(vi_client, nil, id).tap do |vm|
-                vm.clone_vm(drv_action)
-            end
-        end
-
-        # build a vcenterdriver virtual machine
-        # with the vmware item already linked
-        #
-        # @param vm_item the vmware VM item that is going to be associated
-        #
-        # @return [vcenterdriver::vm] the virtual machine
-        def self.new_with_item(vm_item)
-            new(nil, nil, -1).tap do |vm|
-                vm.item_update(vm_item)
-            end
-        end
-
-        # build a vcenterdriver virtual machine
-        # with the opennebula object linked
-        #
-        # @param vi_client [vi_client] the vcenterdriver
-        #        client that allows the connection
-        # @param ref [String] vcenter ref to the vm
-        # @param one_item [one::vm] xmlelement of opennebula
-        #
-        # @return [vcenterdriver::vm] the virtual machine
-        def self.new_one(vi_client, ref, one_item)
-            id = one_item['ID'] || one_item['VM/ID'] rescue -1
-
-            new(vi_client, ref, id).tap do |vm|
-                if one_item.instance_of?(OpenNebula::VirtualMachine)
-                    vm.one_item = one_item
-                end
-            end
-        end
-
-        # build a vcenterdriver virtual machine
-        # without opennebula object link, use id = -1 instead
-        #
-        # @param vi_client [vi_client] the vcenterdriver client
-        #        that allows the connection
-        # @param ref [String] vcenter ref to the vm
-        #
-        # @return [vcenterdriver::vm] the virtual machine
-        def self.new_without_id(vi_client, ref)
-            new(vi_client, ref, -1)
-        end
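
The removed class offers one constructor per amount of known context: a raw vCenter ref, a cloned template, an already-fetched vCenter item, or a linked OpenNebula object. A hedged sketch of the two most common choices; `vi_client` and `vm_ref` are hypothetical objects built elsewhere:

```ruby
# Sketch: wrap a vCenter VM when no OpenNebula object is known (id = -1).
vm = VCenterDriver::VirtualMachine.new_without_id(vi_client, vm_ref)

# When the vCenter display name follows the one-<id> convention, new_from_ref
# can also resolve the matching OpenNebula VM via get_vm.
vm = VCenterDriver::VirtualMachine.new_from_ref(
    vi_client, vm_ref, 'one-42-web-server'
)
```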
-
-        ########################################################################
-
-    end
-    # class VirtualMachine
-
-end
-# module VCenterDriver