opennebula 5.12.13 → 5.13.80.pre

Files changed (126)
  1. checksums.yaml +5 -5
  2. data/lib/ActionManager.rb +1 -1
  3. data/lib/CommandManager.rb +1 -1
  4. data/lib/DriverExecHelper.rb +44 -28
  5. data/lib/OpenNebulaDriver.rb +8 -4
  6. data/lib/VirtualMachineDriver.rb +9 -2
  7. data/lib/cloud/CloudClient.rb +3 -3
  8. data/lib/datacenter.rb +1258 -0
  9. data/lib/datastore.rb +1025 -0
  10. data/lib/distributed_firewall.rb +280 -0
  11. data/lib/file_helper.rb +370 -0
  12. data/lib/host.rb +1517 -0
  13. data/lib/logical_port.rb +50 -0
  14. data/lib/logical_switch.rb +77 -0
  15. data/lib/memoize.rb +74 -0
  16. data/lib/models/role.rb +39 -8
  17. data/lib/models/service.rb +92 -31
  18. data/lib/models.rb +5 -5
  19. data/lib/network.rb +635 -0
  20. data/lib/nsx_client.rb +144 -0
  21. data/lib/nsx_component.rb +28 -0
  22. data/lib/nsx_constants.rb +149 -0
  23. data/lib/nsx_driver.rb +78 -0
  24. data/lib/nsx_error.rb +77 -0
  25. data/lib/nsx_rule.rb +193 -0
  26. data/lib/nsxt_client.rb +176 -0
  27. data/lib/nsxt_dfw.rb +196 -0
  28. data/lib/nsxt_logical_port.rb +94 -0
  29. data/lib/nsxt_rule.rb +188 -0
  30. data/lib/nsxt_tz.rb +38 -0
  31. data/lib/nsxv_client.rb +176 -0
  32. data/lib/nsxv_dfw.rb +202 -0
  33. data/lib/nsxv_logical_port.rb +107 -0
  34. data/lib/nsxv_rule.rb +172 -0
  35. data/lib/nsxv_tz.rb +41 -0
  36. data/lib/opaque_network.rb +134 -0
  37. data/lib/opennebula/acl.rb +1 -1
  38. data/lib/opennebula/acl_pool.rb +1 -1
  39. data/lib/opennebula/client.rb +1 -1
  40. data/lib/opennebula/cluster.rb +1 -1
  41. data/lib/opennebula/cluster_pool.rb +1 -1
  42. data/lib/opennebula/datastore.rb +1 -1
  43. data/lib/opennebula/datastore_pool.rb +1 -1
  44. data/lib/opennebula/document.rb +8 -29
  45. data/lib/opennebula/document_json.rb +42 -12
  46. data/lib/opennebula/document_pool.rb +1 -1
  47. data/lib/opennebula/document_pool_json.rb +1 -1
  48. data/lib/opennebula/error.rb +4 -1
  49. data/lib/opennebula/flow/grammar.rb +1195 -0
  50. data/lib/{models → opennebula/flow}/service_pool.rb +26 -2
  51. data/lib/{models → opennebula/flow}/service_template.rb +86 -17
  52. data/lib/opennebula/flow/service_template_ext.rb +84 -0
  53. data/lib/{models → opennebula/flow}/service_template_pool.rb +1 -1
  54. data/lib/opennebula/flow/validator.rb +499 -0
  55. data/lib/opennebula/flow.rb +23 -0
  56. data/lib/opennebula/group.rb +1 -1
  57. data/lib/opennebula/group_pool.rb +1 -1
  58. data/lib/opennebula/hook.rb +5 -12
  59. data/lib/opennebula/hook_log.rb +1 -1
  60. data/lib/opennebula/hook_pool.rb +1 -1
  61. data/lib/opennebula/host.rb +1 -1
  62. data/lib/opennebula/host_pool.rb +1 -1
  63. data/lib/opennebula/image.rb +17 -14
  64. data/lib/opennebula/image_pool.rb +1 -1
  65. data/lib/opennebula/ldap_auth.rb +1 -1
  66. data/lib/opennebula/ldap_auth_spec.rb +1 -1
  67. data/lib/opennebula/lockable_ext.rb +163 -0
  68. data/lib/opennebula/marketplace.rb +1 -1
  69. data/lib/opennebula/marketplace_pool.rb +1 -1
  70. data/lib/opennebula/marketplaceapp.rb +9 -119
  71. data/lib/opennebula/marketplaceapp_ext.rb +522 -0
  72. data/lib/opennebula/marketplaceapp_pool.rb +1 -1
  73. data/lib/opennebula/oneflow_client.rb +4 -3
  74. data/lib/opennebula/pool.rb +4 -3
  75. data/lib/opennebula/pool_element.rb +1 -1
  76. data/lib/opennebula/security_group.rb +1 -1
  77. data/lib/opennebula/security_group_pool.rb +1 -1
  78. data/lib/opennebula/server_cipher_auth.rb +1 -1
  79. data/lib/opennebula/server_x509_auth.rb +1 -1
  80. data/lib/opennebula/ssh_auth.rb +1 -1
  81. data/lib/opennebula/system.rb +1 -1
  82. data/lib/opennebula/template.rb +4 -13
  83. data/lib/opennebula/template_ext.rb +325 -0
  84. data/lib/opennebula/template_pool.rb +1 -1
  85. data/lib/opennebula/user.rb +26 -2
  86. data/lib/opennebula/user_pool.rb +1 -1
  87. data/lib/opennebula/utils.rb +1 -1
  88. data/lib/opennebula/vdc.rb +1 -1
  89. data/lib/opennebula/vdc_pool.rb +1 -1
  90. data/lib/opennebula/virtual_machine.rb +25 -207
  91. data/lib/opennebula/virtual_machine_ext.rb +469 -0
  92. data/lib/opennebula/virtual_machine_pool.rb +1 -5
  93. data/lib/opennebula/virtual_network.rb +4 -10
  94. data/lib/opennebula/virtual_network_pool.rb +1 -1
  95. data/lib/opennebula/virtual_router.rb +4 -12
  96. data/lib/opennebula/virtual_router_pool.rb +1 -1
  97. data/lib/opennebula/vm_group.rb +4 -11
  98. data/lib/opennebula/vm_group_pool.rb +1 -1
  99. data/lib/opennebula/vntemplate.rb +4 -13
  100. data/lib/opennebula/vntemplate_pool.rb +1 -1
  101. data/lib/opennebula/wait_ext.rb +222 -0
  102. data/lib/opennebula/x509_auth.rb +1 -1
  103. data/lib/opennebula/xml_element.rb +1 -1
  104. data/lib/opennebula/xml_pool.rb +1 -1
  105. data/lib/opennebula/xml_utils.rb +1 -1
  106. data/lib/opennebula/zone.rb +1 -1
  107. data/lib/opennebula/zone_pool.rb +1 -1
  108. data/lib/opennebula.rb +5 -2
  109. data/lib/rest_client.rb +201 -0
  110. data/lib/scripts_common.rb +180 -0
  111. data/lib/transport_zone.rb +43 -0
  112. data/lib/vcenter_driver.rb +9 -22
  113. data/lib/vcenter_importer.rb +616 -0
  114. data/lib/vi_client.rb +281 -0
  115. data/lib/vi_helper.rb +312 -0
  116. data/lib/virtual_machine.rb +3477 -0
  117. data/lib/virtual_wire.rb +158 -0
  118. data/lib/vm_device.rb +80 -0
  119. data/lib/vm_disk.rb +202 -0
  120. data/lib/vm_folder.rb +69 -0
  121. data/lib/vm_helper.rb +30 -0
  122. data/lib/vm_monitor.rb +303 -0
  123. data/lib/vm_nic.rb +70 -0
  124. data/lib/vm_template.rb +1961 -0
  125. data/lib/vmm_importer.rb +121 -0
  126. metadata +101 -35
@@ -0,0 +1,3477 @@
+ # -------------------------------------------------------------------------- #
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems                 #
+ #                                                                             #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may     #
+ # not use this file except in compliance with the License. You may obtain     #
+ # a copy of the License at                                                    #
+ #                                                                             #
+ #     http://www.apache.org/licenses/LICENSE-2.0                              #
+ #                                                                             #
+ # Unless required by applicable law or agreed to in writing, software         #
+ # distributed under the License is distributed on an "AS IS" BASIS,           #
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.    #
+ # See the License for the specific language governing permissions and         #
+ # limitations under the License.                                              #
+ #--------------------------------------------------------------------------- #
+ module VCenterDriver
+
+     ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION)
+
+     if !ONE_LOCATION
+         unless defined?(RUBY_LIB_LOCATION)
+             RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
+         end
+         unless defined?(GEMS_LOCATION)
+             GEMS_LOCATION = '/usr/share/one/gems'
+         end
+     else
+         unless defined?(RUBY_LIB_LOCATION)
+             RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
+         end
+         unless defined?(GEMS_LOCATION)
+             GEMS_LOCATION = ONE_LOCATION + '/share/gems'
+         end
+     end
+
+     if File.directory?(GEMS_LOCATION)
+         real_gems_path = File.realpath(GEMS_LOCATION)
+         if !defined?(Gem) || Gem.path != [real_gems_path]
+             $LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
+             require 'rubygems'
+             Gem.use_paths(real_gems_path)
+         end
+     end
+
+     $LOAD_PATH << RUBY_LIB_LOCATION
+
+     require 'vm_device'
+     require 'vm_helper'
+     require 'vm_monitor'
+
+     ############################################################################
+     # Class VirtualMachine
+     ############################################################################
+     class VirtualMachine < VCenterDriver::Template
+
+         # Supported access to VirtualMachineDevice classes:
+         # Example:
+         #     Disk
+         #     VirtualMachineDevice::Disk
+         #     VCenterDriver::VirtualMachine::Disk
+         include VirtualMachineDevice
+         include VirtualMachineHelper
+         include VirtualMachineMonitor
+
+         ########################################################################
+         # Virtual Machine main Class
+         ########################################################################
+
+         VM_PREFIX_DEFAULT = 'one-$i-'
+
+         POLL_ATTRIBUTE =
+             OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
+         VM_STATE =
+             OpenNebula::VirtualMachine::Driver::VM_STATE
+
+         DNET_CARD =
+             RbVmomi::VIM::VirtualEthernetCardDistributedVirtualPortBackingInfo
+         NET_CARD =
+             RbVmomi::VIM::VirtualEthernetCardNetworkBackingInfo
+         OPAQUE_CARD =
+             RbVmomi::VIM::VirtualEthernetCardOpaqueNetworkBackingInfo
+
+         VM_SHUTDOWN_TIMEOUT = 600 # 10 minutes until hard poweroff
+
+         attr_accessor :item, :vm_id
+
+         attr_accessor :vm_info
+
+         include Memoize
+
+         def initialize(vi_client, ref, one_id)
+             if ref
+                 @item = RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref)
+                 check_item(@item, RbVmomi::VIM::VirtualMachine)
+             end
+
+             super(@item, vi_client)
+
+             @vi_client = vi_client
+             @vm_id     = one_id
+             @locking   = true
+             @vm_info   = nil
+             @disks     = {}
+             @nics      = { :macs => {} }
+         end
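+
+         # Illustrative usage sketch (not part of the original file): the
+         # driver object can wrap an existing vCenter VM (by its managed
+         # object reference) or a VM that only exists in OpenNebula so far.
+         # The client construction below is a hypothetical example:
+         #
+         #   vi_client = VCenterDriver::VIClient.new_from_host(host_id)
+         #   vm = VCenterDriver::VirtualMachine.new(vi_client, 'vm-1234', 42)
+         #   vm.one_exist? # => true, since an OpenNebula ID (42) was given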
+
+         ########################################################################
+         ########################################################################
+
+         # Attributes that must be defined when the VM does not exist in vCenter
+         attr_accessor :vi_client
+
+         # these have their own getter (if they aren't set, we can set them
+         # dynamically)
+         attr_writer :one_item
+         attr_writer :host
+         attr_writer :target_ds_ref
+
+         ########################################################################
+         ########################################################################
+
+         # The OpenNebula VM
+         # @return OpenNebula::VirtualMachine or XMLElement
+         def one_item
+             unless @one_item
+
+                 if @vm_id == -1
+                     raise 'VCenterDriver::VirtualMachine: '\
+                           'OpenNebula ID is mandatory for this VM!'
+                 end
+
+                 @one_item =
+                     VIHelper
+                     .one_item(
+                         OpenNebula::VirtualMachine,
+                         @vm_id
+                     )
+             end
+
+             @one_item
+         end
+
+         # Sets the vCenter item directly on the VM
+         def item_update(item)
+             @item = item
+         end
+
+         def disk_real_path(disk, disk_id)
+             volatile = disk['TYPE'] == 'fs'
+
+             if volatile
+                 dir = disk['VCENTER_DS_VOLATILE_DIR'] || 'one-volatile'
+                 img_path = "#{dir}/#{@vm_id}/one-#{@vm_id}-#{disk_id}.vmdk"
+             else
+                 source = disk['SOURCE'].gsub('%20', ' ')
+                 folder = File.dirname(source)
+                 ext    = File.extname(source)
+                 file   = File.basename(source, ext)
+
+                 img_path = "#{folder}/#{file}-#{@vm_id}-#{disk_id}#{ext}"
+             end
+
+             img_path
+         end
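+
+         # Worked example (values invented for illustration): for a
+         # non-volatile disk whose SOURCE is 'one_ds/disk.0.vmdk', a VM with
+         # ID 7 and DISK_ID 1, the path resolves to
+         # 'one_ds/disk.0-7-1.vmdk'; a volatile ('fs') disk would land under
+         # 'one-volatile/7/one-7-1.vmdk' instead.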
+
+         # The OpenNebula host
+         # @return OpenNebula::Host or XMLElement
+         def host
+             if @host.nil?
+                 if one_item.nil?
+                     raise "'one_item' must be previously set to be able to " <<
+                           'access the OpenNebula host.'
+                 end
+
+                 host_id = one_item['HISTORY_RECORDS/HISTORY[last()]/HID']
+                 raise 'No valid host_id found.' if host_id.nil?
+
+                 @host = VIHelper.one_item(OpenNebula::Host, host_id)
+             end
+
+             @host
+         end
+
+         # Target Datastore VMware reference getter
+         # @return
+         def target_ds_ref
+             if @target_ds_ref.nil?
+                 if one_item.nil?
+                     raise "'one_item' must be previously set to be able to " <<
+                           'access the target Datastore.'
+                 end
+
+                 target_ds_id = one_item['HISTORY_RECORDS/HISTORY[last()]/DS_ID']
+                 raise 'No valid target_ds_id found.' if target_ds_id.nil?
+
+                 target_ds =
+                     VCenterDriver::VIHelper
+                     .one_item(
+                         OpenNebula::Datastore,
+                         target_ds_id
+                     )
+
+                 @target_ds_ref = target_ds['TEMPLATE/VCENTER_DS_REF']
+             end
+
+             @target_ds_ref
+         end
+
+         # Get a recommendation from a provided storagepod
+         # Returns the recommended datastore reference
+         def recommended_ds(ds_ref)
+             # Fail if the datastore is not a storage pod
+             unless ds_ref.start_with?('group-')
+                 raise 'Cannot recommend from a non-StoragePod reference'
+             end
+
+             # Retrieve the information needed to create the storage_spec hash
+             storage_manager =
+                 vi_client
+                 .vim
+                 .serviceContent
+                 .storageResourceManager
+             vcenter_name = vc_name
+             vc_template =
+                 RbVmomi::VIM::VirtualMachine
+                 .new(
+                     vi_client.vim,
+                     get_template_ref
+                 )
+             dc = cluster.datacenter
+             vcenter_vm_folder_object = vcenter_folder(vcenter_folder_ref,
+                                                       vc_template, dc)
+             storpod = get_ds(ds_ref)
+             disk_move_type = calculate_disk_move_type(storpod, vc_template,
+                                                       linked_clones)
+             spec_hash = spec_hash_clone(disk_move_type)
+             clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash)
+
+             # Create the hash needed to get the recommendation
+             storage_spec = RbVmomi::VIM.StoragePlacementSpec(
+                 :type => 'clone',
+                 :cloneName => vcenter_name,
+                 :folder => vcenter_vm_folder_object,
+                 :podSelectionSpec =>
+                     RbVmomi::VIM
+                     .StorageDrsPodSelectionSpec(
+                         :storagePod => storpod
+                     ),
+                 :vm => vc_template,
+                 :cloneSpec => clone_spec
+             )
+
+             # Query a storage placement recommendation
+             result = storage_manager
+                      .RecommendDatastores(
+                          :storageSpec => storage_spec
+                      ) rescue nil
+             if result.nil?
+                 raise 'Could not get placement specification for StoragePod'
+             end
+
+             if !result.respond_to?(:recommendations) ||
+                result.recommendations.empty?
+                 raise 'Could not get placement specification for StoragePod'
+             end
+
+             # Return recommended DS reference
+             result.recommendations.first.action.first.destination._ref
+         end
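+
+         # Illustrative sketch (refs are hypothetical): given a Storage DRS
+         # pod such as 'group-p100', this returns a member datastore ref:
+         #
+         #   ds_ref = vm.recommended_ds('group-p100') # => e.g. 'datastore-55'
+         #
+         # Passing a plain datastore ref (e.g. 'datastore-55') raises, since
+         # only StoragePod ('group-...') references can be recommended from.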
+
+         # Cached cluster
+         # @return ClusterComputeResource
+         def cluster
+             if @cluster.nil?
+                 ccr_ref = host['TEMPLATE/VCENTER_CCR_REF']
+                 @cluster = ClusterComputeResource.new_from_ref(ccr_ref,
+                                                                vi_client)
+             end
+
+             @cluster
+         end
+
+         ########################################################################
+         ########################################################################
+
+         # @return Boolean whether the VM is new (does not exist in vCenter yet)
+         def new?
+             one_item['DEPLOY_ID'].empty?
+         end
+
+         def wild?
+             !(one_item['TEMPLATE/IMPORTED'] &&
+                 one_item['TEMPLATE/IMPORTED'] == 'YES').nil?
+         end
+
+         # @return Boolean whether the VM exists in OpenNebula
+         def one_exist?
+             !@vm_id.nil? && @vm_id != -1
+         end
+
+         # @return String the vm_id stored in vCenter
+         def get_vm_id(vm_pool = nil)
+             if defined?(@vm_id) && @vm_id
+                 return @vm_id
+             end
+
+             vm_ref = self['_ref']
+             return unless vm_ref
+
+             vc_uuid = vcenter_instance_uuid
+
+             one_vm =
+                 VCenterDriver::VIHelper
+                 .find_by_ref(
+                     OpenNebula::VirtualMachinePool,
+                     'DEPLOY_ID',
+                     vm_ref,
+                     vc_uuid,
+                     vm_pool
+                 )
+             return unless one_vm
+
+             @vm_id = one_vm['ID']
+             @vm_id
+         end
+
+         def vcenter_instance_uuid
+             @vi_client.vim.serviceContent.about.instanceUuid
+         end
+
+         def disk_keys
+             unmanaged_keys = {}
+             @item.config.extraConfig.each do |val|
+                 u = val[:key].include?('opennebula.disk')
+                 m = val[:key].include?('opennebula.mdisk')
+                 unmanaged_keys[val[:key]] = val[:value] if u || m
+             end
+
+             unmanaged_keys
+         end
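+
+         # Example of the mapping returned (device key values are made up):
+         # unmanaged disks are stored as 'opennebula.disk.<DISK_ID>' and
+         # managed ones as 'opennebula.mdisk.<DISK_ID>' in the VM's
+         # extraConfig, so the result looks like
+         #
+         #   { 'opennebula.disk.0' => '2000', 'opennebula.mdisk.1' => '2001' }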
+
+         ########################################################################
+         # Getters
+         ########################################################################
+
+         # @return RbVmomi::VIM::ResourcePool
+         def resource_pool
+             req_rp = one_item['VCENTER_RESOURCE_POOL'] ||
+                      one_item['USER_TEMPLATE/VCENTER_RESOURCE_POOL']
+
+             # Get ref for req_rp
+             rp_list = cluster.get_resource_pool_list
+             req_rp_ref = rp_list.select do |rp|
+                 rp[:name].downcase == req_rp.downcase
+             end.first[:ref] rescue nil
+
+             if vi_client.rp_confined?
+                 if req_rp_ref && req_rp_ref != vi_client.rp._ref
+                     raise 'Available resource pool '\
+                           "[#{vi_client.rp.name}] in host"\
+                           ' does not match requested resource pool'\
+                           " [#{req_rp}]"
+                 end
+
+                 vi_client.rp
+             else
+                 if req_rp_ref
+                     rps = cluster.resource_pools.select do |r|
+                         r._ref == req_rp_ref
+                     end
+
+                     if rps.empty?
+                         raise "No matching resource pool found (#{req_rp})."
+                     end
+
+                     rps.first
+                 else
+                     cluster['resourcePool']
+                 end
+             end
+         end
+
+         # @return RbVmomi::VIM::Datastore or nil
+         def get_ds(current_ds_ref = nil)
+             if !current_ds_ref
+                 current_ds_id =
+                     one_item[
+                         'HISTORY_RECORDS/HISTORY[last()]/DS_ID'
+                     ]
+                 current_ds = VCenterDriver::VIHelper.one_item(
+                     OpenNebula::Datastore, current_ds_id
+                 )
+                 current_ds_ref = current_ds['TEMPLATE/VCENTER_DS_REF']
+             end
+
+             if current_ds_ref
+                 dc = cluster.datacenter
+
+                 ds_folder = dc.datastore_folder
+                 ds = ds_folder.get(current_ds_ref)
+                 ds.item rescue nil
+
+             else
+                 nil
+             end
+         end
+
+         # StorageResourceManager reference
+         def storagemanager
+             self['_connection.serviceContent.storageResourceManager']
+         end
+
+         # @return Customization or nil
+         def customization_spec
+             xpath = 'USER_TEMPLATE/VCENTER_CUSTOMIZATION_SPEC'
+             customization_spec = one_item[xpath]
+
+             if customization_spec.nil?
+                 return
+             end
+
+             begin
+                 custom_spec = vi_client
+                               .vim
+                               .serviceContent
+                               .customizationSpecManager
+                               .GetCustomizationSpec(
+                                   :name => customization_spec
+                               )
+
+                 unless custom_spec && (spec = custom_spec.spec)
+                     raise 'Error getting customization spec'
+                 end
+
+                 spec
+             rescue StandardError
+                 raise "Customization spec '#{customization_spec}' not found"
+             end
+         end
+
+         # @return VCenterDriver::Datastore datastore
+         # where the disk will live under
+         def get_effective_ds(disk)
+             if disk['PERSISTENT'] == 'YES'
+                 ds_ref = disk['VCENTER_DS_REF']
+             else
+                 ds_ref = target_ds_ref
+
+                 if ds_ref.nil?
+                     raise 'target_ds_ref must be defined on this object.'
+                 end
+             end
+
+             VCenterDriver::Storage.new_from_ref(ds_ref, vi_client)
+         end
+
+         # @return String vcenter name
+         def vc_name
+             vm_prefix = host['TEMPLATE/VM_PREFIX']
+             vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty?
+
+             if !one_item['USER_TEMPLATE/VM_PREFIX'].nil?
+                 vm_prefix = one_item['USER_TEMPLATE/VM_PREFIX']
+             end
+             vm_prefix.gsub!('$i', one_item['ID'])
+
+             vm_suffix = ''
+             if !one_item['USER_TEMPLATE/VM_SUFFIX'].nil?
+                 vm_suffix = one_item['USER_TEMPLATE/VM_SUFFIX']
+             end
+             vm_suffix.gsub!('$i', one_item['ID'])
+
+             vm_prefix + one_item['NAME'] + vm_suffix
+         end
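+
+         # Worked example (values made up): with the default prefix
+         # 'one-$i-', no suffix, VM ID 42 and NAME 'web', the vCenter name
+         # becomes 'one-42-web'. A USER_TEMPLATE/VM_PREFIX of 'prod-' would
+         # yield 'prod-web' instead, since '$i' only expands where it appears
+         # in the prefix or suffix.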
+
+         # @return vCenter Tags
+         def vcenter_tags
+             one_item.info if one_item.instance_of?(OpenNebula::VirtualMachine)
+             one_item.retrieve_xmlelements('USER_TEMPLATE/VCENTER_TAG')
+         end
+
+         # @return Boolean whether the VM has vCenter tags
+         def vcenter_tags?
+             !vcenter_tags.empty?
+         end
+
+         # @return Boolean whether cpuHotAddEnabled is requested
+         def cpu_hot_add_enabled?
+             one_item.info if one_item.instance_of?(
+                 OpenNebula::VirtualMachine
+             )
+
+             if one_item['USER_TEMPLATE/HOT_RESIZE/CPU_HOT_ADD_ENABLED'].nil?
+                 return false
+             end
+
+             one_item[
+                 'USER_TEMPLATE/HOT_RESIZE/CPU_HOT_ADD_ENABLED'
+             ] == 'YES'
+         end
+
+         # @return Boolean whether memoryHotAddEnabled is requested
+         def memory_hot_add_enabled?
+             one_item.info if one_item.instance_of?(
+                 OpenNebula::VirtualMachine
+             )
+
+             if one_item['USER_TEMPLATE/HOT_RESIZE/MEMORY_HOT_ADD_ENABLED'].nil?
+                 return false
+             end
+
+             one_item[
+                 'USER_TEMPLATE/HOT_RESIZE/MEMORY_HOT_ADD_ENABLED'
+             ] == 'YES'
+         end
+
+         ########################################################################
+         # Create and reconfigure VM related methods
+         ########################################################################
+
+         # Returns the folder with the given name under folder_root,
+         # creating it in vCenter if it does not exist yet
+         def find_or_create_folder(folder_root, name)
+             folder_root.childEntity.each do |child|
+                 if child.instance_of?(RbVmomi::VIM::Folder) &&
+                    child.name == name
+                     return child
+                 end
+             end
+
+             folder_root.CreateFolder(:name => name)
+         end
+
+         # This function creates a new VM from the
+         # driver_action XML and returns the
+         # VMware ref
+         # @param drv_action XML representing the deploy action
+         # @return String vmware ref
+         def clone_vm(drv_action)
+             vcenter_name = vc_name
+
+             dc = cluster.datacenter
+
+             vcenter_vm_folder = drv_action['USER_TEMPLATE/VCENTER_VM_FOLDER']
+
+             if !vcenter_vm_folder.nil? && !vcenter_vm_folder.empty?
+                 vcenter_vm_folder =
+                     vcenter_folder_name(vcenter_vm_folder, drv_action)
+
+                 vcenter_vm_folder_object =
+                     dc.item.find_folder(vcenter_vm_folder)
+
+                 if vcenter_vm_folder_object.nil?
+                     begin
+                         vcenter_vm_folder_list = vcenter_vm_folder.split('/')
+                         folder_root = dc.item.vmFolder
+
+                         vcenter_vm_folder_list.each do |folder_name|
+                             folder_root = find_or_create_folder(
+                                 folder_root,
+                                 folder_name
+                             )
+                         end
+                     rescue StandardError => e
+                         error_message = e.message
+                         if VCenterDriver::CONFIG[:debug_information]
+                             error_message += " #{e.backtrace}"
+                         end
+
+                         raise 'Cannot create Folder in vCenter: '\
+                               "#{error_message}"
+                     end
+                 end
+             end
+
+             vc_template_ref = drv_action['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
+             vc_template = RbVmomi::VIM::VirtualMachine(@vi_client.vim,
+                                                        vc_template_ref)
+
+             ds = get_ds
+
+             asking_for_linked_clones =
+                 drv_action[
+                     'USER_TEMPLATE/VCENTER_LINKED_CLONES'
+                 ]
+             disk_move_type = calculate_disk_move_type(ds,
+                                                       vc_template,
+                                                       asking_for_linked_clones)
+
+             spec_hash = spec_hash_clone(disk_move_type)
+
+             clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(spec_hash)
+
+             vcenter_vm_folder_object =
+                 vcenter_folder(
+                     vcenter_vm_folder,
+                     vc_template,
+                     dc
+                 )
+
+             if ds.instance_of? RbVmomi::VIM::StoragePod
+                 # VM is cloned using Storage Resource Manager for StoragePods
+                 begin
+                     opts = {
+                         :vc_template => vc_template,
+                         :vcenter_name => vcenter_name,
+                         :clone_spec => clone_spec,
+                         :storpod => ds,
+                         :vcenter_vm_folder_object => vcenter_vm_folder_object,
+                         :dc => dc
+                     }
+
+                     vm = storagepod_clonevm_task(opts)
+                 rescue StandardError => e
+                     error =
+                         "Cannot clone VM Template to StoragePod: #{e.message}."
+
+                     if VCenterDriver::CONFIG[:debug_information]
+                         error += "\n\n#{e.backtrace}"
+                     end
+
+                     raise error
+                 end
+             else
+                 vm = nil
+                 begin
+                     vm = vc_template.CloneVM_Task(
+                         :folder => vcenter_vm_folder_object,
+                         :name => vcenter_name,
+                         :spec => clone_spec
+                     ).wait_for_completion
+                 rescue StandardError => e
+                     if !e.message.start_with?('DuplicateName')
+                         raise "Cannot clone VM Template: #{e.message}"
+                     end
+
+                     vm_folder = dc.vm_folder
+                     vm_folder.fetch!
+                     vm = vm_folder.items
+                                   .select {|_k, v| v.item.name == vcenter_name }
+                                   .values.first.item rescue nil
+
+                     raise "Cannot clone VM Template: #{e.message}" unless vm
+
+                     # Detach all persistent disks to
+                     # avoid accidental destruction
+                     detach_persistent_disks(vm)
+
+                     vm.Destroy_Task.wait_for_completion
+                     vm = vc_template.CloneVM_Task(
+                         :folder => vcenter_vm_folder_object,
+                         :name => vcenter_name,
+                         :spec => clone_spec
+                     ).wait_for_completion
+                 end
+             end
+
+             # @item is populated
+             @item = vm
+
+             reference_unmanaged_devices(vc_template_ref)
+
+             self['_ref']
+         end
+
+         # This function clones a VM Template to a StoragePod
+         # @param opts HASH with all the parameters needed to clone
+         # opts = {
+         #     :vc_template => vc_template,
+         #     :vcenter_name => vcenter_name,
+         #     :clone_spec => clone_spec,
+         #     :storpod => ds,
+         #     :vcenter_vm_folder_object => vcenter_vm_folder_object,
+         #     :dc => dc
+         # }
+         # @return vm (VirtualMachine)
+         def storagepod_clonevm_task(opts)
+             vc_template = opts[:vc_template]
+             vcenter_name = opts[:vcenter_name]
+             clone_spec = opts[:clone_spec]
+             storpod = opts[:storpod]
+             vcenter_vm_folder_object = opts[:vcenter_vm_folder_object]
+             dc = opts[:dc]
+
+             storage_manager =
+                 vc_template
+                 ._connection
+                 .serviceContent
+                 .storageResourceManager
+
+             storage_spec = RbVmomi::VIM.StoragePlacementSpec(
+                 :type => 'clone',
+                 :cloneName => vcenter_name,
+                 :folder => vcenter_vm_folder_object,
+                 :podSelectionSpec =>
+                     RbVmomi::VIM
+                     .StorageDrsPodSelectionSpec(
+                         :storagePod => storpod
+                     ),
+                 :vm => vc_template,
+                 :cloneSpec => clone_spec
+             )
+
+             # Query a storage placement recommendation
+             result = storage_manager
+                      .RecommendDatastores(
+                          :storageSpec => storage_spec
+                      ) rescue nil
+
+             if result.nil?
+                 raise 'Could not get placement specification for StoragePod'
+             end
+
+             if !result
+                .respond_to?(
+                    :recommendations
+                ) || result.recommendations.empty?
+                 raise 'Could not get placement specification for StoragePod'
+             end
+
+             # Get recommendation key to be applied
+             key = result.recommendations.first.key ||= ''
+             if key.empty?
+                 raise 'Missing Datastore recommendation for StoragePod'
+             end
+
+             begin
+                 apply_sr = storage_manager
+                            .ApplyStorageDrsRecommendation_Task(:key => [key])
+                            .wait_for_completion
+                 apply_sr.vm
+             rescue StandardError => e
+                 if !e.message.start_with?('DuplicateName')
+                     raise 'Cannot clone VM Template: '\
+                           "#{e.message}\n#{e.backtrace}"
+                 end
+
+                 # The VM already exists, try to find the VM
+                 vm_folder = dc.vm_folder
+                 vm_folder.fetch!
+                 vm = vm_folder.items
+                               .select {|_k, v| v.item.name == vcenter_name }
+                               .values.first.item rescue nil
+
+                 if vm
+
+                     begin
+                         # Detach all persistent disks to
+                         # avoid accidental destruction
+                         detach_persistent_disks(vm)
+
+                         # Destroy the VM with any disks still attached to it
+                         vm.Destroy_Task.wait_for_completion
+
+                         # Query a storage placement recommendation
+                         result =
+                             storage_manager
+                             .RecommendDatastores(
+                                 :storageSpec => storage_spec
+                             ) rescue nil
+
+                         if result.nil?
+                             raise 'Could not get placement specification '\
+                                   'for StoragePod'
+                         end
+
+                         if !result
+                            .respond_to?(
+                                :recommendations
+                            ) ||
+                            result
+                            .recommendations.empty?
+                             raise 'Could not get placement '\
+                                   'specification for StoragePod'
+                         end
+
+                         # Get recommendation key to be applied
+                         key = result.recommendations.first.key ||= ''
+                         if key.empty?
+                             raise 'Missing Datastore recommendation '\
+                                   'for StoragePod'
+                         end
+
+                         apply_sr =
+                             storage_manager
+                             .ApplyStorageDrsRecommendation_Task(
+                                 :key => [key]
+                             )
+                             .wait_for_completion
+                         apply_sr.vm
+                     rescue StandardError => e
+                         raise 'Failure applying recommendation while '\
+                               "cloning VM: #{e.message}"
+                     end
+                 end
+             end
+         end
+
+         # Calculates how to move disk backings from the
+         # vCenter VM Template moref
+         def calculate_disk_move_type(ds, vc_template, use_linked_clones)
+             # Default disk move type (Full Clone)
+             disk_move_type = :moveAllDiskBackingsAndDisallowSharing
+
+             if ds.instance_of?(RbVmomi::VIM::Datastore) &&
+                use_linked_clones &&
+                use_linked_clones.downcase == 'yes'
+
+                 # Check if all disks in the template have delta disks
+                 disks = vc_template.config
+                                    .hardware
+                                    .device
+                                    .grep(RbVmomi::VIM::VirtualDisk)
+
+                 disks_no_delta = disks.select do |d|
+                     d.backing.parent.nil?
+                 end
+
+                 # Can use linked clones if all disks have delta disks
+                 if disks_no_delta.empty?
+                     disk_move_type = :moveChildMostDiskBacking
+                 end
+             end
+
+             disk_move_type
+         end
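+
+         # Note (illustrative): a template prepared for linked clones carries
+         # a snapshot, so every VirtualDisk backing has a non-nil parent
+         # delta disk. Only then is the child-most backing kept:
+         #
+         #   calculate_disk_move_type(ds, vc_template, 'YES')
+         #   # => :moveChildMostDiskBacking when all disks have deltas,
+         #   # => :moveAllDiskBackingsAndDisallowSharing otherwise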
+
+         # @return String vcenter folder name
+         def vcenter_folder_name(vm_folder_name, drv_action)
+             uname = drv_action['UNAME']
+             gname = drv_action['GNAME']
+
+             vm_folder_name.gsub!('$uname', uname)
+             vm_folder_name.gsub!('$gname', gname)
+
+             vm_folder_name
+         end
+
+         # Get vcenter folder object from the reference
+         # If folder is not found, the folder of the
+         # vCenter VM Template is returned
+         def vcenter_folder(vcenter_vm_folder, vc_template, dc)
+             vcenter_vm_folder_object = nil
+
+             if !vcenter_vm_folder.nil? && !vcenter_vm_folder.empty?
+                 vcenter_vm_folder_object =
+                     dc
+                     .item
+                     .find_folder(
+                         vcenter_vm_folder
+                     )
+             end
+
+             if vcenter_vm_folder_object.nil?
+                 vcenter_vm_folder_object = vc_template.parent
+             end
+             vcenter_vm_folder_object
+         end
+
+         # @return clone parameters spec hash
+         def spec_hash_clone(disk_move_type)
+             # Relocate spec
+             relocate_spec_params = {}
+
+             relocate_spec_params[:pool] = resource_pool
+             relocate_spec_params[:diskMoveType] = disk_move_type
+
+             ds = get_ds
+
+             if ds.instance_of? RbVmomi::VIM::Datastore
+                 relocate_spec_params[:datastore] = ds
+             end
+
+             relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
+                 relocate_spec_params
+             )
+
+             # Running flag - prevents spurious poweroff states in the VM
+             running_flag = [{ :key => 'opennebula.vm.running', :value => 'no' }]
+
+             running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
+                 { :extraConfig => running_flag }
+             )
+
+             clone_parameters = {
+                 :location => relocate_spec,
+                 :powerOn => false,
+                 :template => false,
+                 :config => running_flag_spec
+             }
+
+             cs = customization_spec
+             clone_parameters[:customization] = cs if cs
+
+             clone_parameters
+         end
+
+         ########################################################################
+         # VirtualMachine Resource model methods
+         ########################################################################
+
+         #
+         # gets the representation of the nics
+         #
+         # @return [Hash(String => self.Nic)]
+         def nics
+             if !@nics[:macs].empty?
+                 return @nics.reject {|k| k == :macs }
+             end
+
+             info_nics
+         end
+
+         # gets the representation of the disks
+         #
+         # @return [Hash(String => self.Disk)]
+         def disks
+             return @disks unless @disks.empty?
+
+             info_disks
+         end
+
+         # iterate over the nics model
+         #
+         # @param condition[Symbol] selects nics that match a certain condition
+         #        see Self.Nic|Resource class for some methods: :exists?, :one?...
+         #
+         # @return yield the nic
+         def nics_each(condition)
+             res = []
+             nics.each do |_id, nic|
+                 next unless nic.method(condition).call
+
+                 yield nic if block_given?
+
+                 res << nic
+             end
+
+             res
+         end
+
+         # iterate over the disks model
+         #
+         # @param condition[Symbol] selects disks that match a certain condition
+         #        see Self.Disk|Resource class for some methods: :exists?, :one?...
+         #
+         # @return yield the disk
+         def disks_each(condition)
+             res = []
+             disks.each do |_id, disk|
+                 next unless disk.method(condition).call
+
+                 yield disk if block_given?
+
+                 res << disk
+             end
+
+             res
+         end
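+
+         # Illustrative iteration sketch: the condition is a predicate method
+         # name on the Nic/Disk model objects; matches are yielded and also
+         # returned (mirroring the :synced?/:unsynced? uses below):
+         #
+         #   vm.disks_each(:synced?) {|d| puts d.id }
+         #   unsynced = vm.disks_each(:unsynced?)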
+
+         def disks_synced?
+             disks_each(:unsynced?) { return false }
+
+             true
+         end
+
+         def template_ref_get
+             one_item['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
+         end
+
+         def vcenter_folder_ref
+             one_item['USER_TEMPLATE/VCENTER_VM_FOLDER']
+         end
+
+         # Queries OpenNebula for the machine's disk XML representation
+         def one_disks_list
+             one_item.info if one_item.instance_of?(OpenNebula::VirtualMachine)
+             one_item.retrieve_xmlelements('TEMPLATE/DISK')
+         end
+
+         # Queries OpenNebula for the machine's NIC XML representation
+         def one_nics_get
+             one_item.info if one_item.instance_of?(OpenNebula::VirtualMachine)
+             one_item.retrieve_xmlelements('TEMPLATE/NIC')
+         end
+
+         def linked_clones
+             one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES']
+         end
+
+         # perform a query to vCenter asking for the OpenNebula disk
+         #
+         # @param one_disk [XMLelement] The OpenNebula object
+         #        representation of the disk
+         # @param keys [Hash (String => String)] Hashmap with
+         #        the unmanaged keys
+         # @param vc_disks [Array (vcenter_disks)] Array of
+         #        the machine real disks
+         #        See vcenter_disks_get method
+         #
+         # @return [vCenter_disk] the proper disk
+         def query_disk(one_disk, keys, vc_disks)
+             index = one_disk['DISK_ID']
+             unmanaged = "opennebula.disk.#{index}"
+             managed = "opennebula.mdisk.#{index}"
+
+             if keys[managed]
+                 key = keys[managed].to_i
+             elsif keys[unmanaged]
+                 key = keys[unmanaged].to_i
+             end
+
+             if key
+                 query = vc_disks.select {|dev| key == dev[:key] }
+             else
+                 if snapshots?
+                     error = 'Disk metadata not present and snapshots exist. ' \
+                             'OpenNebula cannot manage this VM.'
+                     raise error
+                 end
+
+                 # Try to find the disk using the path known by OpenNebula
+                 source_path = one_disk['SOURCE']
+                 calculated_path = disk_real_path(one_disk, index)
+                 query = vc_disks.select do |dev|
+                     source_path == dev[:path_wo_ds] ||
+                         calculated_path == dev[:path_wo_ds]
+                 end
+             end
+
+             return if query.size != 1
+
+             query.first
+         end
+
+         # perform a query to vCenter asking for the OpenNebula nic
+         #
+         # @param mac [String] The MAC address of the nic
+         # @param vc_nics [Array (vcenter_nic)] Array of the machine real nics
+         #
+         # @return [vCenter_nic] the proper nic
+         def query_nic(mac, vc_nics)
+             nic = vc_nics.select {|dev| dev.macAddress == mac }.first
+
+             vc_nics.delete(nic) if nic
+         end
+
+         # Refresh VcenterDriver machine nics model, does not perform
+         # any sync operation!
+         #
+         # @return [Hash ("String" => self.Nic)] Model representation of nics
+         def info_nics
+             @nics = { :macs => {} }
+
+             vc_nics = vcenter_nics_list
+             one_nics = one_nics_get
+
+             one_nics.each do |one_nic|
+                 index = one_nic['NIC_ID']
+                 mac = one_nic['MAC']
+                 vc_dev = query_nic(mac, vc_nics)
+
+                 if vc_dev
+                     @nics[index] = Nic.new(index.to_i, one_nic, vc_dev)
+                     @nics[:macs][mac] = index
+                 else
+                     @nics[index] = Nic.one_nic(index.to_i, one_nic)
+                 end
+             end
+
+             vc_nics.each do |d|
+                 backing = d.backing
+
+                 case backing.class.to_s
+                 when NET_CARD.to_s
+                     key = backing.network._ref
+                 when DNET_CARD.to_s
+                     key = backing.port.portgroupKey
+                 when OPAQUE_CARD.to_s
+                     # Select only Opaque Networks
+                     opaque_networks = @item.network.select do |net|
+                         RbVmomi::VIM::OpaqueNetwork == net.class
+                     end
+                     opaque_network = opaque_networks.find do |opn|
+                         backing.opaqueNetworkId == opn.summary.opaqueNetworkId
+                     end
+                     key = opaque_network._ref
+                 else
+                     raise "Unsupported network card type: #{backing.class}"
+                 end
+
+                 @nics["#{key}#{d.key}"] = Nic.vc_nic(d)
+             end
+
+             @nics.reject {|k| k == :macs }
+         end
+
+         # Refresh VcenterDriver machine disks model, does not perform any
+         # sync operation!
+         #
+         # @return [Hash ("String" => self.Disk)] Model representation of disks
+         def info_disks
+             @disks = {}
+
+             keys = disk_keys
+             vc_disks = vcenter_disks_get
+             one_disks = one_disks_list
+
+             one_disks.each do |one_disk|
+                 index = one_disk['DISK_ID']
+
+                 disk = query_disk(one_disk, keys, vc_disks)
+
+                 vc_dev = vc_disks.delete(disk) if disk
+
+                 if vc_dev
+                     @disks[index] = Disk.new(index.to_i, one_disk, vc_dev)
+                 else
+                     @disks[index] = Disk.one_disk(index.to_i, one_disk)
+                 end
+             end
+
+             vc_disks.each {|d| @disks[d[:path_wo_ds]] = Disk.vc_disk(d) }
+
+             @disks
+         end
+
+         # Queries for a certain nic
+         #
+         # @param index [String | Integer] the id of the nic or the mac
+         # @param opts [Hash (symbol => boolean)]
+         #        :sync : allows you to ignore the locally cached model
+         def nic(index, opts = {})
+             index = index.to_s
+             is_mac = index.match(/^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$/)
+
+             if is_mac
+                 mac = index
+                 index = @nics[:macs][mac]
+             end
+
+             return @nics[index] if @nics[index] && opts[:sync].nil?
+
+             if is_mac
+                 one_nic =
+                     one_item
+                     .retrieve_xmlelements(
+                         "TEMPLATE/NIC[MAC='#{mac}']"
+                     ).first rescue nil
+                 index = one_nic['NIC_ID'] if one_nic
+             else
+                 one_nic =
+                     one_item
+                     .retrieve_xmlelements(
+                         "TEMPLATE/NIC[NIC_ID='#{index}']"
+                     ).first rescue nil
+                 mac = one_nic['MAC'] if one_nic
+             end
+
+             raise "nic #{index} not found" unless one_nic
+
+             vc_nics = vcenter_nics_list
+             vc_nic = query_nic(mac, vc_nics)
+
+             if vc_nic
+                 Nic.new(index.to_i, one_nic, vc_nic)
+             else
+                 Nic.one_nic(index.to_i, one_nic)
+             end
+         end
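+
+         # Illustrative lookups (the MAC value is made up): a NIC can be
+         # fetched by its OpenNebula NIC_ID or by its MAC address; :sync
+         # bypasses the cached model:
+         #
+         #   vm.nic(0)
+         #   vm.nic('02:00:0a:00:00:01', :sync => true)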
+
+         # Queries for a certain disk
+         #
+         # @param index [String | Integer] the id of the disk
+         # @param opts [Hash (symbol => boolean)]
+         #        :sync : allows you to ignore the locally cached model
+         def disk(index, opts = {})
+             index = index.to_s
+
+             return @disks[index] if @disks[index] && opts[:sync].nil?
+
+             one_disk =
+                 one_item
+                 .retrieve_xmlelements(
+                     "TEMPLATE/DISK[DISK_ID='#{index}']"
+                 ).first rescue nil
+
+             raise "disk #{index} not found" unless one_disk
+
+             keys = opts[:keys].nil? ? disk_keys : opts[:keys]
+             if opts[:disks].nil?
+                 vc_disks = vcenter_disks_get
+             else
+                 vc_disks = opts[:disks]
+             end
+             vc_disk = query_disk(one_disk, keys, vc_disks)
+
+             if vc_disk
+                 Disk.new(index.to_i, one_disk, vc_disk)
+             else
+                 Disk.one_disk(index.to_i, one_disk)
+             end
+         end
+
+         # Matches disks from the vCenter VM Template (or VM if it is coming
+         # from a Wild VM) with the disks represented in the OpenNebula VM
+         # data model (i.e. the XML)
+         def reference_unmanaged_devices(template_ref, execute = true)
+             device_change = []
+             spec = {}
+
+             # Get unmanaged disks in OpenNebula's VM template
+             xpath =
+                 'TEMPLATE/DISK[OPENNEBULA_MANAGED="NO" '\
+                 'or OPENNEBULA_MANAGED="no"]'
+             unmanaged_disks = one_item.retrieve_xmlelements(xpath)
+
+             managed = false
+             extraconfig = reference_disks(
+                 template_ref,
+                 unmanaged_disks,
+                 managed
+             )
+
+             # Add info for existing nics in template in vm xml
+             xpath =
+                 'TEMPLATE/NIC[OPENNEBULA_MANAGED="NO" '\
+                 'or OPENNEBULA_MANAGED="no"]'
+             unmanaged_nics = one_item.retrieve_xmlelements(xpath)
+
+             # Handle NIC changes (different model and/or set MAC address
+             # for unmanaged nics)
+             begin
+                 if !unmanaged_nics.empty?
+                     nics = vcenter_nics_list
+
+                     # iterate over the nics array and find the nic with the
+                     # given ref, or return nil if it does not exist
+                     select_net = lambda {|ref|
+                         device = nil
+                         nics.each do |nic|
+                             type = nic.backing.class.to_s
+
+                             case type
+                             when NET_CARD.to_s
+                                 nref = nic.backing.network._ref
+                             when DNET_CARD.to_s
+                                 nref = nic.backing.port.portgroupKey
+                             when OPAQUE_CARD.to_s
+                                 # Select only Opaque Networks
+                                 opaque_networks = @item.network.select do |net|
+                                     RbVmomi::VIM::OpaqueNetwork == net.class
+                                 end
+                                 opaque_network = opaque_networks.find do |opn|
+                                     nic.backing.opaqueNetworkId ==
+                                         opn.summary.opaqueNetworkId
+                                 end
+                                 nref = opaque_network._ref
+                             else
+                                 raise 'Unsupported network card type: '\
+                                       "#{nic.backing.class}"
+                             end
+
+                             next unless nref == ref
+
+                             device = nic
+                             break
+                         end
+
+                         if device
+                             nics.delete(device)
+                         else
+                             nil
+                         end
+                     }
+
+                     # Go over all unmanaged nics in order to sync
+                     # with the vCenter Virtual Machine
+                     unmanaged_nics.each do |unic|
+                         vnic = select_net.call(unic['VCENTER_NET_REF'])
+                         nic_class = vnic.class if vnic
+
+                         if unic['MODEL']
+                             new_model = Nic.nic_model_class(unic['MODEL'])
+                         end
+
+                         # if vnic is nil add a new device
+                         if vnic.nil?
+                             device_change << calculate_add_nic_spec(unic)
+                         # delete the actual nic and update the new one.
+                         elsif new_model && new_model != nic_class
+                             device_change << {
+                                 :device => vnic,
+                                 :operation => :remove
+                             }
+                             device_change << calculate_add_nic_spec(
+                                 unic,
+                                 vnic.unitNumber
+                             )
+                         else
+                             vnic.macAddress = unic['MAC']
+                             device_change << {
+                                 :device => vnic,
+                                 :operation => :edit
+                             }
+                         end
+                     end
+
+                 end
+             rescue StandardError => e
+                 raise 'There is a problem with your vm NICS, '\
+                       'make sure that they are working properly. '\
+                       "Error: #{e.message}"
+             end
+
+             # Save in extraconfig the key for unmanaged disks
+             if !extraconfig.empty? || !device_change.empty?
+                 spec[:extraConfig] = extraconfig unless extraconfig.empty?
+                 spec[:deviceChange] = device_change unless device_change.empty?
+
+                 return spec unless execute
+
+                 @item.ReconfigVM_Task(:spec => spec).wait_for_completion
+             end
+
+             {}
+         end
+
+         def reference_all_disks
+             # OpenNebula VM disks saved inside the .vmx file in vCenter
+             disks_extraconfig_current = {}
+             # iterate over all attributes and get the disk information
+             # keys for disks are prefixed with
+             # opennebula.disk and opennebula.mdisk
+             @item.config.extraConfig.each do |elem|
+                 disks_extraconfig_current[elem.key] =
+                     elem.value if elem.key.start_with?('opennebula.disk.')
+                 disks_extraconfig_current[elem.key] =
+                     elem.value if elem.key.start_with?('opennebula.mdisk.')
+             end
+
+             # disks that exist currently in the vCenter Virtual Machine
+             disks_vcenter_current = []
+             disks_each(:synced?) do |disk|
+                 begin
+                     if disk.managed?
+                         key_prefix = 'opennebula.mdisk.'
+                     else
+                         key_prefix = 'opennebula.disk.'
+                     end
+                     k = "#{key_prefix}#{disk.id}"
+                     v = disk.key.to_s
+
+                     disks_vcenter_current << { :key => k, :value => v }
+                 rescue StandardError => _e
+                     next
+                 end
+             end
+
+             update = false
+             # differences in the number of disks
+             # between vCenter and OpenNebula VMs
+             num_disks_difference =
+                 disks_extraconfig_current.keys.count -
+                 disks_vcenter_current.count
+
+             # check if disks are the same in vCenter and OpenNebula
+             disks_vcenter_current.each do |item|
+                 # if a vCenter disk has a representation in the extraConfig
+                 # but with a different key, then we have to update
+                 first_condition =
+                     disks_extraconfig_current.key? item[:key]
+                 second_condition =
+                     disks_extraconfig_current[item[:key]] == item[:value]
+                 if first_condition && !second_condition
+                     update = true
+                 end
+                 # if a vCenter disk does not have
+                 # a representation in the extraConfig,
+                 # then we have to update
+                 if !disks_extraconfig_current.key? item[:key]
+                     update = true
+                 end
+             end
+
+             # new configuration for the vCenter .vmx file
+             disks_extraconfig_new = {}
+
+             return unless num_disks_difference != 0 || update
+
+             # Step 1: remove disks in the current configuration of .vmx
+             # Avoids having an old disk in the configuration
+             # that does not really exist
+             disks_extraconfig_current.keys.each do |key|
+                 disks_extraconfig_new[key] = ''
+             end
+
+             # Step 2: add current vCenter disks to the new configuration
+             disks_vcenter_current.each do |item|
+                 disks_extraconfig_new[item[:key]] = item[:value]
+             end
+
+             # Step 3: create extraconfig_new with the values to update
+             extraconfig_new = []
+             disks_extraconfig_new.keys.each do |key|
+                 extraconfig_new <<
+                     {
+                         :key => key,
+                         :value => disks_extraconfig_new[key]
+                     }
+             end
+
+             # Step 4: update the extraConfig
+             spec_hash = { :extraConfig => extraconfig_new }
+             spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
+             @item.ReconfigVM_Task(:spec => spec).wait_for_completion
+         end
+
+         # Build the extraconfig section to reference disks
+         # by key and avoid problems with changing paths
+         # (mainly due to snapshots)
+         # Uses the VM Template if a ref is available, or the vCenter VM if
+         # not (the latter case is when we are dealing with a Wild VM)
+         def reference_disks(template_ref, disks, managed)
+             return [] if disks.empty? || instantiated_as_persistent?
+
+             extraconfig = []
+             if managed
+                 key_prefix = 'opennebula.mdisk'
+             else
+                 key_prefix = 'opennebula.disk'
+             end
+
+             # Get vcenter VM disks to know the real path of the cloned disk
+             vcenter_disks = vcenter_disks_get
+
+             # Create an array with the paths of the disks in the vcenter
+             # template
+             if !template_ref.nil?
+                 template = VCenterDriver::Template.new_from_ref(template_ref,
+                                                                 vi_client)
+                 template_disks = template.vcenter_disks_get
+             else
+                 # If we are dealing with a Wild VM, we simply use
+                 # what is available in the vCenter VM
+                 template_disks = vcenter_disks_get
+             end
+             template_disks_vector = []
+             template_disks.each do |d|
+                 template_disks_vector << d[:path_wo_ds]
+             end
+
+             # Try to find the index of disks in template disks
+             disks.each do |disk|
+                 disk_source =
+                     VCenterDriver::FileHelper
+                     .unescape_path(
+                         disk['SOURCE']
+                     )
+                 template_disk = template_disks.select do |d|
+                     d[:path_wo_ds] == disk_source
+                 end.first
+
+                 if template_disk
+                     vcenter_disk = vcenter_disks.select do |d|
+                         d[:key] == template_disk[:key]
+                     end.first
+                 end
+
+                 unless vcenter_disk
+                     raise "disk with path #{disk_source} "\
+                           'not found in the vCenter VM'
+                 end
+
+                 reference = {}
+                 reference[:key] = "#{key_prefix}.#{disk['DISK_ID']}"
+                 reference[:value] = (vcenter_disk[:key]).to_s
+                 extraconfig << reference
+             end
+
+             extraconfig
+         end
+
+         # create storagedrs disks
+         #
+         # @param device_change_spod [Array] add disk spec for every device
+         #
+         # @param device_change_spod_ids [Object] map from unit ctrl to
+         #        disk_id
+         #
+         # @return extra_config [Array] array with the extra config for vCenter
+         def create_storagedrs_disks(device_change_spod, device_change_spod_ids)
+             sm = storagemanager
+             disk_locator = []
+             extra_config = []
+
+             device_change_spod.each do |device_spec|
+                 disk_locator <<
+                     RbVmomi::VIM
+                     .PodDiskLocator(
+                         :diskId => device_spec[
+                             :device
+                         ].key
+                     )
+             end
+
+             spec = {}
+             spec[:deviceChange] = device_change_spod
+
+             # Disk locator is required for AddDisk
+             vmpod_hash = {}
+             vmpod_hash[:storagePod] = get_ds
+             vmpod_hash[:disk] = disk_locator
+             vmpod_config = RbVmomi::VIM::VmPodConfigForPlacement(vmpod_hash)
+
+             # The storage pod selection requires initialize
+             spod_hash = {}
+             spod_hash[:initialVmConfig] = [vmpod_config]
+             spod_select = RbVmomi::VIM::StorageDrsPodSelectionSpec(spod_hash)
+             storage_spec = RbVmomi::VIM.StoragePlacementSpec(
+                 :type => :reconfigure,
+                 :podSelectionSpec => spod_select,
+                 :vm => self['_ref'],
+                 :configSpec => spec
+             )
+
+             # Query a storage placement recommendation
+             result = sm
+                      .RecommendDatastores(
+                          :storageSpec => storage_spec
+                      ) rescue nil
+
+             if result.nil?
+                 raise 'Could not get placement specification for StoragePod'
+             end
+
+             if !result.respond_to?(:recommendations) ||
+                result.recommendations.empty?
+                 raise 'Could not get placement specification for StoragePod'
+             end
+
+             # Get recommendation key to be applied
+             key = result.recommendations.first.key ||= ''
+
+             if key.empty?
+                 raise 'Missing Datastore recommendation for StoragePod'
+             end
+
+             # Apply recommendation
+             sm.ApplyStorageDrsRecommendation_Task(
+                 :key => [key]
+             ).wait_for_completion
+
+             # Set references in opennebula.disk elements
+             device_change_spod.each do |device_spec|
+                 unit_number = device_spec[:device].unitNumber
+                 controller_key = device_spec[:device].controllerKey
+                 key = get_vcenter_disk_key(unit_number,
+                                            controller_key)
+                 disk_id =
+                     device_change_spod_ids[
+                         "#{controller_key}-#{unit_number}"
+                     ]
+                 reference = {}
+                 reference[:key] = "opennebula.disk.#{disk_id}"
+                 reference[:value] = key.to_s
+                 extra_config << reference
+             end
+
+             extra_config
+         end
+
+         # set the boot order of the machine
+         #
+         # @param boot_info [String] boot information stored in
+         #        the template of the virtual machine. Example: disk0, nic0
+         #
+         # @return [Array (vCenterbootClass)] An array with the vCenter classes
+         def boot_order_update(boot_info)
+             convert = lambda {|device_str|
+                 spl = device_str.scan(/^(nic|disk)(\d+$)/).flatten
+                 raise "#{device_str} is not supported" if spl.empty?
+
+                 device = nil
+                 sync = "sync_#{spl[0]}s"
+                 (0..1).each do |_i|
+                     device = send(spl[0], spl[1])
+                     break if device.exists?
+
+                     send(sync)
+                 end
+
+                 device.boot_dev
+             }
+
+             boot_order = boot_info.split(',').map {|str| convert.call(str) }
+
+             RbVmomi::VIM.VirtualMachineBootOptions({ :bootOrder => boot_order })
+         end
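+
+         # Illustrative call: 'disk0,nic0' resolves each token through the
+         # disk/nic getters above (syncing once if the device is missing)
+         # and builds the vSphere boot options:
+         #
+         #   opts = vm.boot_order_update('disk0,nic0')
+         #   # => RbVmomi::VIM::VirtualMachineBootOptions with the disk
+         #   #    first and the NIC second in :bootOrder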
+
+         # sync OpenNebula nic model with vCenter
+         #
+         # @param option [Symbol] if :all is provided
+         #        the method will try to sync
+         #        all the nics (detached and not existing ones),
+         #        otherwise it will only sync
+         #        the nics that do not exist yet
+         #
+         # @param execute [Boolean] indicates
+         #        if the reconfigure operation is going to
+         #        be executed
+         def sync_nics(option = :none, execute = true)
+             device_change = []
+
+             if option == :all
+                 dchange = []
+
+                 # detached? condition indicates that
+                 # the nic exists in OpenNebula but not
+                 # in vCenter
+                 nics_each(:detached?) do |nic|
+                     dchange << {
+                         :operation => :remove,
+                         :device => nic.vc_item
+                     }
+                 end
+                 if !dchange.empty?
+                     dspec_hash = { :deviceChange => dchange }
+                     dspec = RbVmomi::VIM.VirtualMachineConfigSpec(dspec_hash)
+                     @item.ReconfigVM_Task(:spec => dspec).wait_for_completion
+                 end
+             end
+
+             # no_exists? condition indicates that
+             # the nic does not exist in vCenter
+             nics_each(:no_exists?) do |nic|
+                 device_change << calculate_add_nic_spec(nic.one_item)
+             end
+
+             return device_change unless execute
+
+             spec_hash = { :deviceChange => device_change }
+
+             spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
+             @item.ReconfigVM_Task(:spec => spec).wait_for_completion
+
+             info_nics
+         end
+
+         # Get required parameters to use the VMware HTML Console SDK
+         # To be used with the following SDK:
+         # https://code.vmware.com/web/sdk/2.1.0/html-console
+         #
+         def html_console_parameters
+             ticket = @item.AcquireTicket(:ticketType => 'webmks')
+             { :ticket => ticket.ticket, :host => ticket.host,
+               :port => ticket.port }
+         end
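+
+         # Usage sketch (the endpoint shape is an assumption based on the
+         # WebMKS convention, not taken from this file):
+         #
+         #   p = vm.html_console_parameters
+         #   url = "wss://#{p[:host]}:#{p[:port]}/ticket/#{p[:ticket]}"
+         #   # hand the URL to the HTML console SDK in the browser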
+
+         # Synchronize the OpenNebula VM representation with the vCenter VM
+         #
+         # if the device exists in vCenter and not in OpenNebula : detach
+         # if the device exists in OpenNebula and not in vCenter : attach
+         # if the device exists in both : noop
+         #
+         def sync(deploy = {})
+             extraconfig = []
+             device_change = []
+
+             disks = sync_disks(:all, false)
+             resize_unmanaged_disks
+
+             if deploy[:boot] && !deploy[:boot].empty?
+                 boot_opts = boot_order_update(deploy[:boot])
+             end
+
+             # changes from sync_disks
+             device_change += disks[:deviceChange] if disks[:deviceChange]
+             extraconfig += disks[:extraConfig] if disks[:extraConfig]
+
+             # get token and context
+             extraconfig += extraconfig_context
+
+             # get file_ds
+             if (files = one_item['TEMPLATE/CONTEXT/FILES_DS'])
+                 file_id = 0
+                 files.split(' ').each do |file|
+                     extraconfig += extraconfig_file(file, file_id)
+                     file_id += 1
+                 end
+             end
+
+             # vnc configuration (for config_array hash)
+             extraconfig += extraconfig_vnc
+
+             # device_change hash (nics)
+             device_change += sync_nics(:all, false)
+
+             # Set CPU, memory and extraconfig
+             num_cpus = one_item['TEMPLATE/VCPU'] || 1
+             spec_hash = {
+                 :numCPUs => num_cpus.to_i,
+                 :memoryMB => one_item['TEMPLATE/MEMORY'],
+                 :extraConfig => extraconfig,
+                 :deviceChange => device_change
+             }
+             num_cores = one_item['TEMPLATE/TOPOLOGY/CORES'] || num_cpus.to_i
+             if num_cpus.to_i % num_cores.to_i != 0
+                 num_cores = num_cpus.to_i
+             end
+             spec_hash[:numCoresPerSocket] = num_cores.to_i
+
+             spec_hash[:bootOptions] = boot_opts if boot_opts
+
+             spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
+
+             @item.ReconfigVM_Task(:spec => spec).wait_for_completion
+             sync_extraconfig_disk(spec_hash)
+         end
+
+         def extraconfig_file(file, id)
+             path, name = file.split(':')
+             name = name.gsub('\'', '')
+             file_content = Base64.encode64(File.read(path))
+             file_content.prepend("#{name}\n")
+
+             [
+                 { :key => "guestinfo.opennebula.file.#{id}",
+                   :value => file_content }
+             ]
+         end
+
+         def extraconfig_context
+             context_text = "# Context variables generated by OpenNebula\n"
+             one_item.each('TEMPLATE/CONTEXT/*') do |context_element|
+                 # next if !context_element.text
+                 context_text += context_element.name + "='" +
+                                 context_element.text.gsub("'", "\'") + "'\n"
+             end
+
+             # token
+             token = File.read(File.join(VAR_LOCATION,
+                                         'vms',
+                                         one_item['ID'],
+                                         'token.txt')).chomp rescue nil
+
+             context_text += "ONEGATE_TOKEN='#{token}'\n" if token
+
+             # context_text
+             [
+                 { :key => 'guestinfo.opennebula.context',
+                   :value => Base64.encode64(context_text) }
+             ]
+         end
1782
+
1783
+ def extraconfig_vnc
1784
+ if one_item['TEMPLATE/GRAPHICS']
1785
+ vnc_port = one_item['TEMPLATE/GRAPHICS/PORT'] || ''
1786
+ vnc_listen = one_item['TEMPLATE/GRAPHICS/LISTEN'] || '0.0.0.0'
1787
+ vnc_keymap = one_item['TEMPLATE/GRAPHICS/KEYMAP']
1788
+
1789
+ conf =
1790
+ [
1791
+ {
1792
+ :key =>
1793
+ 'remotedisplay.vnc.enabled',
1794
+ :value =>
1795
+ 'TRUE'
1796
+ },
1797
+ {
1798
+ :key =>
1799
+ 'remotedisplay.vnc.port',
1800
+ :value =>
1801
+ vnc_port
1802
+ },
1803
+ {
1804
+ :key =>
1805
+ 'remotedisplay.vnc.ip',
1806
+ :value =>
1807
+ vnc_listen
1808
+ }
1809
+ ]
1810
+
1811
+ conf +=
1812
+ [
1813
+ {
1814
+ :key =>
1815
+ 'remotedisplay.vnc.keymap',
1816
+ :value =>
1817
+ vnc_keymap
1818
+ }
1819
+ ] if vnc_keymap
1820
+
1821
+ conf
1822
+ else
1823
+ []
1824
+ end
1825
+ end
1826
+
1827
+ # Regenerate context when devices are hot plugged (reconfigure)
1828
+ def regenerate_context
1829
+ spec_hash = { :extraConfig => extraconfig_context }
1830
+ spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
1831
+
1832
+ begin
1833
+ @item.ReconfigVM_Task(:spec => spec).wait_for_completion
1834
+ rescue StandardError => e
1835
+ error = "Cannot create snapshot for VM: #{e.message}."
1836
+
1837
+ if VCenterDriver::CONFIG[:debug_information]
1838
+ error += "\n\n#{e.backtrace}"
1839
+ end
1840
+
1841
+ raise error
1842
+ end
1843
+ end
1844
+
1845
+ # Returns an action hash to be included in :deviceChange
1846
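+ # @example Collecting the spec for a reconfigure (sketch; 'nic' is
+ # an OpenNebula NIC element with MAC/BRIDGE/VCENTER_NET_REF set)
+ #   device_change << vm.calculate_add_nic_spec(nic)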
+ def calculate_add_nic_spec(nic, unumber = nil)
1847
+ mac = nic['MAC']
1848
+ pg_name = nic['BRIDGE']
1849
+ default =
1850
+ VCenterDriver::VIHelper
1851
+ .get_default(
1852
+ 'VM/TEMPLATE/NIC/MODEL'
1853
+ )
1854
+ tmodel = one_item['USER_TEMPLATE/NIC_DEFAULT/MODEL']
1855
+
1856
+ # Get the model of the NIC: first try the model set
1857
+ # inside the NIC, then the model defined by the user and,
1858
+ # as a last option, the default model of the vCenter driver
1859
+ model = nic['MODEL'] || tmodel || default
1860
+ raise 'nic model cannot be empty!' if model == ''
1861
+
1862
+ vnet_ref = nic['VCENTER_NET_REF']
1863
+ backing = nil
1864
+
1865
+ # Maximum bitrate for the interface in kilobytes/second
1866
+ # for inbound traffic
1867
+ limit_in =
1868
+ nic['INBOUND_PEAK_BW'] ||
1869
+ VCenterDriver::VIHelper.get_default(
1870
+ 'VM/TEMPLATE/NIC/INBOUND_PEAK_BW'
1871
+ )
1872
+ # Maximum bitrate for the interface in kilobytes/second
1873
+ # for outbound traffic
1874
+ limit_out =
1875
+ nic['OUTBOUND_PEAK_BW'] ||
1876
+ VCenterDriver::VIHelper.get_default(
1877
+ 'VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW'
1878
+ )
1879
+ limit = nil
1880
+
1881
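+ # vCenter expects Mbit/s: take the stricter of both peaks and
+ # convert from KB/s (/1024 gives MB/s, *8 gives Mbit/s)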
+ if limit_in && limit_out
1882
+ limit=([limit_in.to_i, limit_out.to_i].min / 1024) * 8
1883
+ end
1884
+
1885
+ # Average bitrate for the interface in kilobytes/second
1886
+ # for inbound traffic
1887
+ rsrv_in =
1888
+ nic['INBOUND_AVG_BW'] ||
1889
+ VCenterDriver::VIHelper.get_default(
1890
+ 'VM/TEMPLATE/NIC/INBOUND_AVG_BW'
1891
+ )
1892
+ # Average bitrate for the interface in kilobytes/second
1893
+ # for outbound traffic
1894
+ rsrv_out =
1895
+ nic['OUTBOUND_AVG_BW'] ||
1896
+ VCenterDriver::VIHelper.get_default(
1897
+ 'VM/TEMPLATE/NIC/OUTBOUND_AVG_BW'
1898
+ )
1899
+ rsrv = nil
1900
+
1901
+ if rsrv_in || rsrv_out
1902
+ rsrv=([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
1903
+ end
1904
+
1905
+ # get the network with ref equal to vnet_ref or
1906
+ # with name equal to pg_name
1907
+ network = self['runtime.host'].network.select do |n|
1908
+ n._ref == vnet_ref || n.name == pg_name
1909
+ end
1910
+ network = network.first
1911
+
1912
+ unless network
1913
+ raise "#{pg_name} not found in #{self['runtime.host'].name}"
1914
+ end
1915
+
1916
+ # start at one, we want the next available id
1917
+ card_num = 1
1918
+ @item['config.hardware.device'].each do |dv|
1919
+ card_num += 1 if VCenterDriver::Network.nic?(dv)
1920
+ end
1921
+
1922
+ nic_card = Nic.nic_model_class(model)
1923
+
1924
+ if network.class == RbVmomi::VIM::Network
1925
+ backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
1926
+ :deviceName => pg_name,
1927
+ :network => network
1928
+ )
1929
+ elsif network.class == RbVmomi::VIM::DistributedVirtualPortgroup
1930
+ port = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
1931
+ :switchUuid =>
1932
+ network.config.distributedVirtualSwitch.uuid,
1933
+ :portgroupKey => network.key
1934
+ )
1935
+ backing =
1936
+ RbVmomi::VIM
1937
+ .VirtualEthernetCardDistributedVirtualPortBackingInfo(
1938
+ :port => port
1939
+ )
1940
+ elsif network.class == RbVmomi::VIM::OpaqueNetwork
1941
+ backing =
1942
+ RbVmomi::VIM
1943
+ .VirtualEthernetCardOpaqueNetworkBackingInfo(
1944
+ :opaqueNetworkId =>
1945
+ network.summary.opaqueNetworkId,
1946
+ :opaqueNetworkType =>
1947
+ 'nsx.LogicalSwitch'
1948
+ )
1949
+ else
1950
+ raise 'Unknown network class'
1951
+ end
1952
+
1953
+ # grab the last unitNumber to ensure the nic is added at the end
1954
+ if !unumber
1955
+ @unic = @unic || vcenter_nics_list.map do |d|
1956
+ d.unitNumber
1957
+ end.max || 0
1958
+ unumber = @unic += 1
1959
+ else
1960
+ @unic = unumber
1961
+ end
1962
+
1963
+ card_spec = {
1964
+ :key => 0,
1965
+ :deviceInfo => {
1966
+ :label => 'net' + card_num.to_s,
1967
+ :summary => pg_name
1968
+ },
1969
+ :backing => backing,
1970
+ :addressType => mac ? 'manual' : 'generated',
1971
+ :macAddress => mac,
1972
+ :unitNumber => unumber
1973
+ }
1974
+
1975
+ if (limit || rsrv) && (limit > 0)
1976
+ ra_spec = {}
1977
+ rsrv = limit if rsrv > limit
1978
+ # The bandwidth limit for the virtual network adapter. The
1979
+ # utilization of the virtual network adapter will not exceed
1980
+ # this limit, even if there are available resources. To clear
1981
+ # the value of this property and revert it to unset, set the
1982
+ # value to "-1" in an update operation. Units in Mbits/sec
1983
+ ra_spec[:limit] = limit if limit
1984
+ # Amount of network bandwidth that is guaranteed to the virtual
1985
+ # network adapter. If utilization is less than reservation, the
1986
+ # resource can be used by other virtual network adapters.
1987
+ # Reservation is not allowed to exceed the value of limit if
1988
+ # limit is set. Units in Mbits/sec
1989
+ ra_spec[:reservation] = rsrv if rsrv
1990
+ # Network share. The value is used as a relative weight in
1991
+ # competing for shared bandwidth, in case of resource contention
1992
+ ra_spec[:share] =
1993
+ RbVmomi::VIM.SharesInfo(
1994
+ {
1995
+ :level => RbVmomi::VIM.SharesLevel('normal'),
1996
+ :shares => 0
1997
+ }
1998
+ )
1999
+ card_spec[:resourceAllocation] =
2000
+ RbVmomi::VIM.VirtualEthernetCardResourceAllocation(
2001
+ ra_spec
2002
+ )
2003
+ end
2004
+
2005
+ {
2006
+ :operation => :add,
2007
+ :device => nic_card.new(card_spec)
2008
+ }
2009
+ end
2010
+
2011
+ # Returns an action hash to be included in :deviceChange
2012
+ def calculate_add_nic_spec_autogenerate_mac(nic)
2013
+ pg_name = nic['BRIDGE']
2014
+
2015
+ default =
2016
+ VCenterDriver::VIHelper.get_default(
2017
+ 'VM/TEMPLATE/NIC/MODEL'
2018
+ )
2019
+ tmodel = one_item['USER_TEMPLATE/NIC_DEFAULT/MODEL']
2020
+
2021
+ model = nic['MODEL'] || tmodel || default
2022
+
2023
+ vnet_ref = nic['VCENTER_NET_REF']
2024
+ backing = nil
2025
+
2026
+ # Maximum bitrate for the interface in kilobytes/second
2027
+ # for inbound traffic
2028
+ limit_in =
2029
+ nic['INBOUND_PEAK_BW'] ||
2030
+ VCenterDriver::VIHelper.get_default(
2031
+ 'VM/TEMPLATE/NIC/INBOUND_PEAK_BW'
2032
+ )
2033
+ # Maximum bitrate for the interface in kilobytes/second
2034
+ # for outbound traffic
2035
+ limit_out =
2036
+ nic['OUTBOUND_PEAK_BW'] ||
2037
+ VCenterDriver::VIHelper.get_default(
2038
+ 'VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW'
2039
+ )
2040
+ limit = nil
2041
+
2042
+ if limit_in && limit_out
2043
+ limit=([limit_in.to_i, limit_out.to_i].min / 1024) * 8
2044
+ end
2045
+
2046
+ # Average bitrate for the interface in kilobytes/second
2047
+ # for inbound traffic
2048
+ rsrv_in =
2049
+ nic['INBOUND_AVG_BW'] ||
2050
+ VCenterDriver::VIHelper.get_default(
2051
+ 'VM/TEMPLATE/NIC/INBOUND_AVG_BW'
2052
+ )
2053
+
2054
+ # Average bitrate for the interface in kilobytes/second
2055
+ # for outbound traffic
2056
+ rsrv_out =
2057
+ nic['OUTBOUND_AVG_BW'] ||
2058
+ VCenterDriver::VIHelper.get_default(
2059
+ 'VM/TEMPLATE/NIC/OUTBOUND_AVG_BW'
2060
+ )
2061
+
2062
+ rsrv = nil
2063
+
2064
+ if rsrv_in || rsrv_out
2065
+ rsrv=([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
2066
+ end
2067
+
2068
+ network = self['runtime.host'].network.select do |n|
2069
+ n._ref == vnet_ref || n.name == pg_name
2070
+ end
2071
+
2072
+ network = network.first
2073
+
2074
+ card_num = 1 # start at one, we want the next available id
2075
+
2076
+ @item['config.hardware.device'].each do |dv|
2077
+ card_num += 1 if VCenterDriver::Network.nic?(dv)
2078
+ end
2079
+
2080
+ nic_card = Nic.nic_model_class(model)
2081
+
2082
+ if network.class == RbVmomi::VIM::Network
2083
+ backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
2084
+ :deviceName => pg_name,
2085
+ :network => network
2086
+ )
2087
+ elsif network.class == RbVmomi::VIM::DistributedVirtualPortgroup
2088
+ port = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
2089
+ :switchUuid =>
2090
+ network.config.distributedVirtualSwitch.uuid,
2091
+ :portgroupKey => network.key
2092
+ )
2093
+ backing =
2094
+ RbVmomi::VIM
2095
+ .VirtualEthernetCardDistributedVirtualPortBackingInfo(
2096
+ :port => port
2097
+ )
2098
+ elsif network.class == RbVmomi::VIM::OpaqueNetwork
2099
+ backing =
2100
+ RbVmomi::VIM
2101
+ .VirtualEthernetCardOpaqueNetworkBackingInfo(
2102
+ :opaqueNetworkId => network.summary.opaqueNetworkId,
2103
+ :opaqueNetworkType => 'nsx.LogicalSwitch'
2104
+ )
2105
+ else
2106
+ raise 'Unknown network class'
2107
+ end
2108
+
2109
+ card_spec = {
2110
+ :key => 0,
2111
+ :deviceInfo => {
2112
+ :label => 'net' + card_num.to_s,
2113
+ :summary => pg_name
2114
+ },
2115
+ :backing => backing,
2116
+ :addressType => 'generated'
2117
+ }
2118
+
2119
+ if (limit || rsrv) && (limit > 0)
2120
+ ra_spec = {}
2121
+ rsrv = limit if rsrv > limit
2122
+ # The bandwidth limit for the virtual network adapter. The
2123
+ # utilization of the virtual network adapter will not exceed
2124
+ # this limit, even if there are available resources. To clear
2125
+ # the value of this property and revert it to unset, set the
2126
+ # value to "-1" in an update operation. Units in Mbits/sec
2127
+ ra_spec[:limit] = limit if limit
2128
+ # Amount of network bandwidth that is guaranteed to the virtual
2129
+ # network adapter. If utilization is less than reservation, the
2130
+ # resource can be used by other virtual network adapters.
2131
+ # Reservation is not allowed to exceed the value of limit if
2132
+ # limit is set. Units in Mbits/sec
2133
+ ra_spec[:reservation] = rsrv if rsrv
2134
+ # Network share. The value is used as a relative weight in
2135
+ # competing for shared bandwidth, in case of resource contention
2136
+ ra_spec[:share] =
2137
+ RbVmomi::VIM.SharesInfo(
2138
+ {
2139
+ :level =>
2140
+ RbVmomi::VIM.SharesLevel(
2141
+ 'normal'
2142
+ ),
2143
+ :shares => 0
2144
+ }
2145
+ )
2146
+ card_spec[:resourceAllocation] =
2147
+ RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
2148
+ end
2149
+
2150
+ {
2151
+ :operation => :add,
2152
+ :device => nic_card.new(card_spec)
2153
+ }
2154
+ end
2155
+
2156
+ # Add NIC to VM
2157
+ def attach_nic(one_nic)
2158
+ spec_hash = {}
2159
+
2160
+ begin
2161
+ # A new NIC requires a vcenter spec
2162
+ attach_nic_array = []
2163
+ attach_nic_array << calculate_add_nic_spec(one_nic)
2164
+ spec_hash[:deviceChange] =
2165
+ attach_nic_array unless attach_nic_array.empty?
2166
+
2167
+ # Reconfigure VM
2168
+ spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
2169
+
2170
+ @item.ReconfigVM_Task(:spec => spec).wait_for_completion
2171
+ rescue StandardError => e
2172
+ error = "Cannot attach NIC to VM: #{e.message}."
2173
+
2174
+ if VCenterDriver::CONFIG[:debug_information]
2175
+ error += "\n\n#{e.backtrace.join("\n")}"
2176
+ end
2177
+
2178
+ raise error
2179
+ end
2180
+ end
2181
+
2182
+ # Detach NIC from VM
2183
+ def detach_nic(mac)
2184
+ spec_hash = {}
2185
+
2186
+ nic = nic(mac) rescue nil
2187
+
2188
+ return if !nic || nic.no_exists?
2189
+
2190
+ # Remove NIC from VM in the ReconfigVM_Task
2191
+ spec_hash[:deviceChange] = [
2192
+ :operation => :remove,
2193
+ :device => nic.vc_item
2194
+ ]
2195
+ begin
2196
+ @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
2197
+ rescue StandardError => e
2198
+ error = "Cannot detach NIC from VM: #{e.message}."
2199
+
2200
+ if VCenterDriver::CONFIG[:debug_information]
2201
+ error += "\n\n#{e.backtrace.join("\n")}"
2202
+ end
2203
+
2204
+ raise error
2205
+ end
2206
+ end
2207
+
2208
+ # Detach all NICs; useful when removing port groups and switches so they are not left in use
2209
+ def detach_all_nics
2210
+ spec_hash = {}
2211
+ device_change = []
2212
+
2213
+ nics_each(:exists?) do |nic|
2214
+ device_change << {
2215
+ :operation => :remove,
2216
+ :device => nic.vc_item
2217
+ }
2218
+ end
2219
+
2220
+ return if device_change.empty?
2221
+
2222
+ # Remove NIC from VM in the ReconfigVM_Task
2223
+ spec_hash[:deviceChange] = device_change
2224
+
2225
+ begin
2226
+ @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
2227
+ rescue StandardError => e
2228
+ error = "Cannot detach all NICs from VM: #{e.message}."
2229
+
2230
+ if VCenterDriver::CONFIG[:debug_information]
2231
+ error += "\n\n#{e.backtrace}"
2232
+ end
2233
+
2234
+ raise error
2235
+ end
2236
+ end
2237
+
2238
+ # try to get specs for newly attached disks
2239
+ # using the disks_each method with the :no_exists? condition
2240
+ def attach_disks_specs
2241
+ attach_disk_array = []
2242
+ extraconfig = []
2243
+ attach_spod_array = []
2244
+ attach_spod_disk_info = {}
2245
+
2246
+ pos = { :ide => 0, :scsi => 0 }
2247
+ disks_each(:no_exists?) do |disk|
2248
+ disk.one_item['TYPE'] == 'CDROM' ? k = :ide : k = :scsi
2249
+
2250
+ if disk.storpod?
2251
+ spec = calculate_add_disk_spec(disk.one_item, pos[k])
2252
+ attach_spod_array << spec
2253
+
2254
+ controller_key = spec[:device].controllerKey
2255
+ unit_number = spec[:device].unitNumber
2256
+
2257
+ unit_ctrl = "#{controller_key}-#{unit_number}"
2258
+ attach_spod_disk_info[unit_ctrl] = disk.id
2259
+ else
2260
+ aspec = calculate_add_disk_spec(disk.one_item, pos[k])
2261
+ extra_key = "opennebula.mdisk.#{disk.one_item['DISK_ID']}"
2262
+ extra_value = aspec[:device].key.to_s
2263
+
2264
+ attach_disk_array << aspec
2265
+ extraconfig << { :key => extra_key, :value => extra_value }
2266
+ end
2267
+
2268
+ pos[k]+=1
2269
+ end
2270
+
2271
+ { :disks => attach_disk_array,
2272
+ :spods => attach_spod_array,
2273
+ :spod_info => attach_spod_disk_info,
2274
+ :extraconfig => extraconfig }
2275
+ end
2276
+
2277
+ # try to get specs for detached disks
2278
+ # using the disks_each method with the :detached? condition
2279
+ def detach_disks_specs
2280
+ detach_disk_array = []
2281
+ extra_config = []
2282
+ keys = disk_keys.invert
2283
+ ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
2284
+ disks_each(:detached?) do |d|
2285
+ key = d.key.to_s
2286
+ source = VCenterDriver::FileHelper.escape_path(d.path)
2287
+ persistent =
2288
+ VCenterDriver::VIHelper
2289
+ .find_persistent_image_by_source(
2290
+ source, ipool
2291
+ )
2292
+
2293
+ op = { :operation => :remove, :device => d.device }
2294
+ if !persistent && d.type != 'CDROM'
2295
+ op[:fileOperation] = :destroy
2296
+ end
2297
+ detach_disk_array << op
2298
+
2299
+ # Remove the opennebula.disk reference from the vmx and cache if it exists
2300
+ extra_config << d.config(:delete) if keys[key]
2301
+ end
2302
+
2303
+ [detach_disk_array, extra_config]
2304
+ end
2305
+
2306
+ def different_key?(change_disk, vc_disk)
2307
+ change_disk[:device].controllerKey == vc_disk.controllerKey &&
2308
+ change_disk[:device].unitNumber == vc_disk.unitNumber &&
2309
+ change_disk[:device].key != vc_disk.key
2310
+ end
2311
+
2312
+ def sync_extraconfig_disk(spec_hash)
2313
+ return if spec_hash[:deviceChange].empty?
2314
+
2315
+ extraconfig_new = []
2316
+ # vCenter mob disks
2317
+ vc_disks = @item['config.hardware.device'].select do |vc_device|
2318
+ disk?(vc_device)
2319
+ end
2320
+ return unless vc_disks
2321
+
2322
+ # For each changed disk, compare with vcenter mob disk
2323
+ spec_hash[:deviceChange].each_with_index do |_device, index|
2324
+ change_disk = spec_hash[:deviceChange][index]
2325
+ vc_disks.each do |vc_disk|
2326
+ next unless different_key?(change_disk, vc_disk)
2327
+
2328
+ extraconfig_new <<
2329
+ {
2330
+ :key =>
2331
+ spec_hash[:extraConfig][index][:key],
2332
+ :value =>
2333
+ vc_disk.key.to_s
2334
+ }
2335
+ end
2336
+ end
2337
+
2338
+ return if extraconfig_new.empty?
2339
+
2340
+ spec_hash = {
2341
+ :extraConfig => extraconfig_new
2342
+ }
2343
+ spec =
2344
+ RbVmomi::VIM
2345
+ .VirtualMachineConfigSpec(
2346
+ spec_hash
2347
+ )
2348
+ @item.ReconfigVM_Task(
2349
+ :spec => spec
2350
+ ).wait_for_completion
2351
+ end
2352
+
2353
+ # sync OpenNebula disk model with vCenter
2354
+ #
2355
+ # @param option [symbol] if :all is provided the
2356
+ # method will try to sync all the disks
2357
+ # (both detached and not yet existing ones),
2358
+ # otherwise it will only sync
2359
+ # the disks that do not exist yet
2360
+ #
2361
+ # @param execute [boolean] indicates if the reconfigure operation
2362
+ # is going to
2363
+ # be executed
2364
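+ # @example Collect the specs without reconfiguring the VM (sketch)
+ #   spec_hash = vm.sync_disks(:all, false)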
+ def sync_disks(option = :nil, execute = true)
2365
+ info_disks
2366
+
2367
+ spec_hash = {}
2368
+
2369
+ if option == :all
2370
+ detach_op = {}
2371
+ detach_op[:deviceChange], detach_op[:extraConfig] =
2372
+ detach_disks_specs
2373
+ perform =
2374
+ !detach_op[:deviceChange].empty? ||
2375
+ !detach_op[:extraConfig].empty?
2376
+ @item
2377
+ .ReconfigVM_Task(
2378
+ :spec => detach_op
2379
+ ).wait_for_completion if perform
2380
+ end
2381
+
2382
+ a_specs = attach_disks_specs
2383
+
2384
+ if !a_specs[:spods].empty?
2385
+ spec_hash[:extraConfig] =
2386
+ create_storagedrs_disks(a_specs[:spods],
2387
+ a_specs[:spod_info])
2388
+ end
2389
+
2390
+ if !a_specs[:disks].empty?
2391
+ spec_hash[:deviceChange] = a_specs[:disks]
2392
+ spec_hash[:extraConfig] = a_specs[:extraconfig]
2393
+ end
2394
+
2395
+ return spec_hash unless execute
2396
+
2397
+ spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
2398
+ @item.ReconfigVM_Task(:spec => spec).wait_for_completion
2399
+ info_disks
2400
+ end
2401
+
2402
+ # Attach DISK to VM (hotplug)
2403
+ def attach_disk(disk)
2404
+ spec_hash = {}
2405
+ device_change = []
2406
+
2407
+ # Extract unmanaged_keys
2408
+ unmanaged_keys = disk_keys
2409
+ vc_disks = vcenter_disks_get
2410
+
2411
+ # Check if we're dealing with a StoragePod SYSTEM ds
2412
+ storpod = disk['VCENTER_DS_REF'].start_with?('group-')
2413
+
2414
+ # Check if disk being attached is already connected to the VM
2415
+ raise 'DISK is already connected to VM' if disk_attached_to_vm(
2416
+ disk, unmanaged_keys, vc_disks
2417
+ )
2418
+
2419
+ # Generate vCenter spec and reconfigure VM
2420
+ add_spec = calculate_add_disk_spec(disk)
2421
+ device_change << add_spec
2422
+ raise 'Could not generate DISK spec' if device_change.empty?
2423
+
2424
+ extra_key = "opennebula.mdisk.#{disk['DISK_ID']}"
2425
+ extra_value = add_spec[:device].key.to_s
2426
+
2427
+ spec_hash[:deviceChange] = device_change
2428
+ spec_hash[:extraConfig] =
2429
+ [{ :key => extra_key, :value => extra_value }]
2430
+ spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
2431
+
2432
+ begin
2433
+ if storpod
2434
+ # Ask for StorageDRS recommendation
2435
+ # to reconfigure VM (AddDisk)
2436
+ sm = storagemanager
2437
+
2438
+ # Disk id is -1 because the disk id that
2439
+ # will be set is not known yet
2440
+ disk_locator = [RbVmomi::VIM.PodDiskLocator(:diskId => -1)]
2441
+
2442
+ # Disk locator is required for AddDisk
2443
+ vmpod_hash = {}
2444
+ vmpod_hash[:storagePod] = get_ds
2445
+ vmpod_hash[:disk] = disk_locator
2446
+ vmpod_config =
2447
+ RbVmomi::VIM::VmPodConfigForPlacement(
2448
+ vmpod_hash
2449
+ )
2450
+
2451
+ # The storage pod selection requires an initial VM config
2452
+ spod_hash = {}
2453
+ spod_hash[:initialVmConfig] = [vmpod_config]
2454
+ spod_select =
2455
+ RbVmomi::VIM::StorageDrsPodSelectionSpec(
2456
+ spod_hash
2457
+ )
2458
+ storage_spec = RbVmomi::VIM.StoragePlacementSpec(
2459
+ :type => :reconfigure,
2460
+ :podSelectionSpec => spod_select,
2461
+ :vm => self['_ref'],
2462
+ :configSpec => spec
2463
+ )
2464
+
2465
+ # Query a storage placement recommendation
2466
+ result = sm
2467
+ .RecommendDatastores(
2468
+ :storageSpec => storage_spec
2469
+ ) rescue nil
2470
+
2471
+ if result.nil?
2472
+ raise 'Could not get placement '\
2473
+ 'specification for StoragePod'
2474
+ end
2475
+
2476
+ if !result.respond_to?(:recommendations) ||
2477
+ result.recommendations.empty?
2478
+ raise 'Could not get placement '\
2479
+ 'specification for StoragePod'
2480
+ end
2481
+
2482
+ # Get recommendation key to be applied
2483
+ key = result.recommendations.first.key ||= ''
2484
+
2485
+ if key.empty?
2486
+ raise 'Missing Datastore recommendation for StoragePod'
2487
+ end
2488
+
2489
+ # Apply recommendation
2490
+ sm.ApplyStorageDrsRecommendation_Task(
2491
+ :key => [key]
2492
+ ).wait_for_completion
2493
+
2494
+ # Add the key for the volatile disk to the
2495
+ # unmanaged opennebula.disk.id variables
2496
+ unit_number =
2497
+ spec_hash[:deviceChange][0][:device]
2498
+ .unitNumber
2499
+ controller_key =
2500
+ spec_hash[:deviceChange][0][:device]
2501
+ .controllerKey
2502
+ key =
2503
+ get_vcenter_disk_key(
2504
+ unit_number,
2505
+ controller_key
2506
+ )
2507
+ spec_hash = {}
2508
+ reference = {}
2509
+ reference[:key] =
2510
+ "opennebula.disk.#{disk['DISK_ID']}"
2511
+ reference[:value] = key.to_s
2512
+ spec_hash[:extraConfig] = [reference]
2513
+ @item
2514
+ .ReconfigVM_Task(
2515
+ :spec => spec_hash
2516
+ ).wait_for_completion
2517
+ else
2518
+ @item
2519
+ .ReconfigVM_Task(
2520
+ :spec => spec
2521
+ ).wait_for_completion
2522
+ end
2523
+ # Modify extraConfig if any disk has a bad key
2524
+ sync_extraconfig_disk(spec_hash)
2525
+ rescue StandardError => e
2526
+ error = "Cannot attach DISK to VM: #{e.message}."
2527
+
2528
+ if VCenterDriver::CONFIG[:debug_information]
2529
+ error += "\n\n#{e.backtrace.join("\n")}"
2530
+ end
2531
+
2532
+ raise error
2533
+ end
2534
+ end
2535
+
2536
+ # Detach persistent disks to avoid their accidental destruction
2537
+ def detach_persistent_disks(vm)
2538
+ spec_hash = {}
2539
+ spec_hash[:deviceChange] = []
2540
+ ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
2541
+ if ipool.respond_to?(:message)
2542
+ raise "Could not get OpenNebula ImagePool: #{ipool.message}"
2543
+ end
2544
+
2545
+ vm.config.hardware.device.each do |disk|
2546
+ next unless disk_or_cdrom?(disk)
2547
+
2548
+ # Let's try to find out if the disk is persistent
2549
+ source_unescaped = disk.backing.fileName.sub(
2550
+ /^\[(.*?)\] /, ''
2551
+ ) rescue next
2552
+ source = VCenterDriver::FileHelper.escape_path(source_unescaped)
2553
+
2554
+ persistent = VCenterDriver::VIHelper
2555
+ .find_persistent_image_by_source(
2556
+ source, ipool
2557
+ )
2558
+
2559
+ next unless persistent
2560
+
2561
+ spec_hash[:deviceChange] << {
2562
+ :operation => :remove,
2563
+ :device => disk
2564
+ }
2565
+ end
2566
+
2567
+ return if spec_hash[:deviceChange].empty?
2568
+
2569
+ begin
2570
+ vm.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
2571
+ rescue StandardError => e
2572
+ error = "Cannot detach all DISKs from VM: #{e.message}."
2573
+
2574
+ if VCenterDriver::CONFIG[:debug_information]
2575
+ error += "\n\n#{e.backtrace}"
2576
+ end
2577
+
2578
+ raise error
2579
+ end
2580
+ end
2581
+
2582
+ def detach_disk(disk)
2583
+ return unless disk.exists?
2584
+
2585
+ spec_hash = {}
2586
+ spec_hash[:extraConfig] = [disk.config(:delete)]
2587
+ spec_hash[:deviceChange] = [{
2588
+ :operation => :remove,
2589
+ :device => disk.device
2590
+ }]
2591
+
2592
+ begin
2593
+ @item.ReconfigVM_Task(:spec => spec_hash).wait_for_completion
2594
+ rescue StandardError => e
2595
+ error = "Cannot detach DISK from VM: #{e.message}."
2596
+ error += "\nProbably an existing VM snapshot includes that disk"
2597
+
2598
+ if VCenterDriver::CONFIG[:debug_information]
2599
+ error += "\n\n#{e.backtrace}"
2600
+ end
2601
+
2602
+ raise error
2603
+ end
2604
+ end
2605
+
2606
+ def destroy_disk(disk)
2607
+ one_vm = one_item
2608
+
2609
+ # Check if we can detach and delete the non-persistent disk:
2610
+ # - VM is terminated
2611
+ # - The disk is managed by OpenNebula
2612
+ detachable= !(one_vm['LCM_STATE'].to_i == 11 && !disk.managed?)
2613
+ detachable &&= disk.exists?
2614
+
2615
+ return unless detachable
2616
+
2617
+ detach_disk(disk)
2618
+
2619
+ # Check if we want to keep the non persistent disk
2620
+ keep_non_persistent_disks =
2621
+ VCenterDriver::CONFIG[:keep_non_persistent_disks]
2622
+
2623
+ return if keep_non_persistent_disks == true
2624
+
2625
+ disk.destroy
2626
+ @disks.delete(disk.id.to_s)
2627
+ end
2628
+
2629
+ # Get vcenter device representing DISK object (hotplug)
2630
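+ # @example (sketch, mirroring the call in attach_disk; returns nil
+ # when the disk is not attached)
+ #   device = disk_attached_to_vm(disk, disk_keys, vcenter_disks_get)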
+ def disk_attached_to_vm(disk, unmanaged_keys, vc_disks)
2631
+ img_name = ''
2632
+ device_found = nil
2633
+ disk_id = disk['DISK_ID']
2634
+ unmanaged_key = unmanaged_keys["opennebula.disk.#{disk_id}"]
2635
+
2636
+ img_name_escaped = VCenterDriver::FileHelper.get_img_name(
2637
+ disk,
2638
+ one_item['ID'],
2639
+ self['name'],
2640
+ instantiated_as_persistent?
2641
+ )
2642
+
2643
+ img_name = VCenterDriver::FileHelper.unescape_path(img_name_escaped)
2644
+
2645
+ vc_disks.each do |d|
2646
+ key_matches = (unmanaged_key && d[:key] == unmanaged_key.to_i)
2647
+ path_matches = (d[:path_wo_ds] == img_name)
2648
+
2649
+ if key_matches || path_matches
2650
+ device_found = d
2651
+ break
2652
+ end
2653
+ end
2654
+
2655
+ device_found
2656
+ end
2657
+
2658
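+ # Compute a device key for a new CDROM (base 3000) or disk
+ # (base 2000): continue from the last device of that kind and
+ # skip any key already used by the VM or reserved in @used_keys.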
+ def get_key(type)
2659
+ @used_keys ||= []
2660
+
2661
+ if type == 'CDROM'
2662
+ bound = 'cdrom?'
2663
+ key = 3000
2664
+ else
2665
+ bound = 'disk?'
2666
+ key = 2000
2667
+ end
2668
+
2669
+ used = @used_keys
2670
+ @item.config.hardware.device.each do |dev|
2671
+ used << dev.key
2672
+ next unless send(bound, dev)
2673
+
2674
+ key = dev.key
2675
+ end
2676
+
2677
+ loop do
2678
+ break unless used.include?(key)
2679
+
2680
+ key+=1
2681
+ end
2682
+
2683
+ @used_keys << key
2684
+
2685
+ key
2686
+ end
2687
+
2688
+ def calculate_add_disk_spec(disk, position = 0)
2689
+ img_name_escaped = VCenterDriver::FileHelper.get_img_name(
2690
+ disk,
2691
+ one_item['ID'],
2692
+ self['name'],
2693
+ instantiated_as_persistent?
2694
+ )
2695
+
2696
+ img_name = VCenterDriver::FileHelper.unescape_path(img_name_escaped)
2697
+
2698
+ type = disk['TYPE']
2699
+ size_kb = disk['SIZE'].to_i * 1024
2700
+
2701
+ if type == 'CDROM'
2702
+ # CDROM drive will be found in the IMAGE DS
2703
+ ds_ref = disk['VCENTER_DS_REF']
2704
+ ds = VCenterDriver::Storage.new_from_ref(ds_ref,
2705
+ @vi_client)
2706
+ ds_name = ds['name']
2707
+
2708
+ # CDROM can only be added when the VM is in poweroff state
2709
+ vmdk_backing = RbVmomi::VIM::VirtualCdromIsoBackingInfo(
2710
+ :datastore => ds.item,
2711
+ :fileName => "[#{ds_name}] #{img_name}"
2712
+ )
2713
+
2714
+ if @item['summary.runtime.powerState'] != 'poweredOff'
2715
+ raise 'The CDROM image can only be added as an IDE device '\
2716
+ 'when the VM is in the powered off state'
2717
+ end
2718
+
2719
+ controller, unit_number = find_free_ide_controller(position)
2720
+
2721
+ device = RbVmomi::VIM::VirtualCdrom(
2722
+ :backing => vmdk_backing,
2723
+ :key => get_key(type),
2724
+ :controllerKey => controller.key,
2725
+ :unitNumber => unit_number,
2726
+
2727
+ :connectable => RbVmomi::VIM::VirtualDeviceConnectInfo(
2728
+ :startConnected => true,
2729
+ :connected => true,
2730
+ :allowGuestControl => true
2731
+ )
2732
+ )
2733
+
2734
+ {
2735
+ :operation => :add,
2736
+ :device => device
2737
+ }
2738
+
2739
+ else
2740
+ # TYPE is regular disk (not CDROM)
2741
+ # disk_adapter
2742
+ disk_adapter = disk['VCENTER_ADAPTER_TYPE']
2743
+ case disk_adapter
2744
+ when 'ide'
2745
+ controller, unit_number = find_free_ide_controller(position)
2746
+ else
2747
+ controller, unit_number = find_free_controller(position)
2748
+ end
2749
+ storpod = disk['VCENTER_DS_REF'].start_with?('group-')
2750
+ if storpod
2751
+ vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
2752
+ :diskMode => 'persistent',
2753
+ :fileName => ''
2754
+ )
2755
+ else
2756
+ ds = get_effective_ds(disk)
2757
+ if ds.item._ref.start_with?('group-')
2758
+ ds_object = item.datastore.first
2759
+ ds_name = ds_object.name
2760
+ else
2761
+ ds_object = ds.item
2762
+ ds_name = ds['name']
2763
+ end
2764
+ vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
2765
+ :datastore => ds_object,
2766
+ :diskMode => 'persistent',
2767
+ :fileName => "[#{ds_name}] #{img_name}"
2768
+ )
2769
+ end
2770
+
2771
+ device = RbVmomi::VIM::VirtualDisk(
2772
+ :backing => vmdk_backing,
2773
+ :capacityInKB => size_kb,
2774
+ :controllerKey => controller.key,
2775
+ :key => get_key(type),
2776
+ :unitNumber => unit_number
2777
+ )
2778
+
2779
+ config = {
2780
+ :operation => :add,
2781
+ :device => device
2782
+ }
2783
+
2784
+ # For StorageDRS vCenter must create the file
2785
+ config[:fileOperation] = :create if storpod
2786
+
2787
+ config
2788
+ end
2789
+ end
2790
+
2791
+ # Remove the MAC addresses so they cannot be in conflict
2792
+ # with OpenNebula-assigned MAC addresses.
2793
+ # We detach all nics from the VM
2794
+ def convert_to_template
2795
+ detach_all_nics
2796
+
2797
+ # We attach new NICs where the MAC address is assigned by vCenter
2798
+ nic_specs = []
2799
+ one_nics = one_item.retrieve_xmlelements('TEMPLATE/NIC')
2800
+ one_nics.each do |nic|
2801
+ next unless nic['OPENNEBULA_MANAGED'] &&
2802
+ nic['OPENNEBULA_MANAGED'].upcase == 'NO'
2803
+
2804
+ nic_specs <<
2805
+ calculate_add_nic_spec_autogenerate_mac(
2806
+ nic
2807
+ )
2808
+ end
2809
+
2810
+ # Reconfigure VM to add unmanaged nics
2811
+ spec_hash = {}
2812
+ spec_hash[:deviceChange] = nic_specs
2813
+ spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
2814
+ @item.ReconfigVM_Task(:spec => spec).wait_for_completion
2815
+
2816
+ # Convert VM to template in vCenter
2817
+ mark_as_template
2818
+
2819
+ # Edit the OpenNebula template
2820
+ one_client = OpenNebula::Client.new
2821
+ template_id = one_item['TEMPLATE/TEMPLATE_ID']
2822
+ new_template = OpenNebula::Template.new_with_id(template_id,
2823
+ one_client)
2824
+ new_template.info
2825
+
2826
+ # unlock VM Template
2827
+ new_template.unlock
2828
+
2829
+ # Update the template reference
2830
+ new_template.update("VCENTER_TEMPLATE_REF=#{@item._ref}", true)
2831
+
2832
+ # Add vCenter template name
2833
+ new_template.update("VCENTER_TEMPLATE_NAME=#{@item.name}", true)
2834
+ end
2835
+
2836
+ def resize_unmanaged_disks
2837
+ spec = { :deviceChange => [] }
2838
+ disks_each(:one?) do |d|
2839
+ next unless !d.managed? && d.new_size
2840
+
2841
+ spec[:deviceChange] << d.config(:resize)
2842
+ end
2843
+
2844
+ return if spec[:deviceChange].empty?
2845
+
2846
+ @item
2847
+ .ReconfigVM_Task(
2848
+ :spec => spec
2849
+ ).wait_for_completion
2850
+ end
2851
+
2852
+ def resize_disk(disk)
2853
+ if !disk.exists?
2854
+ size = disk.size
2855
+ sync_disks
2856
+ disk = disk(disk.id)
2857
+ disk.change_size(size)
2858
+ end
2859
+
2860
+ spec = { :deviceChange => [disk.config(:resize)] }
2861
+
2862
+ @item.ReconfigVM_Task(:spec => spec).wait_for_completion
2863
+ end
2864
+
2865
+ def snapshots?
2866
+ clear('rootSnapshot')
2867
+ self['rootSnapshot'] && !self['rootSnapshot'].empty?
2868
+ end
2869
+
2870
+ def instantiated_as_persistent?
2871
+ begin
2872
+ !one_item['TEMPLATE/CLONING_TEMPLATE_ID'].nil?
2873
+ rescue StandardError
2874
+ # one_item may not be retrieved if deploy_id hasn't been set
2875
+ false
2876
+ end
2877
+ end
2878
+
2879
+ def use_linked_clone?
2880
+ one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES'] &&
2881
+ one_item['USER_TEMPLATE/VCENTER_LINKED_CLONES']
2882
+ .upcase == 'YES'
2883
+ end
2884
+
2885
+ def find_free_ide_controller(_position = 0)
2886
+ free_ide_controller = nil
2887
+ ide_schema = {}
2888
+ devices = @item.config.hardware.device
2889
+
2890
+ devices.each do |dev|
2891
+ # Iteration to initialize IDE Controllers
2892
+ next unless dev.is_a? RbVmomi::VIM::VirtualIDEController
2893
+
2894
+ if ide_schema[dev.key].nil?
2895
+ ide_schema[dev.key] = {}
2896
+ end
2897
+ ide_schema[dev.key][:device] = dev
2898
+ ide_schema[dev.key][:freeUnitNumber] = [0, 1]
2899
+ end
2900
+
2901
+ # Iteration to match Disks and Cdroms with their controllers
2902
+ devices.each do |dev| # rubocop:disable Style/CombinableLoops
2903
+ first_condition = dev.is_a? RbVmomi::VIM::VirtualDisk
2904
+ second_condition = dev.is_a? RbVmomi::VIM::VirtualCdrom
2905
+ third_condition = ide_schema.key?(dev.controllerKey)
2906
+
2907
+ next unless (first_condition || second_condition) &&
2908
+ third_condition
2909
+
2910
+ ide_schema[dev.controllerKey][:freeUnitNumber]
2911
+ .delete(
2912
+ dev.unitNumber
2913
+ )
2914
+ end
2915
+
2916
+ ide_schema.keys.each do |controller|
2917
+ unless ide_schema[controller][:freeUnitNumber].empty?
2918
+ free_ide_controller = ide_schema[controller]
2919
+ break
2920
+ end
2921
+ end
2922
+
2923
+ if !free_ide_controller
2924
+ raise 'There are no free IDE controllers ' +
2925
+ 'to connect this CDROM device'
2926
+ end
2927
+
2928
+ controller = free_ide_controller[:device]
2929
+ new_unit_number = free_ide_controller[:freeUnitNumber][0]
2930
+
2931
+ [controller, new_unit_number]
2932
+ end
2933
+
2934
+ def find_free_controller(position = 0)
2935
+ free_scsi_controllers = []
2936
+ scsi_schema = {}
2937
+
2938
+ used_numbers = []
2939
+ available_numbers = []
2940
+ devices = @item.config.hardware.device
2941
+
2942
+ devices.each do |dev|
2943
+ if dev.is_a? RbVmomi::VIM::VirtualSCSIController
2944
+ if scsi_schema[dev.key].nil?
2945
+ scsi_schema[dev.key] = {}
2946
+ end
2947
+
2948
+ used_numbers << dev.scsiCtlrUnitNumber
2949
+ scsi_schema[dev.key][:device] = dev
2950
+ end
2951
+
2952
+ next if dev.class != RbVmomi::VIM::VirtualDisk
2953
+
2954
+ used_numbers << dev.unitNumber
2955
+ end
2956
+
2957
+ 15.times do |scsi_id|
2958
+ available_numbers <<
2959
+ scsi_id unless used_numbers.include?(scsi_id)
2960
+ end
2961
+
2962
+ scsi_schema.keys.each do |controller|
2963
+ free_scsi_controllers <<
2964
+ scsi_schema[controller][:device].deviceInfo.label
2965
+ end
2966
+
2967
+ if !free_scsi_controllers.empty?
2968
+ available_controller_label = free_scsi_controllers[0]
2969
+ else
2970
+ add_new_scsi(scsi_schema, devices)
2971
+ return find_free_controller
2972
+ end
2973
+
2974
+ controller = nil
2975
+
2976
+ devices.each do |device|
2977
+ if device.deviceInfo.label == available_controller_label
2978
+ controller = device
2979
+ break
2980
+ end
2981
+ end
2982
+
2983
+ new_unit_number = available_numbers.sort[position]
2984
+
2985
+ [controller, new_unit_number]
2986
+ end
2987
+
2988
+ def add_new_scsi(scsi_schema, devices)
2989
+ controller = nil
2990
+
2991
+ if scsi_schema.keys.length >= 4
2992
+ raise 'Cannot add a new controller, maximum is 4.'
2993
+ end
2994
+
2995
+ scsi_key = 0
2996
+ scsi_number = 0
2997
+
2998
+ if !scsi_schema.keys.empty? && scsi_schema.keys.length < 4
2999
+ scsi_key =
3000
+ scsi_schema.keys.max + 1
3001
+ scsi_number =
3002
+ scsi_schema[scsi_schema.keys.max][:device].busNumber + 1
3003
+ end
3004
+
3005
+ controller_device = RbVmomi::VIM::VirtualLsiLogicController(
3006
+ :key => scsi_key,
3007
+ :busNumber => scsi_number,
3008
+ :sharedBus => :noSharing
3009
+ )
3010
+
3011
+ device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
3012
+ :device => controller_device,
3013
+ :operation => :add
3014
+ )
3015
+
3016
+ vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
3017
+ :deviceChange => [device_config_spec]
3018
+ )
3019
+
3020
+ @item.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion
3021
+
3022
+ devices.each do |device|
3023
+ next unless device.is_a?(RbVmomi::VIM::VirtualLsiLogicController) &&
3024
+ device.key == scsi_key
3025
+
3026
+ controller = device.deviceInfo.label
3027
+ end
3028
+
3029
+ controller
3030
+ end
3031
+
3032
+ # Create a snapshot for the VM
3033
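+ # @example (sketch; snap_id becomes the snapshot name in vCenter
+ # and snap_name only appears in its description)
+ #   vm.create_snapshot('0', 'before resize')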
+ def create_snapshot(snap_id, snap_name)
3034
+ memory_dumps = true
3035
+ memory_dumps = CONFIG[:memory_dumps] if CONFIG[:memory_dumps]
3036
+
3037
+ snapshot_hash = {
3038
+ :name => snap_id,
3039
+ :description => "OpenNebula Snapshot: #{snap_name}",
3040
+ :memory => memory_dumps,
3041
+ :quiesce => true
3042
+ }
3043
+
3044
+ begin
3045
+ @item.CreateSnapshot_Task(snapshot_hash).wait_for_completion
3046
+ rescue StandardError => e
3047
+ error = "Cannot create snapshot for VM: #{e.message}."
3048
+
3049
+ if VCenterDriver::CONFIG[:debug_information]
3050
+ error += "\n\n#{e.backtrace.join("\n")}"
3051
+ end
3052
+
3053
+ raise error
3054
+ end
3055
+
3056
+ snap_id
3057
+ end
3058
+
3059
+ # Revert to a VM snapshot
3060
+ def revert_snapshot(snap_id)
3061
+ snapshot_list = self['snapshot.rootSnapshotList']
3062
+ snapshot = find_snapshot_in_list(snapshot_list, snap_id)
3063
+
3064
+ return unless snapshot
3065
+
3066
+ begin
3067
+ revert_snapshot_hash = { :_this => snapshot }
3068
+ snapshot
3069
+ .RevertToSnapshot_Task(
3070
+ revert_snapshot_hash
3071
+ ).wait_for_completion
3072
+ rescue StandardError => e
3073
+ error = "Cannot revert snapshot of VM: #{e.message}."
3074
+
3075
+ if VCenterDriver::CONFIG[:debug_information]
3076
+ error += "\n\n#{e.backtrace.join("\n")}"
3077
+ end
3078
+
3079
+ raise error
3080
+ end
3081
+ end
3082
+
3083
+ # Delete VM snapshot
3084
+ def delete_snapshot(snap_id)
3085
+ snapshot_list = self['snapshot.rootSnapshotList']
3086
+ snapshot = find_snapshot_in_list(snapshot_list, snap_id)
3087
+
3088
+ return unless snapshot
3089
+
3090
+ begin
3091
+ delete_snapshot_hash = {
3092
+ :_this => snapshot,
3093
+ :removeChildren => false
3094
+ }
3095
+ snapshot
3096
+ .RemoveSnapshot_Task(
3097
+ delete_snapshot_hash
3098
+ ).wait_for_completion
3099
+ rescue StandardError => e
3100
+ error = "Cannot delete snapshot of VM: #{e.message}."
3101
+
3102
+ if VCenterDriver::CONFIG[:debug_information]
3103
+ error += "\n\n#{e.backtrace.join("\n")}"
3104
+ end
3105
+
3106
+ raise error
3107
+ end
3108
+ end
3109
+
3110
+ def find_snapshot_in_list(list, snap_id)
3111
+ list.each do |i|
3112
+ return i.snapshot if i.name == snap_id.to_s
3113
+
3114
+ unless i.childSnapshotList.empty?
3115
+ snap = find_snapshot_in_list(i.childSnapshotList, snap_id)
3116
+ return snap if snap
3117
+ end
3118
+ end rescue nil
3119
+
3120
+ nil
3121
+ end
3122
+
3123
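+ # Relocate or migrate the VM. Config keys used below: :cluster,
+ # :datastore, :same_host and :esx_migration_list.
+ # @example (sketch; 'cluster' is a ClusterComputeResource wrapper)
+ #   vm.migrate(:cluster => cluster)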
+ def migrate(config = {})
3124
+ if config.empty?
3125
+ raise 'You need at least 1 parameter to perform a migration'
3126
+ end
3127
+
3128
+ begin
3129
+ # retrieve target resource pool and datastore from config
3130
+ one_cluster = config[:cluster]
3131
+ resourcepool = one_cluster.item.resourcePool
3132
+ datastore = config[:datastore]
3133
+
3134
+ if datastore
3135
+ relocate_spec_params = {
3136
+ :folder => @item.parent,
3137
+ :datastore => datastore
3138
+ }
3139
+
3140
+ unless config[:same_host]
3141
+ relocate_spec_params[:pool] = resourcepool
3142
+ end
3143
+
3144
+ if config[:esx_migration_list].is_a?(String)
3145
+ if config[:esx_migration_list]==''
3146
+ relocate_spec_params[:host] =
3147
+ config[:cluster].item.host.sample
3148
+ elsif config[:esx_migration_list]!='Selected_by_DRS'
3149
+ hostnames = config[:esx_migration_list].split(' ')
3150
+ hostname = hostnames.sample
3151
+ host_moref = one_cluster.hostname_to_moref(hostname)
3152
+ relocate_spec_params[:host] = host_moref
3153
+ end
3154
+ end
3155
+
3156
+ relocate_spec =
3157
+ RbVmomi::VIM
3158
+ .VirtualMachineRelocateSpec(
3159
+ relocate_spec_params
3160
+ )
3161
+ @item.RelocateVM_Task(
3162
+ :spec => relocate_spec,
3163
+ :priority => 'defaultPriority'
3164
+ ).wait_for_completion
3165
+ else
3166
+ migrate_spec_params = {
3167
+ :priority => 'defaultPriority'
3168
+ }
3169
+
3170
+ unless config[:same_host]
3171
+ migrate_spec_params[:pool] = resourcepool
3172
+ end
3173
+
3174
+ @item.MigrateVM_Task(
3175
+ migrate_spec_params
3176
+ ).wait_for_completion
3177
+ end
3178
+ rescue StandardError => e
3179
+ error = "Cannot migrate VM: #{e.message}."
3180
+
3181
+ if VCenterDriver::CONFIG[:debug_information]
3182
+ error += "\n\n#{e.backtrace.join("\n")}"
3183
+ end
3184
+
3185
+ raise error
3186
+ end
3187
+ end
3188
+
3189
+ ########################################################################
3190
+ # actions
3191
+ ########################################################################
3192
+
3193
+ def shutdown
3194
+ return if powered_off?
3195
+
3196
+ begin
3197
+ if vm_tools?
3198
+ @item.ShutdownGuest
3199
+ else
3200
+ poweroff_hard
3201
+ end
3202
+ rescue RbVmomi::Fault => e
3203
+ error = e.message.split(':').first
3204
+ raise e.message if error != 'InvalidPowerState'
3205
+ end
3206
+ timeout = CONFIG[:vm_poweron_wait_default]
3207
+ wait_timeout(:powered_off?, timeout)
3208
+ end
3209
+
3210
+ def destroy
3211
+ @item.Destroy_Task.wait_for_completion
3212
+ end
3213
+
3214
+ def mark_as_template
3215
+ @item.MarkAsTemplate
3216
+ end
3217
+
3218
+ def reset
3219
+ @item.ResetVM_Task.wait_for_completion
3220
+ end
3221
+
3222
+ def suspend
3223
+ @item.SuspendVM_Task.wait_for_completion
3224
+ end
3225
+
3226
+ def reboot
3227
+ @item.RebootGuest
3228
+ end
3229
+
3230
+ def poweron(set_running = false)
3231
+ begin
3232
+ @item.PowerOnVM_Task.wait_for_completion
3233
+ rescue RbVmomi::Fault => e
3234
+ error = e.message.split(':').first
3235
+ raise e.message if error != 'InvalidPowerState'
3236
+ end
3237
+ # opennebula.running flag
3238
+ set_running(true, true) if set_running
3239
+
3240
+ timeout = CONFIG[:vm_poweron_wait_default]
3241
+ wait_timeout(:powered_on?, timeout)
3242
+ end
3243
+
3244
+ def powered_on?
3245
+ @item.runtime.powerState == 'poweredOn'
3246
+ end
3247
+
3248
+ def powered_off?
3249
+ @item.runtime.powerState == 'poweredOff'
3250
+ end
3251
+
3252
+ def poweroff_hard
3253
+ @item.PowerOffVM_Task.wait_for_completion
3254
+ end
3255
+
3256
+ def remove_all_snapshots(consolidate = true)
3257
+ @item
3258
+ .RemoveAllSnapshots_Task(
3259
+ { :consolidate => consolidate }
3260
+ ).wait_for_completion
3261
+ info_disks
3262
+ end
3263
+
3264
+ def vm_tools?
3265
+ @item.guest.toolsRunningStatus == 'guestToolsRunning'
3266
+ end
3267
+
3268
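+ # Set the opennebula.vm.running flag in the VM extraConfig; when
+ # execute is false the config array is returned instead of being
+ # applied through ReconfigVM_Task.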
+ def set_running(state, execute = true)
3269
+ state ? value = 'yes' : value = 'no'
3270
+
3271
+ config_array = [
3272
+ { :key => 'opennebula.vm.running', :value => value }
3273
+ ]
3274
+
3275
+ return config_array unless execute
3276
+
3277
+ spec = RbVmomi::VIM.VirtualMachineConfigSpec(
3278
+ { :extraConfig => config_array }
3279
+ )
3280
+
3281
+ @item.ReconfigVM_Task(:spec => spec).wait_for_completion
3282
+ end
3283
+
3284
+ # STATIC MEMBERS, ROUTINES AND CONSTRUCTORS
3285
+ ########################################################################
3286
+
3287
+ def self.get_vm(opts = {})
3288
+ # try to retrieve machine from name
3289
+ if opts[:name]
3290
+ matches = opts[:name].match(/^one-(\d*)(-(.*))?$/)
3291
+ if matches
3292
+ id = matches[1]
3293
+ one_vm = VCenterDriver::VIHelper.one_item(
3294
+ OpenNebula::VirtualMachine, id, false
3295
+ )
3296
+ end
3297
+ end
3298
+
3299
+ if one_vm.nil?
3300
+ one_vm = VCenterDriver::VIHelper
3301
+ .find_by_ref(
3302
+ OpenNebula::VirtualMachinePool,
3303
+ 'DEPLOY_ID',
3304
+ opts[:ref],
3305
+ opts[:vc_uuid],
3306
+ opts[:pool]
3307
+ )
3308
+ end
3309
+
3310
+ one_vm
3311
+ end
3312
+
3313
+ # Migrate a VM to another cluster and/or datastore
3314
+ # @param [int] vm_id ID of the VM to be migrated
3315
+ # @param [String] src_host Name of the source cluster
3316
+ # @param [String] dst_host Name of the target cluster
3317
+ # @param [Bool] hot_ds Whether this is a DS migration
3318
+ # with the VM running or not
3319
+ # @param [int] ds Destination datastore ID
3320
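+ # @example Migrate VM 42 between two OpenNebula hosts (sketch;
+ # the host names are hypothetical)
+ #   VirtualMachine.migrate_routine(42, 'cluster-A', 'cluster-B')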
+ def self.migrate_routine(
3321
+ vm_id,
3322
+ src_host,
3323
+ dst_host,
3324
+ hot_ds = false,
3325
+ ds = nil
3326
+ )
3327
+ one_client = OpenNebula::Client.new
3328
+ pool = OpenNebula::HostPool.new(one_client)
3329
+ pool.info
3330
+
3331
+ src_id = pool["/HOST_POOL/HOST[NAME='#{src_host}']/ID"].to_i
3332
+ dst_id = pool["/HOST_POOL/HOST[NAME='#{dst_host}']/ID"].to_i
3333
+
3334
+ # different destination ds
3335
+ if ds
3336
+ ds_pool = OpenNebula::DatastorePool.new(one_client)
3337
+ ds_pool.info
3338
+ vcenter_ds_ref =
3339
+ "/DATASTORE_POOL/DATASTORE[ID='#{ds}']" +
3340
+ '/TEMPLATE/VCENTER_DS_REF'
3341
+ datastore = ds_pool[vcenter_ds_ref]
3342
+ end
3343
+
3344
+ vi_client = VCenterDriver::VIClient.new_from_host(src_id)
3345
+
3346
+ # required one objects
3347
+ vm = OpenNebula::VirtualMachine.new_with_id(vm_id, one_client)
3348
+ dst_host = OpenNebula::Host.new_with_id(dst_id, one_client)
3349
+
3350
+ # get info
3351
+ vm.info
3352
+ dst_host.info
3353
+
3354
+ esx_migration_list = dst_host['/HOST/TEMPLATE/ESX_MIGRATION_LIST']
3355
+
3356
+ # required vcenter objects
3357
+ vc_vm = VCenterDriver::VirtualMachine
3358
+ .new_without_id(
3359
+ vi_client,
3360
+ vm['/VM/DEPLOY_ID']
3361
+ )
3362
+
3363
+ vc_vm.vm_id = vm_id
3364
+
3365
+ ccr_ref = dst_host['/HOST/TEMPLATE/VCENTER_CCR_REF']
3366
+ vc_host = VCenterDriver::ClusterComputeResource.new_from_ref(
3367
+ ccr_ref, vi_client
3368
+ )
3369
+
3370
+ config = { :cluster => vc_host }
3371
+
3372
+ config[:same_host] = src_id == dst_id
3373
+
3374
+ config[:datastore] = datastore if datastore
3375
+ if hot_ds
3376
+ config[:esx_migration_list] =
3377
+ esx_migration_list if esx_migration_list
3378
+ else
3379
+ config[:esx_migration_list] = 'Selected_by_DRS'
3380
+ end
3381
+
3382
+ vc_vm.migrate(config)
3383
+
3384
+ vm.replace({ 'VCENTER_CCR_REF' => ccr_ref })
3385
+ end
3386
+
3387
+ # Try to build the vcenterdriver virtualmachine without
3388
+ # any opennebula id or object; this constructor searches
3389
+ # the opennebula pool until it finds a match
3390
+ #
3391
+ # @param vi_client [vi_client] the vcenterdriver client
3392
+ # that allows the connection
3393
+ # @param ref [String] vcenter ref to the vm
3394
+ # @param opts [Hash] object with pairs that could
3395
+ # contain multiple option
3396
+ # :vc_uuid: give the vcenter uuid directly
3397
+ # :name: the vcenter vm name used to extract the opennebula id
3398
+ #
3399
+ # @return [vcenterdriver::vm] the virtual machine
3400
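+ # @example (sketch; 'vm-1234' is a hypothetical vCenter ref and
+ # the name follows the one-<id>-<name> convention matched in get_vm)
+ #   vm = new_from_ref(vi_client, 'vm-1234', 'one-42-web')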
+ def self.new_from_ref(vi_client, ref, name, opts = {})
3401
+ unless opts[:vc_uuid]
3402
+ opts[:vc_uuid] = vi_client.vim.serviceContent.about.instanceUuid
3403
+ end
3404
+
3405
+ opts[:name] = name
3406
+ opts[:ref] = ref
3407
+
3408
+ one_vm = VCenterDriver::VirtualMachine.get_vm(opts)
3409
+
3410
+ new_one(vi_client, ref, one_vm)
3411
+ end
3412
+
3413
+ # build a vcenterdriver virtual machine from a template
3414
+ # this function is used to instantiate vcenter vms
3415
+ #
3416
+ # @param vi_client [vi_client] the vcenterdriver
3417
+ # client that allows the connection
3418
+ # @param drv_action [xmlelement] driver_action that contains the info
3419
+ # @param id [int] the id of the opennebula virtual machine
3420
+ #
3421
+ # @return [vcenterdriver::vm] the virtual machine
3422
+ def self.new_from_clone(vi_client, drv_action, id)
3423
+ new(vi_client, nil, id).tap do |vm|
3424
+ vm.clone_vm(drv_action)
3425
+ end
3426
+ end
3427
+
3428
+ # build a vcenterdriver virtual machine
3429
+ # with the vmware item already linked
3430
+ #
3431
+ # @param vm_item the vmware VM item that is going to be associated
3432
+ #
3433
+ # @return [vcenterdriver::vm] the virtual machine
3434
+ def self.new_with_item(vm_item)
3435
+ new(nil, nil, -1).tap do |vm|
3436
+ vm.item_update(vm_item)
3437
+ end
3438
+ end
3439
+
3440
+ # build a vcenterdriver virtual machine
3441
+ # with the opennebula object linked
3442
+ #
3443
+ # @param vi_client [vi_client] the vcenterdriver
3444
+ # client that allows the connection
3445
+ # @param ref [String] vcenter ref to the vm
3446
+ # @param one_item [one::vm] xmlelement of opennebula
3447
+ #
3448
+ # @return [vcenterdriver::vm] the virtual machine
3449
+ def self.new_one(vi_client, ref, one_item)
3450
+ id = one_item['ID'] || one_item['VM/ID'] rescue -1
3451
+
3452
+ new(vi_client, ref, id).tap do |vm|
3453
+ if one_item.instance_of?(OpenNebula::VirtualMachine)
3454
+ vm.one_item = one_item
3455
+ end
3456
+ end
3457
+ end
3458
+
3459
+ # build a vcenterdriver virtual machine
3460
+ # without opennebula object link, use id = -1 instead
3461
+ #
3462
+ # @param vi_client [vi_client] the vcenterdriver client
3463
+ # that allows the connection
3464
+ # @param ref [String] vcenter ref to the vm
3465
+ #
3466
+ # @return [vcenterdriver::vm] the virtual machine
3467
+ def self.new_without_id(vi_client, ref)
3468
+ new(vi_client, ref, -1)
3469
+ end
3470
+
3471
+ ########################################################################
3472
+
3473
+ end
3474
+ # class VirtualMachine
3475
+
3476
+ end
3477
+ # module VCenterDriver