opennebula 5.12.13 → 5.13.80.pre
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +5 -5
- data/lib/ActionManager.rb +1 -1
- data/lib/CommandManager.rb +1 -1
- data/lib/DriverExecHelper.rb +44 -28
- data/lib/OpenNebulaDriver.rb +8 -4
- data/lib/VirtualMachineDriver.rb +9 -2
- data/lib/cloud/CloudClient.rb +3 -3
- data/lib/datacenter.rb +1258 -0
- data/lib/datastore.rb +1025 -0
- data/lib/distributed_firewall.rb +280 -0
- data/lib/file_helper.rb +370 -0
- data/lib/host.rb +1517 -0
- data/lib/logical_port.rb +50 -0
- data/lib/logical_switch.rb +77 -0
- data/lib/memoize.rb +74 -0
- data/lib/models/role.rb +39 -8
- data/lib/models/service.rb +92 -31
- data/lib/models.rb +5 -5
- data/lib/network.rb +635 -0
- data/lib/nsx_client.rb +144 -0
- data/lib/nsx_component.rb +28 -0
- data/lib/nsx_constants.rb +149 -0
- data/lib/nsx_driver.rb +78 -0
- data/lib/nsx_error.rb +77 -0
- data/lib/nsx_rule.rb +193 -0
- data/lib/nsxt_client.rb +176 -0
- data/lib/nsxt_dfw.rb +196 -0
- data/lib/nsxt_logical_port.rb +94 -0
- data/lib/nsxt_rule.rb +188 -0
- data/lib/nsxt_tz.rb +38 -0
- data/lib/nsxv_client.rb +176 -0
- data/lib/nsxv_dfw.rb +202 -0
- data/lib/nsxv_logical_port.rb +107 -0
- data/lib/nsxv_rule.rb +172 -0
- data/lib/nsxv_tz.rb +41 -0
- data/lib/opaque_network.rb +134 -0
- data/lib/opennebula/acl.rb +1 -1
- data/lib/opennebula/acl_pool.rb +1 -1
- data/lib/opennebula/client.rb +1 -1
- data/lib/opennebula/cluster.rb +1 -1
- data/lib/opennebula/cluster_pool.rb +1 -1
- data/lib/opennebula/datastore.rb +1 -1
- data/lib/opennebula/datastore_pool.rb +1 -1
- data/lib/opennebula/document.rb +8 -29
- data/lib/opennebula/document_json.rb +42 -12
- data/lib/opennebula/document_pool.rb +1 -1
- data/lib/opennebula/document_pool_json.rb +1 -1
- data/lib/opennebula/error.rb +4 -1
- data/lib/opennebula/flow/grammar.rb +1195 -0
- data/lib/{models → opennebula/flow}/service_pool.rb +26 -2
- data/lib/{models → opennebula/flow}/service_template.rb +86 -17
- data/lib/opennebula/flow/service_template_ext.rb +84 -0
- data/lib/{models → opennebula/flow}/service_template_pool.rb +1 -1
- data/lib/opennebula/flow/validator.rb +499 -0
- data/lib/opennebula/flow.rb +23 -0
- data/lib/opennebula/group.rb +1 -1
- data/lib/opennebula/group_pool.rb +1 -1
- data/lib/opennebula/hook.rb +5 -12
- data/lib/opennebula/hook_log.rb +1 -1
- data/lib/opennebula/hook_pool.rb +1 -1
- data/lib/opennebula/host.rb +1 -1
- data/lib/opennebula/host_pool.rb +1 -1
- data/lib/opennebula/image.rb +17 -14
- data/lib/opennebula/image_pool.rb +1 -1
- data/lib/opennebula/ldap_auth.rb +1 -1
- data/lib/opennebula/ldap_auth_spec.rb +1 -1
- data/lib/opennebula/lockable_ext.rb +163 -0
- data/lib/opennebula/marketplace.rb +1 -1
- data/lib/opennebula/marketplace_pool.rb +1 -1
- data/lib/opennebula/marketplaceapp.rb +9 -119
- data/lib/opennebula/marketplaceapp_ext.rb +522 -0
- data/lib/opennebula/marketplaceapp_pool.rb +1 -1
- data/lib/opennebula/oneflow_client.rb +4 -3
- data/lib/opennebula/pool.rb +4 -3
- data/lib/opennebula/pool_element.rb +1 -1
- data/lib/opennebula/security_group.rb +1 -1
- data/lib/opennebula/security_group_pool.rb +1 -1
- data/lib/opennebula/server_cipher_auth.rb +1 -1
- data/lib/opennebula/server_x509_auth.rb +1 -1
- data/lib/opennebula/ssh_auth.rb +1 -1
- data/lib/opennebula/system.rb +1 -1
- data/lib/opennebula/template.rb +4 -13
- data/lib/opennebula/template_ext.rb +325 -0
- data/lib/opennebula/template_pool.rb +1 -1
- data/lib/opennebula/user.rb +26 -2
- data/lib/opennebula/user_pool.rb +1 -1
- data/lib/opennebula/utils.rb +1 -1
- data/lib/opennebula/vdc.rb +1 -1
- data/lib/opennebula/vdc_pool.rb +1 -1
- data/lib/opennebula/virtual_machine.rb +25 -207
- data/lib/opennebula/virtual_machine_ext.rb +469 -0
- data/lib/opennebula/virtual_machine_pool.rb +1 -5
- data/lib/opennebula/virtual_network.rb +4 -10
- data/lib/opennebula/virtual_network_pool.rb +1 -1
- data/lib/opennebula/virtual_router.rb +4 -12
- data/lib/opennebula/virtual_router_pool.rb +1 -1
- data/lib/opennebula/vm_group.rb +4 -11
- data/lib/opennebula/vm_group_pool.rb +1 -1
- data/lib/opennebula/vntemplate.rb +4 -13
- data/lib/opennebula/vntemplate_pool.rb +1 -1
- data/lib/opennebula/wait_ext.rb +222 -0
- data/lib/opennebula/x509_auth.rb +1 -1
- data/lib/opennebula/xml_element.rb +1 -1
- data/lib/opennebula/xml_pool.rb +1 -1
- data/lib/opennebula/xml_utils.rb +1 -1
- data/lib/opennebula/zone.rb +1 -1
- data/lib/opennebula/zone_pool.rb +1 -1
- data/lib/opennebula.rb +5 -2
- data/lib/rest_client.rb +201 -0
- data/lib/scripts_common.rb +180 -0
- data/lib/transport_zone.rb +43 -0
- data/lib/vcenter_driver.rb +9 -22
- data/lib/vcenter_importer.rb +616 -0
- data/lib/vi_client.rb +281 -0
- data/lib/vi_helper.rb +312 -0
- data/lib/virtual_machine.rb +3477 -0
- data/lib/virtual_wire.rb +158 -0
- data/lib/vm_device.rb +80 -0
- data/lib/vm_disk.rb +202 -0
- data/lib/vm_folder.rb +69 -0
- data/lib/vm_helper.rb +30 -0
- data/lib/vm_monitor.rb +303 -0
- data/lib/vm_nic.rb +70 -0
- data/lib/vm_template.rb +1961 -0
- data/lib/vmm_importer.rb +121 -0
- metadata +101 -35
data/lib/host.rb
ADDED
@@ -0,0 +1,1517 @@
|
|
1
|
+
# -------------------------------------------------------------------------- #
|
2
|
+
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
|
3
|
+
# #
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
5
|
+
# not use this file except in compliance with the License. You may obtain #
|
6
|
+
# a copy of the License at #
|
7
|
+
# #
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0 #
|
9
|
+
# #
|
10
|
+
# Unless required by applicable law or agreed to in writing, software #
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS, #
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
13
|
+
# See the License for the specific language governing permissions and #
|
14
|
+
# limitations under the License. #
|
15
|
+
#--------------------------------------------------------------------------- #
|
16
|
+
|
17
|
+
##############################################################################
|
18
|
+
# Module VCenterDriver
|
19
|
+
##############################################################################
|
20
|
+
module VCenterDriver
|
21
|
+
|
22
|
+
require 'json'
|
23
|
+
require 'nsx_driver'
|
24
|
+
|
25
|
+
##########################################################################
|
26
|
+
# Class HostFolder
|
27
|
+
##########################################################################
|
28
|
+
class HostFolder

    # Lightweight wrapper around a vCenter folder that contains
    # clusters. Discovered ClusterComputeResource wrappers are cached
    # in @items keyed by their vCenter moref (as a Symbol).
    attr_accessor :item, :items

    # @param item [RbVmomi managed object] the folder to wrap
    def initialize(item)
        @item  = item
        @items = {}
    end

    # Populate the @items cache with a wrapper for every
    # ClusterComputeResource reachable from the wrapped folder.
    def fetch_clusters!
        clusters = VIClient.get_entities(@item, 'ClusterComputeResource')
        clusters.each do |cluster|
            @items[cluster._ref.to_sym] = ClusterComputeResource.new(cluster)
        end
    end

    # Return the cached cluster wrapper for +ref+, creating (and
    # caching) one on demand when the moref has not been seen yet.
    def get_cluster(ref)
        key = ref.to_sym

        unless @items[key]
            rbvmomi_dc = RbVmomi::VIM::ClusterComputeResource.new(
                @item._connection,
                ref
            )
            @items[key] = ClusterComputeResource.new(rbvmomi_dc)
        end

        @items[key]
    end

end
|
67
|
+
# class HostFolder
|
68
|
+
|
69
|
+
##########################################################################
|
70
|
+
# Class ClusterComputeResource
|
71
|
+
##########################################################################
|
72
|
+
class ClusterComputeResource
|
73
|
+
|
74
|
+
attr_accessor :item
|
75
|
+
attr_accessor :rp_list
|
76
|
+
|
77
|
+
include Memoize
|
78
|
+
|
79
|
+
# Wrap a ClusterComputeResource managed object.
#
# @param item      [RbVmomi managed object] the cluster to wrap
# @param vi_client [VIClient, nil] connection used by the monitoring
#                  helpers; may be nil when only static data is needed
def initialize(item, vi_client = nil)
    @item      = item
    @vi_client = vi_client

    # BUGFIX: the original had a bare `@rp_list` here — a void
    # expression that reads an undefined ivar and discards the value.
    # Initialize it explicitly; it is populated later by
    # monitor_resource_pools / monitor_vms.
    @rp_list = nil
end
|
84
|
+
|
85
|
+
# Depth-first collection of +rp+ and every resource pool nested
# below it.
#
# @param rp       [RbVmomi resource pool] subtree root
# @param rp_array [Array] accumulator (also the return value)
# @return [Array] all pools in the subtree, parents before children
def fetch_resource_pools(rp, rp_array = [])
    rp_array.push(rp)

    rp.resourcePool.each { |child| fetch_resource_pools(child, rp_array) }

    rp_array
end
|
94
|
+
|
95
|
+
# Memoized list of every resource pool under this cluster's root
# pool (root included). Computed at most once per wrapper instance.
def resource_pools
    @resource_pools ||= fetch_resource_pools(@item.resourcePool)
end
|
102
|
+
|
103
|
+
# Build a flat list of { :name, :ref } entries for every resource
# pool below +rp+ (default: this cluster's root pool). Names are
# slash-separated paths; the implicit root pool 'Resources' is
# omitted from both the paths and the result.
#
# @param rp            [RbVmomi resource pool] subtree root
# @param parent_prefix [String] path accumulated so far
# @param rp_array      [Array<Hash>] accumulator (also returned)
# @return [Array<Hash>] children appear before their parents
def get_resource_pool_list(
    rp = @item.resourcePool,
    parent_prefix = '',
    rp_array = []
)
    path = ''
    path << parent_prefix << '/' unless parent_prefix.empty?

    children, name = rp.collect('resourcePool', 'name')
    path << name unless name == 'Resources'

    children.each do |child|
        get_resource_pool_list(child, path, rp_array)
    end

    rp_array << { :name => path, :ref => rp._ref } unless path.empty?

    rp_array
end
|
130
|
+
|
131
|
+
# Inspect the vCenter extension list and, when an NSX-V or NSX-T
# manager extension is registered, render its connection data as
# OpenNebula template attributes (NSX_MANAGER / NSX_TYPE /
# NSX_VERSION / NSX_LABEL).
#
# @return [String] template fragment; empty when no NSX extension
#   is registered
def nsx_get
    nsx_info = ''
    nsx_obj = {}
    # In the future add more than one nsx manager
    extension_list =
        @vi_client
        .vim
        .serviceContent
        .extensionManager
        .extensionList
    extension_list.each do |ext_list|
        case ext_list.key
        when NSXDriver::NSXConstants::NSXV_EXTENSION_LIST
            nsx_obj['type'] = NSXDriver::NSXConstants::NSXV
            # NSX-V publishes its endpoint in the client list; keep
            # only scheme + host:port from the full URL
            url_full = ext_list.client[0].url
            url_split = url_full.split('/')
            # protocol = "https://"
            protocol = url_split[0] + '//'
            # ip_port = ip:port
            ip_port = url_split[2]
            nsx_obj['url'] = protocol + ip_port
            nsx_obj['version'] = ext_list.version
            nsx_obj['label'] = ext_list.description.label
        when NSXDriver::NSXConstants::NSXT_EXTENSION_LIST
            nsx_obj['type'] = NSXDriver::NSXConstants::NSXT
            # NSX-T exposes the manager URL directly in the server list
            nsx_obj['url'] = ext_list.server[0].url
            nsx_obj['version'] = ext_list.version
            nsx_obj['label'] = ext_list.description.label
        else
            # Not an NSX extension; keep scanning
            next
        end
    end
    unless nsx_obj.empty?
        nsx_info << "NSX_MANAGER=\"#{nsx_obj['url']}\"\n"
        nsx_info << "NSX_TYPE=\"#{nsx_obj['type']}\"\n"
        nsx_info << "NSX_VERSION=\"#{nsx_obj['version']}\"\n"
        nsx_info << "NSX_LABEL=\"#{nsx_obj['label']}\"\n"
    end
    nsx_info
end
|
171
|
+
|
172
|
+
# Validate NSX connectivity for this host.
#
# Loads the OpenNebula host (into @one_item), verifies the template
# carries NSX_MANAGER / NSX_USER / NSX_PASSWORD / NSX_TYPE, then
# probes the manager API with a GET. Side effect: @nsx_status always
# ends up holding a template-ready status line explaining the result.
#
# @return [Boolean, nil] true when the probe succeeds; false on a
#   missing attribute or a failed request.
#   NOTE(review): an unrecognized NSX_TYPE (neither NSXV nor NSXT)
#   hits a bare `return`, yielding nil rather than false — confirm
#   callers only test truthiness.
def nsx_ready?
    @one_item =
        VCenterDriver::VIHelper
        .one_item(
            OpenNebula::Host,
            @vi_client
            .instance_variable_get(
                :@host_id
            ).to_i
        )

    # Check if NSX_MANAGER is into the host template
    if [nil, ''].include?(@one_item['TEMPLATE/NSX_MANAGER'])
        @nsx_status = "NSX_STATUS = \"Missing NSX_MANAGER\"\n"
        return false
    end

    # Check if NSX_USER is into the host template
    if [nil, ''].include?(@one_item['TEMPLATE/NSX_USER'])
        @nsx_status = "NSX_STATUS = \"Missing NSX_USER\"\n"
        return false
    end

    # Check if NSX_PASSWORD is into the host template
    if [nil, ''].include?(@one_item['TEMPLATE/NSX_PASSWORD'])
        @nsx_status = "NSX_STATUS = \"Missing NSX_PASSWORD\"\n"
        return false
    end

    # Check if NSX_TYPE is into the host template
    if [nil, ''].include?(@one_item['TEMPLATE/NSX_TYPE'])
        @nsx_status = "NSX_STATUS = \"Missing NSX_TYPE\"\n"
        return false
    end

    # Try a connection as part of NSX_STATUS
    nsx_client = NSXDriver::NSXClient
                 .new_from_id(
                     @vi_client
                     .instance_variable_get(
                         :@host_id
                     ).to_i
                 )

    if @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXV
        # URL to test a connection
        url = '/api/2.0/vdn/scopes'
        begin
            if nsx_client.get(url)
                @nsx_status = "NSX_STATUS = \"OK\"\n"
                return true
            else
                @nsx_status =
                    "NSX_STATUS = \"Response code incorrect\"\n"
                return false
            end
        rescue StandardError
            @nsx_status = 'NSX_STATUS = "Error connecting to ' \
                          "NSX_MANAGER\"\n"
            return false
        end
    end

    nxs_type = @one_item['TEMPLATE/NSX_TYPE']
    unless nxs_type == NSXDriver::NSXConstants::NSXT; return; end

    # URL to test a connection
    url = '/api/v1/transport-zones'
    begin
        if nsx_client.get(url)
            @nsx_status = "NSX_STATUS = \"OK\"\n"
            true
        else
            @nsx_status =
                "NSX_STATUS = \"Response code incorrect\"\n"
            false
        end
    rescue StandardError
        @nsx_status = 'NSX_STATUS = "Error connecting to '\
                      "NSX_MANAGER\"\n"
        false
    end
end
|
255
|
+
|
256
|
+
# Collect the NSX transport zones for this host's NSX manager.
#
# When nsx_ready? fails the failure status line is returned instead.
# Otherwise the zones are rendered as a template vector:
# NSX_TRANSPORT_ZONES = [ <name>="<id>", ... ].
#
# NOTE(review): the success line here is `NSX_STATUS = OK` without
# the escaped quotes used everywhere else (e.g. in nsx_ready?) —
# confirm the unquoted form is intentional.
#
# @return [String] template fragment with NSX status and zones
# @raise [RuntimeError] for an unknown NSX_TYPE
def tz_get
    @nsx_status = ''
    if !nsx_ready?
        tz_info = @nsx_status
    else
        tz_info = "NSX_STATUS = OK\n"
        tz_info << 'NSX_TRANSPORT_ZONES = ['

        nsx_client =
            NSXDriver::NSXClient
            .new_from_id(
                @vi_client
                .instance_variable_get(
                    :@host_id
                ).to_i
            )
        tz_object = NSXDriver::TransportZone.new_child(nsx_client)

        # NSX request to get Transport Zones
        case @one_item['TEMPLATE/NSX_TYPE']
        when NSXDriver::NSXConstants::NSXV
            # NSX-V answers XML; zones are name/objectId nodes
            tzs = tz_object.tzs
            tzs.each do |tz|
                tz_info << tz.xpath('name').text << '="'
                tz_info << tz.xpath('objectId').text << '",'
            end
            tz_info.chomp!(',')
        when NSXDriver::NSXConstants::NSXT
            # NSX-T answers JSON; zones live under 'results'
            r = tz_object.tzs
            r['results'].each do |tz|
                tz_info << tz['display_name'] << '="'
                tz_info << tz['id'] << '",'
            end
            tz_info.chomp!(',')
        else
            raise "Unknown Port Group type \
            #{@one_item['TEMPLATE/NSX_TYPE']}"
        end
        tz_info << ']'
        return tz_info
    end
    tz_info
end
|
299
|
+
|
300
|
+
# Build the monitoring snapshot for the whole cluster — capacity,
# CPU/memory usage, DRS/HA flags, NSX info and per-resource-pool
# metrics — as OpenNebula template attributes.
#
# @return [String] template fragment (the trailing part is the
#   resource pool info appended by monitor_resource_pools)
def monitor
    total_cpu,
        num_cpu_cores,
        effective_cpu,
        total_memory,
        effective_mem,
        num_hosts,
        num_eff_hosts,
        overall_status,
        drs_enabled,
        ha_enabled = @item.collect('summary.totalCpu',
                                   'summary.numCpuCores',
                                   'summary.effectiveCpu',
                                   'summary.totalMemory',
                                   'summary.effectiveMemory',
                                   'summary.numHosts',
                                   'summary.numEffectiveHosts',
                                   'summary.overallStatus',
                                   'configuration.drsConfig.enabled',
                                   'configuration.dasConfig.enabled')

    # MHz per physical core; CPU capacity below is expressed in
    # "100 units per core"
    mhz_core = total_cpu.to_f / num_cpu_cores.to_f
    eff_core = effective_cpu.to_f / mhz_core

    free_cpu = format('%.2f', eff_core * 100).to_f # rubocop:disable Style/FormatStringToken
    total_cpu = num_cpu_cores.to_f * 100
    used_cpu = format('%.2f', total_cpu - free_cpu).to_f # rubocop:disable Style/FormatStringToken

    # NOTE(review): vSphere reports totalMemory in bytes and
    # effectiveMemory in MB, so these two conversions do not land in
    # the same unit — confirm the units expected downstream.
    total_mem = total_memory.to_i / 1024
    free_mem = effective_mem.to_i * 1024

    str_info = ''

    # Get cluster name for informative purposes
    # (replace space with _ if any)
    str_info << 'VCENTER_NAME=' << self['name'].tr(' ', '_') << "\n"

    # System
    str_info << "HYPERVISOR=vcenter\n"
    str_info << 'TOTALHOST=' << num_hosts.to_s << "\n"
    str_info << 'AVAILHOST=' << num_eff_hosts.to_s << "\n"
    str_info << 'STATUS=' << overall_status << "\n"

    # CPU
    str_info << 'CPUSPEED=' << mhz_core.to_s << "\n"
    str_info << 'TOTALCPU=' << total_cpu.to_s << "\n"
    str_info << 'USEDCPU=' << used_cpu.to_s << "\n"
    str_info << 'FREECPU=' << free_cpu.to_s << "\n"

    # Memory
    str_info << 'TOTALMEMORY=' << total_mem.to_s << "\n"
    str_info << 'FREEMEMORY=' << free_mem.to_s << "\n"
    str_info << 'USEDMEMORY=' << (total_mem - free_mem).to_s << "\n"

    # DRS enabled
    str_info << 'VCENTER_DRS=' << drs_enabled.to_s << "\n"

    # HA enabled
    str_info << 'VCENTER_HA=' << ha_enabled.to_s << "\n"

    # NSX info
    str_info << nsx_get
    str_info << tz_get

    str_info << monitor_resource_pools(mhz_core)
end
|
366
|
+
|
367
|
+
# Render per-resource-pool CPU/memory allocation settings as
# VCENTER_RESOURCE_POOL_INFO template vectors.
#
# Side effect: refreshes @rp_list (name/ref pairs) used to label
# each pool. A ContainerView + PropertyCollector fetches the
# allocation properties of every pool in one round trip.
#
# @param mhz_core [Float] MHz per core, used to convert the CPU
#   reservation into an equivalent number of cores
# @return [String] template fragment ('' when no pools were found)
def monitor_resource_pools(mhz_core)
    @rp_list = get_resource_pool_list

    view =
        @vi_client
        .vim
        .serviceContent
        .viewManager
        .CreateContainerView(
            {
                :container => @item, # View for RPs inside this cluster
                :type => ['ResourcePool'],
                :recursive => true
            }
        )

    pc = @vi_client.vim.serviceContent.propertyCollector

    monitored_properties = [
        'config.cpuAllocation.expandableReservation',
        'config.cpuAllocation.limit',
        'config.cpuAllocation.reservation',
        'config.cpuAllocation.shares.level',
        'config.cpuAllocation.shares.shares',
        'config.memoryAllocation.expandableReservation',
        'config.memoryAllocation.limit',
        'config.memoryAllocation.reservation',
        'config.memoryAllocation.shares.level',
        'config.memoryAllocation.shares.shares'
    ]

    filter_spec = RbVmomi::VIM.PropertyFilterSpec(
        :objectSet => [
            { :obj => view,
              :skip => true,
              :selectSet => [
                  RbVmomi::VIM.TraversalSpec(
                      :name => 'traverseEntities',
                      :type => 'ContainerView',
                      :path => 'view',
                      :skip => false
                  )
              ] }
        ],
        :propSet => [
            {
                :type => 'ResourcePool',
                :pathSet => monitored_properties
            }
        ]
    )

    result = pc.RetrieveProperties(:specSet => [filter_spec])

    rps = {}
    result.each do |r|
        hashed_properties = r.to_hash
        if r.obj.is_a?(RbVmomi::VIM::ResourcePool)
            rps[r.obj._ref] = hashed_properties
        end
    end

    return '' if rps.empty?

    rp_info = ''

    rps.each do |ref, info|
        # CPU
        if info['config.cpuAllocation.expandableReservation']
            cpu_expandable = 'YES'
        else
            cpu_expandable = 'NO'
        end
        # NOTE(review): the limit is compared against the STRING
        # '-1' although vSphere reports a numeric limit — confirm
        # the UNLIMITED branch can actually trigger.
        if info['config.cpuAllocation.limit'] == '-1'
            cpu_limit = 'UNLIMITED'
        else
            cpu_limit = info['config.cpuAllocation.limit']
        end
        cpu_reservation = info['config.cpuAllocation.reservation']
        cpu_num = cpu_reservation.to_f / mhz_core
        cpu_shares_level = info['config.cpuAllocation.shares.level']
        cpu_shares = info['config.cpuAllocation.shares.shares']

        # MEMORY
        if info['config.memoryAllocation.expandableReservation']
            mem_expandable = 'YES'
        else
            mem_expandable = 'NO'
        end
        if info['config.memoryAllocation.limit'] == '-1'
            mem_limit = 'UNLIMITED'
        else
            mem_limit = info['config.memoryAllocation.limit']
        end
        mem_reservation =
            info['config.memoryAllocation.reservation'].to_f
        mem_shares_level =
            info['config.memoryAllocation.shares.level']
        mem_shares =
            info['config.memoryAllocation.shares.shares']

        # Map the pool moref back to its path name; fall back to the
        # implicit root pool when the lookup fails
        rp_name =
            @rp_list
            .select do |item|
                item[:ref] == ref
            end.first[:name] rescue ''

        rp_name = 'Resources' if rp_name.empty?

        rp_info << "\nVCENTER_RESOURCE_POOL_INFO = ["
        rp_info << "NAME=\"#{rp_name}\","
        rp_info << "CPU_EXPANDABLE=#{cpu_expandable},"
        rp_info << "CPU_LIMIT=#{cpu_limit},"
        rp_info << "CPU_RESERVATION=#{cpu_reservation},"
        rp_info << "CPU_RESERVATION_NUM_CORES=#{cpu_num},"
        rp_info << "CPU_SHARES=#{cpu_shares},"
        rp_info << "CPU_SHARES_LEVEL=#{cpu_shares_level},"
        rp_info << "MEM_EXPANDABLE=#{mem_expandable},"
        rp_info << "MEM_LIMIT=#{mem_limit},"
        rp_info << "MEM_RESERVATION=#{mem_reservation},"
        rp_info << "MEM_SHARES=#{mem_shares},"
        rp_info << "MEM_SHARES_LEVEL=#{mem_shares_level}"
        rp_info << ']'
    end

    view.DestroyView

    rp_info
end
|
496
|
+
|
497
|
+
# Resolve an ESX hostname to its vCenter managed object reference.
#
# @param hostname [String] ESX host name as reported by vCenter
# @return [String] the host's moref
# @raise [RuntimeError] when no host in the cluster matches
def hostname_to_moref(hostname)
    match = filter_hosts.find { |r| r.obj.name == hostname }

    raise "Host #{hostname} was not found" unless match

    match.obj._ref
end
|
511
|
+
|
512
|
+
# Fetch the monitored properties of every ESX host in this cluster
# in a single PropertyCollector round trip (ContainerView is
# created, queried and destroyed here).
#
# @return [Array] PropertyCollector results, one entry per
#   HostSystem; callers filter and hash them as needed
def filter_hosts
    view =
        @vi_client
        .vim
        .serviceContent
        .viewManager
        .CreateContainerView(
            {
                # View for Hosts inside this cluster
                :container => @item,
                :type => ['HostSystem'],
                :recursive => true
            }
        )

    pc = @vi_client.vim.serviceContent.propertyCollector

    monitored_properties = [
        'name',
        'runtime.connectionState',
        'summary.hardware.numCpuCores',
        'summary.hardware.memorySize',
        'summary.hardware.cpuModel',
        'summary.hardware.cpuMhz',
        'summary.quickStats.overallCpuUsage',
        'summary.quickStats.overallMemoryUsage'
    ]

    filter_spec = RbVmomi::VIM.PropertyFilterSpec(
        :objectSet => [
            { :obj => view,
              :skip => true,
              :selectSet => [
                  RbVmomi::VIM.TraversalSpec(
                      :name => 'traverseEntities',
                      :type => 'ContainerView',
                      :path => 'view',
                      :skip => false
                  )
              ] }
        ],
        :propSet => [
            { :type => 'HostSystem', :pathSet => monitored_properties }
        ]
    )

    result = pc.RetrieveProperties(:specSet => [filter_spec])
    view.DestroyView # Destroy the view
    result
end
|
562
|
+
|
563
|
+
# Render per-ESX-host state (CPU/memory capacity and usage) as
# HOST=[...] template vectors. Hosts that are not in the
# 'connected' state are skipped.
#
# @return [String] template fragment ('' when no host is connected)
def monitor_host_systems
    host_info = ''
    result = filter_hosts
    hosts = {}
    result.each do |r|
        hashed_properties = r.to_hash
        if r.obj.is_a?(RbVmomi::VIM::HostSystem)
            hosts[r.obj._ref] = hashed_properties
        end
    end

    hosts.each do |_ref, info|
        next if info['runtime.connectionState'] != 'connected'

        # CPU capacity as 100 units per core; usage derived from the
        # MHz quick-stat relative to the per-core clock
        total_cpu = info['summary.hardware.numCpuCores'] * 100
        used_cpu =
            (
                info['summary.quickStats.overallCpuUsage']
                .to_f / info['summary.hardware.cpuMhz']
                .to_f
            ) * 100
        # Trim precission
        used_cpu = format('%.2f', used_cpu).to_f # rubocop:disable Style/FormatStringToken
        free_cpu = total_cpu - used_cpu

        # NOTE(review): memorySize is reported in bytes and
        # overallMemoryUsage in MB, so /1024 and *1024 only agree if
        # both are meant to land on KB — confirm the expected units.
        total_memory =
            info['summary.hardware.memorySize']/1024
        used_memory =
            info['summary.quickStats.overallMemoryUsage']*1024
        free_memory = total_memory - used_memory

        host_info << "\nHOST=["
        host_info << 'STATE=on,'
        host_info << 'HOSTNAME="' <<
            info['name'].to_s << '",'
        host_info <<
            'MODELNAME="' <<
            info['summary.hardware.cpuModel'].to_s << '",'
        host_info << 'CPUSPEED=' <<
            info['summary.hardware.cpuMhz'].to_s << ','
        host_info << 'MAX_CPU=' << total_cpu.to_s << ','
        host_info << 'USED_CPU=' << used_cpu.to_s << ','
        host_info << 'FREE_CPU=' << free_cpu.to_s << ','
        host_info << 'MAX_MEM=' << total_memory.to_s << ','
        host_info << 'USED_MEM=' << used_memory.to_s << ','
        host_info << 'FREE_MEM=' << free_memory.to_s
        host_info << ']'
    end

    host_info
end
|
614
|
+
|
615
|
+
# Monitor every VM in this cluster and render one VM=[...] template
# vector per machine, plus (for wilds) an importable template.
#
# @param host_id [Integer] OpenNebula id of the monitored host
# @param vm_type [String] 'ones' to report only OpenNebula-managed
#   VMs, 'wilds' to report only VMs unknown to OpenNebula
# @return [Array(String, String)] monitoring payload and the
#   timestamp of the last successful performance poll
def monitor_vms(host_id, vm_type)
    vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
    cluster_name = self['name']
    cluster_ref = self['_ref']

    # Get info of the host where the VM/template is located
    one_host =
        VCenterDriver::VIHelper
        .one_item(
            OpenNebula::Host,
            host_id
        )
    if !one_host
        # BUGFIX: this branch referenced the undefined locals `host`,
        # `message` and `e`, which raised NameError instead of
        # reporting the problem. Log it and bail out early —
        # continuing with a nil host would crash further down anyway.
        STDERR.puts "Failed to retrieve host with id #{host_id}"
        return ['', nil]
    end

    # Per-ESX-host name and core clock, used to label VMs and to
    # normalize their CPU metrics
    esx_hosts = {}
    @item.host.each do |esx_host|
        esx_hosts[esx_host._ref] = {
            :name => esx_host.name,
            :cpu => esx_host.summary.hardware.cpuMhz.to_f
        }
    end

    monitored_vms = Set.new
    str_info = ''

    view =
        @vi_client
        .vim
        .serviceContent
        .viewManager
        .CreateContainerView(
            {
                :container => @item, # View for VMs inside this cluster
                :type => ['VirtualMachine'],
                :recursive => true
            }
        )

    pc = @vi_client.vim.serviceContent.propertyCollector

    monitored_properties = [
        'name', # VM name
        'config.template', # To filter out templates
        'summary.runtime.powerState', # VM power state
        'summary.quickStats.hostMemoryUsage', # Memory usage
        'summary.quickStats.overallCpuUsage', # CPU used by VM
        'runtime.host', # ESX host
        'resourcePool', # RP
        'guest.guestFullName',
        # IP addresses as seen by guest tools,
        'guest.net',
        'guest.guestState',
        'guest.toolsVersion',
        'guest.toolsRunningStatus',
        # IP addresses as seen by guest tools,
        'guest.toolsVersionStatus2',
        # VM extraconfig info e.g opennebula.vm.running
        'config.extraConfig',
        'config.hardware.numCPU',
        'config.hardware.memoryMB',
        'config.annotation',
        'datastore'
    ]

    filter_spec = RbVmomi::VIM.PropertyFilterSpec(
        :objectSet => [
            { :obj => view,
              :skip => true,
              :selectSet => [
                  RbVmomi::VIM.TraversalSpec(
                      :name => 'traverseEntities',
                      :type => 'ContainerView',
                      :path => 'view',
                      :skip => false
                  )
              ] }
        ],
        :propSet => [
            {
                :type => 'VirtualMachine',
                :pathSet => monitored_properties
            }
        ]
    )

    result = pc.RetrieveProperties(:specSet => [filter_spec])

    vms = {}
    vm_objects = []
    result.each do |r|
        hashed_properties = r.to_hash
        next unless r.obj.is_a?(RbVmomi::VIM::VirtualMachine)

        # Only take care of VMs, not templates
        if !hashed_properties['config.template']
            vms[r.obj._ref] = hashed_properties
            vm_objects << r.obj
        end
    end

    pm = @vi_client.vim.serviceContent.perfManager

    stats = {}

    max_samples = 9
    refresh_rate = 20 # Real time stats takes samples every 20 seconds

    last_mon_time = one_host['TEMPLATE/VCENTER_LAST_PERF_POLL']

    if last_mon_time
        # Cap the number of samples to the time elapsed since the
        # previous poll; a negative delta (clock skew) resets it
        interval = (Time.now.to_i - last_mon_time.to_i)
        interval = 3601 if interval < 0
        samples = (interval / refresh_rate)
        samples = 1 if samples == 0
        interval > 3600 ? max_samples = 9 : max_samples = samples
    end

    if !vm_objects.empty?
        # Best effort: a failed perf query must not abort the whole
        # monitoring cycle, so fall back to an empty stats hash
        stats = pm.retrieve_stats(
            vm_objects,
            [
                'net.transmitted',
                'net.bytesRx',
                'net.bytesTx',
                'net.received',
                'virtualDisk.numberReadAveraged',
                'virtualDisk.numberWriteAveraged',
                'virtualDisk.read',
                'virtualDisk.write'
            ],
            {
                :max_samples => max_samples
            }
        ) rescue {}
    end

    if !stats.empty?
        last_mon_time = Time.now.to_i.to_s
    end

    @rp_list ||= get_resource_pool_list

    vm_pool =
        VCenterDriver::VIHelper
        .one_pool(
            OpenNebula::VirtualMachinePool
        )
    # We filter to retrieve only those VMs
    # running in the host that we are monitoring
    host_vms =
        vm_pool
        .retrieve_xmlelements(
            "/VM_POOL/VM[HISTORY_RECORDS/HISTORY/HID='#{host_id}']"
        )

    vms.each do |vm_ref, info|
        vm_info = ''
        begin
            esx_host = esx_hosts[info['runtime.host']._ref]
            info[:esx_host_name] = esx_host[:name]
            info[:esx_host_cpu] = esx_host[:cpu]
            info[:cluster_name] = cluster_name
            info[:cluster_ref] = cluster_ref
            info[:vc_uuid] = vc_uuid
            info[:host_id] = host_id
            info[:rp_list] = @rp_list

            # Check the running flag
            running_flag = info['config.extraConfig'].select do |val|
                val[:key] == 'opennebula.vm.running'
            end

            if !running_flag.empty? && running_flag.first
                running_flag = running_flag[0][:value]
            end

            next if running_flag == 'no'

            id = -1
            # Find the VM by its deploy_id,
            # which in the vCenter driver is
            # the vCenter managed object reference
            found_vm =
                host_vms
                .select do |vm|
                    vm['DEPLOY_ID'].eql? vm_ref
                end.first
            id = found_vm['ID'] if found_vm

            # skip if it is a wild and
            # we are looking for OpenNebula VMs
            next if (vm_type == 'ones') && (id == -1)
            # skip if it is not a wild and we are looking for wilds
            next if (vm_type == 'wilds') && (id != -1)
            # skip if already monitored
            next if monitored_vms.include? vm_ref

            monitored_vms << vm_ref

            vm =
                VCenterDriver::VirtualMachine
                .new(
                    @vi_client,
                    vm_ref,
                    id
                )
            vm.vm_info = info
            vm.monitor(stats)

            vm_name = "#{info['name']} - #{cluster_name}"
            vm_info << "VM = [ ID=\"#{id}\", "
            vm_info << "VM_NAME=\"#{vm_name}\", "
            vm_info << "DEPLOY_ID=\"#{vm_ref}\", "

            # if the machine does not exist in
            # opennebula it means that is a wild:
            if vm.one_exist?
                mon_s64 = Base64.strict_encode64(vm.info)
                vm_info << "MONITOR=\"#{mon_s64}\"]\n"
            else
                vm_template64 =
                    Base64
                    .encode64(
                        vm.vm_to_one(vm_name)
                    ).gsub("\n", '')
                vm_info << 'VCENTER_TEMPLATE="YES",'
                vm_info << "IMPORT_TEMPLATE=\"#{vm_template64}\"]\n"
            end
        rescue StandardError => e
            vm_info = error_monitoring(e, id, vm_ref, vc_uuid, info)
        end

        str_info << vm_info
    end

    view.DestroyView # Destroy the view

    [str_info, last_mon_time]
end
|
859
|
+
|
860
|
+
# Builds a monitoring error record for a VM whose probe raised.
#
# @param e        [StandardError] exception captured while monitoring
# @param id       [Integer] OpenNebula VM id (-1 for wild VMs)
# @param vm_ref   [String] vCenter managed object reference of the VM
# @param _vc_uuid [String] vCenter instance UUID (unused, kept for API)
# @param info     [Hash] raw vCenter properties; only 'name' is read
#
# @return [String] a single "VM = [ ... ]" template line carrying the
#         exception Base64-encoded in its ERROR attribute
def error_monitoring(e, id, vm_ref, _vc_uuid, info = {})
    error_info = ''
    vm_name = info['name']
    tmp_str = e.inspect
    # e.backtrace is nil when the exception was never actually raised;
    # don't let the error handler itself crash in that case
    tmp_str << e.backtrace.join("\n") if e.backtrace

    error_info << "VM = [ ID=\"#{id}\", "
    error_info << "VM_NAME=\"#{vm_name}\", "
    error_info << "DEPLOY_ID=\"#{vm_ref}\", "
    error_info <<
        "ERROR=\"#{Base64.encode64(tmp_str).gsub("\n", '')}\"]\n"
end
|
872
|
+
|
873
|
+
# Returns one CUSTOMIZATION = [ NAME, TYPE ] template line per
# customization spec registered in vCenter.
#
# @return [String] concatenated template lines ('' when none exist)
def monitor_customizations
    specs =
        self['_connection']
            .serviceContent
            .customizationSpecManager
            .info

    specs.map do |spec|
        'CUSTOMIZATION = [ ' \
        "NAME = \"#{spec.name}\", " \
        "TYPE = \"#{spec.type}\" ]\n"
    end.join
end
|
892
|
+
|
893
|
+
# Walks up the inventory tree from @item until the enclosing
# Datacenter managed object is reached.
#
# @return [Datacenter] wrapper around the parent datacenter
# @raise [RuntimeError] when the tree root is reached without one
def datacenter # rubocop:disable Naming/AccessorMethodName
    node = @item

    loop do
        break if node.instance_of?(RbVmomi::VIM::Datacenter)

        node = node.parent
        raise 'Could not find the parent Datacenter' if node.nil?
    end

    Datacenter.new(node)
end
|
905
|
+
|
906
|
+
# Registers an OpenNebula host representing a vCenter cluster and
# brings it to the enabled state.
#
# @param cluster [Hash] needs :cluster_name, :cluster_ref,
#        :vcenter_uuid and :vcenter_version
# @param con_ops [Hash] vCenter connection data (:host, :user,
#        :password and optionally :port)
# @param rp [String, nil] resource pool to pin the host to
# @param one_cluster_id [Integer, nil] OpenNebula cluster id
#        (nil maps to -1, the default cluster)
#
# @return [OpenNebula::Host] the newly registered host
# @raise [RuntimeError] when any step fails; the host is deleted
#        before raising so no half-configured host is left behind
def self.to_one(cluster, con_ops, rp, one_cluster_id)
    one_host = VCenterDriver::VIHelper.new_one_item(OpenNebula::Host)

    if OpenNebula.is_error?(one_host)
        raise "Could not create host: #{one_host.message}"
    end

    one_cluster_id ||= -1

    rc = one_host
         .allocate(
             cluster[:cluster_name],
             'vcenter',
             'vcenter',
             one_cluster_id.to_i
         )

    if OpenNebula.is_error?(rc)
        raise "Could not allocate host: #{rc.message}"
    end

    template = "VCENTER_HOST=\"#{con_ops[:host]}\"\n"\
               "VCENTER_PASSWORD=\"#{con_ops[:password]}\"\n"\
               "VCENTER_USER=\"#{con_ops[:user]}\"\n"\
               "VCENTER_CCR_REF=\"#{cluster[:cluster_ref]}\"\n"\
               "VCENTER_INSTANCE_ID=\"#{cluster[:vcenter_uuid]}\"\n"\
               "VCENTER_VERSION=\"#{cluster[:vcenter_version]}\"\n"

    template << "VCENTER_RESOURCE_POOL=\"#{rp}\"" if rp

    template << "VCENTER_PORT=\"#{con_ops[:port]}\"" if con_ops[:port]

    # Each step below rolls the host back (delete) on failure so
    # callers never see a half-configured host
    rc = one_host.update(template, false)
    rollback_and_raise(one_host, 'update', rc) if OpenNebula.is_error?(rc)

    rc = one_host.offline
    rollback_and_raise(one_host, 'offline', rc) if OpenNebula.is_error?(rc)

    rc = one_host.enable
    rollback_and_raise(one_host, 'enable', rc) if OpenNebula.is_error?(rc)

    one_host
end

# Deletes a half-configured host and raises an error describing the
# failed step (and the delete failure too, when cleanup also fails).
# Fixes the original logic, which raised with the message of the
# *successful* delete call instead of the step that actually failed.
#
# @param one_host [OpenNebula::Host] host to clean up
# @param action [String] name of the failed step ('update', ...)
# @param error_rc [OpenNebula::Error] error returned by the step
def self.rollback_and_raise(one_host, action, error_rc)
    step_error = error_rc.message
    rc = one_host.delete

    unless OpenNebula.is_error?(rc)
        raise "Could not #{action} host: #{step_error}"
    end

    raise "Could not #{action} host: #{step_error} "\
          "and could not delete host: #{rc.message}"
end

private_class_method :rollback_and_raise
|
979
|
+
|
980
|
+
# Builds a cluster wrapper from a vCenter managed object reference.
#
# @param ref [String] moref of the ClusterComputeResource
# @param vi_client [VCenterDriver::VIClient] active vCenter session
def self.new_from_ref(ref, vi_client)
    item = RbVmomi::VIM::ClusterComputeResource.new(vi_client.vim, ref)
    new(item, vi_client)
end
|
990
|
+
|
991
|
+
end
|
992
|
+
# class ClusterComputeResource
|
993
|
+
|
994
|
+
##########################################################################
|
995
|
+
# Class ESXHost
|
996
|
+
##########################################################################
|
997
|
+
class ESXHost
|
998
|
+
|
999
|
+
attr_accessor :item
|
1000
|
+
|
1001
|
+
include Memoize
|
1002
|
+
|
1003
|
+
PG_CREATE_TIMEOUT = 240 # We will wait for 4 minutes for the pg creation
|
1004
|
+
|
1005
|
+
# Wraps an RbVmomi HostSystem managed object.
#
# @param item [RbVmomi::VIM::HostSystem] the ESX host object
# @param vi_client [VCenterDriver::VIClient, nil] vCenter session
def initialize(item, vi_client = nil)
    @item = item
    @vi_client = vi_client
    # undo log consumed by #network_rollback
    @net_rollback = []
    # serialize driver operations on this host by default (see #lock)
    @locking = true
end
|
1011
|
+
|
1012
|
+
# Builds an ESXHost wrapper from a vCenter managed object reference.
def self.new_from_ref(ref, vi_client)
    host_system = RbVmomi::VIM::HostSystem.new(vi_client.vim, ref)
    new(host_system, vi_client)
end
|
1015
|
+
|
1016
|
+
# Locking function. Similar to flock
|
1017
|
+
# Locking function. Similar to flock: takes an exclusive advisory
# lock on a per-host file under /tmp so concurrent driver actions
# on the same ESX host are serialized. No-op when @locking is off.
def lock
    # guard first: the original derived the lock-file name from
    # @item even when locking was disabled, doing useless work
    return unless @locking

    hostlockname = @item['name'].downcase.tr(' ', '_')

    @locking_file =
        File
        .open("/tmp/vcenter-#{hostlockname}-lock", 'w')
    @locking_file.flock(File::LOCK_EX)
end
|
1027
|
+
|
1028
|
+
# Unlock driver execution mutex
|
1029
|
+
# Unlock driver execution mutex: closing the lock file releases
# the flock taken by #lock. No-op when @locking is off.
def unlock
    @locking_file.close if @locking
end
|
1034
|
+
|
1035
|
+
########################################################################
|
1036
|
+
# Check if standard switch exists in host
|
1037
|
+
########################################################################
|
1038
|
+
|
1039
|
+
# Check if a standard vSwitch with the given name exists in the host.
#
# @return [RbVmomi::VIM::HostVirtualSwitch, nil] the switch or nil
#         (also nil when the lookup itself errors out)
def vss_exists(vswitch_name)
    vswitches = @item.configManager.networkSystem.networkInfo.vswitch
    begin
        vswitches.find {|vs| vs.name == vswitch_name }
    rescue StandardError
        nil
    end
end
|
1043
|
+
|
1044
|
+
########################################################################
|
1045
|
+
# Create a standard vcenter switch in an ESX host
|
1046
|
+
########################################################################
|
1047
|
+
|
1048
|
+
# Create a standard vCenter switch in this ESX host.
#
# @param name [String] switch name
# @param num_ports [Integer, nil] port count (nil defaults to 128)
# @param pnics [String, nil] comma-separated physical nics to bond
# @param mtu [Integer] MTU for the switch
# @param pnics_available [Array<String>, nil] nics not already in use
#        (typically from #available_pnics)
#
# @return [String] the switch name
# @raise [RuntimeError] when AddVirtualSwitch fails
def create_vss(
    name,
    num_ports,
    pnics = nil,
    mtu = 1500,
    pnics_available = nil
)
    # Get NetworkSystem
    nws = self['configManager.networkSystem']
    hostbridge = nil
    nics = []

    num_ports = 128 if num_ports.nil?

    if pnics
        pnics = pnics.split(',')
        pnics.each do |pnic|
            # Add nics only when reported as free; guard against a nil
            # pnics_available (its default), which previously raised
            # NoMethodError when pnics were requested without it
            nics << pnic if pnics_available && pnics_available.include?(pnic)
        end

        if !nics.empty?
            hostbridge =
                RbVmomi::VIM::HostVirtualSwitchBondBridge(
                    :nicDevice => nics
                )
        end
    end

    # Create spec
    vswitchspec =
        RbVmomi::VIM::HostVirtualSwitchSpec(
            :bridge => hostbridge,
            :mtu => mtu,
            :numPorts => num_ports
        )

    # add vSwitch to the host
    begin
        nws.AddVirtualSwitch(:vswitchName => name, :spec => vswitchspec)
    rescue StandardError => e
        raise "The standard vSwitch #{name} could not be \
created. AddVirtualSwitch failed Reason: #{e.message}."
    end

    # record undo info so #network_rollback can delete the switch
    @net_rollback << { :action => :delete_sw, :name => name }

    name
end
|
1097
|
+
|
1098
|
+
########################################################################
|
1099
|
+
# Update a standard vcenter switch in an ESX host
|
1100
|
+
########################################################################
|
1101
|
+
# Update a standard vCenter switch in an ESX host to the requested
# pnics/num_ports/mtu. No-op when the current spec already matches;
# otherwise the previous spec is pushed on @net_rollback so
# #network_rollback can restore it.
#
# @param switch [RbVmomi::VIM::HostVirtualSwitch] switch to update
# @param name [String] switch name (used for the vCenter call)
# @param pnics [String, nil] comma-separated physical nic names
# @param num_ports [Integer] desired port count
# @param mtu [Integer] desired MTU
def update_vss(switch, name, pnics, num_ports, mtu)
    # nil (or anything without #split) collapses to no pnics
    pnics = pnics.split(',') rescue []

    # Backup switch spec for rollback
    orig_spec = switch.spec

    # A bonded bridge exposing nicDevice means the switch currently
    # has physical nics attached
    switch_has_pnics = switch
                       .spec
                       .respond_to?(
                           :bridge
                       ) && switch
                            .spec
                            .bridge
                            .respond_to?(
                                :nicDevice
                            )

    # Compare current configuration and return if switch hasn't
    # changed: mtu, numPorts and the (order-insensitive) pnic set
    # must all match
    same_switch = switch.spec.respond_to?(:mtu) && switch
                                                   .spec
                                                   .mtu == mtu &&
                  switch
                  .spec
                  .respond_to?(
                      :numPorts
                  ) && switch.spec.numPorts == num_ports &&
                  (!switch_has_pnics && pnics.empty? ||
                    switch_has_pnics && switch
                                        .spec
                                        .bridge
                                        .nicDevice
                                        .uniq
                                        .sort == pnics.uniq.sort)
    return if same_switch

    # Let's create a new spec and update the switch
    hostbridge = nil
    nws = self['configManager.networkSystem']
    unless pnics.empty?
        hostbridge =
            RbVmomi::VIM::HostVirtualSwitchBondBridge(
                :nicDevice => pnics
            )
    end
    vswitchspec =
        RbVmomi::VIM::HostVirtualSwitchSpec(
            :bridge => hostbridge,
            :mtu => mtu,
            :numPorts => num_ports
        )
    begin
        nws
            .UpdateVirtualSwitch(
                :vswitchName => name,
                :spec => vswitchspec
            )
    rescue StandardError => e
        raise "The standard switch with name #{name} \
could not be updated. Reason: #{e.message}"
    end

    # remember the old spec so #network_rollback can restore it
    @net_rollback << {
        :action => :update_sw,
        :name => name,
        :spec => orig_spec
    }
end
|
1168
|
+
|
1169
|
+
########################################################################
|
1170
|
+
# Remove a standard vswitch from the host
|
1171
|
+
########################################################################
|
1172
|
+
# Remove a standard vSwitch from the host (best effort).
# ResourceInUse / NotFound are reported on stderr and swallowed;
# any other failure raises.
#
# @param vswitch_name [String] name of the switch to delete
# @return [String, nil] the switch name on success, nil when skipped
def remove_vss(vswitch_name)
    nws = self['configManager.networkSystem']

    begin
        nws.RemoveVirtualSwitch(:vswitchName => vswitch_name)
    rescue RbVmomi::VIM::ResourceInUse
        # deliberate best-effort: a switch still in use is left alone
        STDERR.puts "The standard switch #{vswitch_name} \
is in use so it cannot be deleted"
        return
    rescue RbVmomi::VIM::NotFound
        # already gone; nothing to do
        STDERR.puts "The standard switch #{vswitch_name} \
was not found in vCenter"
        return
    rescue StandardError => e
        raise "There was a failure while deleting a vcenter \
standard switch #{vswitch_name}. Reason: #{e.message}"
    end

    vswitch_name
end
|
1192
|
+
|
1193
|
+
########################################################################
|
1194
|
+
# Get physical nics that are available in a host
|
1195
|
+
########################################################################
|
1196
|
+
# Physical nics of this host that are not attached to any standard
# vSwitch.
#
# NOTE(review): the pnic keys coming from vCenter are mutated in
# place via slice! (prefix stripped), same as the original code.
#
# @return [Array<String>] device names of the free physical nics
def available_pnics
    in_use = []

    # Collect devices already claimed by standard switches
    @item.config.network.vswitch.each do |vswitch|
        vswitch.pnic.each do |pnic_key|
            next unless pnic_key.instance_of?(String)

            pnic_key.slice!('key-vim.host.PhysicalNic-')
            in_use << pnic_key
        end
    end

    # Every host pnic not claimed by a switch is available
    host_pnics = self['config.network'].pnic
    host_pnics
        .reject {|pnic| in_use.include?(pnic.device) }
        .map(&:device)
end
|
1221
|
+
|
1222
|
+
########################################################################
|
1223
|
+
# Get networks inside a host
|
1224
|
+
########################################################################
|
1225
|
+
# Map of standard vSwitch name => port group keys hosted on it.
#
# NOTE(review): the portgroup key strings from vCenter are mutated
# in place via slice! (prefix stripped), same as the original code.
#
# @return [Hash{String => Array<String>}]
def pg_inside_host
    @item.config.network.vswitch.each_with_object({}) do |vswitch, acc|
        acc[vswitch.name] = vswitch.portgroup.map do |pg_key|
            pg_key.slice!('key-vim.host.PortGroup-')
            pg_key
        end
    end
end
|
1239
|
+
|
1240
|
+
########################################################################
|
1241
|
+
# Check if proxy switch exists in host for distributed virtual switch
|
1242
|
+
########################################################################
|
1243
|
+
|
1244
|
+
# Check whether this host already has a proxy switch for the given
# distributed virtual switch.
#
# @return [RbVmomi::VIM::HostProxySwitch, nil] the proxy switch, or
#         nil when absent (or when the lookup itself errors out)
def proxy_switch_exists(switch_name)
    nws = self['configManager.networkSystem']
    proxy_switches = nws.networkInfo.proxySwitch
    begin
        proxy_switches.find {|ps| ps.dvsName == switch_name }
    rescue StandardError
        nil
    end
end
|
1251
|
+
|
1252
|
+
########################################################################
|
1253
|
+
# Assign a host to a a distributed vcenter switch (proxy switch)
|
1254
|
+
########################################################################
|
1255
|
+
|
1256
|
+
# Assign this ESX host to a distributed vCenter switch (proxy
# switch), optionally binding physical nics to the DVS uplink
# portgroup ("<switch_name>-uplink-pg").
#
# @param dvs [Object] wrapper whose .item is the RbVmomi DVS object
# @param switch_name [String] DVS name
# @param pnics [String, nil] comma-separated physical nics for uplinks
# @param _pnics_available [Object] unused, kept for API compatibility
#
# @return [RbVmomi::VIM] the DVS managed object
# @raise [RuntimeError] when the uplink pg is missing or the
#         reconfigure task does not end in 'success'
def assign_proxy_switch(dvs, switch_name, pnics, _pnics_available)
    dvs = dvs.item

    # Return if host is already assigned
    return dvs unless dvs['config.host']
                      .select do |host|
                          host.config.host._ref == self['_ref']
                      end.empty?

    # Prepare spec for DVS reconfiguration
    config_spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
    config_spec.name = switch_name
    config_spec.configVersion = dvs['config.configVersion']

    # Check if host is already assigned to distributed switch
    operation = 'add'
    # #operation = "edit" if !dvs['config.host'].select
    # { |host| host.config.host._ref == self['_ref'] }.empty?

    # Add host members to the distributed virtual switch
    host_member_spec =
        RbVmomi::VIM::DistributedVirtualSwitchHostMemberConfigSpec
        .new
    host_member_spec.host = @item
    host_member_spec.operation = operation
    host_member_spec.backing =
        RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicBacking
        .new
    host_member_spec.backing.pnicSpec = []

    # If pnics are needed assign pnics for uplinks
    if pnics
        pnics = pnics.split(',')
        # Get uplink portgroup from dvswitch; nil when the lookup
        # fails (rescue modifier swallows the error on purpose)
        uplink_key = dvs['config.uplinkPortgroup'].select do |ul|
            ul.name == "#{switch_name}-uplink-pg"
        end.first.key rescue nil

        unless uplink_key
            raise "Cannot find the uplink portgroup for #{switch_name}"
        end

        # one pnic spec per physical nic, all on the same uplink pg
        pnics.each do |pnic|
            pnic_spec =
                RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicSpec
                .new
            pnic_spec.pnicDevice = pnic
            pnic_spec.uplinkPortgroupKey = uplink_key
            host_member_spec.backing.pnicSpec << pnic_spec
        end
    end

    config_spec.host = [host_member_spec]

    # The DVS must be reconfigured; wait for the task and verify it
    # actually succeeded (wait_for_completion alone is not enough)
    dvs_reconfigure_task = dvs.ReconfigureDvs_Task(:spec => config_spec)
    dvs_reconfigure_task.wait_for_completion
    if dvs_reconfigure_task.info.state != 'success'
        raise "It wasn't possible to assign host \
#{self['name']} as a member of #{switch_name}'"
    end

    dvs
end
|
1320
|
+
|
1321
|
+
########################################################################
|
1322
|
+
# Create a standard port group
|
1323
|
+
########################################################################
|
1324
|
+
|
1325
|
+
# Create a standard port group on a vSwitch and wait (up to
# PG_CREATE_TIMEOUT seconds) until the backing network object shows
# up on the host so a valid reference can be returned.
#
# @param pgname [String] port group name
# @param vswitch [String] name of the standard vSwitch to attach to
# @param vlan [Integer] VLAN id (0 = untagged)
#
# @return [String] the _ref of the new network
# @raise [RuntimeError] when creation fails or the network never
#         becomes visible within the timeout
def create_pg(pgname, vswitch, vlan = 0)
    spec = RbVmomi::VIM.HostPortGroupSpec(
        :name => pgname,
        :vlanId => vlan,
        :vswitchName => vswitch,
        :policy => RbVmomi::VIM.HostNetworkPolicy
    )

    nws = self['configManager.networkSystem']

    begin
        nws.AddPortGroup(:portgrp => spec)
    rescue StandardError => e
        raise "A port group with name #{pgname} \
could not be created. Reason: #{e.message}"
    end

    @net_rollback << { :action => :delete_pg, :name => pgname }

    # wait until the network is ready and we have a reference.
    # Query first and only sleep while it is still missing — the
    # original queried twice and slept one needless extra second
    # after the port group had already appeared.
    networks = []
    (0..PG_CREATE_TIMEOUT).each do
        networks = @item['network'].select {|net| net.name == pgname }
        break unless networks.empty?

        sleep 1
    end

    if networks.empty?
        raise 'Cannot get VCENTER_NET_REF for new port group'
    end

    networks.first._ref
end
|
1359
|
+
|
1360
|
+
########################################################################
|
1361
|
+
# Check if standard port group exists in host
|
1362
|
+
########################################################################
|
1363
|
+
|
1364
|
+
# Check if a standard port group with the given name exists in host.
#
# @return [RbVmomi::VIM::HostPortGroup, nil] the port group or nil
#         (also nil when the lookup itself errors out)
def pg_exists(pg_name)
    nws = self['configManager.networkSystem']
    portgroups = nws.networkInfo.portgroup
    begin
        portgroups.find {|pg| pg.spec.name == pg_name }
    rescue StandardError
        nil
    end
end
|
1369
|
+
|
1370
|
+
########################################################################
|
1371
|
+
# Is the switch for the pg different?
|
1372
|
+
########################################################################
|
1373
|
+
|
1374
|
+
# Does this port group sit on a different vSwitch than the one
# requested? False when the spec does not expose a vSwitch name.
def pg_changes_sw?(pg, switch_name)
    spec = pg.spec
    return false unless spec.respond_to?(:vswitchName)

    spec.vswitchName != switch_name
end
|
1383
|
+
|
1384
|
+
########################################################################
|
1385
|
+
# Update a standard port group
|
1386
|
+
########################################################################
|
1387
|
+
|
1388
|
+
# Re-create a standard port group with a new VLAN id (and switch),
# pushing the old spec on @net_rollback. No-op when the spec does
# not expose a vlanId or it already matches.
#
# @param pg [RbVmomi::VIM::HostPortGroup] port group to update
# @param switch_name [String] vSwitch the new spec should point at
# @param vlan_id [Integer] desired VLAN id
def update_pg(pg, switch_name, vlan_id)
    # guard clause: nothing to do unless a differing vlanId is present
    return if !pg.spec.respond_to?(:vlanId) || pg.spec.vlanId == vlan_id

    # Backup original spec so the change can be rolled back
    orig_spec = pg.spec
    pg_name = pg.spec.name

    # Create new spec
    new_spec = RbVmomi::VIM.HostPortGroupSpec(
        :name => pg_name,
        :vlanId => vlan_id,
        :vswitchName => switch_name,
        :policy => RbVmomi::VIM.HostNetworkPolicy
    )

    nws = self['configManager.networkSystem']

    begin
        nws.UpdatePortGroup(:pgName => pg_name, :portgrp => new_spec)
    rescue StandardError => e
        raise "A port group with name #{pg_name} \
could not be updated. Reason: #{e.message}"
    end

    # Set rollback operation
    @net_rollback << {
        :action => :update_pg,
        :name => pg_name,
        :spec => orig_spec
    }
end
|
1421
|
+
|
1422
|
+
########################################################################
|
1423
|
+
# Remove a standard port group from the host
|
1424
|
+
########################################################################
|
1425
|
+
|
1426
|
+
# Remove a standard port group from the host (best effort),
# remembering which vSwitch it belonged to.
# ResourceInUse / NotFound are reported on stderr and swallowed;
# any other error raises.
#
# @param pgname [String] port group name
# @return [String, nil] owning vSwitch name, or nil when the pg was
#         in use, missing, or not present in the host network config
def remove_pg(pgname)
    nws = self['configManager.networkSystem']

    swname = nil
    begin
        # look up the owning vswitch before deleting the pg
        portgroups = nws.networkConfig.portgroup
        portgroups.each do |pg|
            if pg.spec.name == pgname
                swname = pg.spec.vswitchName
                break
            end
        end
        nws.RemovePortGroup(:pgName => pgname)
    rescue RbVmomi::VIM::ResourceInUse
        # deliberate best-effort: a pg still in use is left alone
        STDERR.puts "The standard portgroup \
#{pgname} is in use so it cannot be deleted"
        return
    rescue RbVmomi::VIM::NotFound
        # already gone; nothing to do
        STDERR.puts "The standard portgroup \
#{pgname} was not found in vCenter"
        return
    rescue StandardError => e
        raise "There was a failure while \
deleting a standard portgroup #{pgname} \
in vCenter. Reason: #{e.message}"
    end

    swname
end
|
1455
|
+
|
1456
|
+
# Undo the network changes recorded in @net_rollback, newest first.
# Entries are pushed by create_vss (:delete_sw), update_vss
# (:update_sw), create_pg (:delete_pg) and update_pg (:update_pg).
# ResourceInUse / NotFound during deletions are ignored; any other
# failure aborts the rollback with an exception.
def network_rollback
    nws = self['configManager.networkSystem']

    # reverse order: undo the most recent change first
    @net_rollback.reverse_each do |nr|
        case nr[:action]
        when :update_pg
            # restore the saved port group spec
            begin
                nws
                    .UpdatePortGroup(
                        :pgName => nr[:name],
                        :portgrp => nr[:spec]
                    )
            rescue StandardError => e
                raise "A rollback operation for standard \
port group #{nr[:name]} could not \
be performed. Reason: #{e.message}"
            end
        when :update_sw
            # restore the saved vswitch spec
            begin
                nws
                    .UpdateVirtualSwitch(
                        :vswitchName => nr[:name],
                        :spec => nr[:spec]
                    )
            rescue StandardError => e
                raise "A rollback operation for standard \
switch #{nr[:name]} could not \
be performed. Reason: #{e.message}"
            end
        when :delete_sw
            begin
                nws.RemoveVirtualSwitch(:vswitchName=> nr[:name])
            rescue RbVmomi::VIM::ResourceInUse
                next # Ignore if switch in use
            rescue RbVmomi::VIM::NotFound
                next # Ignore if switch not found
            rescue StandardError => e
                raise "A rollback operation for standard \
switch #{nr[:name]} could not \
be performed. Reason: #{e.message}"
            end
        when :delete_pg
            begin
                nws.RemovePortGroup(:pgName => nr[:name])
            rescue RbVmomi::VIM::ResourceInUse
                next # Ignore if pg in use
            rescue RbVmomi::VIM::NotFound
                next # Ignore if pg not found
            rescue StandardError => e
                raise "A rollback operation for \
standard port group #{nr[:name]} could \
not be performed. Reason: #{e.message}"
            end
        end
    end
end
|
1512
|
+
|
1513
|
+
end
|
1514
|
+
# class ESXHost
|
1515
|
+
|
1516
|
+
end
|
1517
|
+
# module VCenterDriver
|