opennebula 6.10.3 → 6.99.85.pre
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/lib/cloud/CloudClient.rb +3 -3
- data/lib/models/role.rb +349 -823
- data/lib/models/service.rb +156 -80
- data/lib/models/vmrole.rb +703 -0
- data/lib/models/vrrole.rb +284 -0
- data/lib/models.rb +3 -1
- data/lib/opennebula/acl.rb +1 -1
- data/lib/opennebula/acl_pool.rb +1 -1
- data/lib/opennebula/backupjob.rb +1 -1
- data/lib/opennebula/backupjob_pool.rb +1 -1
- data/lib/opennebula/client.rb +1 -1
- data/lib/opennebula/cluster.rb +45 -2
- data/lib/opennebula/cluster_pool.rb +1 -1
- data/lib/opennebula/datastore.rb +1 -1
- data/lib/opennebula/datastore_pool.rb +1 -1
- data/lib/opennebula/document.rb +1 -1
- data/lib/opennebula/document_json.rb +1 -1
- data/lib/opennebula/document_pool.rb +1 -1
- data/lib/opennebula/document_pool_json.rb +1 -1
- data/lib/opennebula/error.rb +1 -1
- data/lib/opennebula/flow/grammar.rb +1 -1
- data/lib/opennebula/flow/service_pool.rb +1 -1
- data/lib/opennebula/flow/service_template.rb +353 -97
- data/lib/opennebula/flow/service_template_ext.rb +3 -3
- data/lib/opennebula/flow/service_template_pool.rb +1 -1
- data/lib/opennebula/flow/validator.rb +458 -410
- data/lib/opennebula/flow.rb +1 -1
- data/lib/opennebula/group.rb +1 -1
- data/lib/opennebula/group_pool.rb +1 -1
- data/lib/opennebula/hook.rb +1 -1
- data/lib/opennebula/hook_log.rb +1 -1
- data/lib/opennebula/hook_pool.rb +1 -1
- data/lib/opennebula/host.rb +1 -60
- data/lib/opennebula/host_pool.rb +1 -1
- data/lib/opennebula/image.rb +1 -1
- data/lib/opennebula/image_pool.rb +1 -1
- data/lib/opennebula/ldap_auth.rb +1 -1
- data/lib/opennebula/ldap_auth_spec.rb +1 -1
- data/lib/opennebula/lockable_ext.rb +1 -1
- data/lib/opennebula/marketplace.rb +1 -1
- data/lib/opennebula/marketplace_pool.rb +1 -1
- data/lib/opennebula/marketplaceapp.rb +1 -1
- data/lib/opennebula/marketplaceapp_ext.rb +14 -211
- data/lib/opennebula/marketplaceapp_pool.rb +1 -1
- data/lib/opennebula/oneflow_client.rb +11 -9
- data/lib/opennebula/pool.rb +1 -1
- data/lib/opennebula/pool_element.rb +1 -1
- data/lib/opennebula/security_group.rb +1 -1
- data/lib/opennebula/security_group_pool.rb +1 -1
- data/lib/opennebula/server_cipher_auth.rb +1 -1
- data/lib/opennebula/server_x509_auth.rb +1 -1
- data/lib/opennebula/ssh_auth.rb +1 -1
- data/lib/opennebula/system.rb +1 -1
- data/lib/opennebula/template.rb +1 -1
- data/lib/opennebula/template_ext.rb +1 -1
- data/lib/opennebula/template_pool.rb +1 -1
- data/lib/opennebula/user.rb +1 -1
- data/lib/opennebula/user_pool.rb +1 -1
- data/lib/opennebula/utils.rb +2 -2
- data/lib/opennebula/vdc.rb +1 -1
- data/lib/opennebula/vdc_pool.rb +1 -1
- data/lib/opennebula/virtual_machine.rb +3 -12
- data/lib/opennebula/virtual_machine_ext.rb +2 -31
- data/lib/opennebula/virtual_machine_pool.rb +1 -1
- data/lib/opennebula/virtual_network.rb +1 -1
- data/lib/opennebula/virtual_network_pool.rb +1 -1
- data/lib/opennebula/virtual_router.rb +1 -1
- data/lib/opennebula/virtual_router_pool.rb +1 -1
- data/lib/opennebula/vm_group.rb +1 -1
- data/lib/opennebula/vm_group_pool.rb +1 -1
- data/lib/opennebula/vntemplate.rb +1 -1
- data/lib/opennebula/vntemplate_pool.rb +1 -1
- data/lib/opennebula/wait_ext.rb +1 -1
- data/lib/opennebula/x509_auth.rb +1 -1
- data/lib/opennebula/xml_element.rb +2 -2
- data/lib/opennebula/xml_pool.rb +1 -1
- data/lib/opennebula/xml_utils.rb +1 -1
- data/lib/opennebula/zone.rb +1 -1
- data/lib/opennebula/zone_pool.rb +1 -1
- data/lib/opennebula.rb +2 -2
- metadata +6 -67
- data/lib/ActionManager.rb +0 -280
- data/lib/CommandManager.rb +0 -328
- data/lib/DriverExecHelper.rb +0 -213
- data/lib/HostSyncManager.rb +0 -111
- data/lib/OpenNebulaDriver.rb +0 -223
- data/lib/VirtualMachineDriver.rb +0 -404
- data/lib/datacenter.rb +0 -1319
- data/lib/datastore.rb +0 -1049
- data/lib/distributed_firewall.rb +0 -293
- data/lib/file_helper.rb +0 -374
- data/lib/host.rb +0 -1518
- data/lib/logical_port.rb +0 -50
- data/lib/logical_switch.rb +0 -77
- data/lib/memoize.rb +0 -74
- data/lib/network.rb +0 -705
- data/lib/nsx_client.rb +0 -157
- data/lib/nsx_component.rb +0 -28
- data/lib/nsx_constants.rb +0 -162
- data/lib/nsx_driver.rb +0 -91
- data/lib/nsx_error.rb +0 -77
- data/lib/nsx_rule.rb +0 -206
- data/lib/nsxt_client.rb +0 -189
- data/lib/nsxt_dfw.rb +0 -196
- data/lib/nsxt_logical_port.rb +0 -94
- data/lib/nsxt_rule.rb +0 -188
- data/lib/nsxt_tz.rb +0 -38
- data/lib/nsxv_client.rb +0 -189
- data/lib/nsxv_dfw.rb +0 -202
- data/lib/nsxv_logical_port.rb +0 -107
- data/lib/nsxv_rule.rb +0 -172
- data/lib/nsxv_tz.rb +0 -41
- data/lib/opaque_network.rb +0 -134
- data/lib/rest_client.rb +0 -191
- data/lib/scripts_common.rb +0 -176
- data/lib/transport_zone.rb +0 -43
- data/lib/vcenter_driver.rb +0 -152
- data/lib/vcenter_importer.rb +0 -626
- data/lib/vi_client.rb +0 -273
- data/lib/vi_helper.rb +0 -328
- data/lib/virtual_machine.rb +0 -3574
- data/lib/virtual_wire.rb +0 -158
- data/lib/vm_device.rb +0 -80
- data/lib/vm_disk.rb +0 -202
- data/lib/vm_folder.rb +0 -69
- data/lib/vm_helper.rb +0 -30
- data/lib/vm_monitor.rb +0 -305
- data/lib/vm_nic.rb +0 -70
- data/lib/vm_template.rb +0 -2112
- data/lib/vmm_importer.rb +0 -165
data/lib/host.rb
DELETED
@@ -1,1518 +0,0 @@
-# -------------------------------------------------------------------------- #
-# Copyright 2002-2024, OpenNebula Project, OpenNebula Systems                #
-#                                                                            #
-# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
-# not use this file except in compliance with the License. You may obtain    #
-# a copy of the License at                                                   #
-#                                                                            #
-# http://www.apache.org/licenses/LICENSE-2.0                                 #
-#                                                                            #
-# Unless required by applicable law or agreed to in writing, software        #
-# distributed under the License is distributed on an "AS IS" BASIS,          #
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
-# See the License for the specific language governing permissions and        #
-# limitations under the License.                                             #
-#--------------------------------------------------------------------------- #
-
-##############################################################################
-# Module VCenterDriver
-##############################################################################
-module VCenterDriver
-
-    require 'json'
-    require 'nsx_driver'
-
-    ##########################################################################
-    # Class HostFolder
-    ##########################################################################
-    class HostFolder
-
-        attr_accessor :item, :items
-
-        def initialize(item)
-            @item = item
-            @items = {}
-        end
-
-        def fetch_clusters!
-            VIClient
-                .get_entities(
-                    @item,
-                    'ClusterComputeResource'
-                ).each do |item|
-                item_name = item._ref
-                @items[item_name.to_sym] = ClusterComputeResource.new(item)
-            end
-        end
-
-        def get_cluster(ref)
-            if !@items[ref.to_sym]
-                rbvmomi_dc =
-                    RbVmomi::VIM::ClusterComputeResource
-                    .new(
-                        @item._connection,
-                        ref
-                    )
-                @items[ref.to_sym] =
-                    ClusterComputeResource
-                    .new(
-                        rbvmomi_dc
-                    )
-            end
-
-            @items[ref.to_sym]
-        end
-
-    end
-    # class HostFolder
-
-    ##########################################################################
-    # Class ClusterComputeResource
-    ##########################################################################
-    class ClusterComputeResource
-
-        attr_accessor :item
-        attr_accessor :rp_list
-
-        include Memoize
-
-        def initialize(item, vi_client = nil)
-            @item = item
-            @vi_client = vi_client
-            @rp_list # rubocop:disable Lint/Void
-        end
-
-        def fetch_resource_pools(rp, rp_array = [])
-            rp_array << rp
-
-            rp.resourcePool.each do |child_rp|
-                fetch_resource_pools(child_rp, rp_array)
-            end
-
-            rp_array
-        end
-
-        def resource_pools
-            if @resource_pools.nil?
-                @resource_pools = fetch_resource_pools(@item.resourcePool)
-            end
-
-            @resource_pools
-        end
-
-        def get_resource_pool_list(
-            rp = @item
-                .resourcePool,
-            parent_prefix = '',
-            rp_array = []
-        )
-            current_rp = ''
-
-            if !parent_prefix.empty?
-                current_rp << parent_prefix
-                current_rp << '/'
-            end
-
-            resource_pool, name = rp.collect('resourcePool', 'name')
-            current_rp << name if name != 'Resources'
-
-            resource_pool.each do |child_rp|
-                get_resource_pool_list(child_rp, current_rp, rp_array)
-            end
-
-            rp_info = {}
-            rp_info[:name] = current_rp
-            rp_info[:ref] = rp._ref
-            rp_array << rp_info unless current_rp.empty?
-
-            rp_array
-        end
-
-        def nsx_get
-            nsx_info = ''
-            nsx_obj = {}
-            # In the future add more than one nsx manager
-            extension_list =
-                @vi_client
-                .vim
-                .serviceContent
-                .extensionManager
-                .extensionList
-            extension_list.each do |ext_list|
-                case ext_list.key
-                when NSXDriver::NSXConstants::NSXV_EXTENSION_LIST
-                    nsx_obj['type'] = NSXDriver::NSXConstants::NSXV
-                    url_full = ext_list.client[0].url
-                    url_split = url_full.split('/')
-                    # protocol = "https://"
-                    protocol = url_split[0] + '//'
-                    # ip_port = ip:port
-                    ip_port = url_split[2]
-                    nsx_obj['url'] = protocol + ip_port
-                    nsx_obj['version'] = ext_list.version
-                    nsx_obj['label'] = ext_list.description.label
-                when NSXDriver::NSXConstants::NSXT_EXTENSION_LIST
-                    nsx_obj['type'] = NSXDriver::NSXConstants::NSXT
-                    nsx_obj['url'] = ext_list.server[0].url
-                    nsx_obj['version'] = ext_list.version
-                    nsx_obj['label'] = ext_list.description.label
-                else
-                    next
-                end
-            end
-            unless nsx_obj.empty?
-                nsx_info << "NSX_MANAGER=\"#{nsx_obj['url']}\"\n"
-                nsx_info << "NSX_TYPE=\"#{nsx_obj['type']}\"\n"
-                nsx_info << "NSX_VERSION=\"#{nsx_obj['version']}\"\n"
-                nsx_info << "NSX_LABEL=\"#{nsx_obj['label']}\"\n"
-            end
-            nsx_info
-        end
-
-        def nsx_ready?
-            @one_item =
-                VCenterDriver::VIHelper
-                .one_item(
-                    OpenNebula::Host,
-                    @vi_client
-                    .instance_variable_get(
-                        :@host_id
-                    ).to_i
-                )
-
-            # Check if NSX_MANAGER is into the host template
-            if [nil, ''].include?(@one_item['TEMPLATE/NSX_MANAGER'])
-                @nsx_status = "NSX_STATUS = \"Missing NSX_MANAGER\"\n"
-                return false
-            end
-
-            # Check if NSX_USER is into the host template
-            if [nil, ''].include?(@one_item['TEMPLATE/NSX_USER'])
-                @nsx_status = "NSX_STATUS = \"Missing NSX_USER\"\n"
-                return false
-            end
-
-            # Check if NSX_PASSWORD is into the host template
-            if [nil, ''].include?(@one_item['TEMPLATE/NSX_PASSWORD'])
-                @nsx_status = "NSX_STATUS = \"Missing NSX_PASSWORD\"\n"
-                return false
-            end
-
-            # Check if NSX_TYPE is into the host template
-            if [nil, ''].include?(@one_item['TEMPLATE/NSX_TYPE'])
-                @nsx_status = "NSX_STATUS = \"Missing NSX_TYPE\"\n"
-                return false
-            end
-
-            # Try a connection as part of NSX_STATUS
-            nsx_client = NSXDriver::NSXClient
-                         .new_from_id(
-                             @vi_client
-                             .instance_variable_get(
-                                 :@host_id
-                             ).to_i
-                         )
-
-            if @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXV
-                # URL to test a connection
-                url = '/api/2.0/vdn/scopes'
-                begin
-                    if nsx_client.get(url)
-                        @nsx_status = "NSX_STATUS = \"OK\"\n"
-                        return true
-                    else
-                        @nsx_status =
-                            "NSX_STATUS = \"Response code incorrect\"\n"
-                        return false
-                    end
-                rescue StandardError
-                    @nsx_status = 'NSX_STATUS = "Error connecting to ' \
-                                  "NSX_MANAGER\"\n"
-                    return false
-                end
-            end
-
-            nxs_type = @one_item['TEMPLATE/NSX_TYPE']
-            return unless nxs_type == NSXDriver::NSXConstants::NSXT
-
-            # URL to test a connection
-            url = '/api/v1/transport-zones'
-            begin
-                if nsx_client.get(url)
-                    @nsx_status = "NSX_STATUS = \"OK\"\n"
-                    true
-                else
-                    @nsx_status =
-                        "NSX_STATUS = \"Response code incorrect\"\n"
-                    false
-                end
-            rescue StandardError
-                @nsx_status = 'NSX_STATUS = "Error connecting to '\
-                              "NSX_MANAGER\"\n"
-                false
-            end
-        end
-
-        def tz_get
-            @nsx_status = ''
-            if !nsx_ready?
-                tz_info = @nsx_status
-            else
-                tz_info = "NSX_STATUS = OK\n"
-                tz_info << 'NSX_TRANSPORT_ZONES = ['
-
-                nsx_client =
-                    NSXDriver::NSXClient
-                    .new_from_id(
-                        @vi_client
-                        .instance_variable_get(
-                            :@host_id
-                        ).to_i
-                    )
-                tz_object = NSXDriver::TransportZone.new_child(nsx_client)
-
-                # NSX request to get Transport Zones
-                case @one_item['TEMPLATE/NSX_TYPE']
-                when NSXDriver::NSXConstants::NSXV
-                    tzs = tz_object.tzs
-                    tzs.each do |tz|
-                        tz_info << tz.xpath('name').text << '="'
-                        tz_info << tz.xpath('objectId').text << '",'
-                    end
-                    tz_info.chomp!(',')
-                when NSXDriver::NSXConstants::NSXT
-                    r = tz_object.tzs
-                    r['results'].each do |tz|
-                        tz_info << tz['display_name'] << '="'
-                        tz_info << tz['id'] << '",'
-                    end
-                    tz_info.chomp!(',')
-                else
-                    raise "Unknown Port Group type \
-                    #{@one_item['TEMPLATE/NSX_TYPE']}"
-                end
-                tz_info << ']'
-                return tz_info
-            end
-            tz_info
-        end
-
-        def monitor
-            total_cpu,
-                num_cpu_cores,
-                effective_cpu,
-                total_memory,
-                effective_mem,
-                num_hosts,
-                num_eff_hosts,
-                overall_status,
-                drs_enabled,
-                ha_enabled= @item.collect('summary.totalCpu',
-                                          'summary.numCpuCores',
-                                          'summary.effectiveCpu',
-                                          'summary.totalMemory',
-                                          'summary.effectiveMemory',
-                                          'summary.numHosts',
-                                          'summary.numEffectiveHosts',
-                                          'summary.overallStatus',
-                                          'configuration.drsConfig.enabled',
-                                          'configuration.dasConfig.enabled')
-
-            mhz_core = total_cpu.to_f / num_cpu_cores.to_f
-            eff_core = effective_cpu.to_f / mhz_core
-
-            free_cpu = format('%.2f', eff_core * 100).to_f # rubocop:disable Style/FormatStringToken
-            total_cpu = num_cpu_cores.to_f * 100
-            used_cpu = format('%.2f', total_cpu - free_cpu).to_f # rubocop:disable Style/FormatStringToken
-
-            total_mem = total_memory.to_i / 1024
-            free_mem = effective_mem.to_i * 1024
-
-            str_info = ''
-
-            # Get cluster name for informative purposes
-            # (replace space with _ if any)
-            str_info << 'VCENTER_NAME=' << self['name'].tr(' ', '_') << "\n"
-
-            # System
-            str_info << "HYPERVISOR=vcenter\n"
-            str_info << 'TOTALHOST=' << num_hosts.to_s << "\n"
-            str_info << 'AVAILHOST=' << num_eff_hosts.to_s << "\n"
-            str_info << 'STATUS=' << overall_status << "\n"
-
-            # CPU
-            str_info << 'CPUSPEED=' << mhz_core.to_s << "\n"
-            str_info << 'TOTALCPU=' << total_cpu.to_s << "\n"
-            str_info << 'USEDCPU=' << used_cpu.to_s << "\n"
-            str_info << 'FREECPU=' << free_cpu.to_s << "\n"
-
-            # Memory
-            str_info << 'TOTALMEMORY=' << total_mem.to_s << "\n"
-            str_info << 'FREEMEMORY=' << free_mem.to_s << "\n"
-            str_info << 'USEDMEMORY=' << (total_mem - free_mem).to_s << "\n"
-
-            # DRS enabled
-            str_info << 'VCENTER_DRS=' << drs_enabled.to_s << "\n"
-
-            # HA enabled
-            str_info << 'VCENTER_HA=' << ha_enabled.to_s << "\n"
-
-            # NSX info
-            str_info << nsx_get
-            str_info << tz_get
-
-            str_info << monitor_resource_pools(mhz_core)
-        end
-
-        def monitor_resource_pools(mhz_core)
-            @rp_list = get_resource_pool_list
-
-            view =
-                @vi_client
-                .vim
-                .serviceContent
-                .viewManager
-                .CreateContainerView(
-                    {
-                        :container => @item, # View for RPs inside this cluster
-                        :type => ['ResourcePool'],
-                        :recursive => true
-                    }
-                )
-
-            pc = @vi_client.vim.serviceContent.propertyCollector
-
-            monitored_properties = [
-                'config.cpuAllocation.expandableReservation',
-                'config.cpuAllocation.limit',
-                'config.cpuAllocation.reservation',
-                'config.cpuAllocation.shares.level',
-                'config.cpuAllocation.shares.shares',
-                'config.memoryAllocation.expandableReservation',
-                'config.memoryAllocation.limit',
-                'config.memoryAllocation.reservation',
-                'config.memoryAllocation.shares.level',
-                'config.memoryAllocation.shares.shares'
-            ]
-
-            filter_spec = RbVmomi::VIM.PropertyFilterSpec(
-                :objectSet => [
-                    { :obj => view,
-                      :skip => true,
-                      :selectSet => [
-                          RbVmomi::VIM.TraversalSpec(
-                              :name => 'traverseEntities',
-                              :type => 'ContainerView',
-                              :path => 'view',
-                              :skip => false
-                          )
-                      ] }
-                ],
-                :propSet => [
-                    {
-                        :type => 'ResourcePool',
-                        :pathSet => monitored_properties
-                    }
-                ]
-            )
-
-            result = pc.RetrieveProperties(:specSet => [filter_spec])
-
-            rps = {}
-            result.each do |r|
-                hashed_properties = r.to_hash
-                if r.obj.is_a?(RbVmomi::VIM::ResourcePool)
-                    rps[r.obj._ref] = hashed_properties
-                end
-            end
-
-            return '' if rps.empty?
-
-            rp_info = ''
-
-            rps.each do |ref, info|
-                # CPU
-                if info['config.cpuAllocation.expandableReservation']
-                    cpu_expandable = 'YES'
-                else
-                    cpu_expandable = 'NO'
-                end
-                if info['config.cpuAllocation.limit'] == '-1'
-                    cpu_limit = 'UNLIMITED'
-                else
-                    cpu_limit = info['config.cpuAllocation.limit']
-                end
-                cpu_reservation = info['config.cpuAllocation.reservation']
-                cpu_num = cpu_reservation.to_f / mhz_core
-                cpu_shares_level = info['config.cpuAllocation.shares.level']
-                cpu_shares = info['config.cpuAllocation.shares.shares']
-
-                # MEMORY
-                if info['config.memoryAllocation.expandableReservation']
-                    mem_expandable = 'YES'
-                else
-                    mem_expandable = 'NO'
-                end
-                if info['config.memoryAllocation.limit'] == '-1'
-                    mem_limit = 'UNLIMITED'
-                else
-                    mem_limit = info['config.memoryAllocation.limit']
-                end
-                mem_reservation =
-                    info['config.memoryAllocation.reservation'].to_f
-                mem_shares_level =
-                    info['config.memoryAllocation.shares.level']
-                mem_shares =
-                    info['config.memoryAllocation.shares.shares']
-
-                rp_name =
-                    @rp_list
-                    .select do |item|
-                        item[:ref] == ref
-                    end.first[:name] rescue ''
-
-                rp_name = 'Resources' if rp_name.empty?
-
-                rp_info << "\nVCENTER_RESOURCE_POOL_INFO = ["
-                rp_info << "NAME=\"#{rp_name}\","
-                rp_info << "CPU_EXPANDABLE=#{cpu_expandable},"
-                rp_info << "CPU_LIMIT=#{cpu_limit},"
-                rp_info << "CPU_RESERVATION=#{cpu_reservation},"
-                rp_info << "CPU_RESERVATION_NUM_CORES=#{cpu_num},"
-                rp_info << "CPU_SHARES=#{cpu_shares},"
-                rp_info << "CPU_SHARES_LEVEL=#{cpu_shares_level},"
-                rp_info << "MEM_EXPANDABLE=#{mem_expandable},"
-                rp_info << "MEM_LIMIT=#{mem_limit},"
-                rp_info << "MEM_RESERVATION=#{mem_reservation},"
-                rp_info << "MEM_SHARES=#{mem_shares},"
-                rp_info << "MEM_SHARES_LEVEL=#{mem_shares_level}"
-                rp_info << ']'
-            end
-
-            view.DestroyView
-
-            rp_info
-        end
-
-        def hostname_to_moref(hostname)
-            result = filter_hosts
-
-            moref = ''
-            result.each do |r|
-                if r.obj.name == hostname
-                    moref = r.obj._ref
-                    break
-                end
-            end
-            raise "Host #{hostname} was not found" if moref.empty?
-
-            moref
-        end
-
-        def filter_hosts
-            view =
-                @vi_client
-                .vim
-                .serviceContent
-                .viewManager
-                .CreateContainerView(
-                    {
-                        # View for Hosts inside this cluster
-                        :container => @item,
-                        :type => ['HostSystem'],
-                        :recursive => true
-                    }
-                )
-
-            pc = @vi_client.vim.serviceContent.propertyCollector
-
-            monitored_properties = [
-                'name',
-                'runtime.connectionState',
-                'summary.hardware.numCpuCores',
-                'summary.hardware.memorySize',
-                'summary.hardware.cpuModel',
-                'summary.hardware.cpuMhz',
-                'summary.quickStats.overallCpuUsage',
-                'summary.quickStats.overallMemoryUsage'
-            ]
-
-            filter_spec = RbVmomi::VIM.PropertyFilterSpec(
-                :objectSet => [
-                    { :obj => view,
-                      :skip => true,
-                      :selectSet => [
-                          RbVmomi::VIM.TraversalSpec(
-                              :name => 'traverseEntities',
-                              :type => 'ContainerView',
-                              :path => 'view',
-                              :skip => false
-                          )
-                      ] }
-                ],
-                :propSet => [
-                    { :type => 'HostSystem', :pathSet => monitored_properties }
-                ]
-            )
-
-            result = pc.RetrieveProperties(:specSet => [filter_spec])
-            view.DestroyView # Destroy the view
-            result
-        end
-
-        def monitor_host_systems
-            host_info = ''
-            result = filter_hosts
-            hosts = {}
-            result.each do |r|
-                hashed_properties = r.to_hash
-                if r.obj.is_a?(RbVmomi::VIM::HostSystem)
-                    hosts[r.obj._ref] = hashed_properties
-                end
-            end
-
-            hosts.each do |_ref, info|
-                next if info['runtime.connectionState'] != 'connected'
-
-                total_cpu = info['summary.hardware.numCpuCores'] * 100
-                used_cpu =
-                    (
-                        info['summary.quickStats.overallCpuUsage']
-                        .to_f / info['summary.hardware.cpuMhz']
-                        .to_f
-                    ) * 100
-                # Trim precission
-                used_cpu = format('%.2f', used_cpu).to_f # rubocop:disable Style/FormatStringToken
-                free_cpu = total_cpu - used_cpu
-
-                total_memory =
-                    info['summary.hardware.memorySize']/1024
-                used_memory =
-                    info['summary.quickStats.overallMemoryUsage']*1024
-                free_memory = total_memory - used_memory
-
-                host_info << "\nHOST=["
-                host_info << 'STATE=on,'
-                host_info << 'HOSTNAME="' <<
-                    info['name'].to_s << '",'
-                host_info <<
-                    'MODELNAME="' <<
-                    info['summary.hardware.cpuModel'].to_s << '",'
-                host_info << 'CPUSPEED=' <<
-                    info['summary.hardware.cpuMhz'].to_s << ','
-                host_info << 'MAX_CPU=' << total_cpu.to_s << ','
-                host_info << 'USED_CPU=' << used_cpu.to_s << ','
-                host_info << 'FREE_CPU=' << free_cpu.to_s << ','
-                host_info << 'MAX_MEM=' << total_memory.to_s << ','
-                host_info << 'USED_MEM=' << used_memory.to_s << ','
-                host_info << 'FREE_MEM=' << free_memory.to_s
-                host_info << ']'
-            end
-
-            host_info
-        end
-
-        def monitor_vms(host_id, vm_type)
-            vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
-            cluster_name = self['name']
-            cluster_ref = self['_ref']
-
-            # Get info of the host where the VM/template is located
-            one_host =
-                VCenterDriver::VIHelper
-                .one_item(
-                    OpenNebula::Host,
-                    host_id
-                )
-            if !one_host
-                STDERR.puts "Failed to retieve host with id #{host.id}"
-                if VCenterDriver::CONFIG[:debug_information]
-                    STDERR.puts "#{message} #{e.backtrace}"
-                end
-            end
-
-            esx_hosts = {}
-            @item.host.each do |esx_host|
-                esx_hosts[esx_host._ref] = {
-                    :name => esx_host.name,
-                    :cpu => esx_host.summary.hardware.cpuMhz.to_f
-                }
-            end
-
-            monitored_vms = Set.new
-            str_info = ''
-
-            view =
-                @vi_client
-                .vim
-                .serviceContent
-                .viewManager
-                .CreateContainerView(
-                    {
-                        :container => @item, # View for VMs inside this cluster
-                        :type => ['VirtualMachine'],
-                        :recursive => true
-                    }
-                )
-
-            pc = @vi_client.vim.serviceContent.propertyCollector
-
-            monitored_properties = [
-                'name', # VM name
-                'config.template', # To filter out templates
-                'summary.runtime.powerState', # VM power state
-                'summary.quickStats.hostMemoryUsage', # Memory usage
-                'summary.quickStats.overallCpuUsage', # CPU used by VM
-                'runtime.host', # ESX host
-                'resourcePool', # RP
-                'guest.guestFullName',
-                # IP addresses as seen by guest tools,
-                'guest.net',
-                'guest.guestState',
-                'guest.toolsVersion',
-                'guest.toolsRunningStatus',
-                # IP addresses as seen by guest tools,
-                'guest.toolsVersionStatus2',
-                # VM extraconfig info e.g opennebula.vm.running
-                'config.extraConfig',
-                'config.hardware.numCPU',
-                'config.hardware.memoryMB',
-                'config.annotation',
-                'datastore'
-            ]
-
-            filter_spec = RbVmomi::VIM.PropertyFilterSpec(
-                :objectSet => [
-                    { :obj => view,
-                      :skip => true,
-                      :selectSet => [
-                          RbVmomi::VIM.TraversalSpec(
-                              :name => 'traverseEntities',
-                              :type => 'ContainerView',
-                              :path => 'view',
-                              :skip => false
-                          )
-                      ] }
-                ],
-                :propSet => [
-                    {
-                        :type => 'VirtualMachine',
-                        :pathSet => monitored_properties
-                    }
-                ]
-            )
-
-            result = pc.RetrieveProperties(:specSet => [filter_spec])
-
-            vms = {}
-            vm_objects = []
-            result.each do |r|
-                hashed_properties = r.to_hash
-                next unless r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
-
-                # Only take care of VMs, not templates
-                if !hashed_properties['config.template']
-                    vms[r.obj._ref + '_' + vc_uuid] = hashed_properties
-                    vm_objects << r.obj
-                end
-            end
-
-            pm = @vi_client.vim.serviceContent.perfManager
-
-            stats = {}
-
-            max_samples = 9
-            refresh_rate = 20 # Real time stats takes samples every 20 seconds
-
-            last_mon_time = one_host['TEMPLATE/VCENTER_LAST_PERF_POLL']
-
-            if last_mon_time
-                interval = (Time.now.to_i - last_mon_time.to_i)
-                interval = 3601 if interval < 0
-                samples = (interval / refresh_rate)
-                samples = 1 if samples == 0
-                interval > 3600 ? max_samples = 9 : max_samples = samples
-            end
-
-            if !vm_objects.empty?
-                stats = pm.retrieve_stats(
-                    vm_objects,
-                    [
-                        'net.transmitted',
-                        'net.bytesRx',
-                        'net.bytesTx',
-                        'net.received',
-                        'virtualDisk.numberReadAveraged',
-                        'virtualDisk.numberWriteAveraged',
-                        'virtualDisk.read',
-                        'virtualDisk.write'
-                    ],
-                    {
-                        :max_samples => max_samples
-                    }
-                ) rescue {}
-            end
-
-            if !stats.empty?
-                last_mon_time = Time.now.to_i.to_s
-            end
-
-            @rp_list ||= get_resource_pool_list
-
-            vm_pool =
-                VCenterDriver::VIHelper
-                .one_pool(
-                    OpenNebula::VirtualMachinePool
-                )
-            # We filter to retrieve only those VMs
-            # running in the host that we are monitoring
-            host_vms =
-                vm_pool
-                .retrieve_xmlelements(
-                    "/VM_POOL/VM[HISTORY_RECORDS/HISTORY/HID='#{host_id}']"
-                )
-
-            vms.each do |vm_ref, info|
-                vm_info = ''
-                begin
-                    esx_host = esx_hosts[info['runtime.host']._ref]
-                    info[:esx_host_name] = esx_host[:name]
-                    info[:esx_host_cpu] = esx_host[:cpu]
-                    info[:cluster_name] = cluster_name
-                    info[:cluster_ref] = cluster_ref
-                    info[:vc_uuid] = vc_uuid
-                    info[:host_id] = host_id
-                    info[:rp_list] = @rp_list
-
-                    # Check the running flag
-                    running_flag = info['config.extraConfig'].select do |val|
-                        val[:key] == 'opennebula.vm.running'
-                    end
-
-                    if !running_flag.empty? && running_flag.first
-                        running_flag = running_flag[0][:value]
-                    end
-
-                    next if running_flag == 'no'
-
-                    id = -1
-                    # Find the VM by its deploy_id,
-                    # which in the vCenter driver is
-                    # the vCenter managed object reference
-                    found_vm =
-                        host_vms
-                        .select do |vm|
-                            vm['DEPLOY_ID'] == vm_ref ||
-                                vm['DEPLOY_ID'] == VIHelper.get_deploy_id(vm_ref)
-                        end.first
-                    id = found_vm['ID'] if found_vm
-
-                    # skip if it is a wild and
-                    # we are looking for OpenNebula VMs
-                    next if (vm_type == 'ones') && (id == -1)
-                    # skip if it is not a wild and we are looking for wilds
-                    next if (vm_type == 'wilds') && (id != -1)
-                    # skip if already monitored
-                    next if monitored_vms.include? vm_ref
-
-                    monitored_vms << vm_ref
-
-                    vm =
-                        VCenterDriver::VirtualMachine
-                        .new(
-                            @vi_client,
-                            vm_ref,
-                            id
-                        )
-                    vm.vm_info = info
-                    vm.monitor(stats)
-
-                    vm_name = "#{info['name']} - #{cluster_name}"
-                    vm_info << "VM = [ ID=\"#{id}\", "
-                    vm_info << "VM_NAME=\"#{vm_name}\", "
-                    vm_info << "DEPLOY_ID=\"#{vm_ref}\", "
-
-                    # if the machine does not exist in
-                    # opennebula it means that is a wild:
-                    if vm.one_exist?
-                        mon_s64 = Base64.strict_encode64(vm.info)
-                        vm_info << "MONITOR=\"#{mon_s64}\"]\n"
-                    else
-                        vm_template64 =
-                            Base64
-                            .encode64(
-                                vm.vm_to_one(vm_name)
-                            ).gsub("\n", '')
-                        vm_info << 'VCENTER_TEMPLATE="YES",'
-                        vm_info << "IMPORT_TEMPLATE=\"#{vm_template64}\"]\n"
-                    end
-                rescue StandardError => e
-                    vm_info = error_monitoring(e, id, vm_ref, vc_uuid, info)
-                end
-
-                str_info << vm_info
-            end
-
-            view.DestroyView # Destroy the view
-
-            [str_info, last_mon_time]
-        end
-
-        def error_monitoring(e, id, vm_ref, _vc_uuid, info = {})
-            error_info = ''
-            vm_name = info['name'] || nil
-            tmp_str = e.inspect
-            tmp_str << e.backtrace.join("\n")
-
-            error_info << "VM = [ ID=\"#{id}\", "
-            error_info << "VM_NAME=\"#{vm_name}\", "
-            error_info << "DEPLOY_ID=\"#{vm_ref}\", "
-            error_info <<
-                "ERROR=\"#{Base64.encode64(tmp_str).gsub("\n", '')}\"]\n"
-        end
-
-        def monitor_customizations
-            customizations =
-                self['_connection']
-                .serviceContent
-                .customizationSpecManager
-                .info
-
-            text = ''
-
-            customizations.each do |c|
-                t = 'CUSTOMIZATION = [ '
-                t << %(NAME = "#{c.name}", )
-                t << %(TYPE = "#{c.type}" ]\n)
-
-                text << t
-            end
-
-            text
-        end
-
-        def datacenter # rubocop:disable Naming/AccessorMethodName
-            item = @item
-
-            until item.instance_of? RbVmomi::VIM::Datacenter
-                item = item.parent
-                if item.nil?
-                    raise 'Could not find the parent Datacenter'
-                end
-            end
-
-            Datacenter.new(item)
-        end
-
-        def self.to_one(cluster, con_ops, rp, one_cluster_id)
-            one_host = VCenterDriver::VIHelper.new_one_item(OpenNebula::Host)
-
-            if OpenNebula.is_error?(one_host)
-                raise "Could not create host: #{one_host.message}"
-            end
-
-            one_cluster_id ||= -1
-
-            rc = one_host
-                 .allocate(
-                     cluster[:cluster_name],
-                     'vcenter',
-                     'vcenter',
-                     one_cluster_id.to_i
-                 )
-
-            if OpenNebula.is_error?(rc)
-                raise "Could not allocate host: #{rc.message}"
-            end
-
-            template = "VCENTER_HOST=\"#{con_ops[:host]}\"\n"\
-                       "VCENTER_PASSWORD=\"#{con_ops[:password]}\"\n"\
-                       "VCENTER_USER=\"#{con_ops[:user]}\"\n"\
-                       "VCENTER_CCR_REF=\"#{cluster[:cluster_ref]}\"\n"\
-                       "VCENTER_INSTANCE_ID=\"#{cluster[:vcenter_uuid]}\"\n"\
-                       "VCENTER_VERSION=\"#{cluster[:vcenter_version]}\"\n"\
-
-            template << "VCENTER_RESOURCE_POOL=\"#{rp}\"" if rp
-
-            template << "VCENTER_PORT=\"#{con_ops[:port]}\"" if con_ops[:port]
-
-            rc = one_host.update(template, false)
-
-            if OpenNebula.is_error?(rc)
-                update_error = rc.message
-                rc = one_host.delete
-
-                unless OpenNebula.is_error?(rc)
-                    raise "Could not update host: #{rc.message}"; end
-
-                raise "Could not update host: #{update_error} "\
-                      "and could not delete host: #{rc.message}"
-            end
-
-            rc = one_host.offline
-
-            if OpenNebula.is_error?(rc)
-                update_error = rc.message
-                rc = one_host.delete
-
-                unless OpenNebula.is_error?(rc)
-                    raise "Could not offline host: #{rc.message}"; end
-
-                raise "Could not offline host: #{update_error} "\
-                      "and could not delete host: #{rc.message}"
-            end
-
-            rc = one_host.enable
-
-            if OpenNebula.is_error?(rc)
-                update_error = rc.message
-                rc = one_host.delete
-
-                unless OpenNebula.is_error?(rc)
-                    raise "Could not enable host: #{rc.message}"; end
-
-                raise "Could not enable host: #{update_error} "\
-                      "and could not delete host: #{rc.message}"
-            end
-
-            one_host
-        end
-
-        def self.new_from_ref(ref, vi_client)
-            new(
-                RbVmomi::VIM::ClusterComputeResource
-                .new(
-                    vi_client.vim,
-                    ref
-                ),
-                vi_client
-            )
-        end
-
-    end
-    # class ClusterComputeResource
-
-    ##########################################################################
-    # Class ESXHost
-    ##########################################################################
-    class ESXHost
-
-        attr_accessor :item
-
-        include Memoize
-
-        PG_CREATE_TIMEOUT = 240 # We will wait for 4 minutes for the pg creation
-
-        def initialize(item, vi_client = nil)
-            @net_rollback = []
-            @locking = true
-            @item = item
-            @vi_client = vi_client
-        end
-
-        def self.new_from_ref(ref, vi_client)
-            new(RbVmomi::VIM::HostSystem.new(vi_client.vim, ref), vi_client)
-        end
-
-        # Locking function. Similar to flock
-        def lock
-            hostlockname = @item['name'].downcase.tr(' ', '_')
-
-            return unless @locking
-
-            @locking_file =
-                File
-                .open("/tmp/vcenter-#{hostlockname}-lock", 'w')
-            @locking_file.flock(File::LOCK_EX)
-        end
-
-        # Unlock driver execution mutex
-        def unlock
-            return unless @locking
-
-            @locking_file.close
-        end
-
-        ########################################################################
-        # Check if standard switch exists in host
-        ########################################################################
-
-        def vss_exists(vswitch_name)
-            vswitches = @item.configManager.networkSystem.networkInfo.vswitch
-            vswitches.select {|vs| vs.name == vswitch_name }.first rescue nil
-        end
-
-        ########################################################################
-        # Create a standard vcenter switch in an ESX host
-        ########################################################################
-
-        def create_vss(
-            name,
-            num_ports,
-            pnics = nil,
-            mtu = 1500,
-            pnics_available = nil
-        )
-            # Get NetworkSystem
-            nws = self['configManager.networkSystem']
-            hostbridge = nil
-            nics = []
-
-            num_ports = 128 if num_ports.nil?
-
-            if pnics
-                pnics = pnics.split(',')
-                pnics.each do |pnic|
-                    # Add nics if not in use
-                    nics << pnic if pnics_available.include?(pnic)
-                end
-
-                if !nics.empty?
-                    hostbridge =
-                        RbVmomi::VIM::HostVirtualSwitchBondBridge(
-                            :nicDevice => nics
-                        )
-                end
-            end
-
-            # Create spec
-            vswitchspec =
-                RbVmomi::VIM::HostVirtualSwitchSpec(
-                    :bridge => hostbridge,
-                    :mtu => mtu,
-                    :numPorts => num_ports
-                )
-
-            # add vSwitch to the host
-            begin
-                nws.AddVirtualSwitch(:vswitchName => name, :spec => vswitchspec)
-            rescue StandardError => e
-                raise "The standard vSwitch #{name} could not be \
-                created. AddVirtualSwitch failed Reason: #{e.message}."
-            end
-
-            @net_rollback << { :action => :delete_sw, :name => name }
-
-            name
-        end
-
-        ########################################################################
-        # Update a standard vcenter switch in an ESX host
-        ########################################################################
-        def update_vss(switch, name, pnics, num_ports, mtu)
-            pnics = pnics.split(',') rescue []
-
-            # Backup switch spec for rollback
-            orig_spec = switch.spec
-
-            # Compare current configuration and return if switch hasn't changed
-            switch_has_pnics = switch
-                               .spec
-                               .respond_to?(
-                                   :bridge
-                               ) && switch
-                                    .spec
-                                    .bridge
-                                    .respond_to?(
-                                        :nicDevice
-                                    )
-
-            same_switch = switch.spec.respond_to?(:mtu) && switch
-                          .spec
-                          .mtu == mtu &&
-                          switch
-                          .spec
-                          .respond_to?(
-                              :numPorts
-                          ) && switch.spec.numPorts == num_ports &&
-                          (!switch_has_pnics && pnics.empty? ||
-                            switch_has_pnics && switch
-                            .spec
-                            .bridge
-                            .nicDevice
-                            .uniq
-                            .sort == pnics.uniq.sort)
-            return if same_switch
-
-            # Let's create a new spec and update the switch
-            hostbridge = nil
-            nws = self['configManager.networkSystem']
-            unless pnics.empty?
-                hostbridge =
-                    RbVmomi::VIM::HostVirtualSwitchBondBridge(
-                        :nicDevice => pnics
-                    )
-            end
-            vswitchspec =
-                RbVmomi::VIM::HostVirtualSwitchSpec(
-                    :bridge => hostbridge,
-                    :mtu => mtu,
-                    :numPorts => num_ports
-                )
-            begin
-                nws
-                    .UpdateVirtualSwitch(
-                        :vswitchName => name,
-                        :spec => vswitchspec
-                    )
-            rescue StandardError => e
-                raise "The standard switch with name #{name} \
-                could not be updated. Reason: #{e.message}"
-            end
-
-            @net_rollback << {
-                :action => :update_sw,
-                :name => name,
-                :spec => orig_spec
-            }
-        end
-
-        ########################################################################
-        # Remove a standard vswitch from the host
-        ########################################################################
-        def remove_vss(vswitch_name)
-            nws = self['configManager.networkSystem']
-
-            begin
-                nws.RemoveVirtualSwitch(:vswitchName => vswitch_name)
-            rescue RbVmomi::VIM::ResourceInUse
-                STDERR.puts "The standard switch #{vswitch_name} \
-                is in use so it cannot be deleted"
-                return
-            rescue RbVmomi::VIM::NotFound
-                STDERR.puts "The standard switch #{vswitch_name} \
-                was not found in vCenter"
-                return
-            rescue StandardError => e
-                raise "There was a failure while deleting a vcenter \
-                standard switch #{vswitch_name}. Reason: #{e.message}"
-            end
-
-            vswitch_name
-        end
-
-        ########################################################################
-        # Get physical nics that are available in a host
-        ########################################################################
-        def available_pnics
-            pnics_in_use = []
-            pnics_available = []
-
-            # Get pnics in use in standard switches
-            @item.config.network.vswitch.each do |vs|
-                vs.pnic.each do |pnic|
-                    next unless pnic.instance_of?(String)
-
-                    pnic.slice!('key-vim.host.PhysicalNic-')
-                    pnics_in_use << pnic
-                end
-            end
-
-            # Get pnics in host
-            self['config.network'].pnic.each do |pnic|
-                next if pnics_in_use
-                        .include?(pnic.device)
-
-                pnics_available << pnic
-                                   .device
-            end
-
-            pnics_available
-        end
-
-        ########################################################################
-        # Get networks inside a host
-        ########################################################################
-        def pg_inside_host
-            pg_inside = {}
-
-            # Get pnics in use in standard switches
-            @item.config.network.vswitch.each do |vs|
-                pg_inside[vs.name] = []
-                vs.portgroup.each do |pg|
-                    pg.slice!('key-vim.host.PortGroup-')
-                    pg_inside[vs.name] << pg
-                end
-            end
-
-            pg_inside
-        end
-
-        ########################################################################
-        # Check if proxy switch exists in host for distributed virtual switch
-        ########################################################################
-
-        def proxy_switch_exists(switch_name)
-            nws = self['configManager.networkSystem']
-            proxy_switches = nws.networkInfo.proxySwitch
-            proxy_switches
-                .select {|ps| ps.dvsName == switch_name }
-                .first rescue nil
-        end
-
-        ########################################################################
-        # Assign a host to a a distributed vcenter switch (proxy switch)
-        ########################################################################
-
-        def assign_proxy_switch(dvs, switch_name, pnics, _pnics_available)
-            dvs = dvs.item
-
-            # Return if host is already assigned
-            return dvs unless dvs['config.host']
-                              .select do |host|
-                                  host.config.host._ref == self['_ref']
-                              end.empty?
-
-            # Prepare spec for DVS reconfiguration
-            config_spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
-            config_spec.name = switch_name
-            config_spec.configVersion = dvs['config.configVersion']
-
-            # Check if host is already assigned to distributed switch
-            operation = 'add'
-            # #operation = "edit" if !dvs['config.host'].select
-            # { |host| host.config.host._ref == self['_ref'] }.empty?
-
-            # Add host members to the distributed virtual switch
-            host_member_spec =
-                RbVmomi::VIM::DistributedVirtualSwitchHostMemberConfigSpec
-                .new
-            host_member_spec.host = @item
-            host_member_spec.operation = operation
-            host_member_spec.backing =
-                RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicBacking
-                .new
-            host_member_spec.backing.pnicSpec = []
-
-            # If pnics are needed assign pnics for uplinks
-            if pnics
-                pnics = pnics.split(',')
-                # Get uplink portgroup from dvswitch
-                uplink_key = dvs['config.uplinkPortgroup'].select do |ul|
-                    ul.name == "#{switch_name}-uplink-pg"
-                end.first.key rescue nil
-
-                unless uplink_key
-                    raise "Cannot find the uplink portgroup for #{switch_name}"
-                end
-
-                pnics.each do |pnic|
-                    pnic_spec =
-                        RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicSpec
-                        .new
-                    pnic_spec.pnicDevice = pnic
-                    pnic_spec.uplinkPortgroupKey = uplink_key
-                    host_member_spec.backing.pnicSpec << pnic_spec
-                end
-            end
-
-            config_spec.host = [host_member_spec]
-
-            # The DVS must be reconfigured
-            dvs_reconfigure_task = dvs.ReconfigureDvs_Task(:spec => config_spec)
-            dvs_reconfigure_task.wait_for_completion
-            if dvs_reconfigure_task.info.state != 'success'
-                raise "It wasn't possible to assign host \
-                #{self['name']} as a member of #{switch_name}'"
-            end
-
-            dvs
-        end
-
-        ########################################################################
-        # Create a standard port group
-        ########################################################################
-
-        def create_pg(pgname, vswitch, vlan = 0)
-            spec = RbVmomi::VIM.HostPortGroupSpec(
-                :name => pgname,
-                :vlanId => vlan,
-                :vswitchName => vswitch,
-                :policy => RbVmomi::VIM.HostNetworkPolicy
-            )
-
-            nws = self['configManager.networkSystem']
-
-            begin
-                nws.AddPortGroup(:portgrp => spec)
-            rescue StandardError => e
-                raise "A port group with name #{pgname} \
-                could not be created. Reason: #{e.message}"
-            end
-
-            @net_rollback << { :action => :delete_pg, :name => pgname }
-
-            # wait until the network is ready and we have a reference
-            networks = @item['network'].select {|net| net.name == pgname }
-            (0..PG_CREATE_TIMEOUT).each do
-                break unless networks.empty?
-
-                networks = @item['network'].select {|net| net.name == pgname }
-                sleep 1
-            end
-
-            if networks.empty?
-                raise 'Cannot get VCENTER_NET_REF for new port group'
-            end
-
-            networks.first._ref
-        end
-
-        ########################################################################
-        # Check if standard port group exists in host
-        ########################################################################
-
-        def pg_exists(pg_name)
-            nws = self['configManager.networkSystem']
-            portgroups = nws.networkInfo.portgroup
-            portgroups.select {|pg| pg.spec.name == pg_name }.first rescue nil
-        end
-
-        ########################################################################
-        # Is the switch for the pg different?
-        ########################################################################
-
-        def pg_changes_sw?(pg, switch_name)
-            pg
-                .spec
-                .respond_to?(
-                    :vswitchName
-                ) && pg
-                     .spec
-                     .vswitchName != switch_name
-        end
-
-        ########################################################################
-        # Update a standard port group
-        ########################################################################
-
-        def update_pg(pg, switch_name, vlan_id)
-            unless pg.spec.respond_to?(:vlanId) && pg.spec.vlanId != vlan_id
-                return; end
-
-            # Backup original spec
-            orig_spec = pg.spec
-
-            # Create new spec
-            pg_name = pg.spec.name
-
-            spec = RbVmomi::VIM.HostPortGroupSpec(
-                :name => pg_name,
-                :vlanId => vlan_id,
-                :vswitchName => switch_name,
-                :policy => RbVmomi::VIM.HostNetworkPolicy
-            )
-
-            nws = self['configManager.networkSystem']
-
-            begin
-                nws.UpdatePortGroup(:pgName => pg_name, :portgrp => spec)
-            rescue StandardError => e
-                raise "A port group with name #{pg_name} \
-                could not be updated. Reason: #{e.message}"
-            end
-
-            # Set rollback operation
-            @net_rollback << {
-                :action => :update_pg,
-                :name => pg_name,
-                :spec => orig_spec
-            }
-        end
-
-        ########################################################################
-        # Remove a standard port group from the host
-        ########################################################################
-
-        def remove_pg(pgname)
-            nws = self['configManager.networkSystem']
-
-            swname = nil
-            begin
-                portgroups = nws.networkConfig.portgroup
-                portgroups.each do |pg|
-                    if pg.spec.name == pgname
-                        swname = pg.spec.vswitchName
-                        break
-                    end
-                end
-                nws.RemovePortGroup(:pgName => pgname)
-            rescue RbVmomi::VIM::ResourceInUse
-                STDERR.puts "The standard portgroup \
-                #{pgname} is in use so it cannot be deleted"
-                return
-            rescue RbVmomi::VIM::NotFound
-                STDERR.puts "The standard portgroup \
-                #{pgname} was not found in vCenter"
-                return
-            rescue StandardError => e
-                raise "There was a failure while \
-                deleting a standard portgroup #{pgname} \
-                in vCenter. Reason: #{e.message}"
-            end
-
-            swname
-        end
-
-        def network_rollback
-            nws = self['configManager.networkSystem']
-
-            @net_rollback.reverse_each do |nr|
-                case nr[:action]
-                when :update_pg
-                    begin
-                        nws
-                            .UpdatePortGroup(
-                                :pgName => nr[:name],
-                                :portgrp => nr[:spec]
-                            )
-                    rescue StandardError => e
-                        raise "A rollback operation for standard \
-                        port group #{nr[:name]} could not \
-                        be performed. Reason: #{e.message}"
-                    end
-                when :update_sw
-                    begin
-                        nws
-                            .UpdateVirtualSwitch(
-                                :vswitchName => nr[:name],
-                                :spec => nr[:spec]
-                            )
-                    rescue StandardError => e
-                        raise "A rollback operation for standard \
-                        switch #{nr[:name]} could not \
-                        be performed. Reason: #{e.message}"
-                    end
-                when :delete_sw
-                    begin
-                        nws.RemoveVirtualSwitch(:vswitchName=> nr[:name])
-                    rescue RbVmomi::VIM::ResourceInUse
-                        next # Ignore if switch in use
-                    rescue RbVmomi::VIM::NotFound
-                        next # Ignore if switch not found
-                    rescue StandardError => e
-                        raise "A rollback operation for standard \
-                        switch #{nr[:name]} could not \
-                        be performed. Reason: #{e.message}"
-                    end
-                when :delete_pg
-                    begin
-                        nws.RemovePortGroup(:pgName => nr[:name])
-                    rescue RbVmomi::VIM::ResourceInUse
-                        next # Ignore if pg in use
-                    rescue RbVmomi::VIM::NotFound
-                        next # Ignore if pg not found
-                    rescue StandardError => e
-                        raise "A rollback operation for \
-                        standard port group #{nr[:name]} could \
-                        not be performed. Reason: #{e.message}"
-                    end
-                end
-            end
-        end
-
-    end
-    # class ESXHost
-
-end
-# module VCenterDriver