opennebula 5.2.1 → 5.3.80.beta1
- checksums.yaml +4 -4
- data/lib/VirtualMachineDriver.rb +7 -0
- data/lib/cloud/CloudClient.rb +1 -1
- data/lib/opennebula/acl.rb +8 -3
- data/lib/opennebula/client.rb +1 -1
- data/lib/opennebula/error.rb +10 -2
- data/lib/opennebula/group.rb +37 -7
- data/lib/opennebula/host.rb +15 -4
- data/lib/opennebula/server_cipher_auth.rb +5 -3
- data/lib/opennebula/system.rb +13 -1
- data/lib/opennebula/virtual_machine.rb +31 -34
- data/lib/opennebula/virtual_network.rb +32 -15
- data/lib/opennebula/vm_group.rb +151 -0
- data/lib/opennebula/vm_group_pool.rb +78 -0
- data/lib/opennebula/xml_element.rb +18 -0
- data/lib/opennebula/zone.rb +24 -1
- data/lib/opennebula.rb +3 -1
- data/lib/vcenter_driver.rb +35 -3111
- metadata +7 -6
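
The headline change is data/lib/vcenter_driver.rb: the monolithic driver (the old VIClient, VCenterCachedHost, VCenterHost and VCenterVm classes) is replaced by a thin loader that adds LIB_LOCATION + '/ruby/vcenter_driver' to the load path, requires one file per concern (vi_client, vi_helper, datacenter, host, datastore, virtual_machine, network, file_helper, importer), and defines two top-level helpers, error_message and check_valid. A minimal sketch of how a driver action script could use those helpers (the argument name here is hypothetical, not part of the diff):

    # Sketch only: error_message and check_valid come from the new
    # vcenter_driver.rb shown below. check_valid prints an
    # "ERROR MESSAGE --8<------" banner to STDERR and exits -1 when the
    # required parameter is nil or empty.
    require 'vcenter_driver'

    vm_ref = ARGV[0]               # hypothetical positional argument
    check_valid(vm_ref, 'vm_ref')  # aborts the action unless a value was given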
data/lib/vcenter_driver.rb
CHANGED
@@ -14,9 +14,10 @@
|
|
14
14
|
# limitations under the License. #
|
15
15
|
# ---------------------------------------------------------------------------- #
|
16
16
|
|
17
|
-
#
|
18
|
-
# Set up the environment for the driver
|
19
|
-
#
|
17
|
+
# ---------------------------------------------------------------------------- #
|
18
|
+
# Set up the environment for the driver #
|
19
|
+
# ---------------------------------------------------------------------------- #
|
20
|
+
|
20
21
|
ONE_LOCATION = ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
|
21
22
|
|
22
23
|
if !ONE_LOCATION
|
@@ -25,3131 +26,54 @@ if !ONE_LOCATION
|
|
25
26
|
ETC_LOCATION = "/etc/one/" if !defined?(ETC_LOCATION)
|
26
27
|
VAR_LOCATION = "/var/lib/one" if !defined?(VAR_LOCATION)
|
27
28
|
else
|
28
|
-
BIN_LOCATION = ONE_LOCATION + "/bin"
|
29
|
-
LIB_LOCATION = ONE_LOCATION + "/lib"
|
30
|
-
ETC_LOCATION = ONE_LOCATION
|
29
|
+
BIN_LOCATION = ONE_LOCATION + "/bin" if !defined?(BIN_LOCATION)
|
30
|
+
LIB_LOCATION = ONE_LOCATION + "/lib" if !defined?(LIB_LOCATION)
|
31
|
+
ETC_LOCATION = ONE_LOCATION + "/etc/" if !defined?(ETC_LOCATION)
|
31
32
|
VAR_LOCATION = ONE_LOCATION + "/var/" if !defined?(VAR_LOCATION)
|
32
33
|
end
|
33
34
|
|
34
35
|
ENV['LANG'] = 'C'
|
35
36
|
|
36
|
-
$: << LIB_LOCATION+'/ruby/vendors/rbvmomi/lib'
|
37
|
-
$: << LIB_LOCATION+'/ruby'
|
37
|
+
$: << LIB_LOCATION + '/ruby/vendors/rbvmomi/lib'
|
38
|
+
$: << LIB_LOCATION + '/ruby'
|
39
|
+
$: << LIB_LOCATION + '/ruby/vcenter_driver'
|
38
40
|
|
39
|
-
require 'ostruct'
|
40
41
|
require 'rbvmomi'
|
41
42
|
require 'yaml'
|
42
43
|
require 'opennebula'
|
43
44
|
require 'base64'
|
44
45
|
require 'openssl'
|
45
46
|
|
46
|
-
|
47
|
-
#
|
48
|
-
|
49
|
-
|
50
|
-
class RbVmomi::VIM::Datastore
|
51
|
-
|
52
|
-
# Download a file from this datastore.
|
53
|
-
# @param remote_path [String] Source path on the datastore.
|
54
|
-
# @param local_path [String] Destination path on the local machine.
|
55
|
-
# @return [void]
|
56
|
-
def download_to_stdout remote_path
|
57
|
-
url = "http#{_connection.http.use_ssl? ? 's' : ''}://#{_connection.http.address}:#{_connection.http.port}#{mkuripath(remote_path)}"
|
58
|
-
|
59
|
-
pid = spawn CURLBIN, "-k", '--noproxy', '*', '-f',
|
60
|
-
"-b", _connection.cookie,
|
61
|
-
url
|
62
|
-
|
63
|
-
|
64
|
-
Process.waitpid(pid, 0)
|
65
|
-
fail "download failed" unless $?.success?
|
66
|
-
end
|
67
|
-
|
68
|
-
def is_descriptor? remote_path
|
69
|
-
url = "http#{_connection.http.use_ssl? ? 's' : ''}://#{_connection.http.address}:#{_connection.http.port}#{mkuripath(remote_path)}"
|
70
|
-
|
71
|
-
rout, wout = IO.pipe
|
72
|
-
|
73
|
-
pid = spawn CURLBIN, "-I", "-k", '--noproxy', '*', '-f',
|
74
|
-
"-b", _connection.cookie,
|
75
|
-
url,
|
76
|
-
:out => wout,
|
77
|
-
:err => '/dev/null'
|
78
|
-
|
79
|
-
Process.waitpid(pid, 0)
|
80
|
-
fail "read image header failed" unless $?.success?
|
81
|
-
|
82
|
-
wout.close
|
83
|
-
size = rout.readlines.select{|l| l.start_with?("Content-Length")}[0].sub("Content-Length: ","")
|
84
|
-
rout.close
|
85
|
-
size.chomp.to_i < 4096 # If <4k, then is a descriptor
|
86
|
-
end
|
87
|
-
|
88
|
-
def get_text_file remote_path
|
89
|
-
url = "http#{_connection.http.use_ssl? ? 's' : ''}://#{_connection.http.address}:#{_connection.http.port}#{mkuripath(remote_path)}"
|
90
|
-
|
91
|
-
rout, wout = IO.pipe
|
92
|
-
pid = spawn CURLBIN, "-k", '--noproxy', '*', '-f',
|
93
|
-
"-b", _connection.cookie,
|
94
|
-
url,
|
95
|
-
:out => wout,
|
96
|
-
:err => '/dev/null'
|
97
|
-
|
98
|
-
Process.waitpid(pid, 0)
|
99
|
-
fail "get text file failed" unless $?.success?
|
100
|
-
|
101
|
-
wout.close
|
102
|
-
output = rout.readlines
|
103
|
-
rout.close
|
104
|
-
return output
|
105
|
-
end
|
106
|
-
|
107
|
-
end
|
108
|
-
|
109
|
-
module VCenterDriver
|
110
|
-
|
111
|
-
################################################################################
|
112
|
-
# This class represents a VCenter connection and an associated OpenNebula client
|
113
|
-
# The connection is associated to the VCenter backing a given OpenNebula host.
|
114
|
-
# For the VCenter driver each OpenNebula host represents a VCenter cluster
|
115
|
-
################################################################################
|
116
|
-
class VIClient
|
117
|
-
attr_reader :vim, :one, :root, :cluster, :user, :pass, :host, :dc
|
118
|
-
|
119
|
-
def self.get_entities(folder, type, entities=[])
|
120
|
-
return nil if folder == []
|
121
|
-
|
122
|
-
folder.childEntity.each do |child|
|
123
|
-
name, junk = child.to_s.split('(')
|
124
|
-
|
125
|
-
case name
|
126
|
-
when "Folder"
|
127
|
-
VIClient.get_entities(child, type, entities)
|
128
|
-
when type
|
129
|
-
entities.push(child)
|
130
|
-
end
|
131
|
-
end
|
132
|
-
|
133
|
-
return entities
|
134
|
-
end
|
135
|
-
|
136
|
-
# Only retrieve properties with faster search
|
137
|
-
def get_entities_to_import(folder, type)
|
138
|
-
res = folder.inventory_flat(type => :all)
|
139
|
-
objects = []
|
140
|
-
|
141
|
-
res.each {|k,v|
|
142
|
-
if k.to_s.split('(').first == type
|
143
|
-
obj = {}
|
144
|
-
v.propSet.each{ |dynprop|
|
145
|
-
obj[dynprop.name] = dynprop.val
|
146
|
-
}
|
147
|
-
obj[:ref] = k._ref
|
148
|
-
objects << OpenStruct.new(obj)
|
149
|
-
end
|
150
|
-
}
|
151
|
-
return objects
|
152
|
-
end
|
153
|
-
|
154
|
-
############################################################################
|
155
|
-
# Initialize the VIClient, and creates an OpenNebula client. The parameters
|
156
|
-
# are obtained from the associated OpenNebula host
|
157
|
-
# @param hid [Integer] The OpenNebula host id with VCenter attributes
|
158
|
-
############################################################################
|
159
|
-
def initialize(hid)
|
160
|
-
|
161
|
-
initialize_one
|
162
|
-
|
163
|
-
@one_host = ::OpenNebula::Host.new_with_id(hid, @one)
|
164
|
-
rc = @one_host.info
|
165
|
-
|
166
|
-
if ::OpenNebula.is_error?(rc)
|
167
|
-
raise "Error getting host information: #{rc.message}"
|
168
|
-
end
|
169
|
-
|
170
|
-
password = @one_host["TEMPLATE/VCENTER_PASSWORD"]
|
171
|
-
|
172
|
-
if !@token.nil?
|
173
|
-
begin
|
174
|
-
cipher = OpenSSL::Cipher::Cipher.new("aes-256-cbc")
|
175
|
-
|
176
|
-
cipher.decrypt
|
177
|
-
cipher.key = @token
|
178
|
-
|
179
|
-
password = cipher.update(Base64::decode64(password))
|
180
|
-
password << cipher.final
|
181
|
-
rescue
|
182
|
-
raise "Error decrypting vCenter password"
|
183
|
-
end
|
184
|
-
end
|
185
|
-
|
186
|
-
connection = {
|
187
|
-
:host => @one_host["TEMPLATE/VCENTER_HOST"],
|
188
|
-
:user => @one_host["TEMPLATE/VCENTER_USER"],
|
189
|
-
:password => password
|
190
|
-
}
|
191
|
-
|
192
|
-
initialize_vim(connection)
|
193
|
-
|
194
|
-
datacenters = VIClient.get_entities(@root, 'Datacenter')
|
195
|
-
|
196
|
-
datacenters.each {|dc|
|
197
|
-
ccrs = VIClient.get_entities(dc.hostFolder, 'ClusterComputeResource')
|
198
|
-
|
199
|
-
next if ccrs.nil?
|
200
|
-
|
201
|
-
@cluster = ccrs.find{ |ccr| @one_host.name == ccr.name }
|
202
|
-
|
203
|
-
(@dc = dc; break) if @cluster
|
204
|
-
}
|
205
|
-
|
206
|
-
if @dc.nil? || @cluster.nil?
|
207
|
-
raise "Cannot find DataCenter or ClusterComputeResource for host."
|
208
|
-
end
|
209
|
-
end
|
210
|
-
|
211
|
-
########################################################################
|
212
|
-
# Initialize a VIConnection based just on the VIM parameters. The
|
213
|
-
# OpenNebula client is also initialized
|
214
|
-
########################################################################
|
215
|
-
def self.new_connection(user_opts, one_client=nil)
|
216
|
-
|
217
|
-
conn = allocate
|
218
|
-
|
219
|
-
conn.initialize_one(one_client)
|
220
|
-
|
221
|
-
conn.initialize_vim(user_opts)
|
222
|
-
|
223
|
-
return conn
|
224
|
-
end
|
225
|
-
|
226
|
-
########################################################################
|
227
|
-
# The associated cluster for this connection
|
228
|
-
########################################################################
|
229
|
-
def cluster
|
230
|
-
@cluster
|
231
|
-
end
|
232
|
-
|
233
|
-
########################################################################
|
234
|
-
# Is this Cluster confined in a resource pool?
|
235
|
-
########################################################################
|
236
|
-
def rp_confined?
|
237
|
-
!@one_host["TEMPLATE/VCENTER_RESOURCE_POOL"].nil?
|
238
|
-
end
|
239
|
-
|
240
|
-
########################################################################
|
241
|
-
# The associated resource pool for this connection
|
242
|
-
# @return [ResourcePool] an array of resource pools including the default
|
243
|
-
# resource pool. If the connection is confined to a particular
|
244
|
-
# resource pool, then return just that one
|
245
|
-
########################################################################
|
246
|
-
def resource_pool
|
247
|
-
rp_name = @one_host["TEMPLATE/VCENTER_RESOURCE_POOL"]
|
248
|
-
|
249
|
-
if rp_name.nil?
|
250
|
-
rp_array = @cluster.resourcePool.resourcePool
|
251
|
-
rp_array << @cluster.resourcePool
|
252
|
-
rp_array
|
253
|
-
else
|
254
|
-
[find_resource_pool(rp_name)]
|
255
|
-
end
|
256
|
-
end
|
257
|
-
|
258
|
-
########################################################################
|
259
|
-
# Get the default resource pool of the connection. Only valid if
|
260
|
-
# the connection is not confined in a resource pool
|
261
|
-
# @return ResourcePool the default resource pool
|
262
|
-
########################################################################
|
263
|
-
def default_resource_pool
|
264
|
-
@cluster.resourcePool
|
265
|
-
end
|
266
|
-
|
267
|
-
########################################################################
|
268
|
-
# Searches the desired ResourcePool of the DataCenter for the current
|
269
|
-
# connection. Returns a RbVmomi::VIM::ResourcePool or the default pool
|
270
|
-
# if not found
|
271
|
-
# @param rpool [String] the ResourcePool name
|
272
|
-
########################################################################
|
273
|
-
def find_resource_pool(poolName)
|
274
|
-
baseEntity = @cluster
|
275
|
-
|
276
|
-
entityArray = poolName.split('/')
|
277
|
-
entityArray.each do |entityArrItem|
|
278
|
-
if entityArrItem != ''
|
279
|
-
if baseEntity.is_a? RbVmomi::VIM::Folder
|
280
|
-
baseEntity = baseEntity.childEntity.find { |f|
|
281
|
-
f.name == entityArrItem
|
282
|
-
} or return @cluster.resourcePool
|
283
|
-
elsif baseEntity.is_a? RbVmomi::VIM::ClusterComputeResource
|
284
|
-
baseEntity = baseEntity.resourcePool.resourcePool.find { |f|
|
285
|
-
f.name == entityArrItem
|
286
|
-
} or return @cluster.resourcePool
|
287
|
-
elsif baseEntity.is_a? RbVmomi::VIM::ResourcePool
|
288
|
-
baseEntity = baseEntity.resourcePool.find { |f|
|
289
|
-
f.name == entityArrItem
|
290
|
-
} or return @cluster.resourcePool
|
291
|
-
else
|
292
|
-
return @cluster.resourcePool
|
293
|
-
end
|
294
|
-
end
|
295
|
-
end
|
296
|
-
|
297
|
-
if !baseEntity.is_a?(RbVmomi::VIM::ResourcePool) and
|
298
|
-
baseEntity.respond_to?(:resourcePool)
|
299
|
-
baseEntity = baseEntity.resourcePool
|
300
|
-
end
|
301
|
-
|
302
|
-
baseEntity
|
303
|
-
end
|
304
|
-
|
305
|
-
########################################################################
|
306
|
-
# Searches the associated vmFolder of the DataCenter for the current
|
307
|
-
# connection. Returns a RbVmomi::VIM::VirtualMachine or nil if not found
|
308
|
-
#
|
309
|
-
# Searches by moref, name, uuid and then iterates over all VMs
|
310
|
-
#
|
311
|
-
# @param uuid [String] the UUID of the VM or VM Template
|
312
|
-
# @param ref [String] VMware moref
|
313
|
-
# @param name [String] VM name in vCenter
|
314
|
-
########################################################################
|
315
|
-
def find_vm_fast(uuid, ref = nil, name = nil)
|
316
|
-
if ref
|
317
|
-
# It can raise ManagedObjectNotFound
|
318
|
-
begin
|
319
|
-
vm = RbVmomi::VIM::VirtualMachine.new(@dc._connection, ref)
|
320
|
-
return vm if vm.config && vm.config.uuid == uuid
|
321
|
-
rescue => e
|
322
|
-
end
|
323
|
-
end
|
324
|
-
|
325
|
-
if name
|
326
|
-
begin
|
327
|
-
vm = @dc.vmFolder.find(name)
|
328
|
-
return vm if vm.config && vm.config.uuid == uuid
|
329
|
-
rescue
|
330
|
-
end
|
331
|
-
end
|
332
|
-
|
333
|
-
return find_vm_template(uuid)
|
334
|
-
end
|
335
|
-
|
336
|
-
########################################################################
|
337
|
-
# Searches the associated vmFolder of the DataCenter for the current
|
338
|
-
# connection. Returns a RbVmomi::VIM::VirtualMachine or nil if not found
|
339
|
-
# @param uuid [String] the UUID of the VM or VM Template
|
340
|
-
########################################################################
|
341
|
-
def find_vm_template(uuid)
|
342
|
-
version = @vim.serviceContent.about.version
|
343
|
-
|
344
|
-
found_vm = nil
|
345
|
-
found_vm = @dc.vmFolder.findByUuid(uuid, RbVmomi::VIM::VirtualMachine, @dc)
|
346
|
-
return found_vm if found_vm
|
347
|
-
|
348
|
-
vms = VIClient.get_entities(@dc.vmFolder, 'VirtualMachine')
|
349
|
-
|
350
|
-
return vms.find do |v|
|
351
|
-
begin
|
352
|
-
v.config && v.config.uuid == uuid
|
353
|
-
rescue RbVmomi::VIM::ManagedObjectNotFound
|
354
|
-
false
|
355
|
-
end
|
356
|
-
end
|
357
|
-
end
|
358
|
-
|
359
|
-
########################################################################
|
360
|
-
# Searches the associated vmFolder of the DataCenter for the current
|
361
|
-
# connection. Returns a RbVmomi::VIM::VirtualMachine or nil if not found
|
362
|
-
# @param vm_name [String] the UUID of the VM or VM Template
|
363
|
-
########################################################################
|
364
|
-
def find_vm(vm_name)
|
365
|
-
vms = VIClient.get_entities(@dc.vmFolder, 'VirtualMachine')
|
366
|
-
|
367
|
-
return vms.find do |v|
|
368
|
-
begin
|
369
|
-
v.name == vm_name
|
370
|
-
rescue RbVmomi::VIM::ManagedObjectNotFound
|
371
|
-
false
|
372
|
-
end
|
373
|
-
end
|
374
|
-
end
|
375
|
-
|
376
|
-
########################################################################
|
377
|
-
# Searches the associated datacenter for a particular datastore
|
378
|
-
# @param ds_name [String] name of the datastore
|
379
|
-
# @returns a RbVmomi::VIM::VirtualMachine or nil if not found
|
380
|
-
########################################################################
|
381
|
-
def get_datastore(ds_name)
|
382
|
-
datastores = VIClient.get_entities(@dc.datastoreFolder, 'Datastore')
|
383
|
-
|
384
|
-
storage_pods = VIClient.get_entities(@dc.datastoreFolder, 'StoragePod')
|
385
|
-
storage_pods.each { |sp|
|
386
|
-
datastores << sp #Add StoragePod
|
387
|
-
|
388
|
-
# Add individual datastores under StoragePod
|
389
|
-
storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
|
390
|
-
if not storage_pod_datastores.empty?
|
391
|
-
datastores.concat(storage_pod_datastores)
|
392
|
-
end
|
393
|
-
}
|
394
|
-
|
395
|
-
ds = datastores.select{|ds| ds.name == ds_name}[0]
|
396
|
-
end
|
397
|
-
|
398
|
-
########################################################################
|
399
|
-
# Builds a hash with the DataCenter / ClusterComputeResource hierarchy
|
400
|
-
# for this VCenter.
|
401
|
-
# @return [Hash] in the form
|
402
|
-
# {dc_name [String] => ClusterComputeResources Names [Array - String]}
|
403
|
-
########################################################################
|
404
|
-
def hierarchy(one_client=nil)
|
405
|
-
vc_hosts = {}
|
406
|
-
|
407
|
-
datacenters = VIClient.get_entities(@root, 'Datacenter')
|
408
|
-
|
409
|
-
hpool = OpenNebula::HostPool.new((one_client||@one))
|
410
|
-
rc = hpool.info
|
411
|
-
|
412
|
-
datacenters.each { |dc|
|
413
|
-
ccrs = VIClient.get_entities(dc.hostFolder, 'ClusterComputeResource')
|
414
|
-
vc_hosts[dc.name] = []
|
415
|
-
ccrs.each { |c|
|
416
|
-
if !hpool["HOST[NAME=\"#{c.name}\"]"]
|
417
|
-
vc_hosts[dc.name] << c.name
|
418
|
-
end
|
419
|
-
}
|
420
|
-
}
|
421
|
-
|
422
|
-
return vc_hosts
|
423
|
-
end
|
424
|
-
|
425
|
-
########################################################################
|
426
|
-
# Builds a hash with the Datacenter / VM Templates for this VCenter
|
427
|
-
# @param one_client [OpenNebula::Client] Use this client instead of @one
|
428
|
-
# @return [Hash] in the form
|
429
|
-
# { dc_name [String] => Templates [Array] }
|
430
|
-
########################################################################
|
431
|
-
def vm_templates(one_client=nil)
|
432
|
-
vm_templates = {}
|
433
|
-
|
434
|
-
tpool = OpenNebula::TemplatePool.new(
|
435
|
-
(one_client||@one), OpenNebula::Pool::INFO_ALL)
|
436
|
-
rc = tpool.info
|
437
|
-
if OpenNebula.is_error?(rc)
|
438
|
-
raise "Error contacting OpenNebula #{rc.message}"
|
439
|
-
end
|
440
|
-
|
441
|
-
datacenters = VIClient.get_entities(@root, 'Datacenter')
|
442
|
-
|
443
|
-
datacenters.each { |dc|
|
444
|
-
vms = get_entities_to_import(dc.vmFolder, 'VirtualMachine')
|
445
|
-
|
446
|
-
tmp = vms.select { |v| v.config && (v.config.template == true) }
|
447
|
-
|
448
|
-
one_tmp = []
|
449
|
-
host_cache = {}
|
450
|
-
ds_cache = {}
|
451
|
-
|
452
|
-
tmp.each { |t|
|
453
|
-
vi_tmp = VCenterVm.new(self, t)
|
454
|
-
|
455
|
-
if !tpool["VMTEMPLATE/TEMPLATE/PUBLIC_CLOUD[\
|
456
|
-
TYPE=\"vcenter\" \
|
457
|
-
and VM_TEMPLATE=\"#{vi_tmp.vm.config.uuid}\"]"]
|
458
|
-
# Check cached objects
|
459
|
-
if !host_cache[vi_tmp.vm.runtime.host.to_s]
|
460
|
-
host_cache[vi_tmp.vm.runtime.host.to_s] =
|
461
|
-
VCenterCachedHost.new vi_tmp.vm.runtime.host
|
462
|
-
end
|
463
|
-
|
464
|
-
if !ds_cache[t.datastore[0].to_s]
|
465
|
-
ds_cache[t.datastore[0].to_s] =
|
466
|
-
VCenterCachedDatastore.new t.datastore[0]
|
467
|
-
end
|
468
|
-
|
469
|
-
host = host_cache[vi_tmp.vm.runtime.host.to_s]
|
470
|
-
ds = ds_cache[t.datastore[0].to_s]
|
471
|
-
|
472
|
-
one_tmp << {
|
473
|
-
:name => "#{vi_tmp.vm.name} - #{host.cluster_name}",
|
474
|
-
:uuid => vi_tmp.vm.config.uuid,
|
475
|
-
:host => host.cluster_name,
|
476
|
-
:one => vi_tmp.to_one(host),
|
477
|
-
:ds => vi_tmp.to_one_ds(host, ds.name),
|
478
|
-
:default_ds => ds.name,
|
479
|
-
:rp => vi_tmp.to_one_rp(host),
|
480
|
-
:vcenter_ref => vi_tmp.vm._ref,
|
481
|
-
:vcenter_name => vi_tmp.vm.name
|
482
|
-
}
|
483
|
-
end
|
484
|
-
}
|
485
|
-
|
486
|
-
vm_templates[dc.name] = one_tmp
|
487
|
-
}
|
488
|
-
|
489
|
-
return vm_templates
|
490
|
-
end
|
491
|
-
|
492
|
-
########################################################################
|
493
|
-
# Builds a hash with the Datacenter / CCR (Distributed)Networks
|
494
|
-
# for this VCenter
|
495
|
-
# @param one_client [OpenNebula::Client] Use this client instead of @one
|
496
|
-
# @return [Hash] in the form
|
497
|
-
# { dc_name [String] => Networks [Array] }
|
498
|
-
########################################################################
|
499
|
-
def vcenter_networks(one_client=nil)
|
500
|
-
vcenter_networks = {}
|
501
|
-
|
502
|
-
vnpool = OpenNebula::VirtualNetworkPool.new(
|
503
|
-
(one_client||@one), OpenNebula::Pool::INFO_ALL)
|
504
|
-
rc = vnpool.info
|
505
|
-
if OpenNebula.is_error?(rc)
|
506
|
-
raise "Error contacting OpenNebula #{rc.message}"
|
507
|
-
end
|
508
|
-
|
509
|
-
datacenters = VIClient.get_entities(@root, 'Datacenter')
|
510
|
-
|
511
|
-
datacenters.each { |dc|
|
512
|
-
networks = VIClient.get_entities(dc.networkFolder, 'Network' )
|
513
|
-
one_nets = []
|
514
|
-
|
515
|
-
networks.each { |n|
|
516
|
-
# Skip those not in cluster
|
517
|
-
next if !n[:host][0]
|
518
|
-
|
519
|
-
# Networks can be in several cluster, create one per cluster
|
520
|
-
net_names = []
|
521
|
-
Array(n[:host]).each{ |host_system|
|
522
|
-
net_name = "#{n.name} - #{host_system.parent.name}"
|
523
|
-
if !net_names.include?(net_name)
|
524
|
-
if !vnpool["VNET[BRIDGE=\"#{n[:name]}\"]/\
|
525
|
-
TEMPLATE[VCENTER_TYPE=\"Port Group\"]"]
|
526
|
-
one_nets << {
|
527
|
-
:name => net_name,
|
528
|
-
:bridge => n.name,
|
529
|
-
:cluster => host_system.parent.name,
|
530
|
-
:type => "Port Group",
|
531
|
-
:one => "NAME = \"#{net_name}\"\n" \
|
532
|
-
"BRIDGE = \"#{n[:name]}\"\n" \
|
533
|
-
"VN_MAD = \"dummy\"\n" \
|
534
|
-
"VCENTER_TYPE = \"Port Group\""
|
535
|
-
}
|
536
|
-
net_names << net_name
|
537
|
-
end
|
538
|
-
end
|
539
|
-
}
|
540
|
-
}
|
541
|
-
|
542
|
-
networks = VIClient.get_entities(dc.networkFolder,
|
543
|
-
'DistributedVirtualPortgroup' )
|
544
|
-
|
545
|
-
networks.each { |n|
|
546
|
-
# Skip those not in cluster
|
547
|
-
next if !n[:host][0]
|
548
|
-
|
549
|
-
# DistributedVirtualPortgroup can be in several cluster,
|
550
|
-
# create one per cluster
|
551
|
-
Array(n[:host][0]).each{ |host_system|
|
552
|
-
net_name = "#{n.name} - #{n[:host][0].parent.name}"
|
553
|
-
|
554
|
-
if !vnpool["VNET[BRIDGE=\"#{n[:name]}\"]/\
|
555
|
-
TEMPLATE[VCENTER_TYPE=\"Distributed Port Group\"]"]
|
556
|
-
vnet_template = "NAME = \"#{net_name}\"\n" \
|
557
|
-
"BRIDGE = \"#{n[:name]}\"\n" \
|
558
|
-
"VN_MAD = \"dummy\"\n" \
|
559
|
-
"VCENTER_TYPE = \"Distributed Port Group\""
|
560
|
-
|
561
|
-
default_pc = n.config.defaultPortConfig
|
562
|
-
|
563
|
-
has_vlan = false
|
564
|
-
vlan_str = ""
|
565
|
-
|
566
|
-
if default_pc.methods.include? :vlan
|
567
|
-
has_vlan = default_pc.vlan.methods.include? :vlanId
|
568
|
-
end
|
569
|
-
|
570
|
-
if has_vlan
|
571
|
-
vlan = n.config.defaultPortConfig.vlan.vlanId
|
572
|
-
|
573
|
-
if vlan != 0
|
574
|
-
if vlan.is_a? Array
|
575
|
-
vlan.each{|v|
|
576
|
-
vlan_str += v.start.to_s + ".." +
|
577
|
-
v.end.to_s + ","
|
578
|
-
}
|
579
|
-
vlan_str.chop!
|
580
|
-
else
|
581
|
-
vlan_str = vlan.to_s
|
582
|
-
end
|
583
|
-
end
|
584
|
-
end
|
585
|
-
|
586
|
-
if !vlan_str.empty?
|
587
|
-
vnet_template << "VLAN_TAGGED_ID=#{vlan_str}\n"
|
588
|
-
end
|
589
|
-
|
590
|
-
one_net = {:name => net_name,
|
591
|
-
:bridge => n.name,
|
592
|
-
:cluster => host_system.parent.name,
|
593
|
-
:type => "Distributed Port Group",
|
594
|
-
:one => vnet_template}
|
595
|
-
|
596
|
-
one_net[:vlan] = vlan_str if !vlan_str.empty?
|
597
|
-
|
598
|
-
one_nets << one_net
|
599
|
-
end
|
600
|
-
}
|
601
|
-
}
|
602
|
-
|
603
|
-
vcenter_networks[dc.name] = one_nets
|
604
|
-
}
|
605
|
-
|
606
|
-
return vcenter_networks
|
607
|
-
end
|
608
|
-
|
609
|
-
|
610
|
-
########################################################################
|
611
|
-
# Builds a hash with the Datacenter / Datastores for this VCenter
|
612
|
-
# @param one_client [OpenNebula::Client] Use this client instead of @one
|
613
|
-
# @return [Hash] in the form
|
614
|
-
# { dc_name [String] => Datastore [Array] of DS templates}
|
615
|
-
########################################################################
|
616
|
-
def vcenter_datastores(one_client=nil)
|
617
|
-
ds_templates = {}
|
618
|
-
|
619
|
-
dspool = OpenNebula::DatastorePool.new(
|
620
|
-
(one_client||@one))
|
621
|
-
rc = dspool.info
|
622
|
-
if OpenNebula.is_error?(rc)
|
623
|
-
raise "Error contacting OpenNebula #{rc.message}"
|
624
|
-
end
|
625
|
-
|
626
|
-
hpool = OpenNebula::HostPool.new(
|
627
|
-
(one_client||@one))
|
628
|
-
rc = hpool.info
|
629
|
-
if OpenNebula.is_error?(rc)
|
630
|
-
raise "Error contacting OpenNebula #{rc.message}"
|
631
|
-
end
|
632
|
-
|
633
|
-
datacenters = VIClient.get_entities(@root, 'Datacenter')
|
634
|
-
|
635
|
-
datacenters.each { |dc|
|
636
|
-
one_tmp = []
|
637
|
-
datastores = VIClient.get_entities(dc.datastoreFolder, 'Datastore')
|
638
|
-
|
639
|
-
storage_pods = VIClient.get_entities(dc.datastoreFolder, 'StoragePod')
|
640
|
-
storage_pods.each { |sp|
|
641
|
-
datastores << sp # Add StoragePod
|
642
|
-
storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
|
643
|
-
if not storage_pod_datastores.empty?
|
644
|
-
datastores.concat(storage_pod_datastores)
|
645
|
-
end
|
646
|
-
}
|
647
|
-
|
648
|
-
datastores.each { |ds|
|
649
|
-
next if !ds.is_a? RbVmomi::VIM::Datastore and !ds.is_a? RbVmomi::VIM::StoragePod
|
650
|
-
# Find the Cluster from which to access this ds
|
651
|
-
|
652
|
-
cluster_name = ""
|
653
|
-
if ds.is_a? RbVmomi::VIM::StoragePod
|
654
|
-
storage_pod_datastores = VIClient.get_entities(ds, 'Datastore')
|
655
|
-
storage_pod_datastores.each { |sp|
|
656
|
-
next if !sp.is_a? RbVmomi::VIM::Datastore
|
657
|
-
# Find the Cluster from which to access this ds
|
658
|
-
next if !sp.host[0]
|
659
|
-
cluster_name = sp.host[0].key.parent.name
|
660
|
-
break
|
661
|
-
}
|
662
|
-
else
|
663
|
-
next if !ds.host[0]
|
664
|
-
cluster_name = ds.host[0].key.parent.name
|
665
|
-
end
|
666
|
-
|
667
|
-
if !dspool["DATASTORE[NAME=\"#{ds.name}\"]"] and
|
668
|
-
hpool["HOST[NAME=\"#{cluster_name}\"]"]
|
669
|
-
if ds.is_a? RbVmomi::VIM::StoragePod
|
670
|
-
one_tmp << {
|
671
|
-
:name => "#{ds.name}",
|
672
|
-
:total_mb => ((ds.summary.capacity.to_i / 1024) / 1024),
|
673
|
-
:free_mb => ((ds.summary.freeSpace.to_i / 1024) / 1024),
|
674
|
-
:cluster => cluster_name,
|
675
|
-
:one => "NAME=#{ds.name}\n"\
|
676
|
-
"TM_MAD=vcenter\n"\
|
677
|
-
"VCENTER_CLUSTER=#{cluster_name}\n"\
|
678
|
-
"TYPE=SYSTEM_DS\n" # StoragePods must be set as SYSTEM_DS
|
679
|
-
}
|
680
|
-
else
|
681
|
-
one_tmp << {
|
682
|
-
:name => "#{ds.name}",
|
683
|
-
:total_mb => ((ds.summary.capacity.to_i / 1024) / 1024),
|
684
|
-
:free_mb => ((ds.summary.freeSpace.to_i / 1024) / 1024),
|
685
|
-
:cluster => cluster_name,
|
686
|
-
:one => "NAME=#{ds.name}\n"\
|
687
|
-
"DS_MAD=vcenter\n"\
|
688
|
-
"TM_MAD=vcenter\n"\
|
689
|
-
"VCENTER_CLUSTER=#{cluster_name}\n"
|
690
|
-
}
|
691
|
-
end
|
692
|
-
end
|
693
|
-
}
|
694
|
-
ds_templates[dc.name] = one_tmp
|
695
|
-
}
|
696
|
-
|
697
|
-
return ds_templates
|
698
|
-
end
|
699
|
-
|
700
|
-
#############################################################################
|
701
|
-
# Builds a hash with the Images for a particular datastore
|
702
|
-
# @param one_client [OpenNebula::Client] Use this client instead of @one
|
703
|
-
# @return [Array] of image templates
|
704
|
-
############################################################################
|
705
|
-
def vcenter_images(ds_name, one_client=nil)
|
706
|
-
img_types = ["FloppyImageFileInfo",
|
707
|
-
"IsoImageFileInfo",
|
708
|
-
"VmDiskFileInfo"]
|
709
|
-
|
710
|
-
img_templates = []
|
711
|
-
|
712
|
-
ipool = OpenNebula::ImagePool.new((one_client||@one))
|
713
|
-
rc = ipool.info
|
714
|
-
if OpenNebula.is_error?(rc)
|
715
|
-
raise "Error contacting OpenNebula #{rc.message}"
|
716
|
-
end
|
717
|
-
|
718
|
-
dspool = OpenNebula::DatastorePool.new((one_client||@one))
|
719
|
-
rc = dspool.info
|
720
|
-
if OpenNebula.is_error?(rc)
|
721
|
-
raise "Error contacting OpenNebula #{rc.message}"
|
722
|
-
end
|
723
|
-
|
724
|
-
ds_id = dspool["DATASTORE[NAME=\"#{ds_name}\"]/ID"]
|
725
|
-
|
726
|
-
if !ds_id
|
727
|
-
raise "Datastore not found in OpenNebula. Please import"\
|
728
|
-
" it first and try again"
|
729
|
-
end
|
730
|
-
|
731
|
-
datacenters = VIClient.get_entities(@root, 'Datacenter')
|
732
|
-
|
733
|
-
datacenters.each { |dc|
|
734
|
-
|
735
|
-
# Find datastore within datacenter
|
736
|
-
datastores = VIClient.get_entities(dc.datastoreFolder, 'Datastore')
|
737
|
-
|
738
|
-
storage_pods = VIClient.get_entities(dc.datastoreFolder, 'StoragePod')
|
739
|
-
storage_pods.each { |sp|
|
740
|
-
storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
|
741
|
-
if not storage_pod_datastores.empty?
|
742
|
-
datastores.concat(storage_pod_datastores)
|
743
|
-
end
|
744
|
-
}
|
745
|
-
|
746
|
-
ds = datastores.select{|ds| ds.name == ds_name}[0]
|
747
|
-
next if !ds
|
748
|
-
|
749
|
-
# Cannot import from StoragePod directly
|
750
|
-
if ds.is_a? RbVmomi::VIM::StoragePod
|
751
|
-
raise "OpenNebula cannot import images from a StoragePod. Please import"\
|
752
|
-
" it from the datastore which is a member of the StorageDRS cluster"
|
753
|
-
end
|
754
|
-
|
755
|
-
# Create Search Spec
|
756
|
-
spec = RbVmomi::VIM::HostDatastoreBrowserSearchSpec.new
|
757
|
-
spec.query = [RbVmomi::VIM::VmDiskFileQuery.new,
|
758
|
-
RbVmomi::VIM::IsoImageFileQuery.new]
|
759
|
-
spec.details = RbVmomi::VIM::FileQueryFlags(:fileOwner => true,
|
760
|
-
:fileSize => true,
|
761
|
-
:fileType => true,
|
762
|
-
:modification => true)
|
763
|
-
spec.matchPattern=[]
|
764
|
-
|
765
|
-
search_params = {'datastorePath' => "[#{ds.name}]",
|
766
|
-
'searchSpec' => spec}
|
767
|
-
|
768
|
-
# Perform search task and return results
|
769
|
-
search_task=ds.browser.SearchDatastoreSubFolders_Task(search_params)
|
770
|
-
search_task.wait_for_completion
|
771
|
-
|
772
|
-
search_task.info.result.each { |image|
|
773
|
-
folderpath = ""
|
774
|
-
if image.folderPath[-1] != "]"
|
775
|
-
folderpath = image.folderPath.sub(/^\[#{ds_name}\] /, "")
|
776
|
-
end
|
777
|
-
|
778
|
-
image = image.file[0]
|
779
|
-
|
780
|
-
# Skip not relevant files
|
781
|
-
next if !img_types.include? image.class.to_s
|
782
|
-
|
783
|
-
image_path = folderpath + image.path
|
784
|
-
|
785
|
-
image_name = File.basename(image.path).reverse.sub("kdmv.","").reverse
|
786
|
-
|
787
|
-
if !ipool["IMAGE[NAME=\"#{image_name} - #{ds_name}\"]"]
|
788
|
-
img_templates << {
|
789
|
-
:name => "#{image_name} - #{ds_name}",
|
790
|
-
:path => image_path,
|
791
|
-
:size => (image.fileSize / 1024).to_s,
|
792
|
-
:type => image.class.to_s,
|
793
|
-
:dsid => ds_id,
|
794
|
-
:one => "NAME=\"#{image_name} - #{ds_name}\"\n"\
|
795
|
-
"PATH=\"vcenter://#{image_path}\"\n"\
|
796
|
-
"PERSISTENT=\"YES\"\n"\
|
797
|
-
}
|
798
|
-
|
799
|
-
if image.class.to_s == "VmDiskFileInfo"
|
800
|
-
img_templates[-1][:one] += "TYPE=\"OS\"\n"
|
801
|
-
else
|
802
|
-
img_templates[-1][:one] += "TYPE=\"CDROM\"\n"
|
803
|
-
end
|
804
|
-
|
805
|
-
if image.class.to_s == "VmDiskFileInfo" &&
|
806
|
-
!image.diskType.nil?
|
807
|
-
img_templates[-1][:one] += "DISK_TYPE=#{image.diskType}\n"
|
808
|
-
end
|
809
|
-
end
|
810
|
-
}
|
811
|
-
}
|
812
|
-
|
813
|
-
return img_templates
|
814
|
-
end
|
815
|
-
|
816
|
-
def self.translate_hostname(hostname)
|
817
|
-
host_pool = OpenNebula::HostPool.new(::OpenNebula::Client.new())
|
818
|
-
rc = host_pool.info
|
819
|
-
raise "Could not find host #{hostname}" if OpenNebula.is_error?(rc)
|
820
|
-
|
821
|
-
host = host_pool.select {|host_element| host_element.name==hostname }
|
822
|
-
return host.first.id
|
823
|
-
end
|
824
|
-
|
825
|
-
def self.find_ds_name(ds_id)
|
826
|
-
ds = OpenNebula::Datastore.new_with_id(ds_id)
|
827
|
-
rc = ds.info
|
828
|
-
raise "Could not find datastore #{ds_id}" if OpenNebula.is_error?(rc)
|
829
|
-
|
830
|
-
return ds.name
|
831
|
-
end
|
832
|
-
|
833
|
-
############################################################################
|
834
|
-
# Initialize an OpenNebula connection with the default ONE_AUTH
|
835
|
-
############################################################################
|
836
|
-
def initialize_one(one_client=nil)
|
837
|
-
begin
|
838
|
-
if one_client
|
839
|
-
@one = one_client
|
840
|
-
else
|
841
|
-
@one = ::OpenNebula::Client.new()
|
842
|
-
end
|
843
|
-
|
844
|
-
system = ::OpenNebula::System.new(@one)
|
845
|
-
|
846
|
-
config = system.get_configuration()
|
847
|
-
|
848
|
-
if ::OpenNebula.is_error?(config)
|
849
|
-
raise "Error getting oned configuration : #{config.message}"
|
850
|
-
end
|
851
|
-
|
852
|
-
@token = config["ONE_KEY"]
|
853
|
-
rescue Exception => e
|
854
|
-
raise "Error initializing OpenNebula client: #{e.message}"
|
855
|
-
end
|
856
|
-
end
|
857
|
-
|
858
|
-
############################################################################
|
859
|
-
# Initialize a connection with vCenter. Options
|
860
|
-
# @param options[Hash] with:
|
861
|
-
# :user => The vcenter user
|
862
|
-
# :password => Password for the user
|
863
|
-
# :host => vCenter hostname or IP
|
864
|
-
# :insecure => SSL (optional, defaults to true)
|
865
|
-
############################################################################
|
866
|
-
def initialize_vim(user_opts={})
|
867
|
-
opts = {
|
868
|
-
:insecure => true
|
869
|
-
}.merge(user_opts)
|
870
|
-
|
871
|
-
@user = opts[:user]
|
872
|
-
@pass = opts[:password]
|
873
|
-
@host = opts[:host]
|
874
|
-
|
875
|
-
begin
|
876
|
-
@vim = RbVmomi::VIM.connect(opts)
|
877
|
-
@root = @vim.root
|
878
|
-
@vdm = @vim.serviceContent.virtualDiskManager
|
879
|
-
@file_manager = @vim.serviceContent.fileManager
|
880
|
-
rescue Exception => e
|
881
|
-
raise "Error connecting to #{@host}: #{e.message}"
|
882
|
-
end
|
883
|
-
end
|
884
|
-
|
885
|
-
######################### Datastore Operations #############################
|
886
|
-
|
887
|
-
############################################################################
|
888
|
-
# Retrieve size for a VirtualDisk in a particular datastore
|
889
|
-
# @param ds_name [String] name of the datastore
|
890
|
-
# @param img_str [String] path to the VirtualDisk
|
891
|
-
# @return size of the file in Kb
|
892
|
-
############################################################################
|
893
|
-
def stat(ds_name, img_str)
|
894
|
-
img_path = File.dirname img_str
|
895
|
-
img_name = File.basename img_str
|
896
|
-
|
897
|
-
# Find datastore within datacenter
|
898
|
-
ds = get_datastore(ds_name)
|
899
|
-
|
900
|
-
# Create Search Spec
|
901
|
-
spec = RbVmomi::VIM::HostDatastoreBrowserSearchSpec.new
|
902
|
-
spec.query = [RbVmomi::VIM::VmDiskFileQuery.new,
|
903
|
-
RbVmomi::VIM::IsoImageFileQuery.new]
|
904
|
-
spec.details = RbVmomi::VIM::FileQueryFlags(:fileOwner => true,
|
905
|
-
:fileSize => true,
|
906
|
-
:fileType => true,
|
907
|
-
:modification => true)
|
908
|
-
spec.matchPattern=[img_name]
|
909
|
-
|
910
|
-
search_params = {'datastorePath' => "[#{ds_name}] #{img_path}",
|
911
|
-
'searchSpec' => spec}
|
912
|
-
|
913
|
-
# Perform search task and return results
|
914
|
-
search_task=ds.browser.SearchDatastoreSubFolders_Task(search_params)
|
915
|
-
search_task.wait_for_completion
|
916
|
-
(search_task.info.result[0].file[0].fileSize / 1024) / 1024
|
917
|
-
end
|
918
|
-
|
919
|
-
############################################################################
|
920
|
-
# Returns Datastore information
|
921
|
-
# @param ds_name [String] name of the datastore
|
922
|
-
# @return [String] monitor information of the DS
|
923
|
-
############################################################################
|
924
|
-
def monitor_ds(ds_name)
|
925
|
-
# Find datastore within datacenter
|
926
|
-
ds = get_datastore(ds_name)
|
927
|
-
|
928
|
-
total_mb = (ds.summary.capacity.to_i / 1024) / 1024
|
929
|
-
free_mb = (ds.summary.freeSpace.to_i / 1024) / 1024
|
930
|
-
used_mb = total_mb - free_mb
|
931
|
-
|
932
|
-
if ds.is_a? RbVmomi::VIM::Datastore
|
933
|
-
ds_type = ds.summary.type
|
934
|
-
end
|
935
|
-
|
936
|
-
"USED_MB=#{used_mb}\nFREE_MB=#{free_mb} \nTOTAL_MB=#{total_mb}"
|
937
|
-
end
|
938
|
-
|
939
|
-
############################################################################
|
940
|
-
# Copy a VirtualDisk
|
941
|
-
# @param ds_name [String] name of the datastore
|
942
|
-
# @param img_str [String] path to the VirtualDisk
|
943
|
-
############################################################################
|
944
|
-
def copy_virtual_disk(source_path, source_ds, target_path, target_ds=nil)
|
945
|
-
target_ds = source_ds if target_ds.nil?
|
946
|
-
|
947
|
-
copy_params= {:sourceName => "[#{source_ds}] #{source_path}",
|
948
|
-
:sourceDatacenter => @dc,
|
949
|
-
:destName => "[#{target_ds}] #{target_path}"}
|
950
|
-
|
951
|
-
@vdm.CopyVirtualDisk_Task(copy_params).wait_for_completion
|
952
|
-
|
953
|
-
target_path
|
954
|
-
end
|
955
|
-
|
956
|
-
############################################################################
|
957
|
-
# Create a VirtualDisk
|
958
|
-
# @param img_name [String] name of the image
|
959
|
-
# @param ds_name [String] name of the datastore on which the VD will be
|
960
|
-
# created
|
961
|
-
# @param size [String] size of the new image in MB
|
962
|
-
# @param adapter_type [String] as described in
|
963
|
-
# http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.wssdk.apiref.doc/vim.VirtualDiskManager.VirtualDiskAdapterType.html
|
964
|
-
# @param disk_type [String] as described in
|
965
|
-
# http://pubs.vmware.com/vsphere-60/index.jsp?topic=%2Fcom.vmware.wssdk.apiref.doc%2Fvim.VirtualDiskManager.VirtualDiskType.html
|
966
|
-
# @return name of the final image
|
967
|
-
############################################################################
|
968
|
-
def create_virtual_disk(img_name, ds_name, size, adapter_type, disk_type)
|
969
|
-
vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
|
970
|
-
:adapterType => adapter_type,
|
971
|
-
:capacityKb => size.to_i*1024,
|
972
|
-
:diskType => disk_type
|
973
|
-
)
|
974
|
-
|
975
|
-
@vdm.CreateVirtualDisk_Task(
|
976
|
-
:datacenter => @dc,
|
977
|
-
:name => "[#{ds_name}] #{img_name}.vmdk",
|
978
|
-
:spec => vmdk_spec
|
979
|
-
).wait_for_completion
|
980
|
-
|
981
|
-
"#{img_name}.vmdk"
|
982
|
-
end
|
983
|
-
|
984
|
-
############################################################################
|
985
|
-
# Delete a VirtualDisk
|
986
|
-
# @param img_name [String] name of the image
|
987
|
-
# @param ds_name [String] name of the datastore where the VD resides
|
988
|
-
############################################################################
|
989
|
-
def delete_virtual_disk(img_name, ds_name)
|
990
|
-
@vdm.DeleteVirtualDisk_Task(
|
991
|
-
name: "[#{ds_name}] #{img_name}",
|
992
|
-
datacenter: @dc
|
993
|
-
).wait_for_completion
|
994
|
-
end
|
995
|
-
|
996
|
-
############################################################################
|
997
|
-
# Delete a VirtualDisk
|
998
|
-
# @param directory [String] name of the new directory
|
999
|
-
# @param ds_name [String] name of the datastore where to create the dir
|
1000
|
-
############################################################################
|
1001
|
-
def create_directory(directory, ds_name)
|
1002
|
-
begin
|
1003
|
-
path = "[#{ds_name}] #{directory}"
|
1004
|
-
@file_manager.MakeDirectory(:name => path,
|
1005
|
-
:datacenter => @dc,
|
1006
|
-
:createParentDirectories => true)
|
1007
|
-
rescue RbVmomi::VIM::FileAlreadyExists => e
|
1008
|
-
end
|
1009
|
-
end
|
1010
|
-
|
1011
|
-
############################################################################
|
1012
|
-
# Silences standard output and error
|
1013
|
-
############################################################################
|
1014
|
-
def self.in_silence
|
1015
|
-
begin
|
1016
|
-
orig_stderr = $stderr.clone
|
1017
|
-
orig_stdout = $stdout.clone
|
1018
|
-
$stderr.reopen File.new('/dev/null', 'w')
|
1019
|
-
$stdout.reopen File.new('/dev/null', 'w')
|
1020
|
-
retval = yield
|
1021
|
-
rescue Exception => e
|
1022
|
-
$stdout.reopen orig_stdout
|
1023
|
-
$stderr.reopen orig_stderr
|
1024
|
-
raise e
|
1025
|
-
ensure
|
1026
|
-
$stdout.reopen orig_stdout
|
1027
|
-
$stderr.reopen orig_stderr
|
1028
|
-
end
|
1029
|
-
retval
|
1030
|
-
end
|
1031
|
-
|
1032
|
-
############################################################################
|
1033
|
-
# Silences standard output and error
|
1034
|
-
############################################################################
|
1035
|
-
def self.in_stderr_silence
|
1036
|
-
begin
|
1037
|
-
orig_stderr = $stderr.clone
|
1038
|
-
$stderr.reopen File.new('/dev/null', 'w')
|
1039
|
-
retval = yield
|
1040
|
-
rescue Exception => e
|
1041
|
-
$stderr.reopen orig_stderr
|
1042
|
-
raise e
|
1043
|
-
ensure
|
1044
|
-
$stderr.reopen orig_stderr
|
1045
|
-
end
|
1046
|
-
retval
|
1047
|
-
end
|
1048
|
-
end
|
1049
|
-
|
1050
|
-
################################################################################
|
1051
|
-
# Cached Classes to speed up import and monitoring
|
1052
|
-
################################################################################
|
1053
|
-
class VCenterCachedHost
|
1054
|
-
|
1055
|
-
def initialize(rbVmomiHost)
|
1056
|
-
@host = rbVmomiHost
|
1057
|
-
@attributes = Hash.new
|
1058
|
-
end
|
1059
|
-
|
1060
|
-
def name
|
1061
|
-
if !@attributes['name']
|
1062
|
-
@attributes['name']=@host.parent.name
|
1063
|
-
end
|
1064
|
-
@attributes['name']
|
1065
|
-
end
|
1066
|
-
|
1067
|
-
def cluster_name
|
1068
|
-
if !@attributes['cluster_name']
|
1069
|
-
@attributes['cluster_name']=@host.parent.name
|
1070
|
-
end
|
1071
|
-
@attributes['cluster_name']
|
1072
|
-
end
|
1073
|
-
|
1074
|
-
def ds_list
|
1075
|
-
if !@attributes['ds_list']
|
1076
|
-
@attributes['ds_list']=""
|
1077
|
-
|
1078
|
-
datacenter = @host.parent
|
1079
|
-
while !datacenter.is_a? RbVmomi::VIM::Datacenter
|
1080
|
-
datacenter = datacenter.parent
|
1081
|
-
end
|
1082
|
-
|
1083
|
-
datastores=VIClient.get_entities(
|
1084
|
-
datacenter.datastoreFolder,
|
1085
|
-
'Datastore')
|
1086
|
-
|
1087
|
-
storage_pods = VIClient.get_entities(datacenter.datastoreFolder,
|
1088
|
-
'StoragePod')
|
1089
|
-
storage_pods.each { |sp|
|
1090
|
-
datastores << sp # Add Storage Pod
|
1091
|
-
storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
|
1092
|
-
if not storage_pod_datastores.empty?
|
1093
|
-
datastores.concat(storage_pod_datastores)
|
1094
|
-
end
|
1095
|
-
}
|
1096
|
-
|
1097
|
-
datastores.each { |ds|
|
1098
|
-
@attributes['ds_list'] += ds.name + ","
|
1099
|
-
}
|
1100
|
-
@attributes['ds_list']=@attributes['ds_list'][0..-2]
|
1101
|
-
end
|
1102
|
-
@attributes['ds_list']
|
1103
|
-
end
|
1104
|
-
|
1105
|
-
def rp_list
|
1106
|
-
if !@attributes['rp_list']
|
1107
|
-
@attributes['rp_list']=""
|
1108
|
-
@host.parent.resourcePool.resourcePool.each{|rp|
|
1109
|
-
@attributes['rp_list'] += get_child_rp_names(rp, "")
|
1110
|
-
}
|
1111
|
-
@attributes['rp_list']=@attributes['rp_list'][0..-2]
|
1112
|
-
end
|
1113
|
-
@attributes['rp_list']
|
1114
|
-
end
|
1115
|
-
|
1116
|
-
def get_child_rp_names(rp, parent_prefix)
|
1117
|
-
rp_str = ""
|
1118
|
-
|
1119
|
-
current_rp = (parent_prefix.empty? ? "" : parent_prefix + "/")
|
1120
|
-
current_rp += rp.name
|
1121
|
-
|
1122
|
-
if rp.resourcePool.size != 0
|
1123
|
-
rp.resourcePool.each{|child_rp|
|
1124
|
-
rp_str += get_child_rp_names(child_rp, current_rp)
|
1125
|
-
}
|
1126
|
-
end
|
1127
|
-
|
1128
|
-
rp_str += current_rp + ","
|
1129
|
-
|
1130
|
-
return rp_str
|
1131
|
-
end
|
1132
|
-
|
1133
|
-
def cpumhz
|
1134
|
-
if !@attributes['cpumhz']
|
1135
|
-
@attributes['cpumhz']=@host.summary.hardware.cpuMhz.to_f
|
1136
|
-
end
|
1137
|
-
@attributes['cpumhz']
|
1138
|
-
end
|
1139
|
-
|
1140
|
-
end
|
1141
|
-
|
1142
|
-
class VCenterCachedDatastore
|
47
|
+
# ---------------------------------------------------------------------------- #
|
48
|
+
# vCenter Library #
|
49
|
+
# ---------------------------------------------------------------------------- #
|
1143
50
|
|
1144
|
-
|
1145
|
-
|
1146
|
-
|
1147
|
-
|
51
|
+
require 'memoize'
|
52
|
+
require 'vi_client'
|
53
|
+
require 'vi_helper'
|
54
|
+
require 'datacenter'
|
55
|
+
require 'host'
|
56
|
+
require 'datastore'
|
57
|
+
require 'virtual_machine'
|
58
|
+
require 'network'
|
59
|
+
require 'file_helper'
|
60
|
+
require 'importer'
|
1148
61
|
|
1149
|
-
|
1150
|
-
|
1151
|
-
|
1152
|
-
end
|
1153
|
-
@attributes['name']
|
1154
|
-
end
|
62
|
+
# ---------------------------------------------------------------------------- #
|
63
|
+
# Helper functions #
|
64
|
+
# ---------------------------------------------------------------------------- #
|
1155
65
|
|
66
|
+
def error_message(message)
|
67
|
+
error_str = "ERROR MESSAGE --8<------\n"
|
68
|
+
error_str << message
|
69
|
+
error_str << "\nERROR MESSAGE ------>8--"
|
1156
70
|
|
71
|
+
return error_str
|
1157
72
|
end
|
1158
73
|
|
1159
|
-
|
1160
|
-
|
1161
|
-
|
1162
|
-
|
1163
|
-
################################################################################
|
1164
|
-
class VCenterHost < ::OpenNebula::Host
|
1165
|
-
attr_reader :vc_client, :vc_root, :cluster, :host, :client
|
1166
|
-
|
1167
|
-
############################################################################
|
1168
|
-
# Initialize the VCenterHost by looking for the associated objects of the
|
1169
|
-
# VIM hierarchy
|
1170
|
-
# client [VIClient] to interact with the associated vCenter
|
1171
|
-
############################################################################
|
1172
|
-
def initialize(client)
|
1173
|
-
@client = client
|
1174
|
-
@cluster = client.cluster
|
1175
|
-
|
1176
|
-
@resource_pools = client.resource_pool
|
1177
|
-
end
|
1178
|
-
|
1179
|
-
########################################################################
|
1180
|
-
# Creates an OpenNebula host representing a cluster in this VCenter
|
1181
|
-
# @param cluster_name[String] the name of the cluster in the vcenter
|
1182
|
-
# @param client [VIClient] to create the host
|
1183
|
-
# @return In case of success [0, host_id] or [-1, error_msg]
|
1184
|
-
########################################################################
|
1185
|
-
def self.to_one(cluster_name, client)
|
1186
|
-
one_host = ::OpenNebula::Host.new(::OpenNebula::Host.build_xml,
|
1187
|
-
client.one)
|
1188
|
-
|
1189
|
-
rc = one_host.allocate(cluster_name, 'vcenter', 'vcenter',
|
1190
|
-
::OpenNebula::ClusterPool::NONE_CLUSTER_ID)
|
1191
|
-
|
1192
|
-
return -1, rc.message if ::OpenNebula.is_error?(rc)
|
1193
|
-
|
1194
|
-
template = "VCENTER_HOST=\"#{client.host}\"\n"\
|
1195
|
-
"VCENTER_PASSWORD=\"#{client.pass}\"\n"\
|
1196
|
-
"VCENTER_USER=\"#{client.user}\"\n"
|
1197
|
-
|
1198
|
-
rc = one_host.update(template, false)
|
1199
|
-
|
1200
|
-
if ::OpenNebula.is_error?(rc)
|
1201
|
-
error = rc.message
|
1202
|
-
|
1203
|
-
rc = one_host.delete
|
1204
|
-
|
1205
|
-
if ::OpenNebula.is_error?(rc)
|
1206
|
-
error << ". Host #{cluster_name} could not be"\
|
1207
|
-
" deleted: #{rc.message}."
|
1208
|
-
end
|
1209
|
-
|
1210
|
-
return -1, error
|
1211
|
-
end
|
1212
|
-
|
1213
|
-
return 0, one_host.id
|
74
|
+
def check_valid(parameter, label)
|
75
|
+
if parameter.nil? || parameter.empty?
|
76
|
+
STDERR.puts error_message("The parameter '#{label}' is required for this action.")
|
77
|
+
exit -1
|
1214
78
|
end
|
1215
|
-
|
1216
|
-
############################################################################
|
1217
|
-
# Generate an OpenNebula monitor string for this host. Reference:
|
1218
|
-
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/Reference
|
1219
|
-
# Guide/vim.ComputeResource.Summary.html
|
1220
|
-
# - effectiveCpu: Effective CPU resources (in MHz) available to run
|
1221
|
-
# VMs. This is the aggregated from all running hosts excluding hosts in
|
1222
|
-
# maintenance mode or unresponsive are not counted.
|
1223
|
-
# - effectiveMemory: Effective memory resources (in MB) available to run
|
1224
|
-
# VMs. Equivalente to effectiveCpu.
|
1225
|
-
# - numCpuCores: Number of physical CPU cores.
|
1226
|
-
# - numEffectiveHosts: Total number of effective hosts.
|
1227
|
-
# - numHosts:Total number of hosts.
|
1228
|
-
# - totalCpu: Aggregated CPU resources of all hosts, in MHz.
|
1229
|
-
# - totalMemory: Aggregated memory resources of all hosts, in bytes.
|
1230
|
-
############################################################################
|
1231
|
-
def monitor_cluster
|
1232
|
-
#Load the host systems
|
1233
|
-
summary = @cluster.summary
|
1234
|
-
|
1235
|
-
mhz_core = summary.totalCpu.to_f / summary.numCpuCores.to_f
|
1236
|
-
eff_core = summary.effectiveCpu.to_f / mhz_core
|
1237
|
-
|
1238
|
-
free_cpu = sprintf('%.2f', eff_core * 100).to_f
|
1239
|
-
total_cpu = summary.numCpuCores.to_f * 100
|
1240
|
-
used_cpu = sprintf('%.2f', total_cpu - free_cpu).to_f
|
1241
|
-
|
1242
|
-
total_mem = summary.totalMemory.to_i / 1024
|
1243
|
-
free_mem = summary.effectiveMemory.to_i * 1024
|
1244
|
-
|
1245
|
-
str_info = ""
|
1246
|
-
|
1247
|
-
# System
|
1248
|
-
str_info << "HYPERVISOR=vcenter\n"
|
1249
|
-
str_info << "PUBLIC_CLOUD=YES\n"
|
1250
|
-
str_info << "TOTALHOST=" << summary.numHosts.to_s << "\n"
|
1251
|
-
str_info << "AVAILHOST=" << summary.numEffectiveHosts.to_s << "\n"
|
1252
|
-
|
1253
|
-
# CPU
|
1254
|
-
str_info << "CPUSPEED=" << mhz_core.to_s << "\n"
|
1255
|
-
str_info << "TOTALCPU=" << total_cpu.to_s << "\n"
|
1256
|
-
str_info << "USEDCPU=" << used_cpu.to_s << "\n"
|
1257
|
-
str_info << "FREECPU=" << free_cpu.to_s << "\n"
|
1258
|
-
|
1259
|
-
# Memory
|
1260
|
-
str_info << "TOTALMEMORY=" << total_mem.to_s << "\n"
|
1261
|
-
str_info << "FREEMEMORY=" << free_mem.to_s << "\n"
|
1262
|
-
str_info << "USEDMEMORY=" << (total_mem - free_mem).to_s
|
1263
|
-
|
1264
|
-
str_info << monitor_resource_pools(@cluster.resourcePool, "", mhz_core)
|
1265
|
-
end
|
1266
|
-
|
1267
|
-
############################################################################
|
1268
|
-
# Generate an OpenNebula monitor string for all resource pools of a cluster
|
1269
|
-
# Reference:
|
1270
|
-
# http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.wssdk.apiref.doc
|
1271
|
-
# /vim.ResourcePool.html
|
1272
|
-
############################################################################
|
1273
|
-
def monitor_resource_pools(parent_rp, parent_prefix, mhz_core)
|
1274
|
-
return "" if parent_rp.resourcePool.size == 0
|
1275
|
-
|
1276
|
-
rp_info = ""
|
1277
|
-
|
1278
|
-
parent_rp.resourcePool.each{|rp|
|
1279
|
-
rpcpu = rp.config.cpuAllocation
|
1280
|
-
rpmem = rp.config.memoryAllocation
|
1281
|
-
# CPU
|
1282
|
-
cpu_expandable = rpcpu.expandableReservation ? "YES" : "NO"
|
1283
|
-
cpu_limit = rpcpu.limit == "-1" ? "UNLIMITED" : rpcpu.limit
|
1284
|
-
cpu_reservation = rpcpu.reservation
|
1285
|
-
cpu_num = rpcpu.reservation.to_f / mhz_core
|
1286
|
-
cpu_shares_level = rpcpu.shares.level
|
1287
|
-
cpu_shares = rpcpu.shares.shares
|
1288
|
-
|
1289
|
-
# MEMORY
|
1290
|
-
mem_expandable = rpmem.expandableReservation ? "YES" : "NO"
|
1291
|
-
mem_limit = rpmem.limit == "-1" ? "UNLIMITED" : rpmem.limit
|
1292
|
-
mem_reservation = rpmem.reservation.to_f
|
1293
|
-
mem_shares_level = rpmem.shares.level
|
1294
|
-
mem_shares = rpmem.shares.shares
|
1295
|
-
|
1296
|
-
rp_name = (parent_prefix.empty? ? "" : parent_prefix + "/")
|
1297
|
-
rp_name += rp.name
|
1298
|
-
|
1299
|
-
rp_info << "\nRESOURCE_POOL = ["
|
1300
|
-
rp_info << "NAME=\"#{rp_name}\","
|
1301
|
-
rp_info << "CPU_EXPANDABLE=#{cpu_expandable},"
|
1302
|
-
rp_info << "CPU_LIMIT=#{cpu_limit},"
|
1303
|
-
rp_info << "CPU_RESERVATION=#{cpu_reservation},"
|
1304
|
-
rp_info << "CPU_RESERVATION_NUM_CORES=#{cpu_num},"
|
1305
|
-
rp_info << "CPU_SHARES=#{cpu_shares},"
|
1306
|
-
rp_info << "CPU_SHARES_LEVEL=#{cpu_shares_level},"
|
1307
|
-
rp_info << "MEM_EXPANDABLE=#{mem_expandable},"
|
1308
|
-
rp_info << "MEM_LIMIT=#{mem_limit},"
|
1309
|
-
rp_info << "MEM_RESERVATION=#{mem_reservation},"
|
1310
|
-
rp_info << "MEM_SHARES=#{mem_shares},"
|
1311
|
-
rp_info << "MEM_SHARES_LEVEL=#{mem_shares_level}"
|
1312
|
-
rp_info << "]"
|
1313
|
-
|
1314
|
-
if rp.resourcePool.size != 0
|
1315
|
-
rp_info << monitor_resource_pools(rp, rp_name, mhz_core)
|
1316
|
-
end
|
1317
|
-
}
|
1318
|
-
|
1319
|
-
return rp_info
|
1320
|
-
end
|
1321
|
-
|
1322
|
-
############################################################################
|
1323
|
-
# Generate a template with information for each ESX Host. Reference:
|
1324
|
-
# http://pubs.vmware.com/vi-sdk/visdk250/ReferenceGuide/vim.HostSystem.html
|
1325
|
-
# - Summary: Basic information about the host, including connection state
|
1326
|
-
# - hardware: Hardware configuration of the host. This might not be
|
1327
|
-
# available for a disconnected host.
|
1328
|
-
# - quickStats: Basic host statistics.
|
1329
|
-
############################################################################
|
1330
|
-
def monitor_host_systems
|
1331
|
-
host_info = ""
|
1332
|
-
|
1333
|
-
@cluster.host.each{|h|
|
1334
|
-
next if h.runtime.connectionState != "connected"
|
1335
|
-
|
1336
|
-
summary = h.summary
|
1337
|
-
hw = summary.hardware
|
1338
|
-
stats = summary.quickStats
|
1339
|
-
|
1340
|
-
total_cpu = hw.numCpuCores * 100
|
1341
|
-
used_cpu = (stats.overallCpuUsage.to_f / hw.cpuMhz.to_f) * 100
|
1342
|
-
used_cpu = sprintf('%.2f', used_cpu).to_f # Trim precission
|
1343
|
-
free_cpu = total_cpu - used_cpu
|
1344
|
-
|
1345
|
-
total_memory = hw.memorySize/1024
|
1346
|
-
used_memory = stats.overallMemoryUsage*1024
|
1347
|
-
free_memory = total_memory - used_memory
|
1348
|
-
|
1349
|
-
host_info << "\nHOST=["
|
1350
|
-
host_info << "STATE=on,"
|
1351
|
-
host_info << "HOSTNAME=\"" << h.name.to_s << "\","
|
1352
|
-
host_info << "MODELNAME=\"" << hw.cpuModel.to_s << "\","
|
1353
|
-
host_info << "CPUSPEED=" << hw.cpuMhz.to_s << ","
|
1354
|
-
host_info << "MAX_CPU=" << total_cpu.to_s << ","
|
1355
|
-
host_info << "USED_CPU=" << used_cpu.to_s << ","
|
1356
|
-
host_info << "FREE_CPU=" << free_cpu.to_s << ","
|
1357
|
-
host_info << "MAX_MEM=" << total_memory.to_s << ","
|
1358
|
-
host_info << "USED_MEM=" << used_memory.to_s << ","
|
1359
|
-
host_info << "FREE_MEM=" << free_memory.to_s
|
1360
|
-
host_info << "]"
|
1361
|
-
}
|
1362
|
-
|
1363
|
-
return host_info
|
1364
|
-
end
|
1365
|
-
|
1366
|
-
def monitor_vms
|
1367
|
-
# Only monitor from top level (Resource) Resource Pool
|
1368
|
-
monitor_vms_in_rp(@resource_pools[-1])
|
1369
|
-
end
|
1370
|
-
|
1371
|
-
|
1372
|
-
def monitor_vms_in_rp(rp)
|
1373
|
-
str_info = ""
|
1374
|
-
|
1375
|
-
if rp.resourcePool.size != 0
|
1376
|
-
rp.resourcePool.each{|child_rp|
|
1377
|
-
str_info += monitor_vms_in_rp(child_rp)
|
1378
|
-
}
|
1379
|
-
end
|
1380
|
-
|
1381
|
-
host_cache = {}
|
1382
|
-
|
1383
|
-
rp.vm.each { |v|
|
1384
|
-
begin
|
1385
|
-
# Check cached objects
|
1386
|
-
if !host_cache[v.runtime.host.to_s]
|
1387
|
-
host_cache[v.runtime.host.to_s] =
|
1388
|
-
VCenterCachedHost.new v.runtime.host
|
1389
|
-
end
|
1390
|
-
|
1391
|
-
host = host_cache[v.runtime.host.to_s]
|
1392
|
-
|
1393
|
-
name = v.name
|
1394
|
-
number = -1
|
1395
|
-
vm_extra_config = v.config.extraConfig
|
1396
|
-
|
1397
|
-
# Check the running flag
|
1398
|
-
running_flag = v.config.extraConfig.select{|val|
|
1399
|
-
val[:key]=="opennebula.vm.running"}
|
1400
|
-
if running_flag.size > 0 and running_flag[0]
|
1401
|
-
running_flag = running_flag[0][:value]
|
1402
|
-
end
|
1403
|
-
|
1404
|
-
next if running_flag == "no"
|
1405
|
-
|
1406
|
-
# Extract vmid if possible
|
1407
|
-
matches = name.match(/^one-(\d*)(-(.*))?$/)
|
1408
|
-
number = matches[1] if matches
|
1409
|
-
extraconfig_vmid = v.config.extraConfig.select{|val|
|
1410
|
-
val[:key]=="opennebula.vm.id"}
|
1411
|
-
if extraconfig_vmid.size > 0 and extraconfig_vmid[0]
|
1412
|
-
number = extraconfig_vmid[0][:value]
|
1413
|
-
end
|
1414
|
-
vm = VCenterVm.new(@client, v)
|
1415
|
-
vm.monitor(host)
|
1416
|
-
next if !vm.vm.config
|
1417
|
-
str_info << "\nVM = ["
|
1418
|
-
str_info << "ID=#{number},"
|
1419
|
-
str_info << "DEPLOY_ID=\"#{vm.vm.config.uuid}\","
|
1420
|
-
str_info << "VM_NAME=\"#{name} - "\
|
1421
|
-
"#{host.cluster_name}\","
|
1422
|
-
if number == -1
|
1423
|
-
vm_template_to_one =
|
1424
|
-
Base64.encode64(vm.vm_to_one(host)).gsub("\n","")
|
1425
|
-
str_info << "IMPORT_TEMPLATE=\"#{vm_template_to_one}\","
|
1426
|
-
end
|
1427
|
-
str_info << "POLL=\"#{vm.info}\"]"
|
1428
|
-
rescue Exception => e
|
1429
|
-
STDERR.puts e.inspect
|
1430
|
-
STDERR.puts e.backtrace
|
1431
|
-
end
|
1432
|
-
}
|
1433
|
-
return str_info
|
1434
|
-
end
|
1435
|
-
|
-    def monitor_customizations
-        customizations = client.vim.serviceContent.customizationSpecManager.info
-
-        text = ''
-
-        customizations.each do |c|
-            t = "CUSTOMIZATION = [ "
-            t << %Q<NAME = "#{c.name}", >
-            t << %Q<TYPE = "#{c.type}" ]\n>
-
-            text << t
-        end
-
-        text
-    end
-
-    def get_available_ds
-        str_info = ""
-
-        datastores = VIClient.get_entities(client.dc.datastoreFolder,
-                                           'Datastore')
-
-        storage_pods = VIClient.get_entities(client.dc.datastoreFolder,
-                                             'StoragePod')
-
-        storage_pods.each { |sp|
-            datastores << sp
-            storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
-            if not storage_pod_datastores.empty?
-                datastores.concat(storage_pod_datastores)
-            end
-        }
-
-        datastores.each { |ds|
-            str_info += "VCENTER_DATASTORE=\"#{ds.name}\"\n"
-        }
-        str_info.chomp
-    end
-end
-
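For reference, get_available_ds flattens plain datastores, Storage DRS pods, and the pods' member datastores into one list and renders one line per entry. A sketch of the rendering step with hypothetical names:

    # Hypothetical datastore/pod names, only to show the output shape.
    names = ["ds1", "pod1", "pod1-member-ds"]
    str = names.map { |n| "VCENTER_DATASTORE=\"#{n}\"\n" }.join.chomp
    # VCENTER_DATASTORE="ds1"
    # VCENTER_DATASTORE="pod1"
    # VCENTER_DATASTORE="pod1-member-ds"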
-################################################################################
-# This class is a high level abstraction of a VI VirtualMachine class with
-# OpenNebula semantics.
-################################################################################
-
-class VCenterVm
-    attr_reader :vm
-
-    POLL_ATTRIBUTE = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
-    VM_STATE       = OpenNebula::VirtualMachine::Driver::VM_STATE
-
-    ############################################################################
-    #  Creates a new VIVm using a RbVmomi::VirtualMachine object
-    #    @param client [VCenterClient] client to connect to vCenter
-    #    @param vm_vi [RbVmomi::VirtualMachine] it will be used if not nil
-    ############################################################################
-    def initialize(client, vm_vi)
-        @vm     = vm_vi
-        @client = client
-
-        @used_cpu    = 0
-        @used_memory = 0
-
-        @netrx = 0
-        @nettx = 0
-    end
-
-    ############################################################################
-    # Deploys a VM
-    #  @xml_text XML representation of the VM
-    ############################################################################
-    def self.deploy(xml_text, lcm_state, deploy_id, hostname, datastore = nil,
-                    ops = {})
-        if lcm_state == "BOOT" || lcm_state == "BOOT_FAILURE"
-            return clone_vm(xml_text, hostname, datastore, ops)
-        else
-            hid        = VIClient::translate_hostname(hostname)
-            connection = VIClient.new(hid)
-            vm         = connection.find_vm_fast(deploy_id,
-                                                 ops[:ref],
-                                                 ops[:name])
-            xml = REXML::Document.new xml_text
-
-            reconfigure_vm(vm, xml, false, hostname)
-
-            vm.PowerOnVM_Task.wait_for_completion
-            return vm.config.uuid
-        end
-    end
-
-    ############################################################################
-    # Cancels a VM
-    #  @param deploy_id vcenter identifier of the VM
-    #  @param hostname name of the host (equals the vCenter cluster)
-    #  @param lcm_state state of the VM
-    #  @param keep_disks keep or not VM disks in datastore
-    #  @param disks VM attached disks
-    #  @param to_template whether this VM has been instantiated as persistent
-    ############################################################################
-    def self.cancel(deploy_id, hostname, lcm_state, keep_disks, disks, to_template)
-        case lcm_state
-        when "SHUTDOWN_POWEROFF", "SHUTDOWN_UNDEPLOY"
-            shutdown(deploy_id, hostname, lcm_state, keep_disks, disks, to_template)
-        when "CANCEL", "LCM_INIT", "CLEANUP_RESUBMIT", "SHUTDOWN", "CLEANUP_DELETE"
-            hid        = VIClient::translate_hostname(hostname)
-            connection = VIClient.new(hid)
-            vm         = connection.find_vm_template(deploy_id)
-
-            begin
-                if vm.summary.runtime.powerState == "poweredOn"
-                    vm.PowerOffVM_Task.wait_for_completion
-                end
-            rescue
-            end
-            if keep_disks
-                detach_all_disks(vm)
-            else
-                detach_attached_disks(vm, disks, hostname) if disks
-            end
-
-            # If the VM was instantiated to persistent, convert the VM to
-            # vCenter VM Template and update the OpenNebula new
-            # VM Template to point to the new vCenter VM Template
-            if !to_template.nil?
-                vm.MarkAsTemplate
-
-                new_template = OpenNebula::Template.new_with_id(to_template,
-                                                                OpenNebula::Client.new)
-                new_template.info
-
-                public_cloud_str = "PUBLIC_CLOUD=["
-
-                new_template.to_hash["VMTEMPLATE"]["TEMPLATE"]["PUBLIC_CLOUD"].each{|k,v|
-                    if k == "VM_TEMPLATE"
-                        public_cloud_str += "VM_TEMPLATE=\"#{deploy_id}\",\n"
-                    else
-                        public_cloud_str += "#{k}=\"#{v}\",\n"
-                    end
-                }
-
-                public_cloud_str = public_cloud_str + "]"
-
-                new_template.update(public_cloud_str, true)
-            else
-                vm.Destroy_Task.wait_for_completion
-            end
-        else
-            raise "LCM_STATE #{lcm_state} not supported for cancel"
-        end
-    end
-
-
-    ############################################################################
-    # Saves a VM
-    #  @param deploy_id vcenter identifier of the VM
-    #  @param hostname name of the host (equals the vCenter cluster)
-    ############################################################################
-    def self.save(deploy_id, hostname, lcm_state)
-        case lcm_state
-        when "SAVE_MIGRATE"
-            raise "Migration between vCenters cluster not supported"
-        when "SAVE_SUSPEND", "SAVE_STOP"
-            hid        = VIClient::translate_hostname(hostname)
-            connection = VIClient.new(hid)
-            vm         = connection.find_vm_template(deploy_id)
-
-            vm.SuspendVM_Task.wait_for_completion
-        end
-    end
-
-    ############################################################################
-    # Resumes a VM
-    #  @param deploy_id vcenter identifier of the VM
-    #  @param hostname name of the host (equals the vCenter cluster)
-    ############################################################################
-    def self.resume(deploy_id, hostname)
-        hid        = VIClient::translate_hostname(hostname)
-        connection = VIClient.new(hid)
-        vm         = connection.find_vm_template(deploy_id)
-
-        vm.PowerOnVM_Task.wait_for_completion
-    end
-
-    ############################################################################
-    # Reboots a VM
-    #  @param deploy_id vcenter identifier of the VM
-    #  @param hostname name of the host (equals the vCenter cluster)
-    ############################################################################
-    def self.reboot(deploy_id, hostname)
-        hid        = VIClient::translate_hostname(hostname)
-        connection = VIClient.new(hid)
-
-        vm = connection.find_vm_template(deploy_id)
-
-        vm.RebootGuest.wait_for_completion
-    end
-
-    ############################################################################
-    # Resets a VM
-    #  @param deploy_id vcenter identifier of the VM
-    #  @param hostname name of the host (equals the vCenter cluster)
-    ############################################################################
-    def self.reset(deploy_id, hostname)
-        hid        = VIClient::translate_hostname(hostname)
-        connection = VIClient.new(hid)
-
-        vm = connection.find_vm_template(deploy_id)
-
-        vm.ResetVM_Task.wait_for_completion
-    end
-
-    ############################################################################
-    # Shutdown a VM
-    #  @param deploy_id vcenter identifier of the VM
-    #  @param hostname name of the host (equals the vCenter cluster)
-    #  @param lcm_state state of the VM
-    #  @param keep_disks keep or not VM disks in datastore
-    #  @param disks VM attached disks
-    #  @param to_template whether this VM has been instantiated as persistent
-    ############################################################################
-    def self.shutdown(deploy_id, hostname, lcm_state, keep_disks, disks, to_template)
-        hid        = VIClient::translate_hostname(hostname)
-        connection = VIClient.new(hid)
-
-        vm = connection.find_vm_template(deploy_id)
-
-        case lcm_state
-        when "SHUTDOWN"
-            begin
-                vm.ShutdownGuest
-                counter = 60*10 # 10 minutes
-                while counter > 0
-                    break if vm.runtime.powerState == "poweredOff"
-                    counter -= 1
-                    sleep 1
-                end
-            rescue
-            end
-
-            if vm.runtime.powerState != "poweredOff"
-                vm.PowerOffVM_Task.wait_for_completion
-            end
-
-            if keep_disks
-                detach_all_disks(vm)
-            else
-                detach_attached_disks(vm, disks, hostname) if disks
-            end
-
-            # If the VM was instantiated to persistent, convert the VM to
-            # vCenter VM Template and update the OpenNebula new
-            # VM Template to point to the new vCenter VM Template
-            if !to_template.nil?
-                vm.MarkAsTemplate
-
-                new_template = OpenNebula::Template.new_with_id(to_template,
-                                                                OpenNebula::Client.new)
-                new_template.info
-
-                public_cloud_str = "PUBLIC_CLOUD=["
-
-                new_template.to_hash["VMTEMPLATE"]["TEMPLATE"]["PUBLIC_CLOUD"].each{|k,v|
-                    if k == "VM_TEMPLATE"
-                        public_cloud_str += "VM_TEMPLATE=\"#{deploy_id}\"\n"
-                    else
-                        public_cloud_str += "#{k}=\"#{v}\",\n"
-                    end
-                }
-
-                public_cloud_str = public_cloud_str + "]"
-
-                new_template.update(public_cloud_str, true)
-            else
-                vm.Destroy_Task.wait_for_completion
-            end
-
-        when "SHUTDOWN_POWEROFF", "SHUTDOWN_UNDEPLOY"
-            begin
-                vm.ShutdownGuest
-                counter = 60*10 # 10 minutes
-                while counter > 0
-                    break if vm.runtime.powerState == "poweredOff"
-                    counter -= 1
-                    sleep 1
-                end
-            rescue
-            end
-
-            if vm.runtime.powerState != "poweredOff"
-                vm.PowerOffVM_Task.wait_for_completion
-            end
-        end
-    end
-
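Both branches above repeat the same guest-shutdown poll loop. The pattern, extracted as a standalone sketch (method and timeout names are mine, not from the driver):

    # Sketch: ask the guest OS to shut down, then fall back to a hard
    # power-off if it has not reached "poweredOff" within the timeout.
    def graceful_poweroff(vm, timeout = 600)
      begin
        vm.ShutdownGuest
        timeout.times do
          break if vm.runtime.powerState == "poweredOff"
          sleep 1
        end
      rescue
        # Tools not running or shutdown rejected; fall through to hard off.
      end

      if vm.runtime.powerState != "poweredOff"
        vm.PowerOffVM_Task.wait_for_completion
      end
    end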
-    ############################################################################
-    # Create VM snapshot
-    #  @param deploy_id vcenter identifier of the VM
-    #  @param hostname name of the host (equals the vCenter cluster)
-    #  @param snapshot_name name of the snapshot
-    ############################################################################
-    def self.create_snapshot(deploy_id, hostname, snapshot_name)
-        hid        = VIClient::translate_hostname(hostname)
-        connection = VIClient.new(hid)
-
-        snapshot_hash = {
-            :name        => snapshot_name,
-            :description => "OpenNebula Snapshot of VM #{deploy_id}",
-            :memory      => true,
-            :quiesce     => true
-        }
-
-        vm = connection.find_vm_template(deploy_id)
-
-        vm.CreateSnapshot_Task(snapshot_hash).wait_for_completion
-
-        return snapshot_name
-    end
-
-    ############################################################################
-    # Find VM snapshot
-    #  @param list root list of VM snapshots
-    #  @param snapshot_name name of the snapshot
-    ############################################################################
-    def self.find_snapshot(list, snapshot_name)
-        list.each do |i|
-            if i.name == snapshot_name
-                return i.snapshot
-            elsif !i.childSnapshotList.empty?
-                snap = find_snapshot(i.childSnapshotList, snapshot_name)
-                return snap if snap
-            end
-        end
-
-        nil
-    end
-
-    ############################################################################
-    # Delete VM snapshot
-    #  @param deploy_id vcenter identifier of the VM
-    #  @param hostname name of the host (equals the vCenter cluster)
-    #  @param snapshot_name name of the snapshot
-    ############################################################################
-    def self.delete_snapshot(deploy_id, hostname, snapshot_name)
-        hid        = VIClient::translate_hostname(hostname)
-        connection = VIClient.new(hid)
-
-        vm = connection.find_vm_template(deploy_id)
-
-        list = vm.snapshot.rootSnapshotList
-
-        snapshot = find_snapshot(list, snapshot_name)
-        return nil if !snapshot
-
-        delete_snapshot_hash = {
-            :_this          => snapshot,
-            :removeChildren => false
-        }
-
-        snapshot.RemoveSnapshot_Task(delete_snapshot_hash).wait_for_completion
-    end
-
-    ############################################################################
-    # Revert VM snapshot
-    #  @param deploy_id vcenter identifier of the VM
-    #  @param hostname name of the host (equals the vCenter cluster)
-    #  @param snapshot_name name of the snapshot
-    ############################################################################
-    def self.revert_snapshot(deploy_id, hostname, snapshot_name)
-        hid        = VIClient::translate_hostname(hostname)
-        connection = VIClient.new(hid)
-
-        vm = connection.find_vm_template(deploy_id)
-
-        list = vm.snapshot.rootSnapshotList
-
-        snapshot = find_snapshot(list, snapshot_name)
-        return nil if !snapshot
-
-        revert_snapshot_hash = {
-            :_this => snapshot
-        }
-
-        snapshot.RevertToSnapshot_Task(revert_snapshot_hash).wait_for_completion
-    end
-
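find_snapshot above walks the snapshot tree depth-first. Given a nested rootSnapshotList like the following (a fabricated structure, mirroring only the shape of the vSphere objects), it returns the matching node's snapshot handle:

    require 'ostruct'

    # Fabricated tree mirroring vm.snapshot.rootSnapshotList's shape.
    leaf = OpenStruct.new(name: "pre-upgrade", snapshot: :snap_ref,
                          childSnapshotList: [])
    root = OpenStruct.new(name: "base", snapshot: :base_ref,
                          childSnapshotList: [leaf])

    # Same depth-first traversal as the removed find_snapshot.
    def find_snapshot(list, snapshot_name)
      list.each do |i|
        return i.snapshot if i.name == snapshot_name
        unless i.childSnapshotList.empty?
          snap = find_snapshot(i.childSnapshotList, snapshot_name)
          return snap if snap
        end
      end
      nil
    end

    find_snapshot([root], "pre-upgrade") #=> :snap_ref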
-    ############################################################################
-    # Attach NIC to a VM
-    #  @param deploy_id vcenter identifier of the VM
-    #  @param mac MAC address of the NIC to be attached
-    #  @param bridge name of the Network in vCenter
-    #  @param model model of the NIC to be attached
-    #  @param host hostname of the ESX where the VM is running
-    ############################################################################
-    def self.attach_nic(deploy_id, mac, bridge, model, host)
-        hid        = VIClient::translate_hostname(host)
-        connection = VIClient.new(hid)
-
-        vm = connection.find_vm_template(deploy_id)
-
-        spec_nics = calculate_addnic_spec(vm, mac, bridge, model)
-
-        spec_hash = {:deviceChange => [spec_nics]}
-
-        #B4897 track hot plugged nics
-        hotplugged_nics = vm.config.extraConfig.select do |val|
-            val[:key] == "opennebula.hotplugged_nics"
-        end
-
-        if hotplugged_nics && !hotplugged_nics.empty?
-            hotplugged_nics = hotplugged_nics[0][:value].to_s.split(";")
-            hotplugged_nics << mac.to_s if !hotplugged_nics.include?(mac)
-        else
-            hotplugged_nics = []
-            hotplugged_nics << mac.to_s
-        end
-
-        config_array = [{:key=>"opennebula.hotplugged_nics",
-                         :value=>hotplugged_nics.join(";")}]
-        extra_config_spec = {:extraConfig =>config_array}
-
-        spec_hash.merge!(extra_config_spec)
-
-        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
-
-        vm.ReconfigVM_Task(:spec => spec).wait_for_completion
-    end
-
-    ############################################################################
-    # Detach NIC from a VM
-    ############################################################################
-    def self.detach_nic(deploy_id, mac, host)
-        hid        = VIClient::translate_hostname(host)
-        connection = VIClient.new(hid)
-
-        vm = connection.find_vm_template(deploy_id)
-
-        nic = vm.config.hardware.device.find { |d|
-            is_nic?(d) && (d.macAddress == mac)
-        }
-
-        raise "Could not find NIC with mac address #{mac}" if nic.nil?
-
-        #B4897 track hot plugged nics
-        hotplugged_nics = vm.config.extraConfig.select do |val|
-            val[:key] == "opennebula.hotplugged_nics"
-        end
-
-        config_array = []
-        if hotplugged_nics && !hotplugged_nics.empty?
-            hotplugged_nics = hotplugged_nics[0][:value].to_s.split(";")
-            hotplugged_nics.delete(mac) # remove hotplugged nic
-            config_array = [{:key=>"opennebula.hotplugged_nics",
-                             :value=>hotplugged_nics.join(";")}]
-        end
-
-        spec = {
-            :deviceChange => [
-                :operation => :remove,
-                :device    => nic
-            ],
-            :extraConfig => config_array
-        }
-
-        vm.ReconfigVM_Task(:spec => spec).wait_for_completion
-    end
-
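The opennebula.hotplugged_nics key used by both methods is a single extraConfig string of MAC addresses joined with ';'. A sketch of the round-trip, with illustrative MACs:

    # Serialized form stored in the VM's extraConfig (illustrative MACs).
    value = "02:00:c0:a8:00:01;02:00:c0:a8:00:02"

    macs = value.split(";")           #=> ["02:00:c0:a8:00:01", "02:00:c0:a8:00:02"]
    macs.delete("02:00:c0:a8:00:01")  # bookkeeping on detach
    value = macs.join(";")            #=> "02:00:c0:a8:00:02"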
-    ############################################################################
-    # Reconfigures a VM (context data)
-    #  @param deploy_id vcenter identifier of the VM
-    #  @param hostname name of the host (equals the vCenter cluster)
-    #  @param xml_text XML representation of the VM
-    ############################################################################
-    def self.reconfigure(deploy_id, hostname, xml_text)
-        hid        = VIClient::translate_hostname(hostname)
-        connection = VIClient.new(hid)
-        vm         = connection.find_vm_template(deploy_id)
-
-        xml     = REXML::Document.new xml_text
-        context = xml.root.elements["//TEMPLATE/CONTEXT"]
-
-        if context
-            context_text = create_context(context)
-            context_spec = {
-                :extraConfig => [
-                    { :key=>"guestinfo.opennebula.context",
-                      :value=> Base64.encode64(context_text) }
-                ]
-            }
-
-            spec = RbVmomi::VIM.VirtualMachineConfigSpec(context_spec)
-            vm.ReconfigVM_Task(:spec => spec).wait_for_completion
-        end
-    end
-
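Context data travels to the guest as a single Base64 blob under the guestinfo.opennebula.context key, where the contextualization packages can read it through VMware Tools. A sketch of the encoding step (the KEY=VALUE payload is invented for illustration):

    require 'base64'

    context_text = "HOSTNAME='web1'\nNETWORK='YES'\n"   # illustrative payload

    extra_config = [{
      :key   => "guestinfo.opennebula.context",
      :value => Base64.encode64(context_text)
    }]
    # Inside the guest, the standard VMware Tools invocation
    #   vmtoolsd --cmd "info-get guestinfo.opennebula.context"
    # returns the blob for decoding.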
-    ########################################################################
-    #  Initialize the vm monitor information
-    ########################################################################
-    def monitor(host)
-        @summary = @vm.summary
-        @state   = state_to_c(@summary.runtime.powerState)
-
-        if @state != VM_STATE[:active]
-            @used_cpu    = 0
-            @used_memory = 0
-
-            @netrx = 0
-            @nettx = 0
-
-            return
-        end
-
-        @used_memory = @summary.quickStats.hostMemoryUsage * 1024
-        cpuMhz = @vm.runtime.host.summary.hardware.cpuMhz.to_f
-
-        @used_cpu =
-            ((@summary.quickStats.overallCpuUsage.to_f / cpuMhz) * 100).to_s
-        @used_cpu = sprintf('%.2f',@used_cpu).to_s
-
-        # Check for negative values
-        @used_memory = 0 if @used_memory.to_i < 0
-        @used_cpu    = 0 if @used_cpu.to_i < 0
-
-        @esx_host      = @vm.summary.runtime.host.name
-        @guest_ip      = @vm.guest.ipAddress
-        @guest_state   = @vm.guest.guestState
-        @vmware_tools  = @vm.guest.toolsRunningStatus
-        @vmtools_ver   = @vm.guest.toolsVersion
-        @vmtools_verst = @vm.guest.toolsVersionStatus
-
-        guest_ip_addresses = []
-
-        @vm.guest.net.each do |net|
-            net.ipConfig.ipAddress.each do |ip|
-                guest_ip_addresses << ip.ipAddress
-            end if net.ipConfig && net.ipConfig.ipAddress
-        end if @vm.guest.net
-
-        @guest_ip_addresses = guest_ip_addresses.join(',')
-
-        # Network metrics - Realtime retrieved by perfManager
-        pm = @client.vim.serviceInstance.content.perfManager
-
-        provider     = pm.provider_summary [@vm].first
-        refresh_rate = provider.refreshRate
-        max_samples  = 0
-
-        vmid = -1
-        extraconfig_vmid = @vm.config.extraConfig.select{|val|
-            val[:key]=="opennebula.vm.id"}
-        if extraconfig_vmid.size > 0 and extraconfig_vmid[0]
-            vmid = extraconfig_vmid[0][:value].to_i
-        end
-
-        if vmid < 0
-            @nettx = 0
-            @netrx = 0
-        else
-            one_vm = OpenNebula::VirtualMachine.new_with_id(vmid, OpenNebula::Client.new)
-            one_vm.info
-            stats = []
-            previous_nettx = 0
-            previous_netrx = 0
-            if one_vm["MONITORING/NETTX"]
-                previous_nettx = one_vm["MONITORING/NETTX"].to_i
-            end
-            if one_vm["MONITORING/NETRX"]
-                previous_netrx = one_vm["MONITORING/NETRX"].to_i
-            end
-
-            if(one_vm["MONITORING/LAST_MON"] && one_vm["MONITORING/LAST_MON"].to_i != 0 )
-                #Real time data stores max 1 hour. 1 minute has 3 samples
-                interval = (Time.now.to_i - one_vm["MONITORING/LAST_MON"].to_i)
-                interval = 3601 if interval < 0 #Safety check
-                #If last poll was more than hour ago get 3 minutes,
-                #else calculate how many samples since last poll
-                max_samples = interval > 3600 ? 9 : (interval / refresh_rate) + 1
-            else
-                # First poll, get at least latest 3 minutes = 9 samples
-                max_samples = 9
-            end
-
-            stats = pm.retrieve_stats(
-                [@vm],
-                ['net.transmitted','net.bytesRx','net.bytesTx','net.received'],
-                {interval:refresh_rate, max_samples: max_samples}
-            )
-
-            if stats.empty? || stats.first[1][:metrics].empty?
-                @nettx = 0 + previous_nettx
-                @netrx = 0 + previous_netrx
-            else
-                metrics = stats.first[1][:metrics]
-                nettx_kbpersec = 0
-                netrx_kbpersec = 0
-                (0..max_samples-1).each { |index|
-                    tx = [0]
-                    rx = [0]
-                    tx << metrics['net.transmitted'][index] if metrics['net.transmitted']
-                    tx << metrics['net.bytesTx'][index] if metrics['net.bytesTx']
-                    rx << metrics['net.received'][index] if metrics['net.received']
-                    rx << metrics['net.bytesRx'][index] if metrics['net.bytesRx']
-                    nettx_kbpersec += tx.max
-                    netrx_kbpersec += rx.max
-                }
-                @nettx = (nettx_kbpersec * 1000 * refresh_rate).to_i + previous_nettx
-                @netrx = (netrx_kbpersec * 1000 * refresh_rate).to_i + previous_netrx
-            end
-        end
-    end
-
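The sample-window arithmetic above deserves a worked case. vCenter keeps realtime performance data for one hour, and the comments above imply a 20-second refresh rate (three samples per minute), so the code fetches just enough samples to cover the gap since the last poll:

    refresh_rate = 20                   # seconds per realtime sample (typical)
    last_mon     = Time.now.to_i - 65   # last poll 65 s ago (illustrative)

    interval    = Time.now.to_i - last_mon            # => 65
    max_samples = interval > 3600 ? 9 : (interval / refresh_rate) + 1
    # => (65 / 20) + 1 = 4 samples, i.e. the last ~80 s of data.
    # A gap longer than an hour falls back to 9 samples (~3 minutes).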
-    ########################################################################
-    #  Generates an OpenNebula IM Driver valid string with the monitor info
-    ########################################################################
-    def info
-        return 'STATE=d' if @state == 'd'
-
-        str_info = ""
-
-        str_info << "GUEST_IP=" << @guest_ip.to_s << " " if @guest_ip
-        if @guest_ip_addresses && !@guest_ip_addresses.empty?
-            str_info << "GUEST_IP_ADDRESSES=\\\"" <<
-                        @guest_ip_addresses.to_s << "\\\" "
-        end
-        str_info << "LAST_MON=" << Time.now.to_i.to_s << " "
-        str_info << "#{POLL_ATTRIBUTE[:state]}=" << @state << " "
-        str_info << "#{POLL_ATTRIBUTE[:cpu]}=" << @used_cpu.to_s << " "
-        str_info << "#{POLL_ATTRIBUTE[:memory]}=" << @used_memory.to_s << " "
-        str_info << "#{POLL_ATTRIBUTE[:netrx]}=" << @netrx.to_s << " "
-        str_info << "#{POLL_ATTRIBUTE[:nettx]}=" << @nettx.to_s << " "
-        str_info << "ESX_HOST=\\\"" << @esx_host.to_s << "\\\" "
-        str_info << "GUEST_STATE=" << @guest_state.to_s << " "
-        str_info << "VMWARETOOLS_RUNNING_STATUS=" << @vmware_tools.to_s << " "
-        str_info << "VMWARETOOLS_VERSION=" << @vmtools_ver.to_s << " "
-        str_info << "VMWARETOOLS_VERSION_STATUS=" << @vmtools_verst.to_s << " "
-        str_info << "RESOURCE_POOL=\\\"" << @vm.resourcePool.name << "\\\" "
-    end
-
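Put together, info produces one space-separated poll line for the IM driver. An abbreviated, made-up example follows; the USEDCPU/USEDMEMORY key names are my reading of VirtualMachineDriver::POLL_ATTRIBUTE in this era of the code, not something the diff itself states:

    # Abbreviated example of a poll line (all values invented):
    poll = 'GUEST_IP=10.0.0.5 LAST_MON=1484000000 STATE=a ' \
           'USEDCPU=12.50 USEDMEMORY=2097152 NETRX=1048576 NETTX=524288 ' \
           'ESX_HOST=\"esx1.example.com\" GUEST_STATE=running'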
-    ########################################################################
-    #  Generates an OpenNebula Template for this VCenterVm
-    ########################################################################
-    def to_one(host)
-        cluster_name = host.cluster_name
-
-        str = "NAME = \"#{@vm.name} - #{cluster_name}\"\n"\
-              "CPU = \"#{@vm.config.hardware.numCPU}\"\n"\
-              "vCPU = \"#{@vm.config.hardware.numCPU}\"\n"\
-              "MEMORY = \"#{@vm.config.hardware.memoryMB}\"\n"\
-              "HYPERVISOR = \"vcenter\"\n"\
-              "PUBLIC_CLOUD = [\n"\
-              " TYPE =\"vcenter\",\n"\
-              " VM_TEMPLATE =\"#{@vm.config.uuid}\",\n"\
-              " VCENTER_REF =\"#{@vm.ref}\",\n"\
-              " VCENTER_NAME=\"#{@vm.name}\",\n"\
-              " HOST =\"#{cluster_name}\"\n"\
-              "]\n"\
-              "GRAPHICS = [\n"\
-              " TYPE =\"vnc\",\n"\
-              " LISTEN =\"0.0.0.0\"\n"\
-              "]\n"\
-              "SCHED_REQUIREMENTS=\"NAME=\\\"#{cluster_name}\\\"\"\n"\
-              "CONTEXT = ["\
-              " NETWORK = \"YES\","\
-              " SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\" ]"
-
-        if @vm.config.annotation.nil? || @vm.config.annotation.empty?
-            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula"\
-                   " from Cluster #{@vm.runtime.host.parent.name}\"\n"
-        else
-            notes = @vm.config.annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
-            str << "DESCRIPTION = \"#{notes}\"\n"
-        end
-
-        case @vm.guest.guestFullName
-        when /CentOS/i
-            str << "LOGO=images/logos/centos.png"
-        when /Debian/i
-            str << "LOGO=images/logos/debian.png"
-        when /Red Hat/i
-            str << "LOGO=images/logos/redhat.png"
-        when /Ubuntu/i
-            str << "LOGO=images/logos/ubuntu.png"
-        when /Windows XP/i
-            str << "LOGO=images/logos/windowsxp.png"
-        when /Windows/i
-            str << "LOGO=images/logos/windows8.png"
-        when /Linux/i
-            str << "LOGO=images/logos/linux.png"
-        end
-        return str
-    end
-
-    ########################################################################
-    #  Generates a Datastore user input
-    ########################################################################
-    def to_one_ds(host, default_ds)
-        # Datastores User Input
-        str = ""
-
-        if host.ds_list != ""
-            str = "M|list|Which datastore you want this VM to run on?|"\
-                  << "#{host.ds_list}|#{default_ds}"
-        end
-
-        return str
-    end
-
-    ########################################################################
-    #  Generates a Resource Pool user input
-    ########################################################################
-    def to_one_rp(host)
-        # Resource Pool User Input
-        str = ""
-
-        if host.rp_list != ""
-            str = "M|list|Which resource pool you want this VM to run"\
-                  " in?|#{host.rp_list}|#{host.rp_list.split(",")[0]}"
-        end
-
-        return str
-    end
-
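The "M|list|..." strings built by both methods follow OpenNebula's user-input syntax: mandatory flag, input type, description, options, default. Decomposed with hypothetical datastore names (options appear comma-separated, matching the rp_list handling above):

    # M|list|Which datastore you want this VM to run on?|ds1,ds2|ds1
    #  ^  ^                    ^                            ^     ^
    #  |  list-type input      description                  |     default
    #  mandatory                         comma-separated options
    input = ["M", "list",
             "Which datastore you want this VM to run on?",
             "ds1,ds2", "ds1"].join("|")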
-    ########################################################################
-    # Generates an OpenNebula VirtualMachine for this VCenterVm
-    ########################################################################
-    def vm_to_one(host)
-        cluster_name = host.cluster_name
-
-        state = case state_to_c(@summary.runtime.powerState)
-                when 'a'
-                    "RUNNING"
-                when 'd'
-                    "POWEROFF"
-                end
-
-        str = "NAME = \"#{@vm.name} - #{cluster_name}\"\n"\
-              "CPU = \"#{@vm.config.hardware.numCPU}\"\n"\
-              "vCPU = \"#{@vm.config.hardware.numCPU}\"\n"\
-              "MEMORY = \"#{@vm.config.hardware.memoryMB}\"\n"\
-              "HYPERVISOR = \"vcenter\"\n"\
-              "PUBLIC_CLOUD = [\n"\
-              " TYPE =\"vcenter\",\n"\
-              " VM_TEMPLATE =\"#{@vm.config.uuid}\",\n"\
-              " HOST =\"#{cluster_name}\"\n"\
-              "]\n"\
-              "IMPORT_VM_ID = \"#{@vm.config.uuid}\"\n"\
-              "IMPORT_STATE = \"#{state}\"\n"\
-              "SCHED_REQUIREMENTS=\"NAME=\\\"#{cluster_name}\\\"\"\n"
-
-        vp = @vm.config.extraConfig.select{|v|
-            v[:key].downcase=="remotedisplay.vnc.port"}
-        keymap = @vm.config.extraConfig.select{|v|
-            v[:key].downcase=="remotedisplay.vnc.keymap"}
-
-        if vp.size > 0
-            str << "GRAPHICS = [\n"\
-                   " TYPE =\"vnc\",\n"\
-                   " LISTEN =\"0.0.0.0\",\n"\
-                   " PORT =\"#{vp[0][:value]}\"\n"
-            str << " ,KEYMAP =\"#{keymap[0][:value]}\"\n" if keymap[0]
-            str << "]\n"
-        end
-
-        if @vm.config.annotation.nil? || @vm.config.annotation.empty?
-            str << "DESCRIPTION = \"vCenter Virtual Machine imported by"\
-                   " OpenNebula from Cluster #{cluster_name}\"\n"
-        else
-            notes = @vm.config.annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
-            str << "DESCRIPTION = \"#{notes}\"\n"
-        end
-
-        case @vm.guest.guestFullName
-        when /CentOS/i
-            str << "LOGO=images/logos/centos.png"
-        when /Debian/i
-            str << "LOGO=images/logos/debian.png"
-        when /Red Hat/i
-            str << "LOGO=images/logos/redhat.png"
-        when /Ubuntu/i
-            str << "LOGO=images/logos/ubuntu.png"
-        when /Windows XP/i
-            str << "LOGO=images/logos/windowsxp.png"
-        when /Windows/i
-            str << "LOGO=images/logos/windows8.png"
-        when /Linux/i
-            str << "LOGO=images/logos/linux.png"
-        end
-
-        return str
-    end
-
-    private
-
-    ########################################################################
-    # Converts the VI string state to OpenNebula state convention
-    # Guest states are:
-    # - poweredOff   The virtual machine is currently powered off.
-    # - poweredOn    The virtual machine is currently powered on.
-    # - suspended    The virtual machine is currently suspended.
-    ########################################################################
-    def state_to_c(state)
-        case state
-        when 'poweredOn'
-            VM_STATE[:active]
-        when 'suspended'
-            VM_STATE[:paused]
-        when 'poweredOff'
-            VM_STATE[:deleted]
-        else
-            VM_STATE[:unknown]
-        end
-    end
-
-    ########################################################################
-    #  Checks if a RbVmomi::VIM::VirtualDevice is a network interface
-    ########################################################################
-    def self.is_nic?(device)
-        !device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil?
-    end
-
-    ########################################################################
-    #  Checks if a RbVmomi::VIM::VirtualDevice is a disk
-    ########################################################################
-    def self.is_disk?(device)
-        is_disk  = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
-        is_cdrom = !(device.class.ancestors.index(RbVmomi::VIM::VirtualCdrom)).nil?
-        is_disk or is_cdrom
-    end
-
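The ancestry lookups above are an older idiom for a subtype test; is_a? expresses the same check and reads clearer, as in this equivalent sketch:

    # Equivalent, more idiomatic forms of the removed predicates.
    def self.is_nic?(device)
      device.is_a?(RbVmomi::VIM::VirtualEthernetCard)
    end

    def self.is_disk?(device)
      device.is_a?(RbVmomi::VIM::VirtualDisk) ||
        device.is_a?(RbVmomi::VIM::VirtualCdrom)
    end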
-    ########################################################################
-    #  Returns the spec to reconfig a VM and add a NIC
-    ########################################################################
-    def self.calculate_addnic_spec(vm, mac, bridge, model, limit=nil, rsrv=nil)
-        model   = model.nil? ? nil : model.downcase
-        network = vm.runtime.host.network.select{|n| n.name==bridge}
-        backing = nil
-
-        if network.empty?
-            raise "Network #{bridge} not found in host #{vm.runtime.host.name}"
-        else
-            network = network[0]
-        end
-
-        card_num = 1 # start in one, we want the next available id
-
-        vm.config.hardware.device.each{ |dv|
-            card_num = card_num + 1 if is_nic?(dv)
-        }
-
-        nic_card = case model
-                   when "virtuale1000", "e1000"
-                       RbVmomi::VIM::VirtualE1000
-                   when "virtuale1000e", "e1000e"
-                       RbVmomi::VIM::VirtualE1000e
-                   when "virtualpcnet32", "pcnet32"
-                       RbVmomi::VIM::VirtualPCNet32
-                   when "virtualsriovethernetcard", "sriovethernetcard"
-                       RbVmomi::VIM::VirtualSriovEthernetCard
-                   when "virtualvmxnetm", "vmxnetm"
-                       RbVmomi::VIM::VirtualVmxnetm
-                   when "virtualvmxnet2", "vmnet2"
-                       RbVmomi::VIM::VirtualVmxnet2
-                   when "virtualvmxnet3", "vmxnet3"
-                       RbVmomi::VIM::VirtualVmxnet3
-                   else # If none matches, use VirtualE1000
-                       RbVmomi::VIM::VirtualE1000
-                   end
-
-        if network.class == RbVmomi::VIM::Network
-            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
-                :deviceName => bridge,
-                :network    => network)
-        else
-            port = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
-                :switchUuid   =>
-                    network.config.distributedVirtualSwitch.uuid,
-                :portgroupKey => network.key)
-            backing =
-                RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
-                    :port => port)
-        end
-
-        card_spec = {
-            :key => 0,
-            :deviceInfo => {
-                :label   => "net" + card_num.to_s,
-                :summary => bridge
-            },
-            :backing     => backing,
-            :addressType => mac ? 'manual' : 'generated',
-            :macAddress  => mac
-        }
-
-        if (limit or rsrv) and (limit > 0)
-            ra_spec = Hash.new
-            rsrv = limit if rsrv > limit
-            ra_spec[:limit]       = limit if limit
-            ra_spec[:reservation] = rsrv if rsrv
-            ra_spec[:share] = RbVmomi::VIM.SharesInfo({
-                :level  => RbVmomi::VIM.SharesLevel("normal"),
-                :shares => 0
-            })
-            card_spec[:resourceAllocation] =
-                RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
-        end
-
-        return {
-            :operation => :add,
-            :device    => nic_card.new(card_spec)
-        }
-    end
-
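For a standard (non-distributed) port group, the deviceChange entry returned above reduces to roughly this shape; MAC, label, and network name are placeholders, and `network` stands for the RbVmomi::VIM::Network object already looked up:

    backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
      :deviceName => "VM Network",            # placeholder port group name
      :network    => network)                 # RbVmomi::VIM::Network object

    nic_spec = {
      :operation => :add,
      :device    => RbVmomi::VIM::VirtualVmxnet3.new(
        :key         => 0,
        :deviceInfo  => { :label => "net2", :summary => "VM Network" },
        :backing     => backing,
        :addressType => 'manual',
        :macAddress  => "02:00:c0:a8:00:0a")  # placeholder MAC
    }
    # vm.ReconfigVM_Task(:spec => RbVmomi::VIM.VirtualMachineConfigSpec(
    #   :deviceChange => [nic_spec])).wait_for_completion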
-    ########################################################################
-    #  Clones a vCenter VM Template and leaves it powered on
-    ########################################################################
-    def self.clone_vm(xml_text, hostname, datastore, ops = {})
-
-        host_id = VCenterDriver::VIClient.translate_hostname(hostname)
-
-        # Retrieve hostname
-
-        host = OpenNebula::Host.new_with_id(host_id, OpenNebula::Client.new())
-        host.info # Not failing if host retrieval fails
-
-        # Get VM prefix name
-
-        if host["/HOST/TEMPLATE/VM_PREFIX"] and !host["/HOST/TEMPLATE/VM_PREFIX"].empty?
-            vmname_prefix = host["/HOST/TEMPLATE/VM_PREFIX"]
-        else # fall back to default value
-            vmname_prefix = "one-$i-"
-        end
-
-        xml = REXML::Document.new xml_text
-        pcs = xml.root.get_elements("/VM/USER_TEMPLATE/PUBLIC_CLOUD")
-
-        raise "Cannot find VCenter element in VM template." if pcs.nil?
-
-        template = pcs.select { |t|
-            type = t.elements["TYPE"]
-            !type.nil? && type.text.downcase == "vcenter"
-        }
-
-        # If there are multiple vCenter templates, find the right one
-
-        if template.is_a? Array
-            all_vcenter_templates = template.clone
-            # If there is more than one coincidence, pick the first one
-            template = template.select {|t|
-                cluster_name = t.elements["HOST"]
-                !cluster_name.nil? && cluster_name.text == hostname
-            }[0]
-            # The template may not reference any specific CLUSTER
-            # (referenced to as HOST in the OpenNebula template)
-            # Therefore, here take the first one that does not
-            # specify a CLUSTER to see if we are lucky
-            if template.nil?
-                template = all_vcenter_templates.select {|t|
-                    t.elements["HOST"].nil?
-                }[0]
-            end
-        end
-
-        raise "Cannot find vCenter element in VM template." if template.nil?
-
-        uuid = template.elements["VM_TEMPLATE"]
-
-        raise "Cannot find VM_TEMPLATE in vCenter element." if uuid.nil?
-
-        uuid = uuid.text
-        vmid = xml.root.elements["/VM/ID"].text
-        vmname_prefix.gsub!("$i", vmid)
-        vcenter_name = "#{vmname_prefix}#{xml.root.elements["/VM/NAME"].text}"
-        hid = xml.root.elements["/VM/HISTORY_RECORDS/HISTORY/HID"]
-
-        raise "Cannot find host id in deployment file history." if hid.nil?
-
-        connection  = VIClient.new(hid)
-        vc_template = connection.find_vm_fast(uuid, ops[:ref], ops[:name])
-
-        # Find out requested and available resource pool
-
-        req_rp = nil
-        if !xml.root.elements["/VM/USER_TEMPLATE/RESOURCE_POOL"].nil?
-            req_rp = xml.root.elements["/VM/USER_TEMPLATE/RESOURCE_POOL"].text
-        end
-
-        if connection.rp_confined?
-            rp = connection.resource_pool.first
-            if req_rp && rp.name != req_rp
-                raise "Available resource pool in host [#{rp.name}]"\
-                      " does not match requested resource pool"\
-                      " [#{req_rp}]"
-            end
-        else
-            if req_rp # if there is requested resource pool, retrieve it
-                rp = connection.find_resource_pool(req_rp)
-                raise "Cannot find resource pool "\
-                      "#{template.elements["RESOURCE_POOL"].text}" if !rp
-            else # otherwise, get the default resource pool
-                rp = connection.default_resource_pool
-            end
-        end
-
-        # Find out requested and available datastore
-
-        if !xml.root.elements["/VM/USER_TEMPLATE/VCENTER_DATASTORE"].nil?
-            datastore = xml.root.elements["/VM/USER_TEMPLATE/VCENTER_DATASTORE"].text
-        end
-
-        if datastore
-            datastores = VIClient.get_entities(connection.dc.datastoreFolder,
-                                               'Datastore')
-
-            storage_pods = VIClient.get_entities(connection.dc.datastoreFolder,
-                                                 'StoragePod')
-
-            storpod = storage_pods.select{|sp| sp.name == datastore}
-
-            storage_pods.each { |sp|
-                storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
-                if not storage_pod_datastores.empty?
-                    datastores.concat(storage_pod_datastores)
-                end
-            }
-
-            ds = datastores.select{|ds| ds.name == datastore}[0]
-
-            raise "Cannot find datastore #{datastore}" if !ds && !storpod
-
-        end
-
-        relocate_spec_params = {
-            :pool => rp
-        }
-
-        relocate_spec_params[:datastore] = ds if datastore
-
-        relocate_spec_params[:diskMoveType] = :moveChildMostDiskBacking if ds
-
-        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
-            relocate_spec_params)
-
-        # This running flag will prevent spurious poweroff states in the VM
-
-        running_flag = [{:key=>"opennebula.vm.running",:value=>"no"}]
-
-        running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
-            {:extraConfig =>running_flag})
-
-        clone_parameters = {
-            :location => relocate_spec,
-            :powerOn  => false,
-            :template => false,
-            :config   => running_flag_spec
-        }
-
-        customization = template.elements["CUSTOMIZATION_SPEC"]
-
-        vim = connection.vim
-
-        if !customization.nil?
-            begin
-                custom_spec = vim.serviceContent.customizationSpecManager.
-                    GetCustomizationSpec(:name => customization.text)
-
-                if custom_spec && spec=custom_spec.spec
-                    clone_parameters[:customization] = spec
-                else
-                    raise "Error getting customization spec"
-                end
-
-            rescue
-                raise "Customization spec '#{customization.text}' not found"
-            end
-        end
-
-        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(clone_parameters)
-
-        if storpod && !storpod.empty? && storpod[0].is_a?(RbVmomi::VIM::StoragePod)
-
-            storage_manager = vim.serviceContent.storageResourceManager
-
-            pod_spec = RbVmomi::VIM.StorageDrsPodSelectionSpec(storagePod: storpod[0])
-
-            storage_spec = RbVmomi::VIM.StoragePlacementSpec(
-                type: 'clone',
-                cloneName: vcenter_name,
-                folder: vc_template.parent,
-                podSelectionSpec: pod_spec,
-                vm: vc_template,
-                cloneSpec: clone_spec
-            )
-
-            result = storage_manager.RecommendDatastores(storageSpec: storage_spec)
-
-            recommendation = result.recommendations[0]
-
-            key = recommendation.key ||= ''
-
-            if key == ''
-                raise "Missing Datastore recommendation for StoragePod (Storage DRS)"
-            end
-
-            begin
-                apply_sr = storage_manager.ApplyStorageDrsRecommendation_Task(key: [key]).wait_for_completion
-                vm = apply_sr.vm
-            rescue Exception => e
-                raise "Cannot clone VM Template to StoragePod: #{e.message}"
-            end
-        else
-
-            begin
-                vm = vc_template.CloneVM_Task(
-                    :folder => vc_template.parent,
-                    :name   => vcenter_name,
-                    :spec   => clone_spec).wait_for_completion
-            rescue Exception => e
-
-                if !e.message.start_with?('DuplicateName')
-                    raise "Cannot clone VM Template: #{e.message}"
-                end
-
-                vm = connection.find_vm(vcenter_name)
-
-                raise "Cannot clone VM Template" if vm.nil?
-
-                vm.Destroy_Task.wait_for_completion
-                vm = vc_template.CloneVM_Task(
-                    :folder => vc_template.parent,
-                    :name   => vcenter_name,
-                    :spec   => clone_spec).wait_for_completion
-            end
-
-        end
-
-        reconfigure_vm(vm, xml, true, hostname)
-
-        # Power on the VM
-        vm.PowerOnVM_Task.wait_for_completion
-
-        # Set to yes the running flag
-
-        config_array = [{:key=>"opennebula.vm.running",:value=>"yes"}]
-        spec = RbVmomi::VIM.VirtualMachineConfigSpec(
-            {:extraConfig =>config_array})
-
-        vm.ReconfigVM_Task(:spec => spec).wait_for_completion
-
-        return vm.config.uuid
-    end
-
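A small worked case for the naming logic at the top of clone_vm: with the default prefix and an OpenNebula VM of ID 42 named web (both values invented), the vCenter clone is called:

    vmname_prefix = "one-$i-"        # default when the host sets no VM_PREFIX
    vmid          = "42"             # /VM/ID (invented)

    vmname_prefix.gsub!("$i", vmid)  #=> "one-42-"
    vcenter_name = "#{vmname_prefix}web"
    #=> "one-42-web"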
-    ########################################################################
-    # Reconfigures a VM with new deployment description
-    ########################################################################
-    def self.reconfigure_vm(vm, xml, newvm, hostname)
-        vm_uuid = vm.config.uuid
-        vmid    = xml.root.elements["/VM/ID"].text
-        context = xml.root.elements["/VM/TEMPLATE/CONTEXT"]
-
-        token = vm.config.extraConfig.select do |val|
-            val[:key] == "opennebula.token"
-        end
-
-        if token && !token.empty?
-            token = token.first[:value]
-        else
-            token = nil
-        end
-
-        # Add VMID to VM's extraConfig
-
-        config_array = [{:key=>"opennebula.vm.id",:value=>vmid}]
-
-        # VNC Section
-
-        vnc_port   = xml.root.elements["/VM/TEMPLATE/GRAPHICS/PORT"]
-        vnc_listen = xml.root.elements["/VM/TEMPLATE/GRAPHICS/LISTEN"]
-        vnc_keymap = xml.root.elements["/VM/TEMPLATE/GRAPHICS/KEYMAP"]
-
-        if !vnc_listen
-            vnc_listen = "0.0.0.0"
-        else
-            vnc_listen = vnc_listen.text
-        end
-
-        context_vnc_spec = {}
-
-        if vnc_port
-            config_array +=
-                [{:key=>"remotedisplay.vnc.enabled",:value=>"TRUE"},
-                 {:key=>"remotedisplay.vnc.port",   :value=>vnc_port.text},
-                 {:key=>"remotedisplay.vnc.ip",     :value=>vnc_listen}]
-        end
-
-        config_array += [{:key=>"remotedisplay.vnc.keymap",
-                          :value=>vnc_keymap.text}] if vnc_keymap
-
-        # Context section
-
-        if context
-            context_text = create_context(context)
-
-            # OneGate
-            onegate_token_flag = xml.root.elements["/VM/TEMPLATE/CONTEXT/TOKEN"]
-
-            if onegate_token_flag and onegate_token_flag.text == "YES"
-                if token
-                    onegate_token_64 = token
-                else
-                    # Create the OneGate token string
-                    vmid_str  = xml.root.elements["/VM/ID"].text
-                    stime_str = xml.root.elements["/VM/STIME"].text
-                    str_to_encrypt = "#{vmid_str}:#{stime_str}"
-
-                    user_id = xml.root.elements['//CREATED_BY'].text
-
-                    if user_id.nil?
-                        STDERR.puts "VMID:#{vmid} CREATED_BY not present" \
-                                    " in the VM TEMPLATE"
-                        return nil
-                    end
-
-                    user = OpenNebula::User.new_with_id(user_id,
-                                                        OpenNebula::Client.new)
-                    rc = user.info
-
-                    if OpenNebula.is_error?(rc)
-                        STDERR.puts "VMID:#{vmid} user.info" \
-                                    " error: #{rc.message}"
-                        return nil
-                    end
-
-                    token_password = user['TEMPLATE/TOKEN_PASSWORD']
-
-                    if token_password.nil?
-                        STDERR.puts "VMID:#{vmid} TOKEN_PASSWORD not present"\
-                                    " in the USER:#{user_id} TEMPLATE"
-                        return nil
-                    end
-
-                    cipher = OpenSSL::Cipher::Cipher.new("aes-256-cbc")
-                    cipher.encrypt
-                    cipher.key = token_password
-                    onegate_token = cipher.update(str_to_encrypt)
-                    onegate_token << cipher.final
-
-                    onegate_token_64 = Base64.encode64(onegate_token).chop
-                    config_array << {
-                        :key   => 'opennebula.token',
-                        :value => onegate_token_64
-                    }
-                end
-
-                context_text += "ONEGATE_TOKEN='#{onegate_token_64}'\n"
-            end
-
-            context_text = Base64.encode64(context_text.chop)
-
-            config_array +=
-                [{:key   => "guestinfo.opennebula.context",
-                  :value => context_text}]
-        end
-
-        device_change = []
-
-        # NIC section, build the reconfig hash
-
-        nics = xml.root.get_elements("/VM/TEMPLATE/NIC")
-
-        # If the VM is not new, avoid re-adding NICs
-        if !newvm
-
-            nic_array = []
-
-            # B4897 - Get mac of NICs that were hot-plugged from vCenter extraConfig
-            hotplugged_nics = []
-            extraconfig_nics = vm.config.extraConfig.select do |val|
-                val[:key] == "opennebula.hotplugged_nics"
-            end
-
-            if extraconfig_nics && !extraconfig_nics.empty?
-                hotplugged_nics = extraconfig_nics[0][:value].to_s.split(";")
-            end
-
-            # Get MACs from NICs inside VM template
-            one_mac_addresses = Array.new
-            nics.each{|nic|
-                mac = nic.elements["MAC"].text
-                one_mac_addresses << mac
-                # B4897 - Add NICs that were attached in POWEROFF
-                if !hotplugged_nics.include?(mac)
-                    hotplugged_nics << mac.to_s
-                end
-            }
-
-            vm.config.hardware.device.each{ |dv|
-                if is_nic?(dv)
-                    nics.each{|nic|
-                        if nic.elements["MAC"].text == dv.macAddress
-                            nics.delete(nic)
-                        end
-                    }
-                    # B4897 - Remove detached NICs from vCenter that were unplugged in POWEROFF
-                    if !one_mac_addresses.include?(dv.macAddress) && hotplugged_nics.include?(dv.macAddress)
-                        nic_array << { :operation => :remove, :device => dv}
-                        hotplugged_nics.delete(dv.macAddress)
-                    end
-                end
-            }
-
-            # B4897 - Save what NICs have been attached by OpenNebula in vCenter VM extraconfig
-            if !hotplugged_nics.empty?
-                config_array << {
-                    :key   => 'opennebula.hotplugged_nics',
-                    :value => hotplugged_nics.join(";")
-                }
-            else
-                config_array << {
-                    :key   => 'opennebula.hotplugged_nics',
-                    :value => ""
-                }
-            end
-
-            device_change += nic_array
-        else
-            # B4897 - Add NICs that have been added to the VM template
-            # to the hotplugged_nics extraconfig so we can track what must be removed
-
-            # Get MACs from NICs inside VM template to track NICs added by OpenNebula
-            one_mac_addresses = []
-            nics.each{|nic|
-                one_mac_addresses << nic.elements["MAC"].text
-            }
-
-            if !one_mac_addresses.empty?
-                config_array << {
-                    :key   => 'opennebula.hotplugged_nics',
-                    :value => one_mac_addresses.join(";")
-                }
-            end
-
-        end
-
-        if !nics.nil?
-            nic_array = []
-            nics.each{|nic|
-                mac       = nic.elements["MAC"].text
-                bridge    = nic.elements["BRIDGE"].text
-                model     = nic.elements["MODEL"] ? nic.elements["MODEL"].text : nil
-                limit_in  = nic.elements["INBOUND_PEAK_BW"] ? nic.elements["INBOUND_PEAK_BW"].text : ""
-                limit_out = nic.elements["OUTBOUND_PEAK_BW"] ? nic.elements["OUTBOUND_PEAK_BW"].text : ""
-                limit     = nil
-                if !limit_in.empty? or !limit_out.empty?
-                    limit=([limit_in.to_i, limit_out.to_i].min / 1024) * 8
-                end
-                rsrv_in  = nic.elements["INBOUND_AVG_BW"] ? nic.elements["INBOUND_AVG_BW"].text : ""
-                rsrv_out = nic.elements["OUTBOUND_AVG_BW"] ? nic.elements["OUTBOUND_AVG_BW"].text : ""
-                rsrv     = nil
-                if !rsrv_in.empty? or !rsrv_out.empty?
-                    rsrv=([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
-                end
-                nic_array << calculate_addnic_spec(vm,
-                                                   mac,
-                                                   bridge,
-                                                   model,
-                                                   limit,
-                                                   rsrv)
-            }
-
-            device_change += nic_array
-        end
-
-        # DISK section, build the reconfig hash
-
-        disks     = xml.root.get_elements("/VM/TEMPLATE/DISK")
-        disk_spec = {}
-
-        # If the VM is not new, avoid re-adding DISKs
-        if !newvm
-            vm.config.hardware.device.select { |d|
-                if is_disk?(d)
-                    disks.each{|disk|
-                        if d.backing.respond_to?(:fileName) &&
-                           disk.elements["SOURCE"].text == d.backing.fileName
-                            disks.delete(disk)
-                        end
-                    }
-                end
-            }
-        end
-
-        if !disks.nil?
-            disk_array = []
-            hid        = VIClient::translate_hostname(hostname)
-            connection = VIClient.new(hid)
-            disks.each{|disk|
-                ds_name  = disk.elements["DATASTORE"].text
-                img_name = disk.elements["SOURCE"].text
-                type_str = disk.elements["TYPE"].text
-
-                disk_array += attach_disk("", "", ds_name, img_name, type_str, 0, vm, connection)[:deviceChange]
-            }
-
-            device_change += disk_array
-        end
-
-        # Capacity section
-
-        cpu    = xml.root.elements["/VM/TEMPLATE/VCPU"] ? xml.root.elements["/VM/TEMPLATE/VCPU"].text : 1
-        memory = xml.root.elements["/VM/TEMPLATE/MEMORY"].text
-        capacity_spec = {:numCPUs  => cpu.to_i,
-                         :memoryMB => memory }
-
-        # Perform the VM reconfiguration
-        if config_array != []
-            context_vnc_spec = {:extraConfig =>config_array}
-        end
-
-        spec_hash = context_vnc_spec.merge(capacity_spec)
-        if device_change.length > 0
-            spec_hash.merge!({ :deviceChange => device_change })
-        end
-
-        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
-        vm.ReconfigVM_Task(:spec => spec).wait_for_completion
-    end
-
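The ([in, out].min / 1024) * 8 expressions above convert OpenNebula's bandwidth attributes, given in KBytes/s, to the Mbits/s figure handed to VirtualEthernetCardResourceAllocation. That unit reading is my interpretation; the removed code does not state its units. Worked through with invented values:

    limit_in  = 25_600   # INBOUND_PEAK_BW in KB/s (illustrative)
    limit_out = 51_200   # OUTBOUND_PEAK_BW in KB/s (illustrative)

    limit = ([limit_in, limit_out].min / 1024) * 8
    #=> (25_600 / 1024) * 8 = 25 * 8 = 200 (Mbit/s)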
-    ############################################################################
-    # Attach disk to a VM
-    # @params hostname[String] vcenter cluster name in opennebula as host
-    # @params deploy_id[String] deploy id of the vm
-    # @params ds_name[String] name of the datastore
-    # @params img_name[String] path of the image
-    # @params size_kb[String] size in kb of the disk
-    # @params vm[RbVmomi::VIM::VirtualMachine] VM if called from instance
-    # @params connection[ViClient::connectoon] connection if called from instance
-    ############################################################################
-    def self.attach_disk(hostname, deploy_id, ds_name, img_name, type, size_kb, vm=nil, connection=nil)
-        only_return = true
-        if !vm
-            hid         = VIClient::translate_hostname(hostname)
-            connection  = VIClient.new(hid)
-
-            vm          = connection.find_vm_template(deploy_id)
-            only_return = false
-        end
-
-        # Find datastore within datacenter
-        datastores = VIClient.get_entities(connection.dc.datastoreFolder,
-                                           'Datastore')
-
-        storage_pods = VIClient.get_entities(connection.dc.datastoreFolder,
-                                             'StoragePod')
-        storage_pods.each { |sp|
-            storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
-            if not storage_pod_datastores.empty?
-                datastores.concat(storage_pod_datastores)
-            end
-        }
-
-        ds = datastores.select{|ds| ds.name == ds_name}[0]
-
-        controller, new_number = find_free_controller(vm)
-
-        if type == "CDROM"
-            vmdk_backing = RbVmomi::VIM::VirtualCdromIsoBackingInfo(
-                :datastore => ds,
-                :fileName  => "[#{ds_name}] #{img_name}"
-            )
-
-            cd = vm.config.hardware.device.select {|hw|
-                     hw.class == RbVmomi::VIM::VirtualCdrom}.first
-
-            # If no CDROM drive present, we need to add it
-            if !cd
-                controller, new_unit_number = find_free_controller(vm)
-                cdrom_drive_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
-                    :deviceChange => [{
-                        :operation => :add,
-                        :device    => RbVmomi::VIM::VirtualCdrom(
-                            :backing       => vmdk_backing,
-                            :key           => -1,
-                            :controllerKey => 15000,
-                            :unitNumber    => 0,
-                            :connectable   => RbVmomi::VIM::VirtualDeviceConnectInfo(
-                                :startConnected    => true,
-                                :connected         => true,
-                                :allowGuestControl => true
-                            )
-                        )}]
-                )
-
-                vm.ReconfigVM_Task(:spec =>
-                                   cdrom_drive_spec).wait_for_completion
-
-                return
-            else
-                device = RbVmomi::VIM::VirtualCdrom(
-                    backing: vmdk_backing,
-                    key: cd.key,
-                    controllerKey: cd.controllerKey,
-                    connectable: RbVmomi::VIM::VirtualDeviceConnectInfo(
-                        startConnected: true,
-                        connected: true,
-                        allowGuestControl: true
-                    )
-                )
-                device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
-                    :device    => device,
-                    :operation => RbVmomi::VIM::VirtualDeviceConfigSpecOperation('edit')
-                )
-            end
-        else
-            vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
-                :datastore => ds,
-                :diskMode  => 'persistent',
-                :fileName  => "[#{ds_name}] #{img_name}"
-            )
-
-            device = RbVmomi::VIM::VirtualDisk(
-                :backing       => vmdk_backing,
-                :capacityInKB  => size_kb,
-                :controllerKey => controller.key,
-                :key           => -1,
-                :unitNumber    => new_number
-            )
-            device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
-                :device    => device,
-                :operation => RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
-            )
-        end
-
-        vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
-            :deviceChange => [device_config_spec]
-        )
-
-        return vm_config_spec if only_return
-
-        vm.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion
-    end
-
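The `only_return` flag is what lets the reconfigure code above batch disk attachments: when `vm` and `connection` are passed in, attach_disk returns the spec instead of applying it, and the caller merges the `:deviceChange` entries itself. A hypothetical batched call (the `VCenterVm` receiver and the datastore/image names are assumptions; `vm` and `connection` are assumed resolved):

    changes = []
    [%w(ds1 one/img-0.vmdk), %w(ds1 one/img-1.vmdk)].each do |ds, img|
      changes += VCenterVm.attach_disk('', '', ds, img, 'OS', 0, vm, connection)[:deviceChange]
    end

    spec = RbVmomi::VIM.VirtualMachineConfigSpec(:deviceChange => changes)
    vm.ReconfigVM_Task(:spec => spec).wait_for_completion   # one reconfigure round trip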
-    def self.find_free_controller(vm)
-        free_scsi_controllers = Array.new
-        available_controller  = nil
-        scsi_schema           = Hash.new
-
-        used_numbers      = Array.new
-        available_numbers = Array.new
-
-        vm.config.hardware.device.each{ |dev|
-            if dev.is_a? RbVmomi::VIM::VirtualSCSIController
-                if scsi_schema[dev.controllerKey].nil?
-                    scsi_schema[dev.key]         = Hash.new
-                    scsi_schema[dev.key][:lower] = Array.new
-                end
-                used_numbers << dev.scsiCtlrUnitNumber
-                scsi_schema[dev.key][:device] = dev
-            end
-
-            next if dev.class != RbVmomi::VIM::VirtualDisk
-            used_numbers << dev.unitNumber
-        }
-
-        15.times{ |scsi_id|
-            available_numbers << scsi_id if used_numbers.grep(scsi_id).length <= 0
-        }
-
-        scsi_schema.keys.each{|controller|
-            if scsi_schema[controller][:lower].length < 15
-                free_scsi_controllers << scsi_schema[controller][:device].deviceInfo.label
-            end
-        }
-
-        if free_scsi_controllers.length > 0
-            available_controller_label = free_scsi_controllers[0]
-        else
-            add_new_scsi(vm, scsi_schema)
-            return find_free_controller(vm)
-        end
-
-        controller = nil
-
-        vm.config.hardware.device.each { |device|
-            (controller = device ; break) if device.deviceInfo.label == available_controller_label
-        }
-
-        new_unit_number = available_numbers.sort[0]
-
-        return controller, new_unit_number
-    end
-
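Two quirks in find_free_controller worth flagging: entries are stored under `dev.key` but the guard checks `scsi_schema[dev.controllerKey]`, and nothing in this excerpt ever appends to the `:lower` arrays, so every discovered controller passes the `< 15` test. The unit-number search itself is straightforward; a standalone, runnable illustration with made-up values:

    # SCSI unit numbers 0-14 are candidates; those taken by existing disks and
    # by the controller itself (conventionally unit 7) are excluded, and the
    # lowest free number wins.
    used_numbers      = [7, 0, 1]   # controller + two disks, say
    available_numbers = (0...15).reject { |n| used_numbers.include?(n) }
    new_unit_number   = available_numbers.min   # => 2, like available_numbers.sort[0]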
-    def self.add_new_scsi(vm, scsi_schema)
-        controller = nil
-
-        if scsi_schema.keys.length >= 4
-            raise "Cannot add a new controller, maximum is 4."
-        end
-
-        if scsi_schema.keys.length == 0
-            scsi_key    = 0
-            scsi_number = 0
-        else scsi_schema.keys.length < 4
-            scsi_key    = scsi_schema.keys.sort[-1] + 1
-            scsi_number = scsi_schema[scsi_schema.keys.sort[-1]][:device].busNumber + 1
-        end
-
-        controller_device = RbVmomi::VIM::VirtualLsiLogicController(
-            :key       => scsi_key,
-            :busNumber => scsi_number,
-            :sharedBus => :noSharing
-        )
-
-        device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
-            :device    => controller_device,
-            :operation => RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
-        )
-
-        vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
-            :deviceChange => [device_config_spec]
-        )
-
-        vm.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion
-
-        vm.config.hardware.device.each { |device|
-            if device.class == RbVmomi::VIM::VirtualLsiLogicController &&
-               device.key == scsi_key
-                controller = device.deviceInfo.label
-            end
-        }
-
-        return controller
-    end
-
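`else scsi_schema.keys.length < 4` above parses only because Ruby's `else` takes no condition: the comparison is evaluated and thrown away rather than acting as a guard (`elsif` was presumably intended). The apparent intent, spelled out as a sketch:

    if scsi_schema.keys.empty?
      scsi_key    = 0
      scsi_number = 0
    else                          # 1 to 3 controllers already present
      last_key    = scsi_schema.keys.sort[-1]
      scsi_key    = last_key + 1
      scsi_number = scsi_schema[last_key][:device].busNumber + 1
    end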
-    ############################################################################
-    # Detach a specific disk from a VM
-    # @params hostname[String] vcenter cluster name in opennebula as host
-    # @params deploy_id[String] deploy id of the vm
-    # @params ds_name[String] name of the datastore
-    # @params img_path[String] path of the image
-    ############################################################################
-    def self.detach_disk(hostname, deploy_id, ds_name, img_path)
-        hid         = VIClient::translate_hostname(hostname)
-        connection  = VIClient.new(hid)
-
-        vm = connection.find_vm_template(deploy_id)
-
-        ds_and_img_name = "[#{ds_name}] #{img_path}"
-
-        disk = vm.config.hardware.device.select { |d| is_disk?(d) &&
-                    d.backing.respond_to?(:fileName) &&
-                    d.backing.fileName == ds_and_img_name }
-
-        raise "Disk #{img_path} not found." if disk.nil?
-
-        spec = { :deviceChange => [{
-                  :operation => :remove,
-                  :device    => disk[0]
-                 }]}
-
-        vm.ReconfigVM_Task(:spec => spec).wait_for_completion
-    end
-
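`Array#select` returns an array, never nil, so the `disk.nil?` guard above can never fire for a missing disk; `disk.empty?` is the check that would. A sketch of the corrected lookup, with `vm`, `ds_and_img_name` and `img_path` as in the method and the `is_disk?` helper elided:

    disk = vm.config.hardware.device.select do |d|
      d.backing.respond_to?(:fileName) && d.backing.fileName == ds_and_img_name
    end

    raise "Disk #{img_path} not found." if disk.empty?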
-    ############################################################################
-    # Detach all disks from a VM
-    # @params vm[VCenterVm] vCenter VM
-    ############################################################################
-    def self.detach_all_disks(vm)
-        disks = vm.config.hardware.device.select { |d| is_disk?(d) }
-
-        return if disks.nil?
-
-        spec = { :deviceChange => [] }
-
-        disks.each{|disk|
-            spec[:deviceChange] << {
-                :operation => :remove,
-                :device    => disk
-            }
-        }
-
-        vm.ReconfigVM_Task(:spec => spec).wait_for_completion
-    end
-
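The same select-never-returns-nil caveat applies to `disks.nil?` here, though with an empty result the spec's `deviceChange` array is simply empty. The accumulation loop can also be collapsed into one pass (a sketch, with `vm` and `disks` as in the method):

    changes = disks.map { |disk| { :operation => :remove, :device => disk } }
    vm.ReconfigVM_Task(:spec => { :deviceChange => changes }).wait_for_completion unless changes.empty?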
-    def self.create_context(context)
-        # Remove <CONTEXT> (9) and </CONTEXT>\n (11)
-        context_text = "# Context variables generated by OpenNebula\n"
-        context.elements.each{|context_element|
-            next if !context_element.text
-            context_text += context_element.name + "='" +
-                            context_element.text.gsub("'", "\\'") + "'\n"
-        }
-        context_text
-    end
-
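A subtlety in create_context: inside a gsub replacement string, `\'` is a back-reference to the text after the match, so `gsub("'", "\\'")` does not insert a literal backslash-quote as it appears to; the block form avoids back-reference expansion. A runnable sketch of the intended transformation (element names are illustrative):

    require 'rexml/document'

    xml  = REXML::Document.new('<CONTEXT><HOSTNAME>vm01</HOSTNAME><GW>10.0.0.1</GW></CONTEXT>')
    text = "# Context variables generated by OpenNebula\n"
    xml.root.elements.each do |el|
      next if !el.text
      text << "#{el.name}='#{el.text.gsub("'") { "\\'" }}'\n"   # block form: literal \'
    end
    # text => "# Context variables generated by OpenNebula\nHOSTNAME='vm01'\nGW='10.0.0.1'\n"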
-    ############################################################################
-    # Detach attached disks from a VM
-    ############################################################################
-    def self.detach_attached_disks(vm, disks, hostname)
-        hid         = VIClient::translate_hostname(hostname)
-        connection  = VIClient.new(hid)
-
-        spec = { :deviceChange => [] }
-
-        disks.each{ |disk|
-            ds_and_img_name = "[#{disk['DATASTORE']}] #{disk['SOURCE']}"
-            vcenter_disk = vm.config.hardware.device.select { |d| is_disk?(d) &&
-                                d.backing.respond_to?(:fileName) &&
-                                d.backing.fileName == ds_and_img_name }[0]
-            spec[:deviceChange] << {
-                :operation => :remove,
-                :device    => vcenter_disk
-            }
-        }
-
-        vm.ReconfigVM_Task(:spec => spec).wait_for_completion
-    end
-end
end
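A hypothetical call into the removed detach_attached_disks, assuming the enclosing class is `VCenterVm`, that `vm` is already resolved, and that each `disks` entry carries the `DATASTORE`/`SOURCE` keys the method reads:

    disks = [
      { 'DATASTORE' => 'ds1', 'SOURCE' => 'one/one-42-disk-0.vmdk' },
      { 'DATASTORE' => 'ds1', 'SOURCE' => 'one/one-42-disk-1.vmdk' }
    ]
    VCenterVm.detach_attached_disks(vm, disks, 'vcenter-cluster')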