opennebula 4.90.10.rc1 → 5.0.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,2716 @@
1
+ # ---------------------------------------------------------------------------- #
2
+ # Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
3
+ # #
4
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
+ # not use this file except in compliance with the License. You may obtain #
6
+ # a copy of the License at #
7
+ # #
8
+ # http://www.apache.org/licenses/LICENSE-2.0 #
9
+ # #
10
+ # Unless required by applicable law or agreed to in writing, software #
11
+ # distributed under the License is distributed on an "AS IS" BASIS, #
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
13
+ # See the License for the specific language governing permissions and #
14
+ # limitations under the License. #
15
+ # ---------------------------------------------------------------------------- #
16
+
17
# -------------------------------------------------------------------------#
# Set up the environment for the driver                                    #
# -------------------------------------------------------------------------#
ONE_LOCATION = ENV["ONE_LOCATION"] unless defined?(ONE_LOCATION)

if ONE_LOCATION
    # Self-contained installation: everything hangs off ONE_LOCATION
    BIN_LOCATION = ONE_LOCATION + "/bin"  unless defined?(BIN_LOCATION)
    LIB_LOCATION = ONE_LOCATION + "/lib"  unless defined?(LIB_LOCATION)
    ETC_LOCATION = ONE_LOCATION + "/etc/" unless defined?(ETC_LOCATION)
    VAR_LOCATION = ONE_LOCATION + "/var/" unless defined?(VAR_LOCATION)
else
    # System-wide installation paths
    BIN_LOCATION = "/usr/bin"     unless defined?(BIN_LOCATION)
    LIB_LOCATION = "/usr/lib/one" unless defined?(LIB_LOCATION)
    ETC_LOCATION = "/etc/one/"    unless defined?(ETC_LOCATION)
    VAR_LOCATION = "/var/lib/one" unless defined?(VAR_LOCATION)
end

# Force the C locale so external tool output is parseable
ENV['LANG'] = 'C'

# Make the vendored rbvmomi gem and the OpenNebula ruby libs loadable
$: << LIB_LOCATION + '/ruby/vendors/rbvmomi/lib'
$: << LIB_LOCATION + '/ruby'

require 'ostruct'
require 'rbvmomi'
require 'yaml'
require 'opennebula'
require 'base64'
require 'openssl'
require 'VirtualMachineDriver'
46
+
47
+ ################################################################################
48
+ # Monkey patch rbvmomi library with some extra functions
49
+ ################################################################################
50
+
51
# Monkey patch: add curl-backed file transfer helpers to rbvmomi datastores.
class RbVmomi::VIM::Datastore

    # Stream a datastore file to this process' stdout via curl.
    # CURLBIN is expected to be defined by the including driver.
    # @param remote_path [String] source path on the datastore
    # @return [void]
    # @raise [RuntimeError] if the curl transfer fails
    def download_to_stdout remote_path
        pid = spawn CURLBIN, "-k", '--noproxy', '*', '-f',
                    "-b", _connection.cookie,
                    http_url(remote_path)

        Process.waitpid(pid, 0)
        fail "download failed" unless $?.success?
    end

    # Heuristic: a datastore file smaller than 4KB is taken to be a VMDK
    # descriptor rather than a flat extent.
    # @param remote_path [String] path on the datastore
    # @return [Boolean]
    # @raise [RuntimeError] if the HEAD request fails or lacks Content-Length
    def is_descriptor? remote_path
        rout, wout = IO.pipe

        pid = spawn CURLBIN, "-I", "-k", '--noproxy', '*', '-f',
                    "-b", _connection.cookie,
                    http_url(remote_path),
                    :out => wout,
                    :err => '/dev/null'

        Process.waitpid(pid, 0)
        fail "read image header failed" unless $?.success?

        wout.close
        # Fix: guard against a response without a Content-Length header
        # (the previous code nil-dereferenced). Match case-insensitively.
        size_line = rout.readlines.find { |l|
            l.downcase.start_with?("content-length")
        }
        rout.close

        fail "read image header failed" if size_line.nil?

        size_line.split(':')[1].strip.to_i < 4096 # If <4k, then is a descriptor
    end

    # Fetch a (small) text file from the datastore.
    # @param remote_path [String] path on the datastore
    # @return [Array<String>] the file contents, one line per element
    # @raise [RuntimeError] if the transfer fails
    def get_text_file remote_path
        rout, wout = IO.pipe
        pid = spawn CURLBIN, "-k", '--noproxy', '*', '-f',
                    "-b", _connection.cookie,
                    http_url(remote_path),
                    :out => wout,
                    :err => '/dev/null'

        Process.waitpid(pid, 0)
        fail "get text file failed" unless $?.success?

        wout.close
        output = rout.readlines
        rout.close
        return output
    end

    private

    # Build the HTTPS(S) URL used to address +remote_path+ on this datastore.
    def http_url(remote_path)
        "http#{_connection.http.use_ssl? ? 's' : ''}://" \
        "#{_connection.http.address}:#{_connection.http.port}" \
        "#{mkuripath(remote_path)}"
    end
end
109
+
110
+ module VCenterDriver
111
+
112
+ ################################################################################
113
+ # This class represents a VCenter connection and an associated OpenNebula client
114
+ # The connection is associated to the VCenter backing a given OpenNebula host.
115
+ # For the VCenter driver each OpenNebula host represents a VCenter cluster
116
+ ################################################################################
117
+ class VIClient
118
+ attr_reader :vim, :one, :root, :cluster, :user, :pass, :host, :dc
119
+
120
+ def self.get_entities(folder, type, entities=[])
121
+ return nil if folder == []
122
+
123
+ folder.childEntity.each do |child|
124
+ name, junk = child.to_s.split('(')
125
+
126
+ case name
127
+ when "Folder"
128
+ VIClient.get_entities(child, type, entities)
129
+ when type
130
+ entities.push(child)
131
+ end
132
+ end
133
+
134
+ return entities
135
+ end
136
+
137
+ # Only retrieve properties with faster search
138
+ def get_entities_to_import(folder, type)
139
+ res = folder.inventory_flat(type => :all)
140
+ objects = []
141
+
142
+ res.each {|k,v|
143
+ if k.to_s.split('(').first == type
144
+ obj = {}
145
+ v.propSet.each{ |dynprop|
146
+ obj[dynprop.name] = dynprop.val
147
+ }
148
+ objects << OpenStruct.new(obj)
149
+ end
150
+ }
151
+ return objects
152
+ end
153
+
154
+ ############################################################################
155
+ # Initializr the VIClient, and creates an OpenNebula client. The parameters
156
+ # are obtained from the associated OpenNebula host
157
+ # @param hid [Integer] The OpenNebula host id with VCenter attributes
158
+ ############################################################################
159
+ def initialize(hid)
160
+
161
+ initialize_one
162
+
163
+ @one_host = ::OpenNebula::Host.new_with_id(hid, @one)
164
+ rc = @one_host.info
165
+
166
+ if ::OpenNebula.is_error?(rc)
167
+ raise "Error getting host information: #{rc.message}"
168
+ end
169
+
170
+ password = @one_host["TEMPLATE/VCENTER_PASSWORD"]
171
+
172
+ if !@token.nil?
173
+ begin
174
+ cipher = OpenSSL::Cipher::Cipher.new("aes-256-cbc")
175
+
176
+ cipher.decrypt
177
+ cipher.key = @token
178
+
179
+ password = cipher.update(Base64::decode64(password))
180
+ password << cipher.final
181
+ rescue
182
+ raise "Error decrypting vCenter password"
183
+ end
184
+ end
185
+
186
+ connection = {
187
+ :host => @one_host["TEMPLATE/VCENTER_HOST"],
188
+ :user => @one_host["TEMPLATE/VCENTER_USER"],
189
+ :password => password
190
+ }
191
+
192
+ initialize_vim(connection)
193
+
194
+ datacenters = VIClient.get_entities(@root, 'Datacenter')
195
+
196
+ datacenters.each {|dc|
197
+ ccrs = VIClient.get_entities(dc.hostFolder, 'ClusterComputeResource')
198
+
199
+ next if ccrs.nil?
200
+
201
+ @cluster = ccrs.find{ |ccr| @one_host.name == ccr.name }
202
+
203
+ (@dc = dc; break) if @cluster
204
+ }
205
+
206
+ if @dc.nil? || @cluster.nil?
207
+ raise "Cannot find DataCenter or ClusterComputeResource for host."
208
+ end
209
+ end
210
+
211
+ ########################################################################
212
+ # Initialize a VIConnection based just on the VIM parameters. The
213
+ # OpenNebula client is also initialized
214
+ ########################################################################
215
+ def self.new_connection(user_opts, one_client=nil)
216
+
217
+ conn = allocate
218
+
219
+ conn.initialize_one(one_client)
220
+
221
+ conn.initialize_vim(user_opts)
222
+
223
+ return conn
224
+ end
225
+
226
+ ########################################################################
227
+ # The associated cluster for this connection
228
+ ########################################################################
229
+ def cluster
230
+ @cluster
231
+ end
232
+
233
+ ########################################################################
234
+ # Is this Cluster confined in a resource pool?
235
+ ########################################################################
236
+ def rp_confined?
237
+ !@one_host["TEMPLATE/VCENTER_RESOURCE_POOL"].nil?
238
+ end
239
+
240
+ ########################################################################
241
+ # The associated resource pool for this connection
242
+ # @return [ResourcePool] an array of resource pools including the default
243
+ #  resource pool. If the connection is confined to a particular
244
+ #  resource pool, then return just that one
245
+ ########################################################################
246
+ def resource_pool
247
+ rp_name = @one_host["TEMPLATE/VCENTER_RESOURCE_POOL"]
248
+
249
+ if rp_name.nil?
250
+ rp_array = @cluster.resourcePool.resourcePool
251
+ rp_array << @cluster.resourcePool
252
+ rp_array
253
+ else
254
+ [find_resource_pool(rp_name)]
255
+ end
256
+ end
257
+
258
+ ########################################################################
259
+ # Get the default resource pool of the connection. Only valid if
260
+ # the connection is not confined in a resource pool
261
+ # @return ResourcePool the default resource pool
262
+ ########################################################################
263
+ def default_resource_pool
264
+ @cluster.resourcePool
265
+ end
266
+
267
+ ########################################################################
268
+ # Searches the desired ResourcePool of the DataCenter for the current
269
+ # connection. Returns a RbVmomi::VIM::ResourcePool or the default pool
270
+ # if not found
271
+ # @param rpool [String] the ResourcePool name
272
+ ########################################################################
273
    ########################################################################
    # Searches the desired ResourcePool of the DataCenter for the current
    # connection. Walks the '/'-separated path in +poolName+ one segment at
    # a time, starting at the cluster; any segment that cannot be resolved
    # falls back to the cluster's root pool.
    # @param poolName [String] slash-separated ResourcePool path
    # @return [RbVmomi::VIM::ResourcePool] the resolved pool, or the
    #   cluster's default pool if not found
    ########################################################################
    def find_resource_pool(poolName)
        baseEntity = @cluster

        entityArray = poolName.split('/')
        entityArray.each do |entityArrItem|
            # Empty segments (leading '/', '//') are skipped on purpose
            if entityArrItem != ''
                # Each container type exposes its children differently;
                # 'or return' bails out to the default pool on any miss
                # (note: 'or' binds after the assignment, as intended).
                if baseEntity.is_a? RbVmomi::VIM::Folder
                    baseEntity = baseEntity.childEntity.find { |f|
                                     f.name == entityArrItem
                                 } or return @cluster.resourcePool
                elsif baseEntity.is_a? RbVmomi::VIM::ClusterComputeResource
                    baseEntity = baseEntity.resourcePool.resourcePool.find { |f|
                                     f.name == entityArrItem
                                 } or return @cluster.resourcePool
                elsif baseEntity.is_a? RbVmomi::VIM::ResourcePool
                    baseEntity = baseEntity.resourcePool.find { |f|
                                     f.name == entityArrItem
                                 } or return @cluster.resourcePool
                else
                    # Unknown container type: give up and use the default
                    return @cluster.resourcePool
                end
            end
        end

        # If the walk ended on a container (e.g. a cluster), descend to
        # its own root resource pool so callers always get a pool.
        if !baseEntity.is_a?(RbVmomi::VIM::ResourcePool) and
            baseEntity.respond_to?(:resourcePool)
            baseEntity = baseEntity.resourcePool
        end

        baseEntity
    end
304
+
305
+ ########################################################################
306
+ # Searches the associated vmFolder of the DataCenter for the current
307
+ # connection. Returns a RbVmomi::VIM::VirtualMachine or nil if not found
308
+ # @param uuid [String] the UUID of the VM or VM Template
309
+ ########################################################################
310
+ def find_vm_template(uuid)
311
+ version = @vim.serviceContent.about.version
312
+
313
+ if version.split(".").first.to_i >= 6
314
+ @dc.vmFolder.findByUuid(uuid, RbVmomi::VIM::VirtualMachine, @dc)
315
+ else
316
+ vms = VIClient.get_entities(@dc.vmFolder, 'VirtualMachine')
317
+
318
+ return vms.find do |v|
319
+ begin
320
+ v.config && v.config.uuid == uuid
321
+ rescue RbVmomi::VIM::ManagedObjectNotFound
322
+ false
323
+ end
324
+ end
325
+ end
326
+ end
327
+
328
+ ########################################################################
329
+ # Searches the associated vmFolder of the DataCenter for the current
330
+ # connection. Returns a RbVmomi::VIM::VirtualMachine or nil if not found
331
+ # @param vm_name [String] the UUID of the VM or VM Template
332
+ ########################################################################
333
+ def find_vm(vm_name)
334
+ vms = VIClient.get_entities(@dc.vmFolder, 'VirtualMachine')
335
+
336
+ return vms.find do |v|
337
+ begin
338
+ v.name == vm_name
339
+ rescue RbVmomi::VIM::ManagedObjectNotFound
340
+ false
341
+ end
342
+ end
343
+ end
344
+
345
+ ########################################################################
346
+ # Searches the associated datacenter for a particular datastore
347
+ # @param ds_name [String] name of the datastore
348
+ # @returns a RbVmomi::VIM::VirtualMachine or nil if not found
349
+ ########################################################################
350
+ def get_datastore(ds_name)
351
+ datastores = VIClient.get_entities(@dc.datastoreFolder, 'Datastore')
352
+ ds = datastores.select{|ds| ds.name == ds_name}[0]
353
+ end
354
+
355
+ ########################################################################
356
+ # Builds a hash with the DataCenter / ClusterComputeResource hierarchy
357
+ # for this VCenter.
358
+ # @return [Hash] in the form
359
+ # {dc_name [String] => ClusterComputeResources Names [Array - String]}
360
+ ########################################################################
361
+ def hierarchy(one_client=nil)
362
+ vc_hosts = {}
363
+
364
+ datacenters = VIClient.get_entities(@root, 'Datacenter')
365
+
366
+ hpool = OpenNebula::HostPool.new((one_client||@one))
367
+ rc = hpool.info
368
+
369
+ datacenters.each { |dc|
370
+ ccrs = VIClient.get_entities(dc.hostFolder, 'ClusterComputeResource')
371
+ vc_hosts[dc.name] = []
372
+ ccrs.each { |c|
373
+ if !hpool["HOST[NAME=\"#{c.name}\"]"]
374
+ vc_hosts[dc.name] << c.name
375
+ end
376
+ }
377
+ }
378
+
379
+ return vc_hosts
380
+ end
381
+
382
+ ########################################################################
383
+ # Builds a hash with the Datacenter / VM Templates for this VCenter
384
+ # @param one_client [OpenNebula::Client] Use this client instead of @one
385
+ # @return [Hash] in the form
386
+ # { dc_name [String] => Templates [Array] }
387
+ ########################################################################
388
    ########################################################################
    # Builds a hash with the Datacenter / VM Templates for this VCenter,
    # listing only templates not already imported into OpenNebula.
    # @param one_client [OpenNebula::Client] Use this client instead of @one
    # @return [Hash] in the form
    #   { dc_name [String] => Templates [Array] }
    ########################################################################
    def vm_templates(one_client=nil)
        vm_templates = {}

        tpool = OpenNebula::TemplatePool.new(
            (one_client||@one), OpenNebula::Pool::INFO_ALL)
        rc = tpool.info
        if OpenNebula.is_error?(rc)
            raise "Error contacting OpenNebula #{rc.message}"
        end

        datacenters = VIClient.get_entities(@root, 'Datacenter')

        datacenters.each { |dc|
            # Property-based lookup; each entry is an OpenStruct, see
            # get_entities_to_import
            vms = get_entities_to_import(dc.vmFolder, 'VirtualMachine')

            # Keep only actual templates, not regular VMs
            tmp = vms.select { |v| v.config && (v.config.template == true) }

            one_tmp    = []
            # Per-datacenter caches to avoid re-fetching the same host /
            # datastore wrappers for every template
            host_cache = {}
            ds_cache   = {}

            tmp.each { |t|
                vi_tmp = VCenterVm.new(self, t)

                # Skip templates already imported (matched by UUID)
                if !tpool["VMTEMPLATE/TEMPLATE/PUBLIC_CLOUD[\
                        TYPE=\"vcenter\" \
                        and VM_TEMPLATE=\"#{vi_tmp.vm.config.uuid}\"]"]
                    # Check cached objects
                    if !host_cache[vi_tmp.vm.runtime.host.to_s]
                        host_cache[vi_tmp.vm.runtime.host.to_s] =
                               VCenterCachedHost.new vi_tmp.vm.runtime.host
                    end

                    # NOTE(review): only the first datastore of the template
                    # is considered here — confirm that is intended
                    if !ds_cache[t.datastore[0].to_s]
                        ds_cache[t.datastore[0].to_s] =
                               VCenterCachedDatastore.new t.datastore[0]
                    end

                    host = host_cache[vi_tmp.vm.runtime.host.to_s]
                    ds   = ds_cache[t.datastore[0].to_s]

                    one_tmp << {
                        :name       => "#{vi_tmp.vm.name} - #{host.cluster_name}",
                        :uuid       => vi_tmp.vm.config.uuid,
                        :host       => host.cluster_name,
                        :one        => vi_tmp.to_one(host),
                        :ds         => vi_tmp.to_one_ds(host, ds.name),
                        :default_ds => ds.name,
                        :rp         => vi_tmp.to_one_rp(host)
                    }
                end
            }

            vm_templates[dc.name] = one_tmp
        }

        return vm_templates
    end
446
+
447
+ ########################################################################
448
+ # Builds a hash with the Datacenter / CCR (Distributed)Networks
449
+ # for this VCenter
450
+ # @param one_client [OpenNebula::Client] Use this client instead of @one
451
+ # @return [Hash] in the form
452
+ # { dc_name [String] => Networks [Array] }
453
+ ########################################################################
454
    ########################################################################
    # Builds a hash with the Datacenter / CCR (Distributed)Networks
    # for this VCenter, skipping networks already imported as VNETs.
    # @param one_client [OpenNebula::Client] Use this client instead of @one
    # @return [Hash] in the form
    #   { dc_name [String] => Networks [Array] }
    ########################################################################
    def vcenter_networks(one_client=nil)
        vcenter_networks = {}

        vnpool = OpenNebula::VirtualNetworkPool.new(
            (one_client||@one), OpenNebula::Pool::INFO_ALL)
        rc = vnpool.info
        if OpenNebula.is_error?(rc)
            raise "Error contacting OpenNebula #{rc.message}"
        end

        datacenters = VIClient.get_entities(@root, 'Datacenter')

        datacenters.each { |dc|
            # ----- Standard port groups -----
            networks = VIClient.get_entities(dc.networkFolder, 'Network' )
            one_nets = []

            networks.each { |n|
                # Skip those not in cluster
                next if !n[:host][0]

                # Networks can be in several cluster, create one per cluster
                Array(n[:host][0]).each{ |host_system|
                    net_name = "#{n.name} - #{host_system.parent.name}"

                    # Skip port groups already imported (matched by bridge)
                    if !vnpool["VNET[BRIDGE=\"#{n[:name]}\"]/\
                            TEMPLATE[VCENTER_TYPE=\"Port Group\"]"]
                        one_nets << {
                            :name    => net_name,
                            :bridge  => n.name,
                            :cluster => host_system.parent.name,
                            :type    => "Port Group",
                            :one     => "NAME = \"#{net_name}\"\n" \
                                        "BRIDGE = \"#{n[:name]}\"\n" \
                                        "VN_MAD = \"dummy\"\n" \
                                        "VCENTER_TYPE = \"Port Group\""
                        }
                    end
                }
            }

            # ----- Distributed port groups (vDS) -----
            networks = VIClient.get_entities(dc.networkFolder,
                                             'DistributedVirtualPortgroup' )

            networks.each { |n|
                # Skip those not in cluster
                next if !n[:host][0]

                # DistributedVirtualPortgroup can be in several cluster,
                # create one per cluster
                Array(n[:host][0]).each{ |host_system|
                    # NOTE(review): this uses n[:host][0].parent.name while
                    # the Port Group loop uses host_system.parent.name —
                    # confirm whether that asymmetry is intentional
                    net_name = "#{n.name} - #{n[:host][0].parent.name}"

                    if !vnpool["VNET[BRIDGE=\"#{n[:name]}\"]/\
                            TEMPLATE[VCENTER_TYPE=\"Distributed Port Group\"]"]
                        vnet_template = "NAME = \"#{net_name}\"\n" \
                                        "BRIDGE = \"#{n[:name]}\"\n" \
                                        "VN_MAD = \"dummy\"\n" \
                                        "VCENTER_TYPE = \"Distributed Port Group\""

                        default_pc = n.config.defaultPortConfig

                        # Extract the VLAN configuration, when present
                        has_vlan = false
                        vlan_str = ""

                        if default_pc.methods.include? :vlan
                            has_vlan = default_pc.vlan.methods.include? :vlanId
                        end

                        if has_vlan
                            vlan = n.config.defaultPortConfig.vlan.vlanId

                            if vlan != 0
                                if vlan.is_a? Array
                                    # Trunk: list of ranges -> "a..b,c..d"
                                    vlan.each{|v|
                                        vlan_str += v.start.to_s + ".." +
                                                    v.end.to_s + ","
                                    }
                                    vlan_str.chop!
                                else
                                    vlan_str = vlan.to_s
                                end
                            end
                        end

                        if !vlan_str.empty?
                            vnet_template << "VLAN_ID=#{vlan_str}\n"
                        end

                        one_net = {:name    => net_name,
                                   :bridge  => n.name,
                                   :cluster => host_system.parent.name,
                                   :type   => "Distributed Port Group",
                                   :one     => vnet_template}

                        one_net[:vlan] = vlan_str if !vlan_str.empty?

                        one_nets << one_net
                    end
                }
            }

            vcenter_networks[dc.name] = one_nets
        }

        return vcenter_networks
    end
561
+
562
+
563
+ ########################################################################
564
+ # Builds a hash with the Datacenter / Datastores for this VCenter
565
+ # @param one_client [OpenNebula::Client] Use this client instead of @one
566
+ # @return [Hash] in the form
567
+ # { dc_name [String] => Datastore [Array] of DS templates}
568
+ ########################################################################
569
    ########################################################################
    # Builds a hash with the Datacenter / Datastores for this VCenter,
    # listing only datastores not yet imported whose cluster is registered.
    # @param one_client [OpenNebula::Client] Use this client instead of @one
    # @return [Hash] in the form
    #   { dc_name [String] => Datastore [Array] of DS templates}
    ########################################################################
    def vcenter_datastores(one_client=nil)
        ds_templates = {}

        dspool = OpenNebula::DatastorePool.new(
            (one_client||@one))
        rc = dspool.info
        if OpenNebula.is_error?(rc)
            raise "Error contacting OpenNebula #{rc.message}"
        end

        hpool = OpenNebula::HostPool.new(
            (one_client||@one))
        rc = hpool.info
        if OpenNebula.is_error?(rc)
            raise "Error contacting OpenNebula #{rc.message}"
        end

        datacenters = VIClient.get_entities(@root, 'Datacenter')

        datacenters.each { |dc|
            one_tmp = []
            datastores = VIClient.get_entities(dc.datastoreFolder, 'Datastore')
            datastores.each { |ds|
                # StoragePods etc. may appear in the folder; keep only
                # plain datastores
                next if !ds.is_a? RbVmomi::VIM::Datastore
                # Find the Cluster from which to access this ds
                # NOTE(review): only the first attached host is inspected —
                # confirm a datastore is never imported via another cluster
                cluster_name = ds.host[0].key.parent.name

                # Offer the DS only if not yet imported AND its cluster is
                # already an OpenNebula host
                if !dspool["DATASTORE[NAME=\"#{ds.name}\"]"] and
                   hpool["HOST[NAME=\"#{cluster_name}\"]"]
                    one_tmp << {
                        :name     => "#{ds.name}",
                        :total_mb => ((ds.summary.capacity.to_i / 1024) / 1024),
                        :free_mb  => ((ds.summary.freeSpace.to_i / 1024) / 1024),
                        :cluster  => cluster_name,
                        :one      => "NAME=#{ds.name}\n"\
                                     "DS_MAD=vcenter\n"\
                                     "TM_MAD=vcenter\n"\
                                     "VCENTER_CLUSTER=#{cluster_name}\n"
                    }
                end
            }
            ds_templates[dc.name] = one_tmp
        }

        return ds_templates
    end
615
+
616
+ #############################################################################
617
+ # Builds a hash with the Images for a particular datastore
618
+ # @param one_client [OpenNebula::Client] Use this client instead of @one
619
+ # @return [Array] of image templates
620
+ ############################################################################
621
    ############################################################################
    # Builds a list of image templates for a particular datastore, browsing
    # the datastore for floppy/ISO/VMDK files not yet imported as images.
    # @param ds_name    [String] name of the datastore to browse
    # @param one_client [OpenNebula::Client] Use this client instead of @one
    # @return [Array] of image templates
    # @raise [RuntimeError] if the datastore is not registered in OpenNebula
    ############################################################################
    def vcenter_images(ds_name, one_client=nil)
        img_types = ["FloppyImageFileInfo",
                     "IsoImageFileInfo",
                     "VmDiskFileInfo"]

        img_templates = []

        ipool = OpenNebula::ImagePool.new((one_client||@one))
        rc = ipool.info
        if OpenNebula.is_error?(rc)
            raise "Error contacting OpenNebula #{rc.message}"
        end

        dspool = OpenNebula::DatastorePool.new((one_client||@one))
        rc = dspool.info
        if OpenNebula.is_error?(rc)
            raise "Error contacting OpenNebula #{rc.message}"
        end

        # The images must be registered under the OpenNebula datastore id
        ds_id = dspool["DATASTORE[NAME=\"#{ds_name}\"]/ID"]

        if !ds_id
            raise "Datastore not found in OpenNebula. Please import"\
                  " it first and try again"
        end

        datacenters = VIClient.get_entities(@root, 'Datacenter')

        datacenters.each { |dc|

            # Find datastore within datacenter
            datastores = VIClient.get_entities(dc.datastoreFolder, 'Datastore')
            ds = datastores.select{|ds| ds.name == ds_name}[0]
            next if !ds

            # Create Search Spec: recurse the whole datastore looking for
            # disk and ISO files, retrieving owner/size/type/mtime
            spec = RbVmomi::VIM::HostDatastoreBrowserSearchSpec.new
            spec.query = [RbVmomi::VIM::VmDiskFileQuery.new,
                          RbVmomi::VIM::IsoImageFileQuery.new]
            spec.details = RbVmomi::VIM::FileQueryFlags(:fileOwner => true,
                                                        :fileSize => true,
                                                        :fileType => true,
                                                        :modification => true)
            spec.matchPattern=[]

            search_params = {'datastorePath' => "[#{ds.name}]",
                             'searchSpec'    => spec}

            # Perform search task and return results
            search_task=ds.browser.SearchDatastoreSubFolders_Task(search_params)
            search_task.wait_for_completion

            search_task.info.result.each { |image|
                # Strip the "[dsname] " prefix to get a relative folder path
                folderpath = ""
                if image.folderPath[-1] != "]"
                    folderpath = image.folderPath.sub(/^\[#{ds_name}\] /, "")
                end

                image = image.file[0]

                # Skip not relevant files
                next if !img_types.include? image.class.to_s

                image_path = folderpath + image.path

                # Strip a trailing ".vmdk" extension (reverse/sub/reverse
                # removes only the suffix occurrence)
                image_name = File.basename(image.path).reverse.sub("kdmv.","").reverse

                # Skip images already imported under "<name> - <ds>"
                if !ipool["IMAGE[NAME=\"#{image_name} - #{ds_name}\"]"]
                    img_templates << {
                        :name => "#{image_name} - #{ds_name}",
                        :path => image_path,
                        :size => (image.fileSize / 1024).to_s,
                        :type => image.class.to_s,
                        :dsid => ds_id,
                        :one  => "NAME=\"#{image_name} - #{ds_name}\"\n"\
                                 "PATH=\"vcenter://#{image_path}\"\n"\
                                 "PERSISTENT=\"YES\"\n"\
                    }

                    # Disks become OS images, everything else a CDROM
                    if image.class.to_s == "VmDiskFileInfo"
                        img_templates[-1][:one] += "TYPE=\"OS\"\n"
                    else
                        img_templates[-1][:one] += "TYPE=\"CDROM\"\n"
                    end

                    # Preserve thin/thick provisioning information
                    if image.class.to_s == "VmDiskFileInfo" &&
                       !image.diskType.nil?
                        img_templates[-1][:one] += "DISK_TYPE=#{image.diskType}\n"
                    end
                end
            }
        }

        return img_templates
    end
716
+
717
+ def self.translate_hostname(hostname)
718
+ host_pool = OpenNebula::HostPool.new(::OpenNebula::Client.new())
719
+ rc = host_pool.info
720
+ raise "Could not find host #{hostname}" if OpenNebula.is_error?(rc)
721
+
722
+ host = host_pool.select {|host_element| host_element.name==hostname }
723
+ return host.first.id
724
+ end
725
+
726
+ def self.find_ds_name(ds_id)
727
+ ds = OpenNebula::Datastore.new_with_id(ds_id)
728
+ rc = ds.info
729
+ raise "Could not find datastore #{ds_id}" if OpenNebula.is_error?(rc)
730
+
731
+ return ds.name
732
+ end
733
+
734
+ ############################################################################
735
+ # Initialize an OpenNebula connection with the default ONE_AUTH
736
+ ############################################################################
737
+ def initialize_one(one_client=nil)
738
+ begin
739
+ if one_client
740
+ @one = one_client
741
+ else
742
+ @one = ::OpenNebula::Client.new()
743
+ end
744
+
745
+ system = ::OpenNebula::System.new(@one)
746
+
747
+ config = system.get_configuration()
748
+
749
+ if ::OpenNebula.is_error?(config)
750
+ raise "Error getting oned configuration : #{config.message}"
751
+ end
752
+
753
+ @token = config["ONE_KEY"]
754
+ rescue Exception => e
755
+ raise "Error initializing OpenNebula client: #{e.message}"
756
+ end
757
+ end
758
+
759
+ ############################################################################
760
+ # Initialize a connection with vCenter. Options
761
+ # @param options[Hash] with:
762
+ # :user => The vcenter user
763
+ # :password => Password for the user
764
+ # :host => vCenter hostname or IP
765
+ # :insecure => SSL (optional, defaults to true)
766
+ ############################################################################
767
+ def initialize_vim(user_opts={})
768
+ opts = {
769
+ :insecure => true
770
+ }.merge(user_opts)
771
+
772
+ @user = opts[:user]
773
+ @pass = opts[:password]
774
+ @host = opts[:host]
775
+
776
+ begin
777
+ @vim = RbVmomi::VIM.connect(opts)
778
+ @root = @vim.root
779
+ @vdm = @vim.serviceContent.virtualDiskManager
780
+ @file_manager = @vim.serviceContent.fileManager
781
+ rescue Exception => e
782
+ raise "Error connecting to #{@host}: #{e.message}"
783
+ end
784
+ end
785
+
786
+ ######################### Datastore Operations #############################
787
+
788
+ ############################################################################
789
+ # Retrieve size for a VirtualDisk in a particular datastore
790
+ # @param ds_name [String] name of the datastore
791
+ # @param img_str [String] path to the VirtualDisk
792
+ # @return size of the file in Kb
793
+ ############################################################################
794
    ############################################################################
    # Retrieve size for a VirtualDisk in a particular datastore
    # @param ds_name [String] name of the datastore
    # @param img_str [String] path to the VirtualDisk
    # @return size of the file in Kb
    #   NOTE(review): the value is divided by 1024 twice below, so this
    #   actually returns MB — confirm which unit callers expect
    ############################################################################
    def stat(ds_name, img_str)
        img_path = File.dirname img_str
        img_name = File.basename img_str

        # Find datastore within datacenter
        ds = get_datastore(ds_name)

        # Create Search Spec restricted to the exact file name
        spec = RbVmomi::VIM::HostDatastoreBrowserSearchSpec.new
        spec.query = [RbVmomi::VIM::VmDiskFileQuery.new,
                      RbVmomi::VIM::IsoImageFileQuery.new]
        spec.details = RbVmomi::VIM::FileQueryFlags(:fileOwner => true,
                                                    :fileSize => true,
                                                    :fileType => true,
                                                    :modification => true)
        spec.matchPattern=[img_name]

        search_params = {'datastorePath' => "[#{ds_name}] #{img_path}",
                         'searchSpec' => spec}

        # Perform search task and return results
        search_task=ds.browser.SearchDatastoreSubFolders_Task(search_params)
        search_task.wait_for_completion
        # First folder result / first file is the single matched disk
        (search_task.info.result[0].file[0].fileSize / 1024) / 1024
    end
819
+
820
+ ############################################################################
821
+ # Returns Datastore information
822
+ # @param ds_name [String] name of the datastore
823
+ # @return [String] monitor information of the DS
824
+ ############################################################################
825
+ def monitor_ds(ds_name)
826
+ # Find datastore within datacenter
827
+ ds = get_datastore(ds_name)
828
+
829
+ total_mb = (ds.summary.capacity.to_i / 1024) / 1024
830
+ free_mb = (ds.summary.freeSpace.to_i / 1024) / 1024
831
+ used_mb = total_mb - free_mb
832
+ ds_type = ds.summary.type
833
+
834
+ "USED_MB=#{used_mb}\nFREE_MB=#{free_mb} \nTOTAL_MB=#{total_mb}"
835
+ end
836
+
837
+ ############################################################################
838
+ # Copy a VirtualDisk
839
+ # @param ds_name [String] name of the datastore
840
+ # @param img_str [String] path to the VirtualDisk
841
+ ############################################################################
842
+ def copy_virtual_disk(source_path, source_ds, target_path, target_ds=nil)
843
+ target_ds = source_ds if target_ds.nil?
844
+
845
+ copy_params= {:sourceName => "[#{source_ds}] #{source_path}",
846
+ :sourceDatacenter => @dc,
847
+ :destName => "[#{target_ds}] #{target_path}"}
848
+
849
+ @vdm.CopyVirtualDisk_Task(copy_params).wait_for_completion
850
+
851
+ target_path
852
+ end
853
+
854
+ ############################################################################
855
+ # Create a VirtualDisk
856
+ # @param img_name [String] name of the image
857
+ # @param ds_name [String] name of the datastore on which the VD will be
858
+ # created
859
+ # @param size [String] size of the new image in MB
860
+ # @param adapter_type [String] as described in
861
+ #  http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.wssdk.apiref.doc/vim.VirtualDiskManager.VirtualDiskAdapterType.html
862
+ # @param disk_type [String] as described in
863
+ #  http://pubs.vmware.com/vsphere-60/index.jsp?topic=%2Fcom.vmware.wssdk.apiref.doc%2Fvim.VirtualDiskManager.VirtualDiskType.html
864
+ # @return name of the final image
865
+ ############################################################################
866
+ def create_virtual_disk(img_name, ds_name, size, adapter_type, disk_type)
867
+ vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
868
+ :adapterType => adapter_type,
869
+ :capacityKb => size.to_i*1024,
870
+ :diskType => disk_type
871
+ )
872
+
873
+ @vdm.CreateVirtualDisk_Task(
874
+ :datacenter => @dc,
875
+ :name => "[#{ds_name}] #{img_name}.vmdk",
876
+ :spec => vmdk_spec
877
+ ).wait_for_completion
878
+
879
+ "#{img_name}.vmdk"
880
+ end
881
+
882
+ ############################################################################
883
+ # Delete a VirtualDisk
884
+ # @param img_name [String] name of the image
885
+ # @param ds_name [String] name of the datastore where the VD resides
886
+ ############################################################################
887
+ def delete_virtual_disk(img_name, ds_name)
888
+ @vdm.DeleteVirtualDisk_Task(
889
+ name: "[#{ds_name}] #{img_name}",
890
+ datacenter: @dc
891
+ ).wait_for_completion
892
+ end
893
+
894
+ ############################################################################
895
+ # Delete a VirtualDisk
896
+ # @param directory [String] name of the new directory
897
+ # @param ds_name [String] name of the datastore where to create the dir
898
+ ############################################################################
899
+ def create_directory(directory, ds_name)
900
+ begin
901
+ path = "[#{ds_name}] #{directory}"
902
+ @file_manager.MakeDirectory(:name => path,
903
+ :datacenter => @dc,
904
+ :createParentDirectories => true)
905
+ rescue RbVmomi::VIM::FileAlreadyExists => e
906
+ end
907
+ end
908
+
909
+ ############################################################################
910
+ # Silences standard output and error
911
+ ############################################################################
912
+ def self.in_silence
913
+ begin
914
+ orig_stderr = $stderr.clone
915
+ orig_stdout = $stdout.clone
916
+ $stderr.reopen File.new('/dev/null', 'w')
917
+ $stdout.reopen File.new('/dev/null', 'w')
918
+ retval = yield
919
+ rescue Exception => e
920
+ $stdout.reopen orig_stdout
921
+ $stderr.reopen orig_stderr
922
+ raise e
923
+ ensure
924
+ $stdout.reopen orig_stdout
925
+ $stderr.reopen orig_stderr
926
+ end
927
+ retval
928
+ end
929
+
930
+ ############################################################################
931
+ # Silences standard output and error
932
+ ############################################################################
933
+ def self.in_stderr_silence
934
+ begin
935
+ orig_stderr = $stderr.clone
936
+ $stderr.reopen File.new('/dev/null', 'w')
937
+ retval = yield
938
+ rescue Exception => e
939
+ $stderr.reopen orig_stderr
940
+ raise e
941
+ ensure
942
+ $stderr.reopen orig_stderr
943
+ end
944
+ retval
945
+ end
946
+ end
947
+
948
+ ################################################################################
949
+ # Cached Classes to speed up import and monitoring
950
+ ################################################################################
951
class VCenterCachedHost

    def initialize(rbVmomiHost)
        @host       = rbVmomiHost
        @attributes = {}
    end

    # Name of the host's parent (the vCenter cluster); cached after the
    # first lookup
    def name
        @attributes['name'] ||= @host.parent.name
    end

    # Name of the cluster the host belongs to; cached after the first lookup
    def cluster_name
        @attributes['cluster_name'] ||= @host.parent.name
    end

    # Comma separated list of datastore names of the host's datacenter
    def ds_list
        @attributes['ds_list'] ||= begin
            # Walk up the inventory tree until the enclosing Datacenter
            datacenter = @host.parent
            until datacenter.is_a? RbVmomi::VIM::Datacenter
                datacenter = datacenter.parent
            end

            VIClient.get_entities(datacenter.datastoreFolder, 'Datastore')
                    .map { |ds| ds.name }
                    .join(',')
        end
    end

    # Comma separated list of resource pool paths of the host's cluster,
    # children listed before their parents
    def rp_list
        @attributes['rp_list'] ||= begin
            list = ""
            @host.parent.resourcePool.resourcePool.each do |rp|
                list << get_child_rp_names(rp, "")
            end
            list[0..-2] # drop the trailing comma
        end
    end

    # Recursively collects the slash-separated paths of a resource pool and
    # all of its children; every entry carries a trailing comma
    def get_child_rp_names(rp, parent_prefix)
        names = ""

        current = parent_prefix.empty? ? rp.name : "#{parent_prefix}/#{rp.name}"

        rp.resourcePool.each do |child|
            names << get_child_rp_names(child, current)
        end

        names << current << ","
    end

    # CPU speed of the host in MHz; cached after the first lookup
    def cpumhz
        @attributes['cpumhz'] ||= @host.summary.hardware.cpuMhz.to_f
    end

end
1028
+
1029
class VCenterCachedDatastore

    def initialize(rbVmomiDatastore)
        @ds         = rbVmomiDatastore
        @attributes = {}
    end

    # Name of the datastore, fetched from vCenter only on the first call
    def name
        @attributes['name'] ||= @ds.name
    end

end
1045
+
1046
+ ################################################################################
1047
+ # This class is an OpenNebula host that abstracts a vCenter cluster. It
1048
+ # includes the functionality needed to monitor the cluster and report the ESX
1049
+ # hosts and VM status of the cluster.
1050
+ ################################################################################
1051
+ class VCenterHost < ::OpenNebula::Host
1052
+ attr_reader :vc_client, :vc_root, :cluster, :host, :client
1053
+
1054
+ ############################################################################
1055
+ # Initialize the VCenterHost by looking for the associated objects of the
1056
+ # VIM hierarchy
1057
+ # client [VIClient] to interact with the associated vCenter
1058
+ ############################################################################
1059
+ def initialize(client)
1060
+ @client = client
1061
+ @cluster = client.cluster
1062
+
1063
+ @resource_pools = client.resource_pool
1064
+ end
1065
+
1066
+ ########################################################################
1067
+ # Creates an OpenNebula host representing a cluster in this VCenter
1068
+ # @param cluster_name[String] the name of the cluster in the vcenter
1069
+ # @param client [VIClient] to create the host
1070
+ # @return In case of success [0, host_id] or [-1, error_msg]
1071
+ ########################################################################
1072
+ def self.to_one(cluster_name, client)
1073
+ one_host = ::OpenNebula::Host.new(::OpenNebula::Host.build_xml,
1074
+ client.one)
1075
+
1076
+ rc = one_host.allocate(cluster_name, 'vcenter', 'vcenter',
1077
+ ::OpenNebula::ClusterPool::NONE_CLUSTER_ID)
1078
+
1079
+ return -1, rc.message if ::OpenNebula.is_error?(rc)
1080
+
1081
+ template = "VCENTER_HOST=\"#{client.host}\"\n"\
1082
+ "VCENTER_PASSWORD=\"#{client.pass}\"\n"\
1083
+ "VCENTER_USER=\"#{client.user}\"\n"
1084
+
1085
+ rc = one_host.update(template, false)
1086
+
1087
+ if ::OpenNebula.is_error?(rc)
1088
+ error = rc.message
1089
+
1090
+ rc = one_host.delete
1091
+
1092
+ if ::OpenNebula.is_error?(rc)
1093
+ error << ". Host #{cluster_name} could not be"\
1094
+ " deleted: #{rc.message}."
1095
+ end
1096
+
1097
+ return -1, error
1098
+ end
1099
+
1100
+ return 0, one_host.id
1101
+ end
1102
+
1103
+ ############################################################################
1104
+ # Generate an OpenNebula monitor string for this host. Reference:
1105
+ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/Reference
1106
+ # Guide/vim.ComputeResource.Summary.html
1107
+ # - effectiveCpu: Effective CPU resources (in MHz) available to run
1108
+ # VMs. This is the aggregated from all running hosts excluding hosts in
1109
+ # maintenance mode or unresponsive are not counted.
1110
+ # - effectiveMemory: Effective memory resources (in MB) available to run
1111
+ # VMs. Equivalente to effectiveCpu.
1112
+ # - numCpuCores: Number of physical CPU cores.
1113
+ # - numEffectiveHosts: Total number of effective hosts.
1114
+ # - numHosts:Total number of hosts.
1115
+ # - totalCpu: Aggregated CPU resources of all hosts, in MHz.
1116
+ # - totalMemory: Aggregated memory resources of all hosts, in bytes.
1117
+ ############################################################################
1118
+ def monitor_cluster
1119
+ #Load the host systems
1120
+ summary = @cluster.summary
1121
+
1122
+ mhz_core = summary.totalCpu.to_f / summary.numCpuCores.to_f
1123
+ eff_core = summary.effectiveCpu.to_f / mhz_core
1124
+
1125
+ free_cpu = sprintf('%.2f', eff_core * 100).to_f
1126
+ total_cpu = summary.numCpuCores.to_f * 100
1127
+ used_cpu = sprintf('%.2f', total_cpu - free_cpu).to_f
1128
+
1129
+ total_mem = summary.totalMemory.to_i / 1024
1130
+ free_mem = summary.effectiveMemory.to_i * 1024
1131
+
1132
+ str_info = ""
1133
+
1134
+ # System
1135
+ str_info << "HYPERVISOR=vcenter\n"
1136
+ str_info << "PUBLIC_CLOUD=YES\n"
1137
+ str_info << "TOTALHOST=" << summary.numHosts.to_s << "\n"
1138
+ str_info << "AVAILHOST=" << summary.numEffectiveHosts.to_s << "\n"
1139
+
1140
+ # CPU
1141
+ str_info << "CPUSPEED=" << mhz_core.to_s << "\n"
1142
+ str_info << "TOTALCPU=" << total_cpu.to_s << "\n"
1143
+ str_info << "USEDCPU=" << used_cpu.to_s << "\n"
1144
+ str_info << "FREECPU=" << free_cpu.to_s << "\n"
1145
+
1146
+ # Memory
1147
+ str_info << "TOTALMEMORY=" << total_mem.to_s << "\n"
1148
+ str_info << "FREEMEMORY=" << free_mem.to_s << "\n"
1149
+ str_info << "USEDMEMORY=" << (total_mem - free_mem).to_s
1150
+
1151
+ str_info << monitor_resource_pools(@cluster.resourcePool, "", mhz_core)
1152
+ end
1153
+
1154
+ ############################################################################
1155
+ # Generate an OpenNebula monitor string for all resource pools of a cluster
1156
+ # Reference:
1157
+ # http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.wssdk.apiref.doc
1158
+ # /vim.ResourcePool.html
1159
+ ############################################################################
1160
+ def monitor_resource_pools(parent_rp, parent_prefix, mhz_core)
1161
+ return "" if parent_rp.resourcePool.size == 0
1162
+
1163
+ rp_info = ""
1164
+
1165
+ parent_rp.resourcePool.each{|rp|
1166
+ rpcpu = rp.config.cpuAllocation
1167
+ rpmem = rp.config.memoryAllocation
1168
+ # CPU
1169
+ cpu_expandable = rpcpu.expandableReservation ? "YES" : "NO"
1170
+ cpu_limit = rpcpu.limit == "-1" ? "UNLIMITED" : rpcpu.limit
1171
+ cpu_reservation = rpcpu.reservation
1172
+ cpu_num = rpcpu.reservation.to_f / mhz_core
1173
+ cpu_shares_level = rpcpu.shares.level
1174
+ cpu_shares = rpcpu.shares.shares
1175
+
1176
+ # MEMORY
1177
+ mem_expandable = rpmem.expandableReservation ? "YES" : "NO"
1178
+ mem_limit = rpmem.limit == "-1" ? "UNLIMITED" : rpmem.limit
1179
+ mem_reservation = rpmem.reservation.to_f
1180
+ mem_shares_level = rpmem.shares.level
1181
+ mem_shares = rpmem.shares.shares
1182
+
1183
+ rp_name = (parent_prefix.empty? ? "" : parent_prefix + "/")
1184
+ rp_name += rp.name
1185
+
1186
+ rp_info << "\nRESOURCE_POOL = ["
1187
+ rp_info << "NAME=\"#{rp_name}\","
1188
+ rp_info << "CPU_EXPANDABLE=#{cpu_expandable},"
1189
+ rp_info << "CPU_LIMIT=#{cpu_limit},"
1190
+ rp_info << "CPU_RESERVATION=#{cpu_reservation},"
1191
+ rp_info << "CPU_RESERVATION_NUM_CORES=#{cpu_num},"
1192
+ rp_info << "CPU_SHARES=#{cpu_shares},"
1193
+ rp_info << "CPU_SHARES_LEVEL=#{cpu_shares_level},"
1194
+ rp_info << "MEM_EXPANDABLE=#{mem_expandable},"
1195
+ rp_info << "MEM_LIMIT=#{mem_limit},"
1196
+ rp_info << "MEM_RESERVATION=#{mem_reservation},"
1197
+ rp_info << "MEM_SHARES=#{mem_shares},"
1198
+ rp_info << "MEM_SHARES_LEVEL=#{mem_shares_level}"
1199
+ rp_info << "]"
1200
+
1201
+ if rp.resourcePool.size != 0
1202
+ rp_info << monitor_resource_pools(rp, rp_name, mhz_core)
1203
+ end
1204
+ }
1205
+
1206
+ return rp_info
1207
+ end
1208
+
1209
+ ############################################################################
1210
+ # Generate a template with information for each ESX Host. Reference:
1211
+ # http://pubs.vmware.com/vi-sdk/visdk250/ReferenceGuide/vim.HostSystem.html
1212
+ # - Summary: Basic information about the host, including connection state
1213
+ # - hardware: Hardware configuration of the host. This might not be
1214
+ # available for a disconnected host.
1215
+ # - quickStats: Basic host statistics.
1216
+ ############################################################################
1217
+ def monitor_host_systems
1218
+ host_info = ""
1219
+
1220
+ @cluster.host.each{|h|
1221
+ next if h.runtime.connectionState != "connected"
1222
+
1223
+ summary = h.summary
1224
+ hw = summary.hardware
1225
+ stats = summary.quickStats
1226
+
1227
+ total_cpu = hw.numCpuCores * 100
1228
+ used_cpu = (stats.overallCpuUsage.to_f / hw.cpuMhz.to_f) * 100
1229
+ used_cpu = sprintf('%.2f', used_cpu).to_f # Trim precission
1230
+ free_cpu = total_cpu - used_cpu
1231
+
1232
+ total_memory = hw.memorySize/1024
1233
+ used_memory = stats.overallMemoryUsage*1024
1234
+ free_memory = total_memory - used_memory
1235
+
1236
+ host_info << "\nHOST=["
1237
+ host_info << "STATE=on,"
1238
+ host_info << "HOSTNAME=\"" << h.name.to_s << "\","
1239
+ host_info << "MODELNAME=\"" << hw.cpuModel.to_s << "\","
1240
+ host_info << "CPUSPEED=" << hw.cpuMhz.to_s << ","
1241
+ host_info << "MAX_CPU=" << total_cpu.to_s << ","
1242
+ host_info << "USED_CPU=" << used_cpu.to_s << ","
1243
+ host_info << "FREE_CPU=" << free_cpu.to_s << ","
1244
+ host_info << "MAX_MEM=" << total_memory.to_s << ","
1245
+ host_info << "USED_MEM=" << used_memory.to_s << ","
1246
+ host_info << "FREE_MEM=" << free_memory.to_s
1247
+ host_info << "]"
1248
+ }
1249
+
1250
+ return host_info
1251
+ end
1252
+
1253
+ def monitor_vms
1254
+ # Only monitor from top level (Resource) Resource Pool
1255
+ monitor_vms_in_rp(@resource_pools[-1])
1256
+ end
1257
+
1258
+
1259
+ def monitor_vms_in_rp(rp)
1260
+ str_info = ""
1261
+
1262
+ if rp.resourcePool.size != 0
1263
+ rp.resourcePool.each{|child_rp|
1264
+ str_info += monitor_vms_in_rp(child_rp)
1265
+ }
1266
+ end
1267
+
1268
+ host_cache = {}
1269
+
1270
+ rp.vm.each { |v|
1271
+ begin
1272
+ # Check cached objects
1273
+ if !host_cache[v.runtime.host.to_s]
1274
+ host_cache[v.runtime.host.to_s] =
1275
+ VCenterCachedHost.new v.runtime.host
1276
+ end
1277
+
1278
+ host = host_cache[v.runtime.host.to_s]
1279
+
1280
+ name = v.name
1281
+ number = -1
1282
+ vm_extra_config = v.config.extraConfig
1283
+
1284
+ # Check the running flag
1285
+ running_flag = v.config.extraConfig.select{|val|
1286
+ val[:key]=="opennebula.vm.running"}
1287
+ if running_flag.size > 0 and running_flag[0]
1288
+ running_flag = running_flag[0][:value]
1289
+ end
1290
+
1291
+ next if running_flag == "no"
1292
+
1293
+ # Extract vmid if possible
1294
+ matches = name.match(/^one-(\d*)(-(.*))?$/)
1295
+ number = matches[1] if matches
1296
+ extraconfig_vmid = v.config.extraConfig.select{|val|
1297
+ val[:key]=="opennebula.vm.id"}
1298
+ if extraconfig_vmid.size > 0 and extraconfig_vmid[0]
1299
+ number = extraconfig_vmid[0][:value]
1300
+ end
1301
+ vm = VCenterVm.new(@client, v)
1302
+ vm.monitor(host)
1303
+ next if !vm.vm.config
1304
+ str_info << "\nVM = ["
1305
+ str_info << "ID=#{number},"
1306
+ str_info << "DEPLOY_ID=\"#{vm.vm.config.uuid}\","
1307
+ str_info << "VM_NAME=\"#{name} - "\
1308
+ "#{host.cluster_name}\","
1309
+ if number == -1
1310
+ vm_template_to_one =
1311
+ Base64.encode64(vm.vm_to_one(host)).gsub("\n","")
1312
+ str_info << "IMPORT_TEMPLATE=\"#{vm_template_to_one}\","
1313
+ end
1314
+ str_info << "POLL=\"#{vm.info}\"]"
1315
+ rescue Exception => e
1316
+ STDERR.puts e.inspect
1317
+ STDERR.puts e.backtrace
1318
+ end
1319
+ }
1320
+ return str_info
1321
+ end
1322
+
1323
+ def monitor_customizations
1324
+ customizations = client.vim.serviceContent.customizationSpecManager.info
1325
+
1326
+ text = ''
1327
+
1328
+ customizations.each do |c|
1329
+ t = "CUSTOMIZATION = [ "
1330
+ t << %Q<NAME = "#{c.name}", >
1331
+ t << %Q<TYPE = "#{c.type}" ]\n>
1332
+
1333
+ text << t
1334
+ end
1335
+
1336
+ text
1337
+ end
1338
+
1339
+ def get_available_ds
1340
+ str_info = ""
1341
+
1342
+ datastores = VIClient.get_entities(client.dc.datastoreFolder,
1343
+ 'Datastore')
1344
+ datastores.each { |ds|
1345
+ str_info += "VCENTER_DATASTORE=\"#{ds.name}\"\n"
1346
+ }
1347
+ str_info.chomp
1348
+ end
1349
+ end
1350
+
1351
+ ################################################################################
1352
+ # This class is a high level abstraction of a VI VirtualMachine class with
1353
+ # OpenNebula semantics.
1354
+ ################################################################################
1355
+
1356
+ class VCenterVm
1357
+ attr_reader :vm
1358
+
1359
+ POLL_ATTRIBUTE = VirtualMachineDriver::POLL_ATTRIBUTE
1360
+ VM_STATE = VirtualMachineDriver::VM_STATE
1361
+
1362
+ ############################################################################
1363
+ # Creates a new VIVm using a RbVmomi::VirtualMachine object
1364
+ # @param client [VCenterClient] client to connect to vCenter
1365
+ # @param vm_vi [RbVmomi::VirtualMachine] it will be used if not nil
1366
+ ########################################################################
1367
+ def initialize(client, vm_vi )
1368
+ @vm = vm_vi
1369
+ @client = client
1370
+
1371
+ @used_cpu = 0
1372
+ @used_memory = 0
1373
+
1374
+ @netrx = 0
1375
+ @nettx = 0
1376
+ end
1377
+
1378
+ ############################################################################
1379
+ # Deploys a VM
1380
+ # @xml_text XML representation of the VM
1381
+ ############################################################################
1382
+ def self.deploy(xml_text, lcm_state, deploy_id, hostname, datastore = nil)
1383
+ if lcm_state == "BOOT" || lcm_state == "BOOT_FAILURE"
1384
+ return clone_vm(xml_text, hostname, datastore)
1385
+ else
1386
+ hid = VIClient::translate_hostname(hostname)
1387
+ connection = VIClient.new(hid)
1388
+ vm = connection.find_vm_template(deploy_id)
1389
+ xml = REXML::Document.new xml_text
1390
+
1391
+ reconfigure_vm(vm, xml, false, hostname)
1392
+
1393
+ vm.PowerOnVM_Task.wait_for_completion
1394
+ return vm.config.uuid
1395
+ end
1396
+ end
1397
+
1398
+ ############################################################################
1399
+ # Cancels a VM
1400
+ # @param deploy_id vcenter identifier of the VM
1401
+ # @param hostname name of the host (equals the vCenter cluster)
1402
+ # @param lcm_state state of the VM
1403
+ # @param keep_disks keep or not VM disks in datastore
1404
+ # @param disks VM attached disks
1405
+ ############################################################################
1406
+ def self.cancel(deploy_id, hostname, lcm_state, keep_disks, disks, to_template)
1407
+ case lcm_state
1408
+ when "SHUTDOWN_POWEROFF", "SHUTDOWN_UNDEPLOY"
1409
+ shutdown(deploy_id, hostname, lcm_state, keep_disks, disks, to_template)
1410
+ when "CANCEL", "LCM_INIT", "CLEANUP_RESUBMIT", "SHUTDOWN", "CLEANUP_DELETE"
1411
+ hid = VIClient::translate_hostname(hostname)
1412
+ connection = VIClient.new(hid)
1413
+ vm = connection.find_vm_template(deploy_id)
1414
+
1415
+ begin
1416
+ if vm.summary.runtime.powerState == "poweredOn"
1417
+ vm.PowerOffVM_Task.wait_for_completion
1418
+ end
1419
+ rescue
1420
+ end
1421
+ if keep_disks
1422
+ detach_all_disks(vm)
1423
+ else
1424
+ detach_attached_disks(vm, disks, hostname) if disks
1425
+ end
1426
+
1427
+ # If the VM was instantiated to persistent, convert the VM to
1428
+ # vCenter VM Template and update the OpenNebula new
1429
+ # VM Template to point to the new vCenter VM Template
1430
+ if !to_template.nil?
1431
+ vm.MarkAsTemplate
1432
+
1433
+ new_template = OpenNebula::Template.new_with_id(to_template,
1434
+ OpenNebula::Client.new)
1435
+ new_template.info
1436
+
1437
+ public_cloud_str = "PUBLIC_CLOUD=["
1438
+
1439
+ new_template.to_hash["VMTEMPLATE"]["TEMPLATE"]["PUBLIC_CLOUD"].each{|k,v|
1440
+ if k == "VM_TEMPLATE"
1441
+ public_cloud_str += "VM_TEMPLATE=\"#{deploy_id}\",\n"
1442
+ else
1443
+ public_cloud_str += "#{k}=\"#{v}\",\n"
1444
+ end
1445
+ }
1446
+
1447
+ public_cloud_str = public_cloud_str + "]"
1448
+
1449
+ new_template.update(public_cloud_str, true)
1450
+ else
1451
+ vm.Destroy_Task.wait_for_completion
1452
+ end
1453
+ else
1454
+ raise "LCM_STATE #{lcm_state} not supported for cancel"
1455
+ end
1456
+ end
1457
+
1458
+
1459
+ ############################################################################
1460
+ # Saves a VM
1461
+ # @param deploy_id vcenter identifier of the VM
1462
+ # @param hostname name of the host (equals the vCenter cluster)
1463
+ ############################################################################
1464
+ def self.save(deploy_id, hostname, lcm_state)
1465
+ case lcm_state
1466
+ when "SAVE_MIGRATE"
1467
+ raise "Migration between vCenters cluster not supported"
1468
+ when "SAVE_SUSPEND", "SAVE_STOP"
1469
+ hid = VIClient::translate_hostname(hostname)
1470
+ connection = VIClient.new(hid)
1471
+ vm = connection.find_vm_template(deploy_id)
1472
+
1473
+ vm.SuspendVM_Task.wait_for_completion
1474
+ end
1475
+ end
1476
+
1477
+ ############################################################################
1478
+ # Resumes a VM
1479
+ # @param deploy_id vcenter identifier of the VM
1480
+ # @param hostname name of the host (equals the vCenter cluster)
1481
+ ############################################################################
1482
+ def self.resume(deploy_id, hostname)
1483
+ hid = VIClient::translate_hostname(hostname)
1484
+ connection = VIClient.new(hid)
1485
+ vm = connection.find_vm_template(deploy_id)
1486
+
1487
+ vm.PowerOnVM_Task.wait_for_completion
1488
+ end
1489
+
1490
+ ############################################################################
1491
+ # Reboots a VM
1492
+ # @param deploy_id vcenter identifier of the VM
1493
+ # @param hostname name of the host (equals the vCenter cluster)
1494
+ ############################################################################
1495
+ def self.reboot(deploy_id, hostname)
1496
+ hid = VIClient::translate_hostname(hostname)
1497
+ connection = VIClient.new(hid)
1498
+
1499
+ vm = connection.find_vm_template(deploy_id)
1500
+
1501
+ vm.RebootGuest.wait_for_completion
1502
+ end
1503
+
1504
+ ############################################################################
1505
+ # Resets a VM
1506
+ # @param deploy_id vcetranslate_hostnamnter identifier of the VM
1507
+ # @param hostname name of the host (equals the vCenter cluster)
1508
+ ############################################################################
1509
+ def self.reset(deploy_id, hostname)
1510
+ hid = VIClient::translate_hostname(hostname)
1511
+ connection = VIClient.new(hid)
1512
+
1513
+ vm = connection.find_vm_template(deploy_id)
1514
+
1515
+ vm.ResetVM_Task.wait_for_completion
1516
+ end
1517
+
1518
+ ############################################################################
1519
+ # Shutdown a VM
1520
+ # @param deploy_id vcenter identifier of the VM
1521
+ # @param hostname name of the host (equals the vCenter cluster)
1522
+ # @param lcm_state state of the VM
1523
+ # @param keep_disks keep or not VM disks in datastore
1524
+ # @param disks VM attached disks
1525
+ # @param to_template whether this VM has been instantiated as persistent
1526
+ ############################################################################
1527
+ def self.shutdown(deploy_id, hostname, lcm_state, keep_disks, disks, to_template)
1528
+ hid = VIClient::translate_hostname(hostname)
1529
+ connection = VIClient.new(hid)
1530
+
1531
+ vm = connection.find_vm_template(deploy_id)
1532
+
1533
+ case lcm_state
1534
+ when "SHUTDOWN"
1535
+ begin
1536
+ vm.ShutdownGuest.wait_for_completion
1537
+ rescue
1538
+ end
1539
+ vm.PowerOffVM_Task.wait_for_completion
1540
+ if keep_disks
1541
+ detach_all_disks(vm)
1542
+ else
1543
+ detach_attached_disks(vm, disks, hostname) if disks
1544
+ end
1545
+
1546
+ # If the VM was instantiated to persistent, convert the VM to
1547
+ # vCenter VM Template and update the OpenNebula new
1548
+ # VM Template to point to the new vCenter VM Template
1549
+ if !to_template.nil?
1550
+ vm.MarkAsTemplate
1551
+
1552
+ new_template = OpenNebula::Template.new_with_id(to_template,
1553
+ OpenNebula::Client.new)
1554
+ new_template.info
1555
+
1556
+ public_cloud_str = "PUBLIC_CLOUD=["
1557
+
1558
+ new_template.to_hash["VMTEMPLATE"]["TEMPLATE"]["PUBLIC_CLOUD"].each{|k,v|
1559
+ if k == "VM_TEMPLATE"
1560
+ public_cloud_str += "VM_TEMPLATE=\"#{deploy_id}\"\n"
1561
+ else
1562
+ public_cloud_str += "#{k}=\"#{v}\",\n"
1563
+ end
1564
+ }
1565
+
1566
+ public_cloud_str = public_cloud_str + "]"
1567
+
1568
+ new_template.update(public_cloud_str, true)
1569
+ else
1570
+ vm.Destroy_Task.wait_for_completion
1571
+ end
1572
+
1573
+ when "SHUTDOWN_POWEROFF", "SHUTDOWN_UNDEPLOY"
1574
+ begin
1575
+ vm.ShutdownGuest.wait_for_completion
1576
+ rescue
1577
+ end
1578
+ vm.PowerOffVM_Task.wait_for_completion
1579
+ end
1580
+ end
1581
+
1582
+ ############################################################################
1583
+ # Create VM snapshot
1584
+ # @param deploy_id vcenter identifier of the VM
1585
+ # @param hostname name of the host (equals the vCenter cluster)
1586
+ # @param snaphot_name name of the snapshot
1587
+ ############################################################################
1588
+ def self.create_snapshot(deploy_id, hostname, snapshot_name)
1589
+ hid = VIClient::translate_hostname(hostname)
1590
+ connection = VIClient.new(hid)
1591
+
1592
+ snapshot_hash = {
1593
+ :name => snapshot_name,
1594
+ :description => "OpenNebula Snapshot of VM #{deploy_id}",
1595
+ :memory => true,
1596
+ :quiesce => true
1597
+ }
1598
+
1599
+ vm = connection.find_vm_template(deploy_id)
1600
+
1601
+ vm.CreateSnapshot_Task(snapshot_hash).wait_for_completion
1602
+
1603
+ return snapshot_name
1604
+ end
1605
+
1606
+ ############################################################################
1607
+ # Find VM snapshot
1608
+ # @param list root list of VM snapshots
1609
+ # @param snaphot_name name of the snapshot
1610
+ ############################################################################
1611
+ def self.find_snapshot(list, snapshot_name)
1612
+ list.each do |i|
1613
+ if i.name == snapshot_name
1614
+ return i.snapshot
1615
+ elsif !i.childSnapshotList.empty?
1616
+ snap = find_snapshot(i.childSnapshotList, snapshot_name)
1617
+ return snap if snap
1618
+ end
1619
+ end
1620
+
1621
+ nil
1622
+ end
1623
+
1624
+ ############################################################################
1625
+ # Delete VM snapshot
1626
+ # @param deploy_id vcenter identifier of the VM
1627
+ # @param hostname name of the host (equals the vCenter cluster)
1628
+ # @param snaphot_name name of the snapshot
1629
+ ############################################################################
1630
+ def self.delete_snapshot(deploy_id, hostname, snapshot_name)
1631
+ hid = VIClient::translate_hostname(hostname)
1632
+ connection = VIClient.new(hid)
1633
+
1634
+ vm = connection.find_vm_template(deploy_id)
1635
+
1636
+ list = vm.snapshot.rootSnapshotList
1637
+
1638
+ snapshot = find_snapshot(list, snapshot_name)
1639
+ return nil if !snapshot
1640
+
1641
+ delete_snapshot_hash = {
1642
+ :_this => snapshot,
1643
+ :removeChildren => false
1644
+ }
1645
+
1646
+ snapshot.RemoveSnapshot_Task(delete_snapshot_hash).wait_for_completion
1647
+ end
1648
+
1649
+ ############################################################################
1650
+ # Revert VM snapshot
1651
+ # @param deploy_id vcenter identifier of the VM
1652
+ # @param hostname name of the host (equals the vCenter cluster)
1653
+ # @param snaphot_name name of the snapshot
1654
+ ############################################################################
1655
+ def self.revert_snapshot(deploy_id, hostname, snapshot_name)
1656
+ hid = VIClient::translate_hostname(hostname)
1657
+ connection = VIClient.new(hid)
1658
+
1659
+ vm = connection.find_vm_template(deploy_id)
1660
+
1661
+ list = vm.snapshot.rootSnapshotList
1662
+
1663
+ snapshot = find_snapshot(list, snapshot_name)
1664
+ return nil if !snapshot
1665
+
1666
+ revert_snapshot_hash = {
1667
+ :_this => snapshot
1668
+ }
1669
+
1670
+ snapshot.RevertToSnapshot_Task(revert_snapshot_hash).wait_for_completion
1671
+ end
1672
+
1673
+ ############################################################################
1674
+ # Attach NIC to a VM
1675
+ # @param deploy_id vcenter identifier of the VM
1676
+ # @param mac MAC address of the NIC to be attached
1677
+ # @param bridge name of the Network in vCenter
1678
+ # @param model model of the NIC to be attached
1679
+ # @param host hostname of the ESX where the VM is running
1680
+ ############################################################################
1681
+ def self.attach_nic(deploy_id, mac, bridge, model, host)
1682
+ hid = VIClient::translate_hostname(host)
1683
+ connection = VIClient.new(hid)
1684
+
1685
+ vm = connection.find_vm_template(deploy_id)
1686
+
1687
+ spec_hash = calculate_addnic_spec(vm, mac, bridge, model)
1688
+
1689
+ spec = RbVmomi::VIM.VirtualMachineConfigSpec({:deviceChange =>
1690
+ [spec_hash]})
1691
+
1692
+ vm.ReconfigVM_Task(:spec => spec).wait_for_completion
1693
+ end
1694
+
1695
+ ############################################################################
1696
+ # Detach NIC from a VM
1697
+ ############################################################################
1698
+ def self.detach_nic(deploy_id, mac, host)
1699
+ hid = VIClient::translate_hostname(host)
1700
+ connection = VIClient.new(hid)
1701
+
1702
+ vm = connection.find_vm_template(deploy_id)
1703
+
1704
+ nic = vm.config.hardware.device.find { |d|
1705
+ is_nic?(d) && (d.macAddress == mac)
1706
+ }
1707
+
1708
+ raise "Could not find NIC with mac address #{mac}" if nic.nil?
1709
+
1710
+ spec = {
1711
+ :deviceChange => [
1712
+ :operation => :remove,
1713
+ :device => nic
1714
+ ]
1715
+ }
1716
+
1717
+ vm.ReconfigVM_Task(:spec => spec).wait_for_completion
1718
+ end
1719
+
1720
+ ############################################################################
1721
+ # Reconfigures a VM (context data)
1722
+ # @param deploy_id vcenter identifier of the VM
1723
+ # @param hostname name of the host (equals the vCenter cluster)
1724
+ # @param xml_text XML repsentation of the VM
1725
+ ############################################################################
1726
+ def self.reconfigure(deploy_id, hostname, xml_text)
1727
+ hid = VIClient::translate_hostname(hostname)
1728
+ connection = VIClient.new(hid)
1729
+ vm = connection.find_vm_template(deploy_id)
1730
+
1731
+ xml = REXML::Document.new xml_text
1732
+ context = xml.root.elements["//TEMPLATE/CONTEXT"]
1733
+
1734
+ if context
1735
+ context_text = create_context(context)
1736
+ context_spec = {
1737
+ :extraConfig => [
1738
+ { :key=>"guestinfo.opennebula.context",
1739
+ :value=> Base64.encode64(context_text) }
1740
+ ]
1741
+ }
1742
+
1743
+ spec = RbVmomi::VIM.VirtualMachineConfigSpec(context_spec)
1744
+ vm.ReconfigVM_Task(:spec => spec).wait_for_completion
1745
+ end
1746
+ end
1747
+
1748
+ ########################################################################
1749
+ # Initialize the vm monitor information
1750
+ ########################################################################
1751
+ def monitor(host)
1752
+ @summary = @vm.summary
1753
+ @state = state_to_c(@summary.runtime.powerState)
1754
+
1755
+ if @state != VM_STATE[:active]
1756
+ @used_cpu = 0
1757
+ @used_memory = 0
1758
+
1759
+ @netrx = 0
1760
+ @nettx = 0
1761
+
1762
+ return
1763
+ end
1764
+
1765
+ @used_memory = @summary.quickStats.hostMemoryUsage * 1024
1766
+ cpuMhz = @vm.runtime.host.summary.hardware.cpuMhz.to_f
1767
+
1768
+ @used_cpu =
1769
+ ((@summary.quickStats.overallCpuUsage.to_f / cpuMhz) * 100).to_s
1770
+ @used_cpu = sprintf('%.2f',@used_cpu).to_s
1771
+
1772
+ # Check for negative values
1773
+ @used_memory = 0 if @used_memory.to_i < 0
1774
+ @used_cpu = 0 if @used_cpu.to_i < 0
1775
+
1776
+ @esx_host = @vm.summary.runtime.host.name
1777
+ @guest_ip = @vm.guest.ipAddress
1778
+ @guest_state = @vm.guest.guestState
1779
+ @vmware_tools = @vm.guest.toolsRunningStatus
1780
+ @vmtools_ver = @vm.guest.toolsVersion
1781
+ @vmtools_verst = @vm.guest.toolsVersionStatus
1782
+
1783
+ guest_ip_addresses = []
1784
+
1785
+ @vm.guest.net.each do |net|
1786
+ net.ipConfig.ipAddress.each do |ip|
1787
+ guest_ip_addresses << ip.ipAddress
1788
+ end if net.ipConfig && net.ipConfig.ipAddress
1789
+ end if @vm.guest.net
1790
+
1791
+ @guest_ip_addresses = guest_ip_addresses.join(',')
1792
+ end
1793
+
1794
    ########################################################################
    # Generates a OpenNebula IM Driver valid string with the monitor info
    # gathered by the monitor step: state, CPU, memory, network counters
    # and the vCenter guest attributes (IPs, VMware Tools info, ESX host,
    # resource pool).
    # @return [String] space-separated KEY=VALUE attributes; only the
    #         state is reported when the VM is powered off ('d')
    ########################################################################
    def info
        # Powered-off VMs report only their state
        return 'STATE=d' if @state == 'd'

        str_info = ""

        str_info << "GUEST_IP=" << @guest_ip.to_s << " " if @guest_ip
        # Quote the comma-separated list so it survives driver parsing
        if @guest_ip_addresses && !@guest_ip_addresses.empty?
            str_info << "GUEST_IP_ADDRESSES=\\\"" <<
                @guest_ip_addresses.to_s << "\\\" "
        end
        str_info << "#{POLL_ATTRIBUTE[:state]}=" << @state << " "
        str_info << "#{POLL_ATTRIBUTE[:cpu]}=" << @used_cpu.to_s << " "
        str_info << "#{POLL_ATTRIBUTE[:memory]}=" << @used_memory.to_s << " "
        str_info << "#{POLL_ATTRIBUTE[:netrx]}=" << @netrx.to_s << " "
        str_info << "#{POLL_ATTRIBUTE[:nettx]}=" << @nettx.to_s << " "
        str_info << "ESX_HOST=\\\"" << @esx_host.to_s << "\\\" "
        str_info << "GUEST_STATE=" << @guest_state.to_s << " "
        str_info << "VMWARETOOLS_RUNNING_STATUS=" << @vmware_tools.to_s << " "
        str_info << "VMWARETOOLS_VERSION=" << @vmtools_ver.to_s << " "
        str_info << "VMWARETOOLS_VERSION_STATUS=" << @vmtools_verst.to_s << " "
        str_info << "RESOURCE_POOL=\\\"" << @vm.resourcePool.name << "\\\" "
    end
1819
+
1820
    ########################################################################
    # Generates an OpenNebula Template for this VCenterVm, importing a
    # vCenter VM Template as an OpenNebula VM Template (CPU/MEMORY taken
    # from the vCenter hardware section, plus PUBLIC_CLOUD, GRAPHICS,
    # scheduling requirement pinning the cluster, and a basic CONTEXT).
    # @param host object exposing #cluster_name (the OpenNebula host)
    # @return [String] OpenNebula template definition
    ########################################################################
    def to_one(host)
        cluster_name = host.cluster_name

        str = "NAME = \"#{@vm.name} - #{cluster_name}\"\n"\
              "CPU = \"#{@vm.config.hardware.numCPU}\"\n"\
              "vCPU = \"#{@vm.config.hardware.numCPU}\"\n"\
              "MEMORY = \"#{@vm.config.hardware.memoryMB}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "PUBLIC_CLOUD = [\n"\
              " TYPE =\"vcenter\",\n"\
              " VM_TEMPLATE =\"#{@vm.config.uuid}\",\n"\
              " HOST =\"#{cluster_name}\"\n"\
              "]\n"\
              "GRAPHICS = [\n"\
              " TYPE =\"vnc\",\n"\
              " LISTEN =\"0.0.0.0\"\n"\
              "]\n"\
              "SCHED_REQUIREMENTS=\"NAME=\\\"#{cluster_name}\\\"\"\n"\
              "CONTEXT = ["\
              " NETWORK = \"YES\","\
              " SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\" ]"

        # Use the vCenter annotation as description when present,
        # escaping backslashes and double quotes for the template syntax
        if @vm.config.annotation.nil? || @vm.config.annotation.empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula"\
                " from Cluster #{@vm.runtime.host.parent.name}\"\n"
        else
            notes = @vm.config.annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        # Pick a Sunstone logo based on the guest OS reported by vCenter.
        # Order matters: "Windows XP" must be matched before "Windows" and
        # specific distros before the generic "Linux" fallback.
        case @vm.guest.guestFullName
            when /CentOS/i
                str << "LOGO=images/logos/centos.png"
            when /Debian/i
                str << "LOGO=images/logos/debian.png"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png"
            when /Linux/i
                str << "LOGO=images/logos/linux.png"
        end

        return str
    end
1871
+
1872
+ ########################################################################
1873
+ # Generates a Datastore user input
1874
+ ########################################################################
1875
+ def to_one_ds(host, default_ds)
1876
+ # Datastores User Input
1877
+ str = ""
1878
+
1879
+ if host.ds_list != ""
1880
+ str = "M|list|Which datastore you want this VM to run on?|"\
1881
+ << "#{host.ds_list}|#{default_ds}"
1882
+ end
1883
+
1884
+ return str
1885
+ end
1886
+
1887
+ ########################################################################
1888
+ # Generates a Resource Pool user input
1889
+ ########################################################################
1890
+ def to_one_rp(host)
1891
+ # Resource Pool User Input
1892
+ str = ""
1893
+
1894
+ if host.rp_list != ""
1895
+ str = "M|list|Which resource pool you want this VM to run"\
1896
+ " in?|#{host.rp_list}|#{host.rp_list.split(",")[0]}"
1897
+ end
1898
+
1899
+ return str
1900
+ end
1901
+
1902
    ########################################################################
    # Generates an OpenNebula VirtualMachine for this VCenterVm, used when
    # importing a running/stopped vCenter VM as a wild VM. Besides the
    # template attributes it records IMPORT_VM_ID / IMPORT_STATE and, if
    # the VM has VNC extraConfig entries, a GRAPHICS section.
    # @param host object exposing #cluster_name (the OpenNebula host)
    # @return [String] OpenNebula VM template definition
    ########################################################################
    def vm_to_one(host)
        cluster_name = host.cluster_name

        # Map the vSphere power state to the import state label
        state = case state_to_c(@summary.runtime.powerState)
                    when 'a'
                        "RUNNING"
                    when 'd'
                        "POWEROFF"
                end

        str = "NAME = \"#{@vm.name} - #{cluster_name}\"\n"\
              "CPU = \"#{@vm.config.hardware.numCPU}\"\n"\
              "vCPU = \"#{@vm.config.hardware.numCPU}\"\n"\
              "MEMORY = \"#{@vm.config.hardware.memoryMB}\"\n"\
              "HYPERVISOR = \"vcenter\"\n"\
              "PUBLIC_CLOUD = [\n"\
              " TYPE =\"vcenter\",\n"\
              " VM_TEMPLATE =\"#{@vm.config.uuid}\",\n"\
              " HOST =\"#{cluster_name}\"\n"\
              "]\n"\
              "IMPORT_VM_ID = \"#{@vm.config.uuid}\"\n"\
              "IMPORT_STATE = \"#{state}\"\n"\
              "SCHED_REQUIREMENTS=\"NAME=\\\"#{cluster_name}\\\"\"\n"

        # VNC port/keymap live in the VM's extraConfig key/value list
        vp = @vm.config.extraConfig.select{|v|
                 v[:key].downcase=="remotedisplay.vnc.port"}
        keymap = @vm.config.extraConfig.select{|v|
                     v[:key].downcase=="remotedisplay.vnc.keymap"}

        if vp.size > 0
            str << "GRAPHICS = [\n"\
                   " TYPE =\"vnc\",\n"\
                   " LISTEN =\"0.0.0.0\",\n"\
                   " PORT =\"#{vp[0][:value]}\"\n"
            str << " ,KEYMAP =\"#{keymap[0][:value]}\"\n" if keymap[0]
            str << "]\n"
        end

        # Use the vCenter annotation as description when present,
        # escaping backslashes and double quotes for the template syntax
        if @vm.config.annotation.nil? || @vm.config.annotation.empty?
            str << "DESCRIPTION = \"vCenter Virtual Machine imported by"\
                " OpenNebula from Cluster #{cluster_name}\"\n"
        else
            notes = @vm.config.annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
            str << "DESCRIPTION = \"#{notes}\"\n"
        end

        # Pick a Sunstone logo based on the guest OS; specific matches
        # must precede the generic "Windows"/"Linux" fallbacks
        case @vm.guest.guestFullName
            when /CentOS/i
                str << "LOGO=images/logos/centos.png"
            when /Debian/i
                str << "LOGO=images/logos/debian.png"
            when /Red Hat/i
                str << "LOGO=images/logos/redhat.png"
            when /Ubuntu/i
                str << "LOGO=images/logos/ubuntu.png"
            when /Windows XP/i
                str << "LOGO=images/logos/windowsxp.png"
            when /Windows/i
                str << "LOGO=images/logos/windows8.png"
            when /Linux/i
                str << "LOGO=images/logos/linux.png"
        end

        return str
    end
1972
+
1973
+ private
1974
+
1975
+ ########################################################################
1976
+ # Converts the VI string state to OpenNebula state convention
1977
+ # Guest states are:
1978
+ # - poweredOff The virtual machine is currently powered off.
1979
+ # - poweredOn The virtual machine is currently powered on.
1980
+ # - suspended The virtual machine is currently suspended.
1981
+ ########################################################################
1982
+ def state_to_c(state)
1983
+ case state
1984
+ when 'poweredOn'
1985
+ VM_STATE[:active]
1986
+ when 'suspended'
1987
+ VM_STATE[:paused]
1988
+ when 'poweredOff'
1989
+ VM_STATE[:deleted]
1990
+ else
1991
+ VM_STATE[:unknown]
1992
+ end
1993
+ end
1994
+
1995
+ ########################################################################
1996
+ # Checks if a RbVmomi::VIM::VirtualDevice is a network interface
1997
+ ########################################################################
1998
+ def self.is_nic?(device)
1999
+ !device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil?
2000
+ end
2001
+
2002
+ ########################################################################
2003
+ # Checks if a RbVmomi::VIM::VirtualDevice is a disk
2004
+ ########################################################################
2005
+ def self.is_disk?(device)
2006
+ !device.class.ancestors.index(RbVmomi::VIM::VirtualDisk).nil?
2007
+ end
2008
+
2009
    ########################################################################
    # Returns the spec to reconfig a VM and add a NIC
    # @param vm     [RbVmomi::VIM::VirtualMachine] target VM
    # @param mac    [String, nil] MAC address; nil lets vCenter generate one
    # @param bridge [String] name of the vCenter network/portgroup
    # @param model  [String, nil] NIC model name (case-insensitive);
    #               defaults to VirtualE1000 when nil or unrecognized
    # @return [Hash] a deviceChange entry (:operation => :add)
    # @raise [RuntimeError] if the network is not found on the VM's host
    ########################################################################
    def self.calculate_addnic_spec(vm, mac, bridge, model)
        model = model.nil? ? nil : model.downcase
        network = vm.runtime.host.network.select{|n| n.name==bridge}
        backing = nil

        if network.empty?
            raise "Network #{bridge} not found in host #{vm.runtime.host.name}"
        else
            network = network[0]
        end

        # Label index for the new NIC: one past the number of existing NICs
        card_num = 1 # start in one, we want the next avaliable id

        vm.config.hardware.device.each{ |dv|
            card_num = card_num + 1 if is_nic?(dv)
        }

        # Map the requested model to the RbVmomi device class
        nic_card = case model
                        when "virtuale1000", "e1000"
                            RbVmomi::VIM::VirtualE1000
                        when "virtuale1000e", "e1000e"
                            RbVmomi::VIM::VirtualE1000e
                        when "virtualpcnet32", "pcnet32"
                            RbVmomi::VIM::VirtualPCNet32
                        when "virtualsriovethernetcard", "sriovethernetcard"
                            RbVmomi::VIM::VirtualSriovEthernetCard
                        when "virtualvmxnetm", "vmxnetm"
                            RbVmomi::VIM::VirtualVmxnetm
                        when "virtualvmxnet2", "vmnet2"
                            RbVmomi::VIM::VirtualVmxnet2
                        when "virtualvmxnet3", "vmxnet3"
                            RbVmomi::VIM::VirtualVmxnet3
                        else # If none matches, use VirtualE1000
                            RbVmomi::VIM::VirtualE1000
                   end

        # Standard vSwitch networks use a plain network backing; anything
        # else is treated as a distributed vSwitch portgroup
        if network.class == RbVmomi::VIM::Network
            backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
                        :deviceName => bridge,
                        :network => network)
        else
            port = RbVmomi::VIM::DistributedVirtualSwitchPortConnection(
                        :switchUuid =>
                                network.config.distributedVirtualSwitch.uuid,
                        :portgroupKey => network.key)
            backing =
              RbVmomi::VIM.VirtualEthernetCardDistributedVirtualPortBackingInfo(
                 :port => port)
        end

        # :addressType 'manual' pins the given MAC; 'generated' lets
        # vCenter assign one
        return {:operation => :add,
                :device => nic_card.new(
                            :key => 0,
                            :deviceInfo => {
                                :label => "net" + card_num.to_s,
                                :summary => bridge
                            },
                            :backing => backing,
                            :addressType => mac ? 'manual' : 'generated',
                            :macAddress => mac
                           )
               }
    end
2075
+
2076
    ########################################################################
    # Clone a vCenter VM Template and leaves it powered on
    # @param xml_text  [String] OpenNebula VM deployment XML
    # @param hostname  [String] vCenter cluster name used as OpenNebula host
    # @param datastore [String, nil] target datastore name (may be
    #                  overridden by VCENTER_DATASTORE in the template)
    # @return [String] the vCenter uuid of the cloned VM
    ########################################################################
    def self.clone_vm(xml_text, hostname, datastore)

        host_id = VCenterDriver::VIClient.translate_hostname(hostname)

        # Retrieve hostname

        host = OpenNebula::Host.new_with_id(host_id, OpenNebula::Client.new())
        host.info # Not failing if host retrieval fails

        # Get VM prefix name

        if host["/HOST/TEMPLATE/VM_PREFIX"] and !host["/HOST/TEMPLATE/VM_PREFIX"].empty?
            vmname_prefix = host["/HOST/TEMPLATE/VM_PREFIX"]
        else # fall back to default value
            vmname_prefix = "one-$i-"
        end

        xml = REXML::Document.new xml_text
        pcs = xml.root.get_elements("/VM/USER_TEMPLATE/PUBLIC_CLOUD")

        # NOTE(review): get_elements returns an Array (possibly empty),
        # never nil, so this raise can only trigger via the later checks
        raise "Cannot find VCenter element in VM template." if pcs.nil?

        # Keep only PUBLIC_CLOUD sections of TYPE vcenter
        template = pcs.select { |t|
            type = t.elements["TYPE"]
            !type.nil? && type.text.downcase == "vcenter"
        }

        # If there are multiple vCenter templates, find the right one

        if template.is_a? Array
            all_vcenter_templates = template.clone
            # If there is more than one coincidence, pick the first one
            template = template.select {|t|
                cluster_name = t.elements["HOST"]
                !cluster_name.nil? && cluster_name.text == hostname
            }[0]
            # The template may not reference any specific CLUSTER
            # (referenced to as HOST in the OpenNebula template)
            # Therefore, here take the first one that does not
            # specify a CLUSTER to see if we are lucky
            if template.nil?
                template = all_vcenter_templates.select {|t|
                    t.elements["HOST"].nil?
                }[0]
            end
        end

        raise "Cannot find vCenter element in VM template." if template.nil?

        uuid = template.elements["VM_TEMPLATE"]

        raise "Cannot find VM_TEMPLATE in vCenter element." if uuid.nil?

        # Build the vCenter VM name from the prefix ($i -> OpenNebula VM id)
        uuid = uuid.text
        vmid = xml.root.elements["/VM/ID"].text
        vmname_prefix.gsub!("$i", vmid)
        vcenter_name = "#{vmname_prefix}#{xml.root.elements["/VM/NAME"].text}"
        hid = xml.root.elements["/VM/HISTORY_RECORDS/HISTORY/HID"]

        raise "Cannot find host id in deployment file history." if hid.nil?

        connection = VIClient.new(hid)
        vc_template = connection.find_vm_template(uuid)

        # Find out requested and available resource pool

        req_rp = nil
        if !xml.root.elements["/VM/USER_TEMPLATE/RESOURCE_POOL"].nil?
            req_rp = xml.root.elements["/VM/USER_TEMPLATE/RESOURCE_POOL"].text
        end

        # When the client is confined to a single resource pool the
        # requested one (if any) must match it
        if connection.rp_confined?
            rp = connection.resource_pool.first
            if req_rp && rp.name != req_rp
                raise "Available resource pool in host [#{rp.name}]"\
                      " does not match requested resource pool"\
                      " [#{req_rp}]"
            end
        else
            if req_rp # if there is requested resource pool, retrieve it
                rp = connection.find_resource_pool(req_rp)
                raise "Cannot find resource pool "\
                      "#{template.elements["RESOURCE_POOL"].text}" if !rp
            else # otherwise, get the default resource pool
                rp = connection.default_resource_pool
            end
        end

        # Find out requested and available datastore
        # (VCENTER_DATASTORE in the user template overrides the argument)

        if !xml.root.elements["/VM/USER_TEMPLATE/VCENTER_DATASTORE"].nil?
            datastore = xml.root.elements["/VM/USER_TEMPLATE/VCENTER_DATASTORE"].text
        end

        if datastore
            datastores = VIClient.get_entities(connection.dc.datastoreFolder,
                                               'Datastore')
            ds = datastores.select{|ds| ds.name == datastore}[0]
            raise "Cannot find datastore #{datastore}" if !ds
        end

        relocate_spec_params = {
            :diskMoveType => :moveChildMostDiskBacking,
            :pool => rp
        }

        relocate_spec_params[:datastore] = ds if datastore

        relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
                                                     relocate_spec_params)

        # This running flag will prevent spurious poweroff states in the VM

        running_flag = [{:key=>"opennebula.vm.running",:value=>"no"}]

        running_flag_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
                                {:extraConfig =>running_flag})

        clone_parameters = {
            :location => relocate_spec,
            :powerOn  => false,
            :template => false,
            :config   => running_flag_spec
        }

        # Optional vCenter customization spec referenced by name
        customization = template.elements["CUSTOMIZATION_SPEC"]

        vim = connection.vim

        if !customization.nil?
            begin
                custom_spec = vim.serviceContent.customizationSpecManager.
                    GetCustomizationSpec(:name => customization.text)

                if custom_spec && spec=custom_spec.spec
                    clone_parameters[:customization] = spec
                else
                    raise "Error getting customization spec"
                end

            rescue
                raise "Customization spec '#{customization.text}' not found"
            end
        end

        clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(clone_parameters)

        # Clone; on a DuplicateName collision destroy the leftover VM with
        # the same name and retry once
        begin
            vm = vc_template.CloneVM_Task(
                   :folder => vc_template.parent,
                   :name   => vcenter_name,
                   :spec   => clone_spec).wait_for_completion
        rescue Exception => e

            if !e.message.start_with?('DuplicateName')
                raise "Cannot clone VM Template: #{e.message}"
            end

            vm = connection.find_vm(vcenter_name)

            raise "Cannot clone VM Template" if vm.nil?

            vm.Destroy_Task.wait_for_completion
            vm = vc_template.CloneVM_Task(
                   :folder => vc_template.parent,
                   :name   => vcenter_name,
                   :spec   => clone_spec).wait_for_completion
        end

        # Apply context/VNC/NIC/disk/capacity reconfiguration
        reconfigure_vm(vm, xml, true, hostname)

        # Power on the VM
        vm.PowerOnVM_Task.wait_for_completion

        # Set to yes the running flag

        config_array = [{:key=>"opennebula.vm.running",:value=>"yes"}]
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(
                                {:extraConfig =>config_array})

        vm.ReconfigVM_Task(:spec => spec).wait_for_completion

        return vm.config.uuid
    end
2263
+
2264
    ########################################################################
    # Reconfigures a VM with new deployment description: VM id, VNC
    # settings, contextualization (including OneGate token), NICs, disks
    # and capacity (vCPU/memory).
    # @param vm       [RbVmomi::VIM::VirtualMachine] target VM
    # @param xml      [REXML::Document] OpenNebula deployment XML
    # @param newvm    [Boolean] true on first deploy; false on re-deploys
    #                 (then existing NICs/disks/context are preserved)
    # @param hostname [String] vCenter cluster name as OpenNebula host
    ########################################################################
    def self.reconfigure_vm(vm, xml, newvm, hostname)
        vm_uuid = vm.config.uuid
        vmid = xml.root.elements["/VM/ID"].text
        context = xml.root.elements["/VM/TEMPLATE/CONTEXT"]

        # Read existing context if it is not a new VM
        if !newvm
            old_context = vm.config.extraConfig.select{|val|
                val[:key]=="guestinfo.opennebula.context"}
        end

        # Add VMID to VM's extraConfig

        config_array = [{:key=>"opennebula.vm.id",:value=>vmid}]

        # VNC Section

        vnc_port = xml.root.elements["/VM/TEMPLATE/GRAPHICS/PORT"]
        vnc_listen = xml.root.elements["/VM/TEMPLATE/GRAPHICS/LISTEN"]
        vnc_keymap = xml.root.elements["/VM/TEMPLATE/GRAPHICS/KEYMAP"]

        if !vnc_listen
            vnc_listen = "0.0.0.0"
        else
            vnc_listen = vnc_listen.text
        end

        context_vnc_spec = {}

        if vnc_port
            config_array +=
                     [{:key=>"remotedisplay.vnc.enabled",:value=>"TRUE"},
                      {:key=>"remotedisplay.vnc.port", :value=>vnc_port.text},
                      {:key=>"remotedisplay.vnc.ip",   :value=>vnc_listen}]
        end

        config_array += [{:key=>"remotedisplay.vnc.keymap",
                          :value=>vnc_keymap.text}] if vnc_keymap

        # Context section

        if context
            context_text = create_context(context)

            # OneGate: encrypt "<vmid>:<stime>" with the user's
            # TOKEN_PASSWORD and append it as ONEGATE_TOKEN
            onegate_token_flag = xml.root.elements["/VM/TEMPLATE/CONTEXT/TOKEN"]
            if onegate_token_flag and
               onegate_token_flag.text == "YES" and
               !newvm
                # Create the OneGate token string
                vmid_str  = xml.root.elements["/VM/ID"].text
                stime_str = xml.root.elements["/VM/STIME"].text
                str_to_encrypt = "#{vmid_str}:#{stime_str}"

                user_id = xml.root.elements['//CREATED_BY'].text

                if user_id.nil?
                    # NOTE(review): the {...} here is a block, so puts gets
                    # no arguments and only prints a newline — probably
                    # meant STDERR.puts "..." (same below)
                    STDERR.puts {"VMID:#{vmid} CREATED_BY not present" \
                        " in the VM TEMPLATE"}
                    return nil
                end

                user = OpenNebula::User.new_with_id(user_id,
                                                    OpenNebula::Client.new)
                rc   = user.info

                if OpenNebula.is_error?(rc)
                    STDERR.puts {"VMID:#{vmid} user.info" \
                        " error: #{rc.message}"}
                    return nil
                end

                token_password = user['TEMPLATE/TOKEN_PASSWORD']

                if token_password.nil?
                    STDERR.puts {"VMID:#{vmid} TOKEN_PASSWORD not present"\
                        " in the USER:#{user_id} TEMPLATE"}
                    return nil
                end

                # NOTE(review): OpenSSL::Cipher::Cipher is deprecated in
                # favor of OpenSSL::Cipher
                cipher = OpenSSL::Cipher::Cipher.new("aes-256-cbc")
                cipher.encrypt
                cipher.key = token_password
                onegate_token = cipher.update(str_to_encrypt)
                onegate_token << cipher.final

                onegate_token_64 = Base64.encode64(onegate_token).chop

                context_text += "ONEGATE_TOKEN='#{onegate_token_64}'\n"
            end

            # If there is an old VM, we need to honor the existing ONEGATE_TOKEN
            if !newvm
                onegate_token =
                    Base64.decode64(old_context[0][:value]).split("\n").
                        select{|line| line.start_with?("ONEGATE_TOKEN")}[0]

                if onegate_token
                    context_text += onegate_token
                end
            end

            # Context is passed to the guest base64-encoded via guestinfo
            context_text = Base64.encode64(context_text.chop)

            config_array +=
                     [{:key=>"guestinfo.opennebula.context",
                       :value=>context_text}]
        end

        if config_array != []
            context_vnc_spec = {:extraConfig =>config_array}
        end

        device_change = []

        # NIC section, build the reconfig hash

        nics = xml.root.get_elements("/VM/TEMPLATE/NIC")
        nic_spec = {}

        # If the VM is not new, avoid readding NiCs
        if !newvm
            vm.config.hardware.device.each{ |dv|
                if is_nic?(dv)
                    nics.each{|nic|
                        if nic.elements["MAC"].text == dv.macAddress and
                           nic.elements["BRIDGE"].text == dv.deviceInfo.summary
                            nics.delete(nic)
                        end
                    }
                end
            }
        end

        if !nics.nil?
            nic_array = []
            nics.each{|nic|
                mac    = nic.elements["MAC"].text
                bridge = nic.elements["BRIDGE"].text
                model  = nic.elements["MODEL"] ? nic.elements["MODEL"].text : nil
                nic_array << calculate_addnic_spec(vm, mac, bridge, model)
            }

            device_change += nic_array
        end

        # DISK section, build the reconfig hash

        disks = xml.root.get_elements("/VM/TEMPLATE/DISK")
        disk_spec = {}

        # If the VM is not new, avoid readding DISKS
        if !newvm
            vm.config.hardware.device.select { |d|
                if is_disk?(d)
                    disks.each{|disk|
                        if disk.elements["SOURCE"].text == d.backing.fileName
                            disks.delete(disk)
                        end
                    }
                end
            }
        end

        if !disks.nil?
            disk_array = []
            hid         = VIClient::translate_hostname(hostname)
            connection  = VIClient.new(hid)
            disks.each{|disk|
                ds_name  = disk.elements["DATASTORE"].text
                img_name = disk.elements["SOURCE"].text
                type_str = disk.elements["TYPE"].text

                # NOTE(review): attach_disk's signature is (hostname,
                # deploy_id, ds_name, img_name, type, size_kb, ...) — here
                # 0 is passed as type and type_str as size_kb; the argument
                # order looks swapped. TODO confirm against attach_disk.
                disk_array += attach_disk("", "", ds_name, img_name, 0, type_str, vm, connection)[:deviceChange]
            }

            device_change += disk_array
        end

        # Capacity section

        cpu = xml.root.elements["/VM/TEMPLATE/VCPU"] ? xml.root.elements["/VM/TEMPLATE/VCPU"].text : 1
        memory = xml.root.elements["/VM/TEMPLATE/MEMORY"].text
        capacity_spec = {:numCPUs => cpu.to_i,
                         :memoryMB => memory }

        # Perform the VM reconfiguration
        spec_hash = context_vnc_spec.merge(capacity_spec)
        if device_change.length > 0
            spec_hash.merge!({ :deviceChange => device_change })
        end

        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
        vm.ReconfigVM_Task(:spec => spec).wait_for_completion
    end
2462
+
2463
    ############################################################################
    # Attach disk to a VM
    # @param hostname  [String] vCenter cluster name in opennebula as host
    # @param deploy_id [String] deploy id of the vm
    # @param ds_name   [String] name of the datastore
    # @param img_name  [String] path of the image within the datastore
    # @param type      [String] "CDROM" to (re)connect an ISO to the existing
    #                  CD drive; any other value attaches a new virtual disk
    # @param size_kb   [String] size in kb of the disk
    # @param vm        [RbVmomi::VIM::VirtualMachine] VM if called from instance
    # @param connection [VIClient] connection if called from instance
    # @return the config spec when vm/connection were supplied (caller will
    #         apply it); otherwise applies the reconfiguration itself
    ############################################################################
    def self.attach_disk(hostname, deploy_id, ds_name, img_name, type, size_kb, vm=nil, connection=nil)
        # When vm is given the caller already holds the connection and only
        # wants the spec back, not the ReconfigVM call
        only_return = true
        if !vm
            hid = VIClient::translate_hostname(hostname)
            connection = VIClient.new(hid)

            vm = connection.find_vm_template(deploy_id)
            only_return = false
        end

        # Find datastore within datacenter
        datastores = VIClient.get_entities(connection.dc.datastoreFolder,
                                           'Datastore')
        ds = datastores.select{|ds| ds.name == ds_name}[0]

        # SCSI controller and unit number for a new disk (computed even for
        # the CDROM branch, which does not use them)
        controller, new_number = find_free_controller(vm)

        if type == "CDROM"
            # Reuse the VM's existing CD drive, pointing it at the ISO.
            # NOTE(review): assumes the VM already has a VirtualCdrom
            # device; cd.key raises NoMethodError otherwise
            vmdk_backing = RbVmomi::VIM::VirtualCdromIsoBackingInfo(
                  :datastore => ds,
                  :fileName => "[#{ds_name}] #{img_name}"
            )

            cd = vm.config.hardware.device.select {|hw|
                      hw.class == RbVmomi::VIM::VirtualCdrom}.first
            device = RbVmomi::VIM::VirtualCdrom(
                     backing: vmdk_backing,
                     key: cd.key,
                     controllerKey: cd.controllerKey,
                     connectable: RbVmomi::VIM::VirtualDeviceConnectInfo(
                       startConnected: false,
                       connected: false,
                       allowGuestControl: false
                     )
            )
            device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
               :device => device,
               :operation => RbVmomi::VIM::VirtualDeviceConfigSpecOperation('edit')
            )
        else
            # Attach a new flat VMDK in persistent mode on the free
            # controller/unit found above (key -1 = let vCenter assign)
            vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                  :datastore => ds,
                  :diskMode => 'persistent',
                  :fileName => "[#{ds_name}] #{img_name}"
            )

            device = RbVmomi::VIM::VirtualDisk(
              :backing => vmdk_backing,
              :capacityInKB => size_kb,
              :controllerKey => controller.key,
              :key => -1,
              :unitNumber => new_number
            )
            device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
               :device => device,
               :operation => RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
            )
        end

        vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
           :deviceChange => [device_config_spec]
        )

        return vm_config_spec if only_return

        vm.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion
    end
2540
+
2541
+ def self.find_free_controller(vm)
2542
+ free_scsi_controllers = Array.new
2543
+ available_controller = nil
2544
+ scsi_schema = Hash.new
2545
+
2546
+ used_numbers = Array.new
2547
+ available_numbers = Array.new
2548
+
2549
+ vm.config.hardware.device.each{ |dev|
2550
+ if dev.is_a? RbVmomi::VIM::VirtualSCSIController
2551
+ if scsi_schema[dev.controllerKey].nil?
2552
+ scsi_schema[dev.key] = Hash.new
2553
+ scsi_schema[dev.key][:lower] = Array.new
2554
+ end
2555
+ used_numbers << dev.scsiCtlrUnitNumber
2556
+ scsi_schema[dev.key][:device] = dev
2557
+ end
2558
+
2559
+ next if dev.class != RbVmomi::VIM::VirtualDisk
2560
+ used_numbers << dev.unitNumber
2561
+ }
2562
+
2563
+ 15.times{ |scsi_id|
2564
+ available_numbers << scsi_id if used_numbers.grep(scsi_id).length <= 0
2565
+ }
2566
+
2567
+ scsi_schema.keys.each{|controller|
2568
+ if scsi_schema[controller][:lower].length < 15
2569
+ free_scsi_controllers << scsi_schema[controller][:device].deviceInfo.label
2570
+ end
2571
+ }
2572
+
2573
+ if free_scsi_controllers.length > 0
2574
+ available_controller_label = free_scsi_controllers[0]
2575
+ else
2576
+ add_new_scsi(vm, scsi_schema)
2577
+ return find_free_controller(vm)
2578
+ end
2579
+
2580
+ controller = nil
2581
+
2582
+ vm.config.hardware.device.each { |device|
2583
+ (controller = device ; break) if device.deviceInfo.label == available_controller_label
2584
+ }
2585
+
2586
+ new_unit_number = available_numbers.sort[0]
2587
+
2588
+ return controller, new_unit_number
2589
+ end
2590
+
2591
+ def self.add_new_scsi(vm, scsi_schema)
2592
+ controller = nil
2593
+
2594
+ if scsi_schema.keys.length >= 4
2595
+ raise "Cannot add a new controller, maximum is 4."
2596
+ end
2597
+
2598
+ if scsi_schema.keys.length == 0
2599
+ scsi_key = 0
2600
+ scsi_number = 0
2601
+ else scsi_schema.keys.length < 4
2602
+ scsi_key = scsi_schema.keys.sort[-1] + 1
2603
+ scsi_number = scsi_schema[scsi_schema.keys.sort[-1]][:device].busNumber + 1
2604
+ end
2605
+
2606
+ controller_device = RbVmomi::VIM::VirtualLsiLogicController(
2607
+ :key => scsi_key,
2608
+ :busNumber => scsi_number,
2609
+ :sharedBus => :noSharing
2610
+ )
2611
+
2612
+ device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
2613
+ :device => controller_device,
2614
+ :operation => RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
2615
+ )
2616
+
2617
+ vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
2618
+ :deviceChange => [device_config_spec]
2619
+ )
2620
+
2621
+ vm.ReconfigVM_Task(:spec => vm_config_spec).wait_for_completion
2622
+
2623
+ vm.config.hardware.device.each { |device|
2624
+ if device.class == RbVmomi::VIM::VirtualLsiLogicController &&
2625
+ device.key == scsi_key
2626
+ controller = device.deviceInfo.label
2627
+ end
2628
+ }
2629
+
2630
+ return controller
2631
+ end
2632
+
2633
+ ############################################################################
2634
+ # Detach a specific disk from a VM
2635
+ # Attach disk to a VM
2636
+ # @params hostname[String] vcenter cluster name in opennebula as host
2637
+ # @params deploy_id[String] deploy id of the vm
2638
+ # @params ds_name[String] name of the datastore
2639
+ # @params img_path[String] path of the image
2640
+ ############################################################################
2641
+ def self.detach_disk(hostname, deploy_id, ds_name, img_path)
2642
+ hid = VIClient::translate_hostname(hostname)
2643
+ connection = VIClient.new(hid)
2644
+
2645
+ vm = connection.find_vm_template(deploy_id)
2646
+
2647
+ ds_and_img_name = "[#{ds_name}] #{img_path}"
2648
+
2649
+ disk = vm.config.hardware.device.select { |d| is_disk?(d) &&
2650
+ d.backing.fileName == ds_and_img_name }
2651
+
2652
+ raise "Disk #{img_path} not found." if disk.nil?
2653
+
2654
+ spec = { :deviceChange => [{
2655
+ :operation => :remove,
2656
+ :device => disk[0]
2657
+ }]}
2658
+
2659
+ vm.ReconfigVM_Task(:spec => spec).wait_for_completion
2660
+ end
2661
+
2662
+ ############################################################################
2663
+ # Detach all disks from a VM
2664
+ # @params vm[VCenterVm] vCenter VM
2665
+ ############################################################################
2666
+ def self.detach_all_disks(vm)
2667
+ disks = vm.config.hardware.device.select { |d| is_disk?(d) }
2668
+
2669
+ return if disks.nil?
2670
+
2671
+ spec = { :deviceChange => [] }
2672
+
2673
+ disks.each{|disk|
2674
+ spec[:deviceChange] << {
2675
+ :operation => :remove,
2676
+ :device => disk
2677
+ }
2678
+ }
2679
+
2680
+ vm.ReconfigVM_Task(:spec => spec).wait_for_completion
2681
+ end
2682
+
2683
+ def self.create_context(context)
2684
+ # Remove <CONTEXT> (9) and </CONTEXT>\n (11)
2685
+ context_text = "# Context variables generated by OpenNebula\n"
2686
+ context.elements.each{|context_element|
2687
+ next if !context_element.text
2688
+ context_text += context_element.name + "='" +
2689
+ context_element.text.gsub("'", "\\'") + "'\n"
2690
+ }
2691
+ context_text
2692
+ end
2693
+
2694
+ ############################################################################
2695
+ # Detach attached disks from a VM
2696
+ ############################################################################
2697
+ def self.detach_attached_disks(vm, disks, hostname)
2698
+ hid = VIClient::translate_hostname(hostname)
2699
+ connection = VIClient.new(hid)
2700
+
2701
+ spec = { :deviceChange => [] }
2702
+
2703
+ disks.each{ |disk|
2704
+ ds_and_img_name = "[#{disk['DATASTORE']}] #{disk['SOURCE']}"
2705
+ vcenter_disk = vm.config.hardware.device.select { |d| is_disk?(d) &&
2706
+ d.backing.fileName == ds_and_img_name }[0]
2707
+ spec[:deviceChange] << {
2708
+ :operation => :remove,
2709
+ :device => vcenter_disk
2710
+ }
2711
+ }
2712
+
2713
+ vm.ReconfigVM_Task(:spec => spec).wait_for_completion
2714
+ end
2715
+ end
2716
+ end