opennebula 5.1.80.beta1 → 5.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: bce5ec2f4c8ec1633421f1998e9a659a3b712fe2
-  data.tar.gz: 75718ab515a32acc13a5f931836a7a77a02c6ce4
+  metadata.gz: 40bbc901923acb487fd5b753dda85f8677985cdb
+  data.tar.gz: dd3421af744f7a1e25f6c940507297e80b595fe6
 SHA512:
-  metadata.gz: c69828852d920460e330c553ce20fd3f71e9450d5702576e4e800bbf12fbde0d273b8d7d03e4dd62c34971ba0c131d6e3fea21afc550de6a71794796f00d97d7
-  data.tar.gz: 23be93fa25689ce1d05d920a420cd3f766a47594a7f60d152539b3f013b953444357f8ebd01cdb8c6f0bbaa79c1186ad9beec04cea773c2ca4ee9b00b5c4a495
+  metadata.gz: 2db7146a955358e97698af1f8207623da99d1c4a9b8204380d3483bd9ffacc962b6d4c0d5d5d55dc5a0a49b2602ee2a1a05e021203a02285653aef50c1eaef51
+  data.tar.gz: b0818115d8aebc25f332cdf4585d2bbbd1d9e50f742122442d5aef6d4a61ff9d5211943600a53479e0bbc75e836db1147fd7a4b964286a297658b8ea38e4a077
@@ -50,12 +50,17 @@ end
 module CloudClient
 
     # OpenNebula version
-    VERSION = '5.1.80'
+    VERSION = '5.2.0'
 
     # #########################################################################
     # Default location for the authentication file
     # #########################################################################
-    DEFAULT_AUTH_FILE = ENV["HOME"]+"/.one/one_auth"
+
+    if ENV["HOME"]
+        DEFAULT_AUTH_FILE = ENV["HOME"]+"/.one/one_auth"
+    else
+        DEFAULT_AUTH_FILE = "/var/lib/one/.one/one_auth"
+    end
 
     # #########################################################################
     # Gets authorization credentials from ONE_AUTH or default
@@ -63,6 +68,7 @@ module CloudClient
     #
     # Raises an error if authorization is not found
     # #########################################################################
+
     def self.get_one_auth
         if ENV["ONE_AUTH"] and !ENV["ONE_AUTH"].empty? and
                 File.file?(ENV["ONE_AUTH"])
data/lib/opennebula.rb CHANGED
@@ -66,5 +66,5 @@ require 'opennebula/marketplaceapp_pool'
 module OpenNebula
 
     # OpenNebula version
-    VERSION = '5.1.80'
+    VERSION = '5.2.0'
 end
@@ -331,9 +331,9 @@ module OpenNebula
                                          @client)
 
             if udriver
-                rc = group_admin.allocate(uadmin, upasswd, udriver)
+                rc = group_admin.allocate(uadmin, upasswd, udriver, [self.id])
             else
-                rc = group_admin.allocate(uadmin, upasswd)
+                rc = group_admin.allocate(uadmin, upasswd, nil, [self.id])
             end
 
             if OpenNebula.is_error?(rc)
@@ -341,14 +341,6 @@ module OpenNebula
                 end
             end
 
-            # Set admin user groups to self
-            rc = group_admin.chgrp(self.id)
-
-            if OpenNebula.is_error?(rc)
-                group_admin.delete
-                return rc
-            end
-
             rc = self.add_admin(group_admin.id)
 
             if OpenNebula.is_error?(rc)
@@ -385,4 +385,4 @@ module Service
            res
        end
    end
-end
+end
@@ -62,19 +62,23 @@ module OpenNebula
         alias_method :info!, :info
 
         def info_all(xml_method, *args)
-            return xmlrpc_info(xml_method,INFO_ALL,-1,-1, *args)
+            return xmlrpc_info(xml_method, INFO_ALL, -1, -1, *args)
         end
 
         def info_mine(xml_method, *args)
-            return xmlrpc_info(xml_method,INFO_MINE,-1,-1, *args)
+            return xmlrpc_info(xml_method, INFO_MINE, -1, -1, *args)
         end
 
         def info_group(xml_method, *args)
-            return xmlrpc_info(xml_method,INFO_GROUP,-1,-1, *args)
+            return xmlrpc_info(xml_method, INFO_GROUP, -1, -1, *args)
+        end
+
+        def info_primary_group(xml_method, *args)
+            return xmlrpc_info(xml_method, INFO_PRIMARY_GROUP, -1, -1, *args)
         end
 
         def info_filter(xml_method, who, start_id, end_id, *args)
-            return xmlrpc_info(xml_method,who, start_id, end_id, *args)
+            return xmlrpc_info(xml_method, who, start_id, end_id, *args)
         end
 
         # Retrieves the monitoring data for all the Objects in the pool
@@ -144,6 +148,7 @@ module OpenNebula
         INFO_GROUP = -1
         INFO_ALL = -2
         INFO_MINE = -3
+        INFO_PRIMARY_GROUP = -4
 
         # Iterates over every PoolElement in the Pool and calls the block with a
         # a PoolElement obtained calling the factory method
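
A minimal usage sketch for the new INFO_PRIMARY_GROUP filter, assuming the standard OCA client bootstrap; the credentials, endpoint and pool choice below are illustrative, and the exact filter semantics are defined server-side by oned:

require 'opennebula'

# Illustrative credentials and endpoint.
client  = OpenNebula::Client.new('oneadmin:onepass', 'http://localhost:2633/RPC2')
vm_pool = OpenNebula::VirtualMachinePool.new(client)

# -4 (INFO_PRIMARY_GROUP) restricts the listing to the caller's primary group,
# complementing INFO_GROUP (-1), INFO_ALL (-2) and INFO_MINE (-3).
rc = vm_pool.info(OpenNebula::Pool::INFO_PRIMARY_GROUP)
puts rc.message if OpenNebula.is_error?(rc)

vm_pool.each { |vm| puts "#{vm.id} #{vm.name}" }
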
@@ -98,8 +98,17 @@ module OpenNebula
         # +username+ Name of the new user.
         #
         # +password+ Password for the new user
-        def allocate(username, password, driver=CORE_AUTH)
-            super(USER_METHODS[:allocate], username, password, driver)
+        # @param username Username for the new user.
+        # @param password Password for the new user
+        # @param driver Auth driver for the new user.
+        # @param gids Group IDs. The first ID will be used as the main group.
+        #   This array can be empty, in which case the default group will be used.
+        #
+        # @return [nil, OpenNebula::Error] nil in case of success, Error
+        #   otherwise
+        def allocate(username, password, driver=nil, gids=[])
+            driver = CORE_AUTH if driver.nil?
+            super(USER_METHODS[:allocate], username, password, driver, gids)
         end
 
         # Replaces the template contents
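
A short usage sketch for the extended allocate signature; the user name, password and group IDs are placeholders:

require 'opennebula'

client = OpenNebula::Client.new   # reads ONE_AUTH or ~/.one/one_auth

user = OpenNebula::User.new(OpenNebula::User.build_xml, client)

# driver = nil falls back to CORE_AUTH; the new gids array assigns the
# groups at creation time, with the first ID becoming the primary group.
rc = user.allocate('jdoe', 's3cret', nil, [100, 101])

puts rc.message if OpenNebula.is_error?(rc)
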
@@ -144,6 +144,7 @@ class VIClient
                 v.propSet.each{ |dynprop|
                     obj[dynprop.name] = dynprop.val
                 }
+                obj[:ref] = k._ref
                 objects << OpenStruct.new(obj)
             end
         }
@@ -151,7 +152,7 @@ class VIClient
     end
 
     ############################################################################
-    # Initializr the VIClient, and creates an OpenNebula client. The parameters
+    # Initialize the VIClient, and creates an OpenNebula client. The parameters
     # are obtained from the associated OpenNebula host
     # @param hid [Integer] The OpenNebula host id with VCenter attributes
     ############################################################################
@@ -301,6 +302,37 @@ class VIClient
         baseEntity
     end
 
+    ########################################################################
+    # Searches the associated vmFolder of the DataCenter for the current
+    # connection. Returns a RbVmomi::VIM::VirtualMachine or nil if not found
+    #
+    # Searches by moref, name, uuid and then iterates over all VMs
+    #
+    # @param uuid [String] the UUID of the VM or VM Template
+    # @param ref [String] VMware moref
+    # @param name [String] VM name in vCenter
+    ########################################################################
+    def find_vm_fast(uuid, ref = nil, name = nil)
+        if ref
+            # It can raise ManagedObjectNotFound
+            begin
+                vm = RbVmomi::VIM::VirtualMachine.new(@dc._connection, ref)
+                return vm if vm.config && vm.config.uuid == uuid
+            rescue => e
+            end
+        end
+
+        if name
+            begin
+                vm = @dc.vmFolder.find(name)
+                return vm if vm.config && vm.config.uuid == uuid
+            rescue
+            end
+        end
+
+        return find_vm_template(uuid)
+    end
+
     ########################################################################
     # Searches the associated vmFolder of the DataCenter for the current
     # connection. Returns a RbVmomi::VIM::VirtualMachine or nil if not found
@@ -310,11 +342,7 @@ class VIClient
         version = @vim.serviceContent.about.version
 
         found_vm = nil
-
-        if version.split(".").first.to_i >= 6
-            found_vm = @dc.vmFolder.findByUuid(uuid, RbVmomi::VIM::VirtualMachine, @dc)
-        end
-
+        found_vm = @dc.vmFolder.findByUuid(uuid, RbVmomi::VIM::VirtualMachine, @dc)
         return found_vm if found_vm
 
         vms = VIClient.get_entities(@dc.vmFolder, 'VirtualMachine')
@@ -352,6 +380,15 @@ class VIClient
     ########################################################################
     def get_datastore(ds_name)
         datastores = VIClient.get_entities(@dc.datastoreFolder, 'Datastore')
+
+        storage_pods = VIClient.get_entities(@dc.datastoreFolder, 'StoragePod')
+        storage_pods.each { |sp|
+            storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
+            if not storage_pod_datastores.empty?
+                datastores.concat(storage_pod_datastores)
+            end
+        }
+
         ds = datastores.select{|ds| ds.name == ds_name}[0]
     end
 
@@ -430,13 +467,15 @@ class VIClient
                     ds = ds_cache[t.datastore[0].to_s]
 
                     one_tmp << {
-                        :name => "#{vi_tmp.vm.name} - #{host.cluster_name}",
-                        :uuid => vi_tmp.vm.config.uuid,
-                        :host => host.cluster_name,
-                        :one => vi_tmp.to_one(host),
-                        :ds => vi_tmp.to_one_ds(host, ds.name),
-                        :default_ds => ds.name,
-                        :rp => vi_tmp.to_one_rp(host)
+                        :name => "#{vi_tmp.vm.name} - #{host.cluster_name}",
+                        :uuid => vi_tmp.vm.config.uuid,
+                        :host => host.cluster_name,
+                        :one => vi_tmp.to_one(host),
+                        :ds => vi_tmp.to_one_ds(host, ds.name),
+                        :default_ds => ds.name,
+                        :rp => vi_tmp.to_one_rp(host),
+                        :vcenter_ref => vi_tmp.vm._ref,
+                        :vcenter_name => vi_tmp.vm.name
                     }
                 end
             }
@@ -513,7 +552,6 @@ class VIClient
                                 "VN_MAD = \"dummy\"\n" \
                                 "VCENTER_TYPE = \"Distributed Port Group\""
 
-
                 default_pc = n.config.defaultPortConfig
 
                 has_vlan = false
@@ -540,7 +578,7 @@ class VIClient
                 end
 
                 if !vlan_str.empty?
-                    vnet_template << "VLAN_ID=#{vlan_str}\n"
+                    vnet_template << "VLAN_TAGGED_ID=#{vlan_str}\n"
                 end
 
                 one_net = {:name => net_name,
@@ -591,6 +629,15 @@ class VIClient
         datacenters.each { |dc|
             one_tmp = []
             datastores = VIClient.get_entities(dc.datastoreFolder, 'Datastore')
+
+            storage_pods = VIClient.get_entities(dc.datastoreFolder, 'StoragePod')
+            storage_pods.each { |sp|
+                storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
+                if not storage_pod_datastores.empty?
+                    datastores.concat(storage_pod_datastores)
+                end
+            }
+
             datastores.each { |ds|
                 next if !ds.is_a? RbVmomi::VIM::Datastore
                 # Find the Cluster from which to access this ds
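
The StoragePod expansion above is repeated almost verbatim in several hunks below; as a non-authoritative sketch, the pattern amounts to a helper along these lines (the method name is hypothetical, not part of the driver):

# Collect plain Datastores plus the Datastores grouped inside StoragePods
# (datastore clusters) under a given datastore folder.
def expand_with_storage_pods(datastore_folder)
    datastores = VIClient.get_entities(datastore_folder, 'Datastore')

    storage_pods = VIClient.get_entities(datastore_folder, 'StoragePod')
    storage_pods.each { |sp|
        storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
        if not storage_pod_datastores.empty?
            datastores.concat(storage_pod_datastores)
        end
    }

    datastores
end
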
@@ -654,6 +701,15 @@ class VIClient
 
                 # Find datastore within datacenter
                 datastores = VIClient.get_entities(dc.datastoreFolder, 'Datastore')
+
+                storage_pods = VIClient.get_entities(dc.datastoreFolder,
+                                                     'StoragePod')
+                storage_pods.each { |sp|
+                    storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
+                    if not storage_pod_datastores.empty?
+                        datastores.concat(storage_pod_datastores)
+                    end
+                }
+
                 ds = datastores.select{|ds| ds.name == ds_name}[0]
                 next if !ds
@@ -981,10 +1037,20 @@ class VCenterCachedHost
             while !datacenter.is_a? RbVmomi::VIM::Datacenter
                 datacenter = datacenter.parent
             end
-
+
             datastores=VIClient.get_entities(
                 datacenter.datastoreFolder,
                 'Datastore')
+
+            storage_pods = VIClient.get_entities(datacenter.datastoreFolder,
+                                                 'StoragePod')
+            storage_pods.each { |sp|
+                storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
+                if not storage_pod_datastores.empty?
+                    datastores.concat(storage_pod_datastores)
+                end
+            }
+
             datastores.each { |ds|
                 @attributes['ds_list'] += ds.name + ","
             }
@@ -1290,7 +1356,7 @@ class VCenterHost < ::OpenNebula::Host
                                       val[:key]=="opennebula.vm.running"}
                 if running_flag.size > 0 and running_flag[0]
                     running_flag = running_flag[0][:value]
-                end
+                end
 
                 next if running_flag == "no"
 
@@ -1345,6 +1411,17 @@ class VCenterHost < ::OpenNebula::Host
 
         datastores = VIClient.get_entities(client.dc.datastoreFolder,
                                            'Datastore')
+
+        storage_pods = VIClient.get_entities(client.dc.datastoreFolder,
+                                             'StoragePod')
+
+        storage_pods.each { |sp|
+            storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
+            if not storage_pod_datastores.empty?
+                datastores.concat(storage_pod_datastores)
+            end
+        }
+
         datastores.each { |ds|
             str_info += "VCENTER_DATASTORE=\"#{ds.name}\"\n"
         }
@@ -1383,13 +1460,16 @@ class VCenterVm
     # Deploys a VM
     #  @xml_text XML representation of the VM
     ############################################################################
-    def self.deploy(xml_text, lcm_state, deploy_id, hostname, datastore = nil)
+    def self.deploy(xml_text, lcm_state, deploy_id, hostname, datastore = nil,
+                    ops = {})
         if lcm_state == "BOOT" || lcm_state == "BOOT_FAILURE"
-            return clone_vm(xml_text, hostname, datastore)
+            return clone_vm(xml_text, hostname, datastore, ops)
         else
             hid = VIClient::translate_hostname(hostname)
             connection = VIClient.new(hid)
-            vm = connection.find_vm_template(deploy_id)
+            vm = connection.find_vm_fast(deploy_id,
+                                         ops[:ref],
+                                         ops[:name])
             xml = REXML::Document.new xml_text
 
             reconfigure_vm(vm, xml, false, hostname)
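
A hedged sketch of how a caller could fill the new ops hash; the moref and name values are made up, and in the driver they presumably come from the VCENTER_REF and VCENTER_NAME attributes added further down in this release:

# Placeholder inputs for the deploy action; only :ref and :name are new.
xml_text  = File.read('deployment.0')                  # VM description XML
lcm_state = 'BOOT'
deploy_id = '4223f951-4d2a-8f42-8bcf-1234567890ab'     # vCenter UUID
hostname  = 'vcenter-cluster'

ops = {
    :ref  => 'vm-1234',          # vCenter moref, lets find_vm_fast skip a folder scan
    :name => 'one-42-webserver'  # VM name in vCenter, second fast path
}

VCenterDriver::VCenterVm.deploy(xml_text, lcm_state, deploy_id, hostname,
                                nil, ops)
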
@@ -1428,8 +1508,8 @@ class VCenterVm
                 detach_attached_disks(vm, disks, hostname) if disks
             end
 
-            # If the VM was instantiated to persistent, convert the VM to
-            # vCenter VM Template and update the OpenNebula new
+            # If the VM was instantiated to persistent, convert the VM to
+            # vCenter VM Template and update the OpenNebula new
             # VM Template to point to the new vCenter VM Template
             if !to_template.nil?
                 vm.MarkAsTemplate
@@ -1537,18 +1617,28 @@ class VCenterVm
             case lcm_state
                 when "SHUTDOWN"
                     begin
-                        vm.ShutdownGuest.wait_for_completion
+                        vm.ShutdownGuest
+                        counter = 60*10 # 10 minutes
+                        while counter > 0
+                            break if vm.runtime.powerState == "poweredOff"
+                            counter -= 1
+                            sleep 1
+                        end
                     rescue
                     end
-                    vm.PowerOffVM_Task.wait_for_completion
+
+                    if vm.runtime.powerState != "poweredOff"
+                        vm.PowerOffVM_Task.wait_for_completion
+                    end
+
                     if keep_disks
                         detach_all_disks(vm)
                     else
                         detach_attached_disks(vm, disks, hostname) if disks
                     end
 
-                    # If the VM was instantiated to persistent, convert the VM to
-                    # vCenter VM Template and update the OpenNebula new
+                    # If the VM was instantiated to persistent, convert the VM to
+                    # vCenter VM Template and update the OpenNebula new
                     # VM Template to point to the new vCenter VM Template
                     if !to_template.nil?
                         vm.MarkAsTemplate
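
The guest-shutdown sequence above is repeated in the SHUTDOWN_POWEROFF / SHUTDOWN_UNDEPLOY branch below; a minimal standalone sketch of the pattern (the helper name is illustrative, the RbVmomi calls are the ones used in the diff):

# Ask the guest OS to shut down, poll up to `timeout` seconds, and force a
# power-off only if the VM never reaches poweredOff.
def shutdown_with_timeout(vm, timeout = 60 * 10)
    begin
        vm.ShutdownGuest                  # asynchronous, needs VMware Tools
        counter = timeout
        while counter > 0
            break if vm.runtime.powerState == "poweredOff"
            counter -= 1
            sleep 1
        end
    rescue
        # Tools missing or the call failed; fall through to the hard power-off
    end

    if vm.runtime.powerState != "poweredOff"
        vm.PowerOffVM_Task.wait_for_completion
    end
end
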
@@ -1576,10 +1666,19 @@ class VCenterVm
 
                 when "SHUTDOWN_POWEROFF", "SHUTDOWN_UNDEPLOY"
                     begin
-                        vm.ShutdownGuest.wait_for_completion
+                        vm.ShutdownGuest
+                        counter = 60*10 # 10 minutes
+                        while counter > 0
+                            break if vm.runtime.powerState == "poweredOff"
+                            counter -= 1
+                            sleep 1
+                        end
                     rescue
                     end
-                    vm.PowerOffVM_Task.wait_for_completion
+
+                    if vm.runtime.powerState != "poweredOff"
+                        vm.PowerOffVM_Task.wait_for_completion
+                    end
             end
         end
 
@@ -1793,6 +1892,73 @@ class VCenterVm
         end if @vm.guest.net
 
         @guest_ip_addresses = guest_ip_addresses.join(',')
+
+        # Network metrics - Realtime retrieved by perfManager
+        pm = @client.vim.serviceInstance.content.perfManager
+
+        provider = pm.provider_summary [@vm].first
+        refresh_rate = provider.refreshRate
+
+        vmid = -1
+        extraconfig_vmid = @vm.config.extraConfig.select{|val|
+                           val[:key]=="opennebula.vm.id"}
+        if extraconfig_vmid.size > 0 and extraconfig_vmid[0]
+            vmid = extraconfig_vmid[0][:value].to_i
+        end
+
+        if vmid < 0
+            @nettx = 0
+            @netrx = 0
+            id_not_found = "Could not retrieve VM ID from extra configuration for "\
+                           "vCenter's VM UUID #{@vm.config.uuid}"
+        else
+            one_vm = OpenNebula::VirtualMachine.new_with_id(vmid, OpenNebula::Client.new)
+            one_vm.info
+            stats = []
+
+            if(one_vm["LAST_POLL"] && one_vm["LAST_POLL"].to_i != 0 )
+                #Real time data stores max 1 hour. 1 minute has 3 samples
+                interval = (Time.now.to_i - one_vm["LAST_POLL"].to_i)
+
+                #If last poll was more than hour ago get 3 minutes,
+                #else calculate how many samples since last poll
+                samples = interval > 3600 ? 9 : interval / refresh_rate
+                max_samples = samples > 0 ? samples : 1
+
+                stats = pm.retrieve_stats(
+                    [@vm],
+                    ['net.transmitted','net.bytesRx','net.bytesTx','net.received'],
+                    {interval:refresh_rate, max_samples: max_samples}
+                )
+            else
+                # First poll, get at least latest 3 minutes = 9 samples
+                stats = pm.retrieve_stats(
+                    [@vm],
+                    ['net.transmitted','net.bytesRx'],
+                    {interval:refresh_rate, max_samples: 9}
+                )
+            end
+
+            if stats.empty? || stats.first[1][:metrics].empty?
+                @nettx = 0
+                @netrx = 0
+            else
+                metrics = stats.first[1][:metrics]
+
+                nettx_kbpersec = 0
+                metrics['net.transmitted'].each { |sample|
+                    nettx_kbpersec += sample
+                }
+
+                netrx_kbpersec = 0
+                metrics['net.bytesRx'].each { |sample|
+                    netrx_kbpersec += sample
+                }
+
+                @nettx = (nettx_kbpersec * 1024 * refresh_rate).to_i
+                @netrx = (netrx_kbpersec * 1024 * refresh_rate).to_i
+            end
+        end
     end
 
     ########################################################################
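
For reference, the realtime counters summed above are per-interval averages in KB/s, so the final multiplication turns them into a byte figure; a worked sketch with made-up numbers:

refresh_rate = 20                 # seconds per realtime sample (typical value)
tx_samples   = [12, 30, 25]       # made-up net.transmitted samples, KB/s

nettx_kbpersec = tx_samples.reduce(0) { |sum, s| sum + s }   # 67

# KB/s * 1024 gives bytes per second, * refresh_rate gives bytes per sample slot
nettx = (nettx_kbpersec * 1024 * refresh_rate).to_i
puts nettx                        # => 1372160
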
@@ -1835,6 +2001,8 @@ class VCenterVm
             "PUBLIC_CLOUD = [\n"\
             "  TYPE =\"vcenter\",\n"\
             "  VM_TEMPLATE =\"#{@vm.config.uuid}\",\n"\
+            "  VCENTER_REF =\"#{@vm.ref}\",\n"\
+            "  VCENTER_NAME=\"#{@vm.name}\",\n"\
             "  HOST =\"#{cluster_name}\"\n"\
             "]\n"\
             "GRAPHICS = [\n"\
@@ -2015,7 +2183,7 @@ private
     ########################################################################
     # Returns the spec to reconfig a VM and add a NIC
     ########################################################################
-    def self.calculate_addnic_spec(vm, mac, bridge, model)
+    def self.calculate_addnic_spec(vm, mac, bridge, model, limit=nil, rsrv=nil)
         model = model.nil? ? nil : model.downcase
         network = vm.runtime.host.network.select{|n| n.name==bridge}
         backing = nil
@@ -2065,24 +2233,40 @@ private
                                                              :port => port)
         end
 
-        return {:operation => :add,
-                :device => nic_card.new(
-                            :key => 0,
-                            :deviceInfo => {
-                                :label => "net" + card_num.to_s,
-                                :summary => bridge
-                            },
-                            :backing => backing,
-                            :addressType => mac ? 'manual' : 'generated',
-                            :macAddress => mac
-                           )
+        card_spec = {
+            :key => 0,
+            :deviceInfo => {
+                :label => "net" + card_num.to_s,
+                :summary => bridge
+            },
+            :backing => backing,
+            :addressType => mac ? 'manual' : 'generated',
+            :macAddress => mac
+        }
+
+        if (limit or rsrv) and (limit > 0)
+            ra_spec = Hash.new
+            rsrv = limit if rsrv > limit
+            ra_spec[:limit] = limit if limit
+            ra_spec[:reservation] = rsrv if rsrv
+            ra_spec[:share] = RbVmomi::VIM.SharesInfo({
+                :level => RbVmomi::VIM.SharesLevel("normal"),
+                :shares => 0
+            })
+            card_spec[:resourceAllocation] =
+                RbVmomi::VIM.VirtualEthernetCardResourceAllocation(ra_spec)
+        end
+
+        return {
+            :operation => :add,
+            :device => nic_card.new(card_spec)
         }
     end
     end
     ########################################################################
     # Clone a vCenter VM Template and leaves it powered on
     ########################################################################
-    def self.clone_vm(xml_text, hostname, datastore)
+    def self.clone_vm(xml_text, hostname, datastore, ops = {})
 
         host_id = VCenterDriver::VIClient.translate_hostname(hostname)
 
@@ -2144,7 +2328,7 @@ private
         raise "Cannot find host id in deployment file history." if hid.nil?
 
         connection = VIClient.new(hid)
-        vc_template = connection.find_vm_template(uuid)
+        vc_template = connection.find_vm_fast(uuid, ops[:ref], ops[:name])
 
         # Find out requested and available resource pool
 
@@ -2179,6 +2363,16 @@ private
         if datastore
             datastores = VIClient.get_entities(connection.dc.datastoreFolder,
                                                'Datastore')
+
+            storage_pods = VIClient.get_entities(connection.dc.datastoreFolder,
+                                                 'StoragePod')
+            storage_pods.each { |sp|
+                storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
+                if not storage_pod_datastores.empty?
+                    datastores.concat(storage_pod_datastores)
+                end
+            }
+
             ds = datastores.select{|ds| ds.name == datastore}[0]
             raise "Cannot find datastore #{datastore}" if !ds
         end
@@ -2410,7 +2604,24 @@ private
                 mac = nic.elements["MAC"].text
                 bridge = nic.elements["BRIDGE"].text
                 model = nic.elements["MODEL"] ? nic.elements["MODEL"].text : nil
-                nic_array << calculate_addnic_spec(vm, mac, bridge, model)
+                limit_in = nic.elements["INBOUND_PEAK_BW"] ? nic.elements["INBOUND_PEAK_BW"].text : ""
+                limit_out = nic.elements["OUTBOUND_PEAK_BW"] ? nic.elements["OUTBOUND_PEAK_BW"].text : ""
+                limit = nil
+                if !limit_in.empty? or !limit_out.empty?
+                    limit=([limit_in.to_i, limit_out.to_i].min / 1024) * 8
+                end
+                rsrv_in = nic.elements["INBOUND_AVG_BW"] ? nic.elements["INBOUND_AVG_BW"].text : ""
+                rsrv_out = nic.elements["OUTBOUND_AVG_BW"] ? nic.elements["OUTBOUND_AVG_BW"].text : ""
+                rsrv = nil
+                if !rsrv_in.empty? or !rsrv_out.empty?
+                    rsrv=([rsrv_in.to_i, rsrv_out.to_i].min / 1024) * 8
+                end
+                nic_array << calculate_addnic_spec(vm,
+                                                   mac,
+                                                   bridge,
+                                                   model,
+                                                   limit,
+                                                   rsrv)
             }
 
             device_change += nic_array
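
A worked sketch of the conversion above, assuming the NIC attributes are expressed in KB/s as elsewhere in OpenNebula and that the vSphere NIC allocation expects Mbit/s; the sample values are made up:

limit_in  = "25600"   # KB/s, roughly 200 Mbit/s
limit_out = "10240"   # KB/s, roughly 80 Mbit/s

# One limit per vNIC, so the smaller direction wins; integer division by
# 1024 gives MB/s and the * 8 turns that into Mbit/s.
limit = ([limit_in.to_i, limit_out.to_i].min / 1024) * 8
puts limit            # => 80
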
@@ -2421,7 +2632,7 @@ private
             disks = xml.root.get_elements("/VM/TEMPLATE/DISK")
             disk_spec = {}
 
-            # If the VM is not new, avoid readding DISKS
+            # If the VM is not new, avoid reading DISKS
             if !newvm
                 vm.config.hardware.device.select { |d|
                     if is_disk?(d)
@@ -2490,6 +2701,16 @@ private
             # Find datastore within datacenter
             datastores = VIClient.get_entities(connection.dc.datastoreFolder,
                                                'Datastore')
+
+            storage_pods = VIClient.get_entities(connection.dc.datastoreFolder,
+                                                 'StoragePod')
+            storage_pods.each { |sp|
+                storage_pod_datastores = VIClient.get_entities(sp, 'Datastore')
+                if not storage_pod_datastores.empty?
+                    datastores.concat(storage_pod_datastores)
+                end
+            }
+
             ds = datastores.select{|ds| ds.name == ds_name}[0]
 
             controller, new_number = find_free_controller(vm)
@@ -2500,22 +2721,48 @@ private
                 :fileName => "[#{ds_name}] #{img_name}"
             )
 
-            cd = vm.config.hardware.device.select {|hw|
+            cd = vm.config.hardware.device.select {|hw|
                                    hw.class == RbVmomi::VIM::VirtualCdrom}.first
-            device = RbVmomi::VIM::VirtualCdrom(
-                backing: vmdk_backing,
-                key: cd.key,
-                controllerKey: cd.controllerKey,
-                connectable: RbVmomi::VIM::VirtualDeviceConnectInfo(
-                    startConnected: false,
-                    connected: false,
-                    allowGuestControl: false
-                )
-            )
-            device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
-                :device => device,
-                :operation => RbVmomi::VIM::VirtualDeviceConfigSpecOperation('edit')
-            )
+
+            # If no CDROM drive present, we need to add it
+            if !cd
+                controller, new_unit_number = find_free_controller(vm)
+                cdrom_drive_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
+                    :deviceChange => [{
+                        :operation => :add,
+                        :device => RbVmomi::VIM::VirtualCdrom(
+                            :backing => vmdk_backing,
+                            :key => -1,
+                            :controllerKey => 15000,
+                            :unitNumber => 0,
+                            :connectable => RbVmomi::VIM::VirtualDeviceConnectInfo(
+                                :startConnected => true,
+                                :connected => true,
+                                :allowGuestControl => true
+                            )
+                        )}]
+                )
+
+                vm.ReconfigVM_Task(:spec =>
+                                   cdrom_drive_spec).wait_for_completion
+
+                return
+            else
+                device = RbVmomi::VIM::VirtualCdrom(
+                    backing: vmdk_backing,
+                    key: cd.key,
+                    controllerKey: cd.controllerKey,
+                    connectable: RbVmomi::VIM::VirtualDeviceConnectInfo(
+                        startConnected: true,
+                        connected: true,
+                        allowGuestControl: true
+                    )
+                )
+                device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
+                    :device => device,
+                    :operation => RbVmomi::VIM::VirtualDeviceConfigSpecOperation('edit')
+                )
+            end
         else
             vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
                 :datastore => ds,
@@ -2639,7 +2886,6 @@ private
 
     ############################################################################
     # Detach a specific disk from a VM
-    # Attach disk to a VM
     #  @params hostname[String] vcenter cluster name in opennebula as host
     #  @params deploy_id[String] deploy id of the vm
     #  @params ds_name[String] name of the datastore
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: opennebula
 version: !ruby/object:Gem::Version
-  version: 5.1.80.beta1
+  version: 5.2.0
 platform: ruby
 authors:
 - OpenNebula
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-09-16 00:00:00.000000000 Z
+date: 2016-10-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: nokogiri
@@ -134,9 +134,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - ">"
+  - - ">="
     - !ruby/object:Gem::Version
-      version: 1.3.1
+      version: '0'
 requirements: []
 rubyforge_project:
 rubygems_version: 2.5.1