opennebula 5.12.6 → 5.13.90.pre

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (126) hide show
  1. checksums.yaml +4 -4
  2. data/lib/ActionManager.rb +1 -1
  3. data/lib/CommandManager.rb +1 -1
  4. data/lib/DriverExecHelper.rb +44 -28
  5. data/lib/OpenNebulaDriver.rb +8 -4
  6. data/lib/VirtualMachineDriver.rb +9 -2
  7. data/lib/cloud/CloudClient.rb +3 -3
  8. data/lib/datacenter.rb +1259 -0
  9. data/lib/datastore.rb +1025 -0
  10. data/lib/distributed_firewall.rb +280 -0
  11. data/lib/file_helper.rb +370 -0
  12. data/lib/host.rb +1517 -0
  13. data/lib/logical_port.rb +50 -0
  14. data/lib/logical_switch.rb +77 -0
  15. data/lib/memoize.rb +74 -0
  16. data/lib/models.rb +32 -0
  17. data/lib/models/role.rb +1126 -0
  18. data/lib/models/service.rb +709 -0
  19. data/lib/network.rb +708 -0
  20. data/lib/nsx_client.rb +144 -0
  21. data/lib/nsx_component.rb +28 -0
  22. data/lib/nsx_constants.rb +149 -0
  23. data/lib/nsx_driver.rb +78 -0
  24. data/lib/nsx_error.rb +77 -0
  25. data/lib/nsx_rule.rb +193 -0
  26. data/lib/nsxt_client.rb +176 -0
  27. data/lib/nsxt_dfw.rb +196 -0
  28. data/lib/nsxt_logical_port.rb +94 -0
  29. data/lib/nsxt_rule.rb +188 -0
  30. data/lib/nsxt_tz.rb +38 -0
  31. data/lib/nsxv_client.rb +176 -0
  32. data/lib/nsxv_dfw.rb +202 -0
  33. data/lib/nsxv_logical_port.rb +107 -0
  34. data/lib/nsxv_rule.rb +172 -0
  35. data/lib/nsxv_tz.rb +41 -0
  36. data/lib/opaque_network.rb +134 -0
  37. data/lib/opennebula.rb +5 -2
  38. data/lib/opennebula/acl.rb +1 -1
  39. data/lib/opennebula/acl_pool.rb +1 -1
  40. data/lib/opennebula/client.rb +10 -2
  41. data/lib/opennebula/cluster.rb +1 -1
  42. data/lib/opennebula/cluster_pool.rb +1 -1
  43. data/lib/opennebula/datastore.rb +1 -1
  44. data/lib/opennebula/datastore_pool.rb +1 -1
  45. data/lib/opennebula/document.rb +8 -29
  46. data/lib/opennebula/document_json.rb +42 -12
  47. data/lib/opennebula/document_pool.rb +1 -1
  48. data/lib/opennebula/document_pool_json.rb +1 -1
  49. data/lib/opennebula/error.rb +4 -1
  50. data/lib/opennebula/flow.rb +23 -0
  51. data/lib/opennebula/flow/grammar.rb +1195 -0
  52. data/lib/opennebula/flow/service_pool.rb +190 -0
  53. data/lib/opennebula/flow/service_template.rb +572 -0
  54. data/lib/opennebula/flow/service_template_ext.rb +84 -0
  55. data/lib/opennebula/flow/service_template_pool.rb +32 -0
  56. data/lib/opennebula/flow/validator.rb +499 -0
  57. data/lib/opennebula/group.rb +1 -1
  58. data/lib/opennebula/group_pool.rb +1 -1
  59. data/lib/opennebula/hook.rb +5 -12
  60. data/lib/opennebula/hook_log.rb +1 -1
  61. data/lib/opennebula/hook_pool.rb +1 -1
  62. data/lib/opennebula/host.rb +1 -1
  63. data/lib/opennebula/host_pool.rb +1 -1
  64. data/lib/opennebula/image.rb +19 -14
  65. data/lib/opennebula/image_pool.rb +1 -1
  66. data/lib/opennebula/ldap_auth.rb +1 -1
  67. data/lib/opennebula/ldap_auth_spec.rb +1 -1
  68. data/lib/opennebula/lockable_ext.rb +163 -0
  69. data/lib/opennebula/marketplace.rb +1 -1
  70. data/lib/opennebula/marketplace_pool.rb +1 -1
  71. data/lib/opennebula/marketplaceapp.rb +9 -119
  72. data/lib/opennebula/marketplaceapp_ext.rb +522 -0
  73. data/lib/opennebula/marketplaceapp_pool.rb +1 -1
  74. data/lib/opennebula/oneflow_client.rb +4 -3
  75. data/lib/opennebula/pool.rb +19 -3
  76. data/lib/opennebula/pool_element.rb +1 -1
  77. data/lib/opennebula/security_group.rb +1 -1
  78. data/lib/opennebula/security_group_pool.rb +1 -1
  79. data/lib/opennebula/server_cipher_auth.rb +1 -1
  80. data/lib/opennebula/server_x509_auth.rb +1 -1
  81. data/lib/opennebula/ssh_auth.rb +1 -1
  82. data/lib/opennebula/system.rb +1 -1
  83. data/lib/opennebula/template.rb +4 -13
  84. data/lib/opennebula/template_ext.rb +342 -0
  85. data/lib/opennebula/template_pool.rb +1 -1
  86. data/lib/opennebula/user.rb +26 -2
  87. data/lib/opennebula/user_pool.rb +1 -1
  88. data/lib/opennebula/utils.rb +1 -1
  89. data/lib/opennebula/vdc.rb +1 -1
  90. data/lib/opennebula/vdc_pool.rb +1 -1
  91. data/lib/opennebula/virtual_machine.rb +26 -206
  92. data/lib/opennebula/virtual_machine_ext.rb +469 -0
  93. data/lib/opennebula/virtual_machine_pool.rb +5 -1
  94. data/lib/opennebula/virtual_network.rb +4 -10
  95. data/lib/opennebula/virtual_network_pool.rb +1 -1
  96. data/lib/opennebula/virtual_router.rb +4 -12
  97. data/lib/opennebula/virtual_router_pool.rb +1 -1
  98. data/lib/opennebula/vm_group.rb +4 -11
  99. data/lib/opennebula/vm_group_pool.rb +1 -1
  100. data/lib/opennebula/vntemplate.rb +4 -13
  101. data/lib/opennebula/vntemplate_pool.rb +1 -1
  102. data/lib/opennebula/wait_ext.rb +257 -0
  103. data/lib/opennebula/x509_auth.rb +1 -1
  104. data/lib/opennebula/xml_element.rb +1 -1
  105. data/lib/opennebula/xml_pool.rb +1 -1
  106. data/lib/opennebula/xml_utils.rb +1 -1
  107. data/lib/opennebula/zone.rb +1 -1
  108. data/lib/opennebula/zone_pool.rb +1 -1
  109. data/lib/rest_client.rb +201 -0
  110. data/lib/scripts_common.rb +180 -0
  111. data/lib/transport_zone.rb +43 -0
  112. data/lib/vcenter_driver.rb +13 -12
  113. data/lib/vcenter_importer.rb +616 -0
  114. data/lib/vi_client.rb +281 -0
  115. data/lib/vi_helper.rb +312 -0
  116. data/lib/virtual_machine.rb +3477 -0
  117. data/lib/virtual_wire.rb +158 -0
  118. data/lib/vm_device.rb +80 -0
  119. data/lib/vm_disk.rb +202 -0
  120. data/lib/vm_folder.rb +69 -0
  121. data/lib/vm_helper.rb +30 -0
  122. data/lib/vm_monitor.rb +303 -0
  123. data/lib/vm_nic.rb +70 -0
  124. data/lib/vm_template.rb +1961 -0
  125. data/lib/vmm_importer.rb +121 -0
  126. metadata +130 -17
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 0402c2db7019e52c30ba01408c5dff8d6d9eda2f
4
- data.tar.gz: 41df228c299bf0fb1fc6627771b7d6799952d12e
3
+ metadata.gz: f9a8c00c023d39dac1d3d0c866851beca57f8b4a
4
+ data.tar.gz: e44c5056a014e41ba5f2f5f928d36d3bdfcafd15
5
5
  SHA512:
6
- metadata.gz: 789a9a93b98ba79573e66744ef22f6de6cfad03a6920bb6c581b7bd22a1730b9febc22b8db3bb2b8f64961612304dcfb09e2a7f0da66913095fc5d948eb55792
7
- data.tar.gz: b936580a7d133822ca2634277aa6840a33e4d6f8434508b17dcf96771acd6285722791820225bc38adcf7f3453fea07e9a906c9a33d1cb87e1ea66df7d04be51
6
+ metadata.gz: 1efacac3e2a1dc06134672c69f3d563a89d2cdbbb810bd8289c0d7f5e7e7dbd7de1a247fe5976f36d9df796355160b0d1de3f9fb8013c0fe4c3e30116212ab96
7
+ data.tar.gz: 08304bf5ee0386bbef56d81e6a65668010452130b259ced25a75fdc5693109d7fa667248cb3cec5157636d1c965425fb44dfa043bdadcf821f4f6d3d2f9d1fdb
data/lib/ActionManager.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  # -------------------------------------------------------------------------- */
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
3
  # Licensed under the Apache License, Version 2.0 (the "License"); you may */
4
4
  # not use this file except in compliance with the License. You may obtain */
5
5
  # a copy of the License at */
@@ -1,5 +1,5 @@
1
1
  # --------------------------------------------------------------------------
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems
3
3
  #
4
4
  # Licensed under the Apache License, Version 2.0 (the "License"); you may
5
5
  # not use this file except in compliance with the License. You may obtain
@@ -1,5 +1,6 @@
1
+ # rubocop:disable Naming/FileName
1
2
  # -------------------------------------------------------------------------- #
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
3
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
4
  # #
4
5
  # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
6
  # not use this file except in compliance with the License. You may obtain #
@@ -18,14 +19,15 @@
18
19
  # OpenNebula Drivers. The module has been designed to be included as part
19
20
  # of a driver and not to be used standalone.
20
21
  module DriverExecHelper
22
+
21
23
  # Action result strings for messages
22
24
  RESULT = {
23
- :success => "SUCCESS",
24
- :failure => "FAILURE"
25
+ :success => 'SUCCESS',
26
+ :failure => 'FAILURE'
25
27
  }
26
28
 
27
29
  def self.failed?(rc_str)
28
- return rc_str == RESULT[:failure]
30
+ rc_str == RESULT[:failure]
29
31
  end
30
32
 
31
33
  # Initialize module variables
@@ -34,6 +36,7 @@ module DriverExecHelper
34
36
  @remote_scripts_base_path = @config['SCRIPTS_REMOTE_DIR']
35
37
 
36
38
  @local_actions = options[:local_actions]
39
+ @per_drvr_local_actions = options[:per_drvr_local_actions] || []
37
40
 
38
41
  if ENV['ONE_LOCATION'].nil?
39
42
  @local_scripts_base_path = '/var/lib/one/remotes'
@@ -57,62 +60,74 @@ module DriverExecHelper
57
60
  # actions is remote or local. If the local actions has defined an special
58
61
  # script name this is used, otherwise the action name in downcase is
59
62
  # used as the script name.
63
+ # When action is a String starting with '/' it's considered alreay full
64
+ # path command and no modification is performed apart from adding params.
60
65
  #
61
66
  # @param [String, Symbol] action name of the action
62
67
  # @param [String] parameters arguments for the script
63
68
  # @param [String, nil] default_name alternative name for the script
64
69
  # @param [String, ''] directory to append to the scripts path for actions
65
70
  # @return [String] command line needed to execute the action
66
- def action_command_line(action, parameters, default_name=nil, directory='')
67
- if action_is_local? action
71
+ def action_command_line(action, parameters,
72
+ default_name = nil, directory = '')
73
+
74
+ if action.is_a?(String) && action[0] == '/'
75
+ return action + ' ' + parameters if parameters
76
+
77
+ return action
78
+ elsif action_is_local?(action, directory)
68
79
  script_path=File.join(@local_scripts_path, directory)
69
80
  else
70
81
  script_path=File.join(@remote_scripts_path, directory)
71
82
  end
72
83
 
73
84
  File.join(script_path, action_script_name(action, default_name))+
74
- " "+parameters
85
+ ' '+parameters
75
86
  end
76
87
 
77
88
  # True if the action is meant to be executed locally
78
89
  #
79
90
  # @param [String, Symbol] action name of the action
80
- def action_is_local?(action)
81
- @local_actions.include? action.to_s.upcase
91
+ # @param [String, Symbol] driver name
92
+ def action_is_local?(action, driver = '')
93
+ @local_actions.include? action.to_s.upcase if driver.empty?
94
+
95
+ @local_actions.include? action.to_s.upcase or
96
+ @per_drvr_local_actions.include? "#{driver}-#{action}"
82
97
  end
83
98
 
84
99
  # Name of the script file for the given action
85
100
  #
86
101
  # @param [String, Symbol] action name of the action
87
102
  # @param [String, nil] default_name alternative name for the script
88
- def action_script_name(action, default_name=nil)
103
+ def action_script_name(action, default_name = nil)
89
104
  name=@local_actions[action.to_s.upcase]
90
105
 
91
- if name
92
- name
93
- else
94
- default_name || action.to_s.downcase
95
- end
106
+ name || default_name || action.to_s.downcase
96
107
  end
97
108
 
98
109
  #
99
110
  # METHODS FOR LOGS & COMMAND OUTPUT
100
111
  #
101
112
  # Sends a message to the OpenNebula core through stdout
102
- def send_message(action="-", result=RESULT[:failure], id="-", info="-")
103
- @send_mutex.synchronize {
113
+ # rubocop:disable Metrics/ParameterLists
114
+ def send_message(action = '-', result = RESULT[:failure],
115
+ id = '-', info = '-')
116
+
117
+ @send_mutex.synchronize do
104
118
  STDOUT.puts "#{action} #{result} #{id} #{info}"
105
119
  STDOUT.flush
106
- }
120
+ end
107
121
  end
122
+ # rubocop:enable Metrics/ParameterLists
108
123
 
109
124
  # Sends a log message to ONE. The +message+ can be multiline, it will
110
125
  # be automatically splitted by lines.
111
- def log(number, message, all=true)
126
+ def log(number, message, all = true)
112
127
  in_error_message=false
113
128
  msg=message.strip
114
- msg.each_line {|line|
115
- severity=all ? 'I' : nil
129
+ msg.each_line do |line|
130
+ all ? severity='I' : severity=nil
116
131
  l=line.strip
117
132
 
118
133
  if l=='ERROR MESSAGE --8<------'
@@ -125,8 +140,8 @@ module DriverExecHelper
125
140
  if in_error_message
126
141
  severity='E'
127
142
  elsif line.match(/^(ERROR|DEBUG|INFO):(.*)$/)
128
- line=$2
129
- case $1
143
+ line=Regexp.last_match(2)
144
+ case Regexp.last_match(1)
130
145
  when 'ERROR'
131
146
  severity='E'
132
147
  when 'DEBUG'
@@ -137,19 +152,19 @@ module DriverExecHelper
137
152
  end
138
153
  end
139
154
 
140
- send_message("LOG", severity, number, line.strip) if severity
141
- }
155
+ send_message('LOG', severity, number, line.strip) if severity
156
+ end
142
157
  end
143
158
 
144
159
  # Generates a proc with that calls log with a hardcoded number. It will
145
160
  # be used to add loging to command actions
146
161
  def log_method(num)
147
- lambda {|message, all=true|
162
+ lambda {|message, all = true|
148
163
  log(num, message, all)
149
164
  }
150
165
  end
151
166
 
152
- #This method returns the result in terms
167
+ # This method returns the result in terms
153
168
  def get_info_from_execution(command_exe)
154
169
  if command_exe.code == 0
155
170
  result = RESULT[:success]
@@ -159,7 +174,7 @@ module DriverExecHelper
159
174
  info = command_exe.get_error_message
160
175
  end
161
176
 
162
- info = "-" if info == nil || info.empty?
177
+ info = '-' if info.nil? || info.empty?
163
178
 
164
179
  [result, info]
165
180
  end
@@ -211,3 +226,4 @@ module DriverExecHelper
211
226
  end
212
227
 
213
228
  end
229
+ # rubocop:enable Naming/FileName
@@ -1,5 +1,5 @@
1
1
  # -------------------------------------------------------------------------- #
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
3
  # #
4
4
  # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
5
  # not use this file except in compliance with the License. You may obtain #
@@ -90,13 +90,17 @@ class OpenNebulaDriver < ActionManager
90
90
  :respond => true,
91
91
  :ssh_stream => nil,
92
92
  :base64 => false,
93
- :zip => false
93
+ :zip => false,
94
+ :no_extra_params => false
94
95
  }.merge(ops)
95
96
 
96
- params = parameters + " #{id} #{host}"
97
+ params = parameters
98
+ params = "#{params} #{id} #{host}" unless options[:no_extra_params]
97
99
  command = action_command_line(aname, params, options[:script_name])
98
100
 
99
- if action_is_local?(aname)
101
+ # if options[:is_local] is not specified (nil)
102
+ # we rely uniquely in actions_is_local?
103
+ if action_is_local?(aname) or options[:is_local]
100
104
  stdin = Base64.strict_encode64(options[:stdin].to_s)
101
105
  execution = LocalCommand.run(command,
102
106
  log_method(id),
@@ -1,5 +1,5 @@
1
1
  # -------------------------------------------------------------------------- #
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
3
  # #
4
4
  # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
5
  # not use this file except in compliance with the License. You may obtain #
@@ -52,7 +52,8 @@ class VirtualMachineDriver < OpenNebulaDriver
52
52
  :disk_snapshot_create => "DISKSNAPSHOTCREATE",
53
53
  :resize_disk => "RESIZEDISK",
54
54
  :update_sg => "UPDATESG",
55
- :update_conf => "UPDATECONF"
55
+ :update_conf => "UPDATECONF",
56
+ :resize => "RESIZE"
56
57
  }
57
58
 
58
59
  POLL_ATTRIBUTE = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
@@ -98,6 +99,7 @@ class VirtualMachineDriver < OpenNebulaDriver
98
99
  register_action(ACTION[:resize_disk].to_sym, method("resize_disk"))
99
100
  register_action(ACTION[:update_sg].to_sym, method("update_sg"))
100
101
  register_action(ACTION[:update_conf].to_sym, method("update_conf"))
102
+ register_action(ACTION[:resize].to_sym, method("resize"))
101
103
  end
102
104
 
103
105
  # Decodes the encoded XML driver message received from the core
@@ -227,6 +229,11 @@ class VirtualMachineDriver < OpenNebulaDriver
227
229
  send_message(ACTION[:update_conf],RESULT[:failure],id,error)
228
230
  end
229
231
 
232
+ def resize(id, drv_message)
233
+ error = "Action not implemented by driver #{self.class}"
234
+ send_message(ACTION[:resize],RESULT[:failure],id,error)
235
+ end
236
+
230
237
  private
231
238
 
232
239
  # Interface to handle the pending events from the ActionManager Interface
@@ -1,5 +1,5 @@
1
1
  # -------------------------------------------------------------------------- #
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
3
  # #
4
4
  # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
5
  # not use this file except in compliance with the License. You may obtain #
@@ -51,7 +51,7 @@ end
51
51
  module CloudClient
52
52
 
53
53
  # OpenNebula version
54
- VERSION = '5.12.6'
54
+ VERSION = '5.13.90'
55
55
 
56
56
  # #########################################################################
57
57
  # Default location for the authentication file
@@ -229,7 +229,7 @@ module CloudCLI
229
229
  def version_text
230
230
  version=<<EOT
231
231
  OpenNebula #{CloudClient::VERSION}
232
- Copyright 2002-2020, OpenNebula Project, OpenNebula Systems
232
+ Copyright 2002-2021, OpenNebula Project, OpenNebula Systems
233
233
 
234
234
  Licensed under the Apache License, Version 2.0 (the "License"); you may
235
235
  not use this file except in compliance with the License. You may obtain
data/lib/datacenter.rb ADDED
@@ -0,0 +1,1259 @@
1
+ # -------------------------------------------------------------------------- #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
+ # #
4
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
+ # not use this file except in compliance with the License. You may obtain #
6
+ # a copy of the License at #
7
+ # #
8
+ # http://www.apache.org/licenses/LICENSE-2.0 #
9
+ # #
10
+ # Unless required by applicable law or agreed to in writing, software #
11
+ # distributed under the License is distributed on an "AS IS" BASIS, #
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
13
+ # See the License for the specific language governing permissions and #
14
+ # limitations under the License. #
15
+ #--------------------------------------------------------------------------- #
16
+
17
+ require 'set'
18
+ require 'digest'
19
+
20
+ ##############################################################################
21
+ # Module VCenterDriver
22
+ ##############################################################################
23
+ module VCenterDriver
24
+
25
+ ##########################################################################
26
+ # Class DatacenterFolder
27
+ ##########################################################################
28
+ class DatacenterFolder
29
+
30
+ attr_accessor :items
31
+
32
+ def initialize(vi_client)
33
+ @vi_client = vi_client
34
+ @items = {}
35
+ end
36
+
37
+ ########################################################################
38
+ # Builds a hash with Datacenter-Ref / Datacenter to be used as a cache
39
+ # @return [Hash] in the form
40
+ # { dc_ref [Symbol] => Datacenter object }
41
+ ########################################################################
42
+ def fetch!
43
+ VIClient
44
+ .get_entities(
45
+ @vi_client.vim.root,
46
+ 'Datacenter'
47
+ ).each do |item|
48
+ item_name = item._ref
49
+ @items[item_name.to_sym] = Datacenter.new(item)
50
+ end
51
+ end
52
+
53
+ ########################################################################
54
+ # Returns a Datacenter. Uses the cache if available.
55
+ # @param ref [Symbol] the vcenter ref
56
+ # @return Datacenter
57
+ ########################################################################
58
+ def get(ref)
59
+ if !@items[ref.to_sym]
60
+ rbvmomi_dc = RbVmomi::VIM::Datacenter.new(@vi_client.vim, ref)
61
+ @items[ref.to_sym] = Datacenter.new(rbvmomi_dc)
62
+ end
63
+
64
+ @items[ref.to_sym]
65
+ end
66
+
67
+ def vcenter_instance_uuid
68
+ @vi_client.vim.serviceContent.about.instanceUuid
69
+ end
70
+
71
+ def vcenter_api_version
72
+ @vi_client.vim.serviceContent.about.apiVersion
73
+ end
74
+
75
+ def get_unimported_hosts(hpool, _vcenter_instance_name)
76
+ host_objects = {}
77
+
78
+ vcenter_uuid = vcenter_instance_uuid
79
+ vcenter_version = vcenter_api_version
80
+
81
+ fetch! if @items.empty? # Get datacenters
82
+
83
+ # Loop through datacenters
84
+ @items.values.each do |dc|
85
+ dc_name = dc.item.name
86
+ host_objects[dc_name] = []
87
+
88
+ # Get clusters inside a datacenter
89
+ host_folder = dc.host_folder
90
+ host_folder.fetch_clusters!
91
+ host_folder.items.values.each do |ccr|
92
+ # Check if the cluster is a host in OpenNebula's pool
93
+ one_host =
94
+ VCenterDriver::VIHelper
95
+ .find_by_ref(
96
+ OpenNebula::HostPool,
97
+ 'TEMPLATE/VCENTER_CCR_REF',
98
+ ccr['_ref'],
99
+ vcenter_uuid,
100
+ hpool
101
+ )
102
+ next if one_host
103
+
104
+ # Get a ClusterComputeResource object
105
+ cluster =
106
+ VCenterDriver::ClusterComputeResource
107
+ .new_from_ref(
108
+ ccr['_ref'],
109
+ @vi_client
110
+ )
111
+
112
+ # Obtain a list of resource pools found in the cluster
113
+ rpools =
114
+ cluster
115
+ .get_resource_pool_list
116
+ .reject {|rp| rp[:name].empty? }
117
+
118
+ # Determine a host location (folder and subfolders)
119
+ item = cluster.item
120
+ folders = []
121
+ until item.instance_of? RbVmomi::VIM::Datacenter
122
+ item = item.parent
123
+ if !item.instance_of?(RbVmomi::VIM::Datacenter) &&
124
+ item.name != 'host'
125
+ folders << item.name
126
+ end
127
+ raise "Could not find the host's location" if item.nil?
128
+ end
129
+ location = folders.reverse.join('/')
130
+ location = '/' if location.empty?
131
+
132
+ # Setting host import name and
133
+ # replace spaces and weird characters
134
+ cluster_name = (ccr['name']).to_s.tr(' ', '_')
135
+ cluster_name =
136
+ VCenterDriver::VIHelper
137
+ .one_name(
138
+ OpenNebula::HostPool,
139
+ cluster_name,
140
+ ccr['_ref']+vcenter_uuid,
141
+ hpool
142
+ )
143
+
144
+ # Prepare hash for import tool
145
+ host_info = {}
146
+ host_info[:simple_name] = ccr['name']
147
+ host_info[:cluster_name] = cluster_name
148
+ host_info[:cluster_ref] = ccr['_ref']
149
+ host_info[:cluster_location] = location
150
+ host_info[:vcenter_uuid] = vcenter_uuid
151
+ host_info[:vcenter_version] = vcenter_version
152
+ host_info[:rp_list] = rpools
153
+
154
+ # Add the hash to current datacenter
155
+ host_objects[dc_name] << host_info
156
+ end
157
+ end
158
+
159
+ host_objects
160
+ end
161
+
162
+ def get_unimported_datastores(dpool, vcenter_instance_name, hpool)
163
+ import_id = 0
164
+ ds_objects = {}
165
+ vcenter_uuid = vcenter_instance_uuid
166
+
167
+ # Get datacenters
168
+ fetch! if @items.empty?
169
+
170
+ @items.values.each do |dc|
171
+ clusters_in_ds = {}
172
+ dc_name = dc.item.name
173
+ dc_ref = dc.item._ref
174
+
175
+ datastore_folder = dc.datastore_folder
176
+ datastore_folder.fetch!
177
+
178
+ datastore_folder.items.values.each do |ds|
179
+ name, capacity, free_space =
180
+ ds
181
+ .item
182
+ .collect(
183
+ 'name',
184
+ 'summary.capacity',
185
+ 'summary.freeSpace'
186
+ )
187
+
188
+ ds_name = name.to_s
189
+ ds_total_mb = ((capacity.to_i / 1024) / 1024)
190
+ ds_free_mb = ((free_space.to_i / 1024) / 1024)
191
+ ds_ref = ds['_ref']
192
+
193
+ ds_objects[ds_ref] = {}
194
+ ds_objects[ds_ref][:ref] = ds_ref
195
+ ds_objects[ds_ref][:import_id] = import_id
196
+ ds_objects[ds_ref][:datacenter] = dc_name
197
+ ds_objects[ds_ref][:simple_name] = ds_name.to_s
198
+ ds_objects[ds_ref][:total_mb] = ds_total_mb
199
+ ds_objects[ds_ref][:free_mb] = ds_free_mb
200
+ ds_objects[ds_ref][:ds] = []
201
+ ds_objects[ds_ref][:cluster] = []
202
+
203
+ if ds.instance_of? VCenterDriver::Datastore
204
+ hosts = ds['host']
205
+ hosts.each do |host|
206
+ cluster_ref = host.key.parent._ref
207
+ if !clusters_in_ds.key?(cluster_ref)
208
+ clusters_in_ds[cluster_ref] = nil
209
+
210
+ # Try to locate cluster ref in host's pool
211
+ one_cluster =
212
+ VCenterDriver::VIHelper
213
+ .find_by_ref(
214
+ OpenNebula::HostPool,
215
+ 'TEMPLATE/VCENTER_CCR_REF',
216
+ cluster_ref,
217
+ vcenter_uuid,
218
+ hpool
219
+ )
220
+ if one_cluster
221
+ ds_objects[ds_ref][:cluster] <<
222
+ one_cluster['CLUSTER_ID'].to_i
223
+ clusters_in_ds[cluster_ref] =
224
+ one_cluster['CLUSTER_ID'].to_i
225
+ end
226
+ else
227
+ if clusters_in_ds[cluster_ref] &&
228
+ !ds_objects[ds_ref][:cluster]
229
+ .include?(
230
+ clusters_in_ds[cluster_ref]
231
+ )
232
+ ds_objects[ds_ref][:cluster] <<
233
+ clusters_in_ds[cluster_ref]
234
+ end
235
+ end
236
+ end
237
+
238
+ already_image_ds = VCenterDriver::Storage
239
+ .exists_one_by_ref_dc_and_type?(
240
+ ds_ref,
241
+ dc_ref,
242
+ vcenter_uuid,
243
+ 'IMAGE_DS',
244
+ dpool
245
+ )
246
+
247
+ key = ds_ref+vcenter_uuid
248
+ if !already_image_ds
249
+ ds_objects[ds_ref][:name] =
250
+ VCenterDriver::VIHelper
251
+ .one_name(
252
+ OpenNebula::DatastorePool,
253
+ "#{ds_name}(IMG)",
254
+ key
255
+ )
256
+ object =
257
+ ds
258
+ .to_one_template(
259
+ ds_objects[ds_ref],
260
+ vcenter_uuid,
261
+ dc_name,
262
+ dc_ref,
263
+ 'IMAGE_DS'
264
+ )
265
+ ds_objects[ds_ref][:ds] << object unless object.nil?
266
+ end
267
+
268
+ already_system_ds =
269
+ VCenterDriver::Storage
270
+ .exists_one_by_ref_dc_and_type?(
271
+ ds_ref,
272
+ dc_ref,
273
+ vcenter_uuid,
274
+ 'SYSTEM_DS',
275
+ dpool
276
+ )
277
+
278
+ if !already_system_ds
279
+ ds_objects[ds_ref][:name] =
280
+ VCenterDriver::VIHelper
281
+ .one_name(
282
+ OpenNebula::DatastorePool,
283
+ "#{ds_name}(SYS)",
284
+ key
285
+ )
286
+ object = ds
287
+ .to_one_template(
288
+ ds_objects[ds_ref],
289
+ vcenter_uuid,
290
+ dc_name,
291
+ dc_ref,
292
+ 'SYSTEM_DS'
293
+ )
294
+ ds_objects[ds_ref][:ds] << object unless object.nil?
295
+ end
296
+
297
+ ds_objects[ds_ref][:name] = ds_name.to_s
298
+ elsif ds.instance_of? VCenterDriver::StoragePod
299
+ ds['children'].each do |sp_ds|
300
+ hosts = sp_ds.host
301
+ hosts.each do |host|
302
+ cluster_ref = host.key.parent._ref
303
+ if !clusters_in_ds.include?(cluster_ref)
304
+ clusters_in_ds[cluster_ref] = nil
305
+ # Try to locate cluster
306
+ # ref in cluster's pool
307
+ one_cluster =
308
+ VCenterDriver::VIHelper
309
+ .find_by_ref(
310
+ OpenNebula::HostPool,
311
+ 'TEMPLATE/VCENTER_CCR_REF',
312
+ cluster_ref,
313
+ vcenter_uuid,
314
+ hpool
315
+ )
316
+ if one_cluster
317
+ ds_objects[ds_ref][:cluster] <<
318
+ one_cluster['CLUSTER_ID'].to_i
319
+ clusters_in_ds[cluster_ref] =
320
+ one_cluster['CLUSTER_ID'].to_i
321
+ end
322
+ else
323
+ if clusters_in_ds[cluster_ref] &&
324
+ !ds_objects[ds_ref][:cluster]
325
+ .include?(
326
+ clusters_in_ds[cluster_ref]
327
+ )
328
+ ds_objects[ds_ref][:cluster] <<
329
+ clusters_in_ds[cluster_ref]
330
+ end
331
+ end
332
+ end
333
+ end
334
+
335
+ already_system_ds = VCenterDriver::Storage
336
+ .exists_one_by_ref_dc_and_type?(
337
+ ds_ref,
338
+ dc_ref,
339
+ vcenter_uuid,
340
+ 'SYSTEM_DS',
341
+ dpool
342
+ )
343
+
344
+ if !already_system_ds
345
+ ds_objects[ds_ref][:name] = "#{ds_name} \
346
+ [#{vcenter_instance_name} - #{dc_name}] (StorDRS)"
347
+ object = ds.to_one_template(
348
+ ds_objects[ds_ref],
349
+ vcenter_uuid,
350
+ dc_name,
351
+ dc_ref,
352
+ 'SYSTEM_DS'
353
+ )
354
+ ds_objects[ds_ref][:ds] << object unless object.nil?
355
+ end
356
+ end
357
+
358
+ if ds_objects[ds_ref][:ds].empty?
359
+ ds_objects.delete(ds_ref)
360
+ else
361
+ import_id += 1
362
+ end
363
+ end
364
+ end
365
+
366
+ { vcenter_instance_name => ds_objects }
367
+ end
368
+
369
+ def get_unimported_templates(vi_client, tpool)
370
+ template_objects = {}
371
+ import_id = 0
372
+ vcenter_uuid = vcenter_instance_uuid
373
+
374
+ vcenter_instance_name = vi_client.vim.host
375
+
376
+ fetch! if @items.empty? # Get datacenters
377
+
378
+ @items.values.each do |dc|
379
+ rp_cache = {}
380
+ dc_name = dc.item.name
381
+
382
+ view = vi_client
383
+ .vim
384
+ .serviceContent
385
+ .viewManager
386
+ .CreateContainerView(
387
+ {
388
+ :container => dc.item.vmFolder,
389
+ :type => ['VirtualMachine'],
390
+ :recursive => true
391
+ }
392
+ )
393
+
394
+ pc = vi_client.vim.serviceContent.propertyCollector
395
+
396
+ filter_spec = RbVmomi::VIM.PropertyFilterSpec(
397
+ :objectSet => [
398
+ {
399
+ :obj => view,
400
+ :skip => true,
401
+ :selectSet => [
402
+ RbVmomi::VIM.TraversalSpec(
403
+ :name => 'traverseEntities',
404
+ :type => 'ContainerView',
405
+ :path => 'view',
406
+ :skip => false
407
+ )
408
+ ]
409
+ }
410
+ ],
411
+ :propSet => [
412
+ {
413
+ :type => 'VirtualMachine',
414
+ :pathSet => ['config.template']
415
+ }
416
+ ]
417
+ )
418
+
419
+ result = pc.RetrieveProperties(
420
+ :specSet => [filter_spec]
421
+ )
422
+
423
+ vms = {}
424
+ result.each do |r|
425
+ if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
426
+ vms[r.obj._ref] = r.to_hash
427
+ end
428
+ end
429
+ templates = []
430
+ vms.each do |ref, value|
431
+ next unless value['config.template']
432
+
433
+ templates << VCenterDriver::Template
434
+ .new_from_ref(
435
+ ref,
436
+ vi_client
437
+ )
438
+ end
439
+
440
+ view.DestroyView # Destroy the view
441
+
442
+ templates.each do |template|
443
+ tref = template['_ref']
444
+ next if template_objects[tref]
445
+
446
+ one_template = VCenterDriver::VIHelper
447
+ .find_by_ref(
448
+ OpenNebula::TemplatePool,
449
+ 'TEMPLATE/VCENTER_TEMPLATE_REF',
450
+ tref,
451
+ vcenter_uuid,
452
+ tpool
453
+ )
454
+
455
+ # If the template has been already imported
456
+ next if one_template
457
+
458
+ one_template = VCenterDriver::Template
459
+ .get_xml_template(
460
+ template,
461
+ vcenter_uuid,
462
+ vi_client,
463
+ dc_name,
464
+ rp_cache
465
+ )
466
+
467
+ next if one_template.nil?
468
+
469
+ one_template[:import_id] = import_id
470
+ one_template[:vcenter] = vcenter_instance_name
471
+ import_id += 1
472
+ template_objects[tref] = one_template
473
+ end
474
+ end
475
+
476
+ {
477
+ vcenter_instance_name => template_objects
478
+ }
479
+ end
480
+
481
+ def cluster_networks(one_host)
482
+ ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']
483
+ cluster = VCenterDriver::ClusterComputeResource
484
+ .new_from_ref(ccr_ref, @vi_client)
485
+ # cluster = cluster_mob(one_host)
486
+ raise "Cluster with ref: #{ccr_ref} not found" if cluster.nil?
487
+
488
+ cluster.item.network
489
+ end
490
+
491
+ # Return ONE cluster ID
492
+ def one_cluster_id(one_host)
493
+ if !one_host || !one_host['CLUSTER_ID']
494
+ cluster_id = -1
495
+ else
496
+ cluster_id = one_host['CLUSTER_ID']
497
+ end
498
+
499
+ cluster_id.to_i
500
+ end
501
+
502
+ # Determine if a network must be excluded from the list
503
+ def exclude_network?(vc_network, one_host, args, vc_network_hash)
504
+ vc_network_name = vc_network_hash[:vc_network_name]
505
+ vc_network_host = vc_network_hash[:vc_network_host]
506
+ vc_network_tag = vc_network_hash[:vc_network_tag]
507
+
508
+ # Exclude some networks if filter = true
509
+ if args[:filter]
510
+ if one_host && one_host['TEMPLATE/NSX_PASSWORD'].nil?
511
+ network_types = [
512
+ VCenterDriver::Network::NETWORK_TYPE_NSXT,
513
+ VCenterDriver::Network::NETWORK_TYPE_NSXV
514
+ ]
515
+
516
+ # Only NSX-V and NSX-T can be excluded
517
+ network_type = VCenterDriver::Network
518
+ .get_network_type(
519
+ vc_network,
520
+ vc_network_name
521
+ )
522
+
523
+ return true if network_types.include? network_type
524
+ end
525
+ # Exclude networks without hosts
526
+ if vc_network_host.empty?
527
+ return true
528
+ end
529
+
530
+ # Exclude DVS uplinks
531
+ if !vc_network_tag.empty? &&
532
+ vc_network_tag[0][:key] == 'SYSTEM/DVS.UPLINKPG'
533
+ return true
534
+ end
535
+ # Exclude portgroup used for VXLAN communication in NSX
536
+ if vc_network['name'].match(/^vxw-vmknicPg-dvs-(.*)/)
537
+ return true
538
+ end
539
+
540
+ return false
541
+ end
542
+ false
543
+ end
544
+
545
        # Process each network
        #
        # Builds the importer description of a single vCenter network:
        # name, network type, switch name, uplink flag and the vCenter /
        # OpenNebula clusters the network is attached to.
        #
        # params expects the keys:
        #   :vc_network            - RbVmomi network object
        #   :vcenter_instance_name - vCenter instance name
        #   :vcenter_uuid          - vCenter instance uuid
        #   :_hpool                - host pool (unused here)
        #   :one_host              - OpenNebula host, used for exclusions
        #   :args                  - CLI options (:short, :filter, ...)
        #
        # @return [Hash, nil] { vc_network_ref => info } or nil when the
        #         network is excluded
        def process_network(params)
            vc_network = params[:vc_network]
            vcenter_instance_name = params[:vcenter_instance_name]
            vcenter_uuid = params[:vcenter_uuid]
            _hpool = params[:_hpool]
            one_host = params[:one_host]
            args = params[:args]

            # :short mode skips building the full ONE template
            full_process = !args[:short]

            vc_network_ref = vc_network._ref
            vc_network_name = VCenterDriver::VcImporter.sanitize(
                vc_network.name
            )
            vc_network_host = vc_network['host']
            vc_network_tag = vc_network['tag']

            vc_network_hash = {}
            vc_network_hash[:vc_network_ref] = vc_network_ref
            vc_network_hash[:vc_network_name] = vc_network_name
            vc_network_hash[:vc_network_host] = vc_network_host
            vc_network_hash[:vc_network_tag] = vc_network_tag

            # Initialize network hash
            network = {}
            # Add name to network hash
            network[vc_network_ref] = { 'name' => vc_network_name }
            # By default no network is excluded
            network[vc_network_ref][:excluded] = false

            # Initialize opts hash used to inject data into one template
            opts = {}

            # if full_process
            # Add network type to network hash
            network_type = \
                VCenterDriver::Network.get_network_type(
                    vc_network,
                    vc_network_name
                )
            network[vc_network_ref][:network_type] = network_type
            network[vc_network_ref][:type] = network_type
            # end

            # Determine if the network must be excluded
            network[vc_network_ref][:excluded] = exclude_network?(
                vc_network,
                one_host,
                args,
                vc_network_hash
            )

            # Excluded networks yield nil so the caller can skip them
            return if network[vc_network_ref][:excluded] == true

            if full_process
                case network[vc_network_ref][:network_type]
                # Distributed PortGroups
                when VCenterDriver::Network::NETWORK_TYPE_DPG
                    network[vc_network_ref][:sw_name] = \
                        vc_network.config.distributedVirtualSwitch.name
                    # For DistributedVirtualPortgroups there
                    # is networks and uplinks
                    network[vc_network_ref][:uplink] = \
                        vc_network.config.uplink
                    # network[vc_network_ref][:uplink] = false
                # NSX-V PortGroups
                when VCenterDriver::Network::NETWORK_TYPE_NSXV
                    network[vc_network_ref][:sw_name] = \
                        vc_network.config.distributedVirtualSwitch.name
                    # For NSX-V ( is the same as DistributedVirtualPortgroups )
                    # there is networks and uplinks
                    network[vc_network_ref][:uplink] = \
                        vc_network.config.uplink
                    # NOTE(review): uplink is immediately overwritten with
                    # false here, unlike the DPG branch — confirm intended
                    network[vc_network_ref][:uplink] = false
                # Standard PortGroups
                when VCenterDriver::Network::NETWORK_TYPE_PG
                    # There is no uplinks for standard portgroups,
                    # so all Standard
                    # PortGroups are networks and no uplinks
                    network[vc_network_ref][:uplink] = false
                    network[vc_network_ref][:sw_name] =
                        VCenterDriver::Network
                        .virtual_switch(
                            vc_network
                        )
                # NSX-T PortGroups
                when VCenterDriver::Network::NETWORK_TYPE_NSXT
                    network[vc_network_ref][:sw_name] = \
                        vc_network.summary.opaqueNetworkType
                    # There is no uplinks for NSX-T networks,
                    # so all NSX-T networks
                    # are networks and no uplinks
                    network[vc_network_ref][:uplink] = false
                else
                    raise 'Unknown network type: ' \
                          "#{network[vc_network_ref][:network_type]}"
                end
            end

            # Multicluster nets support
            network[vc_network_ref][:clusters] = {}
            network[vc_network_ref][:clusters][:refs] = []
            network[vc_network_ref][:clusters][:one_ids] = []
            network[vc_network_ref][:clusters][:names] = []

            # Get hosts related to this network and add them if is not
            # excluded
            vc_hosts = vc_network.host
            vc_hosts.each do |vc_host|
                # Get vCenter Cluster
                vc_cluster = vc_host.parent
                vc_cluster_ref = vc_cluster._ref
                vc_cluster_name = vc_cluster.name
                # Get one host from each vCenter cluster.
                # NOTE(review): this shadows the outer one_host parameter
                # for the rest of the method — looks intentional, confirm
                one_host = VCenterDriver::VIHelper
                           .find_by_ref(OpenNebula::HostPool,
                                        'TEMPLATE/VCENTER_CCR_REF',
                                        vc_cluster_ref,
                                        vcenter_uuid)
                # Check if network is excluded from each host
                next if exclude_network?(
                    vc_network,
                    one_host,
                    args,
                    vc_network_hash
                )

                # Insert vCenter cluster ref
                network[vc_network_ref][:clusters][:refs] << vc_cluster_ref
                # Insert OpenNebula cluster id
                cluster_id = one_cluster_id(one_host)
                network[vc_network_ref][:clusters][:one_ids] << cluster_id
                # Insert vCenter cluster name
                network[vc_network_ref][:clusters][:names] << vc_cluster_name
                opts[:dc_name] = vc_cluster_name
            end

            # Remove duplicate entries
            network[vc_network_ref][:clusters][:refs].uniq!
            network[vc_network_ref][:clusters][:one_ids].uniq!
            network[vc_network_ref][:clusters][:names].uniq!

            # Mark network as processed
            network[vc_network_ref][:processed] = true

            if full_process
                # General net_info related to datacenter
                opts[:vcenter_uuid] = vcenter_uuid
                opts[:vcenter_instance_name] = vcenter_instance_name
                opts[:network_name] = network[vc_network_ref]['name']
                opts[:network_ref] = network.keys.first
                opts[:network_type] = network[vc_network_ref][:network_type]
                opts[:sw_name] = network[vc_network_ref][:sw_name]

                network[vc_network_ref] = \
                    network[vc_network_ref]
                    .merge(VCenterDriver::Network
                    .to_one_template(opts))
            else
                network[vc_network_ref][:ref] = \
                    vc_network_ref
                network[vc_network_ref][:name] = \
                    network[vc_network_ref]['name']
            end

            network
        end
713
+
714
+ # rubocop:disable Style/GlobalVars
715
+ def get_unimported_networks(npool, vcenter_instance_name, hpool, args)
716
+ vcenter_uuid = vcenter_instance_uuid
717
+ networks = {}
718
+
719
+ # Selected host in OpenNebula
720
+ if $conf.nil?
721
+ one_client = OpenNebula::Client.new
722
+ else
723
+ one_client = OpenNebula::Client.new(
724
+ nil,
725
+ $conf[:one_xmlrpc]
726
+ )
727
+ end
728
+
729
+ one_host = OpenNebula::Host.new_with_id(args[:host], one_client)
730
+ rc = one_host.info
731
+ raise rc.message if OpenNebula.is_error? rc
732
+
733
+ # Get all networks in vcenter cluster (one_host)
734
+ vc_cluster_networks = cluster_networks(one_host)
735
+
736
+ # Iterate over vcenter networks
737
+ vc_cluster_networks.each do |vc_cluster_network|
738
+ exist = VCenterDriver::VIHelper
739
+ .find_by_ref(OpenNebula::VirtualNetworkPool,
740
+ 'TEMPLATE/VCENTER_NET_REF',
741
+ vc_cluster_network._ref,
742
+ vcenter_uuid,
743
+ npool)
744
+
745
+ next if exist
746
+
747
+ params = {}
748
+
749
+ params[:vc_network]= vc_cluster_network
750
+ params[:vcenter_instance_name]= vcenter_instance_name
751
+ params[:vcenter_uuid]= vcenter_uuid
752
+ params[:_hpool]= hpool
753
+ params[:one_host]= one_host
754
+ params[:args] = args
755
+
756
+ network = process_network(params)
757
+
758
+ networks.merge!(network) unless network.nil?
759
+ end
760
+ # Added import id
761
+ imid = -1
762
+ networks.map {|_k, v| v[:import_id] = imid += 1 }
763
+ { vcenter_instance_name => networks }
764
+ end
765
+ # rubocop:enable Style/GlobalVars
766
+
767
+ end
768
+ # class DatatacenterFolder
769
+
770
+ ##########################################################################
771
+ # Class Datacenter
772
+ ##########################################################################
773
+ class Datacenter
774
+
775
+ attr_accessor :item
776
+
777
+ DPG_CREATE_TIMEOUT = 240
778
+
779
+ def initialize(item, vi_client = nil)
780
+ check_item(item, RbVmomi::VIM::Datacenter)
781
+
782
+ @vi_client = vi_client
783
+ @item = item
784
+ @net_rollback = []
785
+ @locking = true
786
+ end
787
+
788
+ def datastore_folder
789
+ DatastoreFolder.new(@item.datastoreFolder)
790
+ end
791
+
792
+ def host_folder
793
+ HostFolder.new(@item.hostFolder)
794
+ end
795
+
796
+ def vm_folder
797
+ VirtualMachineFolder.new(@item.vmFolder)
798
+ end
799
+
800
+ def network_folder
801
+ NetworkFolder.new(@item.networkFolder)
802
+ end
803
+
804
+ # Locking function. Similar to flock
805
+ def lock
806
+ hostlockname = @item['name'].downcase.tr(' ', '_')
807
+ return unless @locking
808
+
809
+ @locking_file =
810
+ File
811
+ .open("/tmp/vcenter-dc-#{hostlockname}-lock", 'w')
812
+ @locking_file.flock(File::LOCK_EX)
813
+ end
814
+
815
+ # Unlock driver execution mutex
816
+ def unlock
817
+ return unless @locking
818
+
819
+ @locking_file.close
820
+ end
821
+
822
+ ########################################################################
823
+ # Check if distributed virtual switch exists in host
824
+ ########################################################################
825
+ def dvs_exists(switch_name, net_folder)
826
+ net_folder.items.values.select do |dvs|
827
+ dvs.instance_of?(VCenterDriver::DistributedVirtualSwitch) &&
828
+ dvs['name'] == switch_name
829
+ end.first rescue nil
830
+ end
831
+
832
+ ########################################################################
833
+ # Is the distributed switch for the distributed pg different?
834
+ ########################################################################
835
+ def pg_changes_sw?(dpg, switch_name)
836
+ dpg['config.distributedVirtualSwitch.name'] != switch_name
837
+ end
838
+
839
+ ########################################################################
840
+ # Create a distributed vcenter switch in a datacenter
841
+ ########################################################################
842
+ def create_dvs(switch_name, pnics, mtu = 1500)
843
+ # Prepare spec for DVS creation
844
+ spec = RbVmomi::VIM::DVSCreateSpec.new
845
+ spec.configSpec = RbVmomi::VIM::VMwareDVSConfigSpec.new
846
+ spec.configSpec.name = switch_name
847
+
848
+ # Specify number of uplinks port for dpg
849
+ if pnics
850
+ pnics = pnics.split(',')
851
+ if !pnics.empty?
852
+ spec.configSpec.uplinkPortPolicy =
853
+ RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
854
+ spec.configSpec.uplinkPortPolicy.uplinkPortName = []
855
+ (0..pnics.size-1).each do |index|
856
+ spec
857
+ .configSpec
858
+ .uplinkPortPolicy
859
+ .uplinkPortName[index]="dvUplink#{index+1}"
860
+ end
861
+ end
862
+ end
863
+
864
+ # Set maximum MTU
865
+ spec.configSpec.maxMtu = mtu
866
+
867
+ # The DVS must be created in the networkFolder of the datacenter
868
+ begin
869
+ dvs_creation_task = @item
870
+ .networkFolder
871
+ .CreateDVS_Task(
872
+ :spec => spec
873
+ )
874
+ dvs_creation_task.wait_for_completion
875
+
876
+ # If task finished successfuly we rename the uplink portgroup
877
+ dvs = nil
878
+ if dvs_creation_task.info.state == 'success'
879
+ dvs = dvs_creation_task.info.result
880
+ dvs
881
+ .config
882
+ .uplinkPortgroup[0]
883
+ .Rename_Task(
884
+ :newName => "#{switch_name}-uplink-pg"
885
+ ).wait_for_completion
886
+ else
887
+ raise "The Distributed vSwitch #{switch_name} \
888
+ could not be created. "
889
+ end
890
+ rescue StandardError => e
891
+ raise e
892
+ end
893
+
894
+ @net_rollback << {
895
+ :action => :delete_dvs,
896
+ :dvs => dvs,
897
+ :name => switch_name
898
+ }
899
+
900
+ VCenterDriver::DistributedVirtualSwitch.new(dvs, @vi_client)
901
+ end
902
+
903
+ ########################################################################
904
+ # Update a distributed vcenter switch
905
+ ########################################################################
906
+ def update_dvs(dvs, pnics, mtu)
907
+ # Prepare spec for DVS creation
908
+ spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
909
+ changed = false
910
+
911
+ orig_spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
912
+ orig_spec.maxMtu = dvs['config.maxMtu']
913
+ orig_spec.uplinkPortPolicy =
914
+ RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
915
+ orig_spec.uplinkPortPolicy.uplinkPortName = []
916
+ (0..dvs['config.uplinkPortgroup'].length-1).each do |index|
917
+ orig_spec
918
+ .uplinkPortPolicy
919
+ .uplinkPortName[index]="dvUplink#{index+1}"
920
+ end
921
+
922
+ # Add more uplinks to default uplink
923
+ # port group according to number of pnics
924
+ if pnics
925
+ pnics = pnics.split(',')
926
+ if !pnics.empty? && dvs['config.uplinkPortgroup']
927
+ .length != pnics.size
928
+ spec.uplinkPortPolicy =
929
+ RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
930
+ spec.uplinkPortPolicy.uplinkPortName = []
931
+ (dvs['config.uplinkPortgroup']
932
+ .length..num_pnics-1)
933
+ .each do |index|
934
+ spec
935
+ .uplinkPortPolicy
936
+ .uplinkPortName[index] =
937
+ "dvUplink#{index+1}"
938
+ end
939
+ changed = true
940
+ end
941
+ end
942
+
943
+ # Set maximum MTU
944
+ if mtu != dvs['config.maxMtu']
945
+ spec.maxMtu = mtu
946
+ changed = true
947
+ end
948
+
949
+ # The DVS must be created in the networkFolder of the datacenter
950
+ return unless changed
951
+
952
+ spec.configVersion = dvs['config.configVersion']
953
+
954
+ begin
955
+ dvs
956
+ .item
957
+ .ReconfigureDvs_Task(
958
+ :spec => spec
959
+ ).wait_for_completion
960
+ rescue StandardError => e
961
+ raise "The Distributed switch #{dvs['name']} could \
962
+ not be updated. "\
963
+ "Reason: #{e.message}"
964
+ end
965
+
966
+ @net_rollback << {
967
+ :action => :update_dvs,
968
+ :dvs => dvs.item,
969
+ :name => dvs['name'],
970
+ :spec => orig_spec
971
+ }
972
+ end
973
+
974
+ ########################################################################
975
+ # Remove a distributed vcenter switch in a datacenter
976
+ ########################################################################
977
+ def remove_dvs(dvs)
978
+ begin
979
+ dvs.item.Destroy_Task.wait_for_completion
980
+ rescue StandardError
981
+ # Ignore destroy task exception
982
+ end
983
+ end
984
+
985
+ ########################################################################
986
+ # Check if distributed port group exists in datacenter
987
+ ########################################################################
988
+ def dpg_exists(pg_name, net_folder)
989
+ net_folder.items.values.select do |dpg|
990
+ dpg.instance_of?(VCenterDriver::DistributedPortGroup) &&
991
+ dpg['name'] == pg_name
992
+ end.first rescue nil
993
+ end
994
+
995
+ ########################################################################
996
+ # Check if Opaque Network exists in datacenter
997
+ ########################################################################
998
+ def nsx_network(nsx_id, pg_type)
999
+ timeout = 180
1000
+ case pg_type
1001
+ when VCenterDriver::Network::NETWORK_TYPE_NSXT
1002
+ while timeout > 0
1003
+ net_folder = network_folder
1004
+ net_folder.fetch!
1005
+ net_folder.items.values.each do |net|
1006
+ if net.instance_of?(VCenterDriver::OpaqueNetwork) &&
1007
+ net.item.summary.opaqueNetworkId == nsx_id
1008
+ return net.item._ref
1009
+ end
1010
+ end
1011
+ sleep(1)
1012
+ timeout -= 1
1013
+ end
1014
+ # Not used right now, but maybe neccesary in the future.
1015
+ when VCenterDriver::Network::NETWORK_TYPE_NSXV
1016
+ while timeout > 0
1017
+ net_folder = network_folder
1018
+ net_folder.fetch!
1019
+ net_folder.items.values.each do |net|
1020
+ if net.instance_of?(
1021
+ VCenterDriver::DistributedPortGroup
1022
+ ) &&
1023
+ net.item.key == nsx_id
1024
+ return net.item._ref
1025
+ end
1026
+ end
1027
+ sleep(1)
1028
+ timeout -= 1
1029
+ end
1030
+ else
1031
+ raise "Unknown network Port Group type: #{pg_type}"
1032
+ end
1033
+ end
1034
+
1035
+ ########################################################################
1036
+ # Create a distributed vcenter port group
1037
+ ########################################################################
1038
+ def create_dpg(dvs, pg_name, vlan_id, num_ports)
1039
+ spec = RbVmomi::VIM::DVPortgroupConfigSpec.new
1040
+
1041
+ # OpenNebula use DVS static port binding with autoexpand
1042
+ if num_ports
1043
+ spec.autoExpand = true
1044
+ spec.numPorts = num_ports
1045
+ end
1046
+
1047
+ # Distributed port group name
1048
+ spec.name = pg_name
1049
+
1050
+ # Set VLAN information
1051
+ spec.defaultPortConfig =
1052
+ RbVmomi::VIM::VMwareDVSPortSetting.new
1053
+ spec.defaultPortConfig.vlan =
1054
+ RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
1055
+ spec.defaultPortConfig.vlan.vlanId =
1056
+ vlan_id
1057
+ spec.defaultPortConfig.vlan.inherited =
1058
+ false
1059
+
1060
+ # earlyBinding. A free DistributedVirtualPort will be selected and
1061
+ # assigned to a VirtualMachine when
1062
+ # the virtual machine is reconfigured
1063
+ # to connect to the portgroup.
1064
+ spec.type = 'earlyBinding'
1065
+
1066
+ begin
1067
+ dvs
1068
+ .item
1069
+ .AddDVPortgroup_Task(
1070
+ :spec => [spec]
1071
+ ).wait_for_completion
1072
+ rescue StandardError => e
1073
+ raise "The Distributed port group #{pg_name} \
1074
+ could not be created. "\
1075
+ "Reason: #{e.message}"
1076
+ end
1077
+
1078
+ # wait until the network is ready and we have a reference
1079
+ portgroups = dvs['portgroup'].select do |dpg|
1080
+ dpg.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup) &&
1081
+ dpg['name'] == pg_name
1082
+ end
1083
+
1084
+ (0..DPG_CREATE_TIMEOUT).each do
1085
+ break unless portgroups.empty?
1086
+
1087
+ portgroups = dvs['portgroup'].select do |dpg|
1088
+ dpg
1089
+ .instance_of?(
1090
+ RbVmomi::VIM::DistributedVirtualPortgroup
1091
+ ) && dpg['name'] == pg_name
1092
+ end
1093
+ sleep 1
1094
+ end
1095
+
1096
+ if portgroups.empty?
1097
+ raise 'Cannot get VCENTER_NET_REF \
1098
+ for new distributed port group'
1099
+ end
1100
+
1101
+ @net_rollback << {
1102
+ :action => :delete_dpg,
1103
+ :dpg => portgroups.first,
1104
+ :name => pg_name
1105
+ }
1106
+
1107
+ portgroups.first._ref
1108
+ end
1109
+
1110
+ ########################################################################
1111
+ # Update a distributed vcenter port group
1112
+ ########################################################################
1113
+ def update_dpg(dpg, vlan_id, num_ports)
1114
+ spec = RbVmomi::VIM::DVPortgroupConfigSpec.new
1115
+
1116
+ changed = false
1117
+
1118
+ orig_spec =
1119
+ RbVmomi::VIM::DVPortgroupConfigSpec.new
1120
+ orig_spec.numPorts =
1121
+ dpg['config.numPorts']
1122
+ orig_spec.defaultPortConfig =
1123
+ RbVmomi::VIM::VMwareDVSPortSetting.new
1124
+ orig_spec.defaultPortConfig.vlan =
1125
+ RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
1126
+ orig_spec.defaultPortConfig.vlan.vlanId =
1127
+ dpg['config.defaultPortConfig.vlan.vlanId']
1128
+ orig_spec.defaultPortConfig.vlan.inherited =
1129
+ false
1130
+
1131
+ if num_ports && num_ports != orig_spec.numPorts
1132
+ spec.numPorts = num_ports
1133
+ changed = true
1134
+ end
1135
+
1136
+ # earlyBinding. A free DistributedVirtualPort
1137
+ # will be selected and
1138
+ # assigned to a VirtualMachine when
1139
+ # the virtual machine is reconfigured
1140
+ # to connect to the portgroup.
1141
+ spec.type = 'earlyBinding'
1142
+
1143
+ if vlan_id != orig_spec.defaultPortConfig.vlan.vlanId
1144
+ spec.defaultPortConfig =
1145
+ RbVmomi::VIM::VMwareDVSPortSetting.new
1146
+ spec.defaultPortConfig.vlan =
1147
+ RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
1148
+ spec.defaultPortConfig.vlan.vlanId = vlan_id
1149
+ spec.defaultPortConfig.vlan.inherited = false
1150
+ changed = true
1151
+ end
1152
+
1153
+ return unless changed
1154
+
1155
+ spec.configVersion = dpg['config.configVersion']
1156
+
1157
+ begin
1158
+ dpg
1159
+ .item
1160
+ .ReconfigureDVPortgroup_Task(
1161
+ :spec => spec
1162
+ ).wait_for_completion
1163
+ rescue StandardError => e
1164
+ raise "The Distributed port group #{dpg['name']} \
1165
+ could not be created. "\
1166
+ "Reason: #{e.message}"
1167
+ end
1168
+
1169
+ @net_rollback << {
1170
+ :action => :update_dpg,
1171
+ :dpg => dpg.item,
1172
+ :name => dpg['name'],
1173
+ :spec => orig_spec
1174
+ }
1175
+ end
1176
+
1177
+ ########################################################################
1178
+ # Remove distributed port group from datacenter
1179
+ ########################################################################
1180
+ def remove_dpg(dpg)
1181
+ begin
1182
+ dpg.item.Destroy_Task.wait_for_completion
1183
+ rescue RbVmomi::VIM::ResourceInUse
1184
+ STDERR.puts "The distributed portgroup \
1185
+ #{dpg['name']} is in use so it cannot be deleted"
1186
+ nil
1187
+ rescue StandardError => e
1188
+ raise "The Distributed portgroup #{dpg['name']} \
1189
+ could not be deleted. Reason: #{e.message} "
1190
+ end
1191
+ end
1192
+
1193
+ ########################################################################
1194
+ # Perform vcenter network rollback operations
1195
+ ########################################################################
1196
+ def network_rollback
1197
+ @net_rollback.reverse_each do |nr|
1198
+ case nr[:action]
1199
+ when :update_dpg
1200
+ begin
1201
+ nr[:dpg].ReconfigureDVPortgroup_Task(:spec => nr[:spec])
1202
+ .wait_for_completion
1203
+ rescue StandardError => e
1204
+ raise "A rollback operation for distributed \
1205
+ port group #{nr[:name]} could not \
1206
+ be performed. Reason: #{e.message}"
1207
+ end
1208
+ when :update_dvs
1209
+ begin
1210
+ nr[:dvs].ReconfigureDvs_Task(:spec => nr[:spec])
1211
+ .wait_for_completion
1212
+ rescue StandardError => e
1213
+ raise "A rollback operation for distributed\
1214
+ standard switch #{nr[:name]} could \
1215
+ not be performed. Reason: #{e.message}"
1216
+ end
1217
+ when :delete_dvs
1218
+ begin
1219
+ nr[:dvs].Destroy_Task.wait_for_completion
1220
+ rescue RbVmomi::VIM::ResourceInUse
1221
+ next # Ignore if switch in use
1222
+ rescue RbVmomi::VIM::NotFound
1223
+ next # Ignore if switch not found
1224
+ rescue StandardError => e
1225
+ raise "A rollback operation \
1226
+ for standard switch #{nr[:name]} \
1227
+ could not be performed. Reason: #{e.message}"
1228
+ end
1229
+ when :delete_dpg
1230
+ begin
1231
+ nr[:dpg].Destroy_Task.wait_for_completion
1232
+ rescue RbVmomi::VIM::ResourceInUse
1233
+ next # Ignore if pg in use
1234
+ rescue RbVmomi::VIM::NotFound
1235
+ next # Ignore if pg not found
1236
+ rescue StandardError => e
1237
+ raise "A rollback operation for \
1238
+ standard port group #{nr[:name]} could \
1239
+ not be performed. Reason: #{e.message}"
1240
+ end
1241
+ end
1242
+ end
1243
+ end
1244
+
1245
+ ########################################################################
1246
+ # PowerOn VMs
1247
+ ########################################################################
1248
+ def power_on_vm(vm)
1249
+ @item.PowerOnMultiVM_Task({ :vm => [vm] }).wait_for_completion
1250
+ end
1251
+
1252
+ def self.new_from_ref(ref, vi_client)
1253
+ new(RbVmomi::VIM::Datacenter.new(vi_client.vim, ref), vi_client)
1254
+ end
1255
+
1256
+ end
1257
+
1258
+ end
1259
+ # module VCenterDriver