opennebula 5.12.9 → 6.0.0.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (126) hide show
  1. checksums.yaml +4 -4
  2. data/lib/ActionManager.rb +1 -1
  3. data/lib/CommandManager.rb +1 -1
  4. data/lib/DriverExecHelper.rb +44 -28
  5. data/lib/OpenNebulaDriver.rb +8 -4
  6. data/lib/VirtualMachineDriver.rb +9 -2
  7. data/lib/cloud/CloudClient.rb +3 -3
  8. data/lib/datacenter.rb +1257 -0
  9. data/lib/datastore.rb +1025 -0
  10. data/lib/distributed_firewall.rb +280 -0
  11. data/lib/file_helper.rb +370 -0
  12. data/lib/host.rb +1517 -0
  13. data/lib/logical_port.rb +50 -0
  14. data/lib/logical_switch.rb +77 -0
  15. data/lib/memoize.rb +74 -0
  16. data/lib/models.rb +32 -0
  17. data/lib/models/role.rb +1126 -0
  18. data/lib/models/service.rb +709 -0
  19. data/lib/network.rb +699 -0
  20. data/lib/nsx_client.rb +144 -0
  21. data/lib/nsx_component.rb +28 -0
  22. data/lib/nsx_constants.rb +149 -0
  23. data/lib/nsx_driver.rb +78 -0
  24. data/lib/nsx_error.rb +77 -0
  25. data/lib/nsx_rule.rb +193 -0
  26. data/lib/nsxt_client.rb +176 -0
  27. data/lib/nsxt_dfw.rb +196 -0
  28. data/lib/nsxt_logical_port.rb +94 -0
  29. data/lib/nsxt_rule.rb +188 -0
  30. data/lib/nsxt_tz.rb +38 -0
  31. data/lib/nsxv_client.rb +176 -0
  32. data/lib/nsxv_dfw.rb +202 -0
  33. data/lib/nsxv_logical_port.rb +107 -0
  34. data/lib/nsxv_rule.rb +172 -0
  35. data/lib/nsxv_tz.rb +41 -0
  36. data/lib/opaque_network.rb +134 -0
  37. data/lib/opennebula.rb +5 -2
  38. data/lib/opennebula/acl.rb +1 -1
  39. data/lib/opennebula/acl_pool.rb +1 -1
  40. data/lib/opennebula/client.rb +10 -2
  41. data/lib/opennebula/cluster.rb +1 -1
  42. data/lib/opennebula/cluster_pool.rb +1 -1
  43. data/lib/opennebula/datastore.rb +1 -1
  44. data/lib/opennebula/datastore_pool.rb +1 -1
  45. data/lib/opennebula/document.rb +8 -29
  46. data/lib/opennebula/document_json.rb +42 -12
  47. data/lib/opennebula/document_pool.rb +1 -1
  48. data/lib/opennebula/document_pool_json.rb +1 -1
  49. data/lib/opennebula/error.rb +4 -1
  50. data/lib/opennebula/flow.rb +23 -0
  51. data/lib/opennebula/flow/grammar.rb +1195 -0
  52. data/lib/opennebula/flow/service_pool.rb +190 -0
  53. data/lib/opennebula/flow/service_template.rb +608 -0
  54. data/lib/opennebula/flow/service_template_ext.rb +84 -0
  55. data/lib/opennebula/flow/service_template_pool.rb +32 -0
  56. data/lib/opennebula/flow/validator.rb +499 -0
  57. data/lib/opennebula/group.rb +1 -1
  58. data/lib/opennebula/group_pool.rb +1 -1
  59. data/lib/opennebula/hook.rb +5 -12
  60. data/lib/opennebula/hook_log.rb +1 -1
  61. data/lib/opennebula/hook_pool.rb +1 -1
  62. data/lib/opennebula/host.rb +1 -1
  63. data/lib/opennebula/host_pool.rb +1 -1
  64. data/lib/opennebula/image.rb +19 -14
  65. data/lib/opennebula/image_pool.rb +1 -1
  66. data/lib/opennebula/ldap_auth.rb +1 -1
  67. data/lib/opennebula/ldap_auth_spec.rb +1 -1
  68. data/lib/opennebula/lockable_ext.rb +163 -0
  69. data/lib/opennebula/marketplace.rb +1 -1
  70. data/lib/opennebula/marketplace_pool.rb +1 -1
  71. data/lib/opennebula/marketplaceapp.rb +9 -119
  72. data/lib/opennebula/marketplaceapp_ext.rb +586 -0
  73. data/lib/opennebula/marketplaceapp_pool.rb +1 -1
  74. data/lib/opennebula/oneflow_client.rb +4 -3
  75. data/lib/opennebula/pool.rb +6 -3
  76. data/lib/opennebula/pool_element.rb +1 -1
  77. data/lib/opennebula/security_group.rb +1 -1
  78. data/lib/opennebula/security_group_pool.rb +1 -1
  79. data/lib/opennebula/server_cipher_auth.rb +1 -1
  80. data/lib/opennebula/server_x509_auth.rb +1 -1
  81. data/lib/opennebula/ssh_auth.rb +1 -1
  82. data/lib/opennebula/system.rb +1 -1
  83. data/lib/opennebula/template.rb +4 -13
  84. data/lib/opennebula/template_ext.rb +342 -0
  85. data/lib/opennebula/template_pool.rb +1 -1
  86. data/lib/opennebula/user.rb +26 -2
  87. data/lib/opennebula/user_pool.rb +1 -1
  88. data/lib/opennebula/utils.rb +1 -1
  89. data/lib/opennebula/vdc.rb +1 -1
  90. data/lib/opennebula/vdc_pool.rb +1 -1
  91. data/lib/opennebula/virtual_machine.rb +26 -206
  92. data/lib/opennebula/virtual_machine_ext.rb +469 -0
  93. data/lib/opennebula/virtual_machine_pool.rb +1 -1
  94. data/lib/opennebula/virtual_network.rb +4 -10
  95. data/lib/opennebula/virtual_network_pool.rb +1 -1
  96. data/lib/opennebula/virtual_router.rb +4 -12
  97. data/lib/opennebula/virtual_router_pool.rb +1 -1
  98. data/lib/opennebula/vm_group.rb +4 -11
  99. data/lib/opennebula/vm_group_pool.rb +1 -1
  100. data/lib/opennebula/vntemplate.rb +4 -13
  101. data/lib/opennebula/vntemplate_pool.rb +1 -1
  102. data/lib/opennebula/wait_ext.rb +257 -0
  103. data/lib/opennebula/x509_auth.rb +1 -1
  104. data/lib/opennebula/xml_element.rb +1 -1
  105. data/lib/opennebula/xml_pool.rb +1 -1
  106. data/lib/opennebula/xml_utils.rb +1 -1
  107. data/lib/opennebula/zone.rb +1 -1
  108. data/lib/opennebula/zone_pool.rb +1 -1
  109. data/lib/rest_client.rb +201 -0
  110. data/lib/scripts_common.rb +183 -0
  111. data/lib/transport_zone.rb +43 -0
  112. data/lib/vcenter_driver.rb +13 -12
  113. data/lib/vcenter_importer.rb +616 -0
  114. data/lib/vi_client.rb +281 -0
  115. data/lib/vi_helper.rb +313 -0
  116. data/lib/virtual_machine.rb +3477 -0
  117. data/lib/virtual_wire.rb +158 -0
  118. data/lib/vm_device.rb +80 -0
  119. data/lib/vm_disk.rb +202 -0
  120. data/lib/vm_folder.rb +69 -0
  121. data/lib/vm_helper.rb +30 -0
  122. data/lib/vm_monitor.rb +303 -0
  123. data/lib/vm_nic.rb +70 -0
  124. data/lib/vm_template.rb +1963 -0
  125. data/lib/vmm_importer.rb +121 -0
  126. metadata +140 -27
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 00ae069c15d9f55936ae0f7d3f8b31039e99894f
4
- data.tar.gz: 0d477dc8954df54fa3c0fa069911a233734c1d02
3
+ metadata.gz: 6f9aa7355cf72de3df70da8d7fceb82907f97396
4
+ data.tar.gz: 749eef5d47ae22112fb3c8c1a1dc1d7f313ab5a7
5
5
  SHA512:
6
- metadata.gz: 1c4fa578eb463047e275bbfe53a7f11c12f5d624476dd2d9f3fb5f8966e373265435863aa9e5f5cc80dc39ab8cf94680f752c7e6147de627daacd734c7765a52
7
- data.tar.gz: 0d62cd4c018cac34d3ef56f95c0a7f683c6b8974201aa50c687c6edc068914c323a9592e1b758278f8992ac811470c82687700c9dd0a655a296e9db0d2cb5b3a
6
+ metadata.gz: ba3c4cfcf52af7756cdc67ccefb827e773bf60c5b38a2b8ba32e1dc232582edbb57e1e45994168052e0cc572f14b9d70c4504cb7ea82796122df5f6564609d56
7
+ data.tar.gz: 40ecec9f0e8f2cd10ea50241e97a0554eb6e14204feaf0780879990373415d5eaaceea254efb7cb09b112ffb464ebc7324a17fb980d6306edfef92816c29a745
data/lib/ActionManager.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  # -------------------------------------------------------------------------- */
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
3
  # Licensed under the Apache License, Version 2.0 (the "License"); you may */
4
4
  # not use this file except in compliance with the License. You may obtain */
5
5
  # a copy of the License at */
@@ -1,5 +1,5 @@
1
1
  # --------------------------------------------------------------------------
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems
3
3
  #
4
4
  # Licensed under the Apache License, Version 2.0 (the "License"); you may
5
5
  # not use this file except in compliance with the License. You may obtain
@@ -1,5 +1,6 @@
1
+ # rubocop:disable Naming/FileName
1
2
  # -------------------------------------------------------------------------- #
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
3
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
4
  # #
4
5
  # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
6
  # not use this file except in compliance with the License. You may obtain #
@@ -18,14 +19,15 @@
18
19
  # OpenNebula Drivers. The module has been designed to be included as part
19
20
  # of a driver and not to be used standalone.
20
21
  module DriverExecHelper
22
+
21
23
  # Action result strings for messages
22
24
  RESULT = {
23
- :success => "SUCCESS",
24
- :failure => "FAILURE"
25
+ :success => 'SUCCESS',
26
+ :failure => 'FAILURE'
25
27
  }
26
28
 
27
29
  def self.failed?(rc_str)
28
- return rc_str == RESULT[:failure]
30
+ rc_str == RESULT[:failure]
29
31
  end
30
32
 
31
33
  # Initialize module variables
@@ -34,6 +36,7 @@ module DriverExecHelper
34
36
  @remote_scripts_base_path = @config['SCRIPTS_REMOTE_DIR']
35
37
 
36
38
  @local_actions = options[:local_actions]
39
+ @per_drvr_local_actions = options[:per_drvr_local_actions] || []
37
40
 
38
41
  if ENV['ONE_LOCATION'].nil?
39
42
  @local_scripts_base_path = '/var/lib/one/remotes'
@@ -57,62 +60,74 @@ module DriverExecHelper
57
60
  # actions is remote or local. If the local actions has defined an special
58
61
  # script name this is used, otherwise the action name in downcase is
59
62
  # used as the script name.
63
+ # When action is a String starting with '/' it's considered alreay full
64
+ # path command and no modification is performed apart from adding params.
60
65
  #
61
66
  # @param [String, Symbol] action name of the action
62
67
  # @param [String] parameters arguments for the script
63
68
  # @param [String, nil] default_name alternative name for the script
64
69
  # @param [String, ''] directory to append to the scripts path for actions
65
70
  # @return [String] command line needed to execute the action
66
- def action_command_line(action, parameters, default_name=nil, directory='')
67
- if action_is_local? action
71
+ def action_command_line(action, parameters,
72
+ default_name = nil, directory = '')
73
+
74
+ if action.is_a?(String) && action[0] == '/'
75
+ return action + ' ' + parameters if parameters
76
+
77
+ return action
78
+ elsif action_is_local?(action, directory)
68
79
  script_path=File.join(@local_scripts_path, directory)
69
80
  else
70
81
  script_path=File.join(@remote_scripts_path, directory)
71
82
  end
72
83
 
73
84
  File.join(script_path, action_script_name(action, default_name))+
74
- " "+parameters
85
+ ' '+parameters
75
86
  end
76
87
 
77
88
  # True if the action is meant to be executed locally
78
89
  #
79
90
  # @param [String, Symbol] action name of the action
80
- def action_is_local?(action)
81
- @local_actions.include? action.to_s.upcase
91
+ # @param [String, Symbol] driver name
92
+ def action_is_local?(action, driver = '')
93
+ @local_actions.include? action.to_s.upcase if driver.empty?
94
+
95
+ @local_actions.include? action.to_s.upcase or
96
+ @per_drvr_local_actions.include? "#{driver}-#{action}"
82
97
  end
83
98
 
84
99
  # Name of the script file for the given action
85
100
  #
86
101
  # @param [String, Symbol] action name of the action
87
102
  # @param [String, nil] default_name alternative name for the script
88
- def action_script_name(action, default_name=nil)
103
+ def action_script_name(action, default_name = nil)
89
104
  name=@local_actions[action.to_s.upcase]
90
105
 
91
- if name
92
- name
93
- else
94
- default_name || action.to_s.downcase
95
- end
106
+ name || default_name || action.to_s.downcase
96
107
  end
97
108
 
98
109
  #
99
110
  # METHODS FOR LOGS & COMMAND OUTPUT
100
111
  #
101
112
  # Sends a message to the OpenNebula core through stdout
102
- def send_message(action="-", result=RESULT[:failure], id="-", info="-")
103
- @send_mutex.synchronize {
113
+ # rubocop:disable Metrics/ParameterLists
114
+ def send_message(action = '-', result = RESULT[:failure],
115
+ id = '-', info = '-')
116
+
117
+ @send_mutex.synchronize do
104
118
  STDOUT.puts "#{action} #{result} #{id} #{info}"
105
119
  STDOUT.flush
106
- }
120
+ end
107
121
  end
122
+ # rubocop:enable Metrics/ParameterLists
108
123
 
109
124
  # Sends a log message to ONE. The +message+ can be multiline, it will
110
125
  # be automatically splitted by lines.
111
- def log(number, message, all=true)
126
+ def log(number, message, all = true)
112
127
  in_error_message=false
113
128
  msg=message.strip
114
- msg.each_line {|line|
115
- severity=all ? 'I' : nil
129
+ msg.each_line do |line|
130
+ all ? severity='I' : severity=nil
116
131
  l=line.strip
117
132
 
118
133
  if l=='ERROR MESSAGE --8<------'
@@ -125,8 +140,8 @@ module DriverExecHelper
125
140
  if in_error_message
126
141
  severity='E'
127
142
  elsif line.match(/^(ERROR|DEBUG|INFO):(.*)$/)
128
- line=$2
129
- case $1
143
+ line=Regexp.last_match(2)
144
+ case Regexp.last_match(1)
130
145
  when 'ERROR'
131
146
  severity='E'
132
147
  when 'DEBUG'
@@ -137,19 +152,19 @@ module DriverExecHelper
137
152
  end
138
153
  end
139
154
 
140
- send_message("LOG", severity, number, line.strip) if severity
141
- }
155
+ send_message('LOG', severity, number, line.strip) if severity
156
+ end
142
157
  end
143
158
 
144
159
  # Generates a proc with that calls log with a hardcoded number. It will
145
160
  # be used to add loging to command actions
146
161
  def log_method(num)
147
- lambda {|message, all=true|
162
+ lambda {|message, all = true|
148
163
  log(num, message, all)
149
164
  }
150
165
  end
151
166
 
152
- #This method returns the result in terms
167
+ # This method returns the result in terms
153
168
  def get_info_from_execution(command_exe)
154
169
  if command_exe.code == 0
155
170
  result = RESULT[:success]
@@ -159,7 +174,7 @@ module DriverExecHelper
159
174
  info = command_exe.get_error_message
160
175
  end
161
176
 
162
- info = "-" if info == nil || info.empty?
177
+ info = '-' if info.nil? || info.empty?
163
178
 
164
179
  [result, info]
165
180
  end
@@ -211,3 +226,4 @@ module DriverExecHelper
211
226
  end
212
227
 
213
228
  end
229
+ # rubocop:enable Naming/FileName
@@ -1,5 +1,5 @@
1
1
  # -------------------------------------------------------------------------- #
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
3
  # #
4
4
  # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
5
  # not use this file except in compliance with the License. You may obtain #
@@ -90,13 +90,17 @@ class OpenNebulaDriver < ActionManager
90
90
  :respond => true,
91
91
  :ssh_stream => nil,
92
92
  :base64 => false,
93
- :zip => false
93
+ :zip => false,
94
+ :no_extra_params => false
94
95
  }.merge(ops)
95
96
 
96
- params = parameters + " #{id} #{host}"
97
+ params = parameters
98
+ params = "#{params} #{id} #{host}" unless options[:no_extra_params]
97
99
  command = action_command_line(aname, params, options[:script_name])
98
100
 
99
- if action_is_local?(aname)
101
+ # if options[:is_local] is not specified (nil)
102
+ # we rely uniquely in actions_is_local?
103
+ if action_is_local?(aname) or options[:is_local]
100
104
  stdin = Base64.strict_encode64(options[:stdin].to_s)
101
105
  execution = LocalCommand.run(command,
102
106
  log_method(id),
@@ -1,5 +1,5 @@
1
1
  # -------------------------------------------------------------------------- #
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
3
  # #
4
4
  # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
5
  # not use this file except in compliance with the License. You may obtain #
@@ -52,7 +52,8 @@ class VirtualMachineDriver < OpenNebulaDriver
52
52
  :disk_snapshot_create => "DISKSNAPSHOTCREATE",
53
53
  :resize_disk => "RESIZEDISK",
54
54
  :update_sg => "UPDATESG",
55
- :update_conf => "UPDATECONF"
55
+ :update_conf => "UPDATECONF",
56
+ :resize => "RESIZE"
56
57
  }
57
58
 
58
59
  POLL_ATTRIBUTE = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
@@ -98,6 +99,7 @@ class VirtualMachineDriver < OpenNebulaDriver
98
99
  register_action(ACTION[:resize_disk].to_sym, method("resize_disk"))
99
100
  register_action(ACTION[:update_sg].to_sym, method("update_sg"))
100
101
  register_action(ACTION[:update_conf].to_sym, method("update_conf"))
102
+ register_action(ACTION[:resize].to_sym, method("resize"))
101
103
  end
102
104
 
103
105
  # Decodes the encoded XML driver message received from the core
@@ -227,6 +229,11 @@ class VirtualMachineDriver < OpenNebulaDriver
227
229
  send_message(ACTION[:update_conf],RESULT[:failure],id,error)
228
230
  end
229
231
 
232
+ def resize(id, drv_message)
233
+ error = "Action not implemented by driver #{self.class}"
234
+ send_message(ACTION[:resize],RESULT[:failure],id,error)
235
+ end
236
+
230
237
  private
231
238
 
232
239
  # Interface to handle the pending events from the ActionManager Interface
@@ -1,5 +1,5 @@
1
1
  # -------------------------------------------------------------------------- #
2
- # Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
3
  # #
4
4
  # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
5
  # not use this file except in compliance with the License. You may obtain #
@@ -51,7 +51,7 @@ end
51
51
  module CloudClient
52
52
 
53
53
  # OpenNebula version
54
- VERSION = '5.12.9'
54
+ VERSION = '6.0.0.1'
55
55
 
56
56
  # #########################################################################
57
57
  # Default location for the authentication file
@@ -229,7 +229,7 @@ module CloudCLI
229
229
  def version_text
230
230
  version=<<EOT
231
231
  OpenNebula #{CloudClient::VERSION}
232
- Copyright 2002-2020, OpenNebula Project, OpenNebula Systems
232
+ Copyright 2002-2021, OpenNebula Project, OpenNebula Systems
233
233
 
234
234
  Licensed under the Apache License, Version 2.0 (the "License"); you may
235
235
  not use this file except in compliance with the License. You may obtain
data/lib/datacenter.rb ADDED
@@ -0,0 +1,1257 @@
1
+ # -------------------------------------------------------------------------- #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
+ # #
4
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
+ # not use this file except in compliance with the License. You may obtain #
6
+ # a copy of the License at #
7
+ # #
8
+ # http://www.apache.org/licenses/LICENSE-2.0 #
9
+ # #
10
+ # Unless required by applicable law or agreed to in writing, software #
11
+ # distributed under the License is distributed on an "AS IS" BASIS, #
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
13
+ # See the License for the specific language governing permissions and #
14
+ # limitations under the License. #
15
+ #--------------------------------------------------------------------------- #
16
+
17
+ require 'set'
18
+ require 'digest'
19
+
20
+ ##############################################################################
21
+ # Module VCenterDriver
22
+ ##############################################################################
23
+ module VCenterDriver
24
+
25
+ ##########################################################################
26
+ # Class DatacenterFolder
27
+ ##########################################################################
28
+ class DatacenterFolder
29
+
30
+ attr_accessor :items
31
+
32
+ def initialize(vi_client)
33
+ @vi_client = vi_client
34
+ @items = {}
35
+ end
36
+
37
+ ########################################################################
38
+ # Builds a hash with Datacenter-Ref / Datacenter to be used as a cache
39
+ # @return [Hash] in the form
40
+ # { dc_ref [Symbol] => Datacenter object }
41
+ ########################################################################
42
+ def fetch!
43
+ VIClient
44
+ .get_entities(
45
+ @vi_client.vim.root,
46
+ 'Datacenter'
47
+ ).each do |item|
48
+ item_name = item._ref
49
+ @items[item_name.to_sym] = Datacenter.new(item)
50
+ end
51
+ end
52
+
53
+ ########################################################################
54
+ # Returns a Datacenter. Uses the cache if available.
55
+ # @param ref [Symbol] the vcenter ref
56
+ # @return Datacenter
57
+ ########################################################################
58
+ def get(ref)
59
+ if !@items[ref.to_sym]
60
+ rbvmomi_dc = RbVmomi::VIM::Datacenter.new(@vi_client.vim, ref)
61
+ @items[ref.to_sym] = Datacenter.new(rbvmomi_dc)
62
+ end
63
+
64
+ @items[ref.to_sym]
65
+ end
66
+
67
+ def vcenter_instance_uuid
68
+ @vi_client.vim.serviceContent.about.instanceUuid
69
+ end
70
+
71
+ def vcenter_api_version
72
+ @vi_client.vim.serviceContent.about.apiVersion
73
+ end
74
+
75
+ def get_unimported_hosts(hpool, _vcenter_instance_name)
76
+ host_objects = {}
77
+
78
+ vcenter_uuid = vcenter_instance_uuid
79
+ vcenter_version = vcenter_api_version
80
+
81
+ fetch! if @items.empty? # Get datacenters
82
+
83
+ # Loop through datacenters
84
+ @items.values.each do |dc|
85
+ dc_name = dc.item.name
86
+ host_objects[dc_name] = []
87
+
88
+ # Get clusters inside a datacenter
89
+ host_folder = dc.host_folder
90
+ host_folder.fetch_clusters!
91
+ host_folder.items.values.each do |ccr|
92
+ # Check if the cluster is a host in OpenNebula's pool
93
+ one_host =
94
+ VCenterDriver::VIHelper
95
+ .find_by_ref(
96
+ OpenNebula::HostPool,
97
+ 'TEMPLATE/VCENTER_CCR_REF',
98
+ ccr['_ref'],
99
+ vcenter_uuid,
100
+ hpool
101
+ )
102
+ next if one_host
103
+
104
+ # Get a ClusterComputeResource object
105
+ cluster =
106
+ VCenterDriver::ClusterComputeResource
107
+ .new_from_ref(
108
+ ccr['_ref'],
109
+ @vi_client
110
+ )
111
+
112
+ # Obtain a list of resource pools found in the cluster
113
+ rpools =
114
+ cluster
115
+ .get_resource_pool_list
116
+ .reject {|rp| rp[:name].empty? }
117
+
118
+ # Determine a host location (folder and subfolders)
119
+ item = cluster.item
120
+ folders = []
121
+ until item.instance_of? RbVmomi::VIM::Datacenter
122
+ item = item.parent
123
+ if !item.instance_of?(RbVmomi::VIM::Datacenter) &&
124
+ item.name != 'host'
125
+ folders << item.name
126
+ end
127
+ raise "Could not find the host's location" if item.nil?
128
+ end
129
+ location = folders.reverse.join('/')
130
+ location = '/' if location.empty?
131
+
132
+ # Setting host import name and
133
+ # replace spaces and weird characters
134
+ cluster_name = (ccr['name']).to_s.tr(' ', '_')
135
+ cluster_name =
136
+ VCenterDriver::VIHelper
137
+ .one_name(
138
+ OpenNebula::HostPool,
139
+ cluster_name,
140
+ ccr['_ref']+vcenter_uuid,
141
+ hpool
142
+ )
143
+
144
+ # Prepare hash for import tool
145
+ host_info = {}
146
+ host_info[:simple_name] = ccr['name']
147
+ host_info[:cluster_name] = cluster_name
148
+ host_info[:cluster_ref] = ccr['_ref']
149
+ host_info[:cluster_location] = location
150
+ host_info[:vcenter_uuid] = vcenter_uuid
151
+ host_info[:vcenter_version] = vcenter_version
152
+ host_info[:rp_list] = rpools
153
+
154
+ # Add the hash to current datacenter
155
+ host_objects[dc_name] << host_info
156
+ end
157
+ end
158
+
159
+ host_objects
160
+ end
161
+
162
+ def get_unimported_datastores(dpool, vcenter_instance_name, hpool)
163
+ import_id = 0
164
+ ds_objects = {}
165
+ vcenter_uuid = vcenter_instance_uuid
166
+
167
+ # Get datacenters
168
+ fetch! if @items.empty?
169
+
170
+ @items.values.each do |dc|
171
+ clusters_in_ds = {}
172
+ dc_name = dc.item.name
173
+ dc_ref = dc.item._ref
174
+
175
+ datastore_folder = dc.datastore_folder
176
+ datastore_folder.fetch!
177
+
178
+ datastore_folder.items.values.each do |ds|
179
+ name, capacity, free_space =
180
+ ds
181
+ .item
182
+ .collect(
183
+ 'name',
184
+ 'summary.capacity',
185
+ 'summary.freeSpace'
186
+ )
187
+
188
+ ds_name = name.to_s
189
+ ds_total_mb = ((capacity.to_i / 1024) / 1024)
190
+ ds_free_mb = ((free_space.to_i / 1024) / 1024)
191
+ ds_ref = ds['_ref']
192
+
193
+ ds_objects[ds_ref] = {}
194
+ ds_objects[ds_ref][:ref] = ds_ref
195
+ ds_objects[ds_ref][:import_id] = import_id
196
+ ds_objects[ds_ref][:datacenter] = dc_name
197
+ ds_objects[ds_ref][:simple_name] = ds_name.to_s
198
+ ds_objects[ds_ref][:total_mb] = ds_total_mb
199
+ ds_objects[ds_ref][:free_mb] = ds_free_mb
200
+ ds_objects[ds_ref][:ds] = []
201
+ ds_objects[ds_ref][:cluster] = []
202
+
203
+ if ds.instance_of? VCenterDriver::Datastore
204
+ hosts = ds['host']
205
+ hosts.each do |host|
206
+ cluster_ref = host.key.parent._ref
207
+ if !clusters_in_ds.key?(cluster_ref)
208
+ clusters_in_ds[cluster_ref] = nil
209
+
210
+ # Try to locate cluster ref in host's pool
211
+ one_cluster =
212
+ VCenterDriver::VIHelper
213
+ .find_by_ref(
214
+ OpenNebula::HostPool,
215
+ 'TEMPLATE/VCENTER_CCR_REF',
216
+ cluster_ref,
217
+ vcenter_uuid,
218
+ hpool
219
+ )
220
+ if one_cluster
221
+ ds_objects[ds_ref][:cluster] <<
222
+ one_cluster['CLUSTER_ID'].to_i
223
+ clusters_in_ds[cluster_ref] =
224
+ one_cluster['CLUSTER_ID'].to_i
225
+ end
226
+ else
227
+ if clusters_in_ds[cluster_ref] &&
228
+ !ds_objects[ds_ref][:cluster]
229
+ .include?(
230
+ clusters_in_ds[cluster_ref]
231
+ )
232
+ ds_objects[ds_ref][:cluster] <<
233
+ clusters_in_ds[cluster_ref]
234
+ end
235
+ end
236
+ end
237
+
238
+ already_image_ds = VCenterDriver::Storage
239
+ .exists_one_by_ref_dc_and_type?(
240
+ ds_ref,
241
+ dc_ref,
242
+ vcenter_uuid,
243
+ 'IMAGE_DS',
244
+ dpool
245
+ )
246
+
247
+ key = ds_ref+vcenter_uuid
248
+ if !already_image_ds
249
+ ds_objects[ds_ref][:name] =
250
+ VCenterDriver::VIHelper
251
+ .one_name(
252
+ OpenNebula::DatastorePool,
253
+ "#{ds_name}(IMG)",
254
+ key
255
+ )
256
+ object =
257
+ ds
258
+ .to_one_template(
259
+ ds_objects[ds_ref],
260
+ vcenter_uuid,
261
+ dc_name,
262
+ dc_ref,
263
+ 'IMAGE_DS'
264
+ )
265
+ ds_objects[ds_ref][:ds] << object unless object.nil?
266
+ end
267
+
268
+ already_system_ds =
269
+ VCenterDriver::Storage
270
+ .exists_one_by_ref_dc_and_type?(
271
+ ds_ref,
272
+ dc_ref,
273
+ vcenter_uuid,
274
+ 'SYSTEM_DS',
275
+ dpool
276
+ )
277
+
278
+ if !already_system_ds
279
+ ds_objects[ds_ref][:name] =
280
+ VCenterDriver::VIHelper
281
+ .one_name(
282
+ OpenNebula::DatastorePool,
283
+ "#{ds_name}(SYS)",
284
+ key
285
+ )
286
+ object = ds
287
+ .to_one_template(
288
+ ds_objects[ds_ref],
289
+ vcenter_uuid,
290
+ dc_name,
291
+ dc_ref,
292
+ 'SYSTEM_DS'
293
+ )
294
+ ds_objects[ds_ref][:ds] << object unless object.nil?
295
+ end
296
+
297
+ ds_objects[ds_ref][:name] = ds_name.to_s
298
+ elsif ds.instance_of? VCenterDriver::StoragePod
299
+ ds['children'].each do |sp_ds|
300
+ hosts = sp_ds.host
301
+ hosts.each do |host|
302
+ cluster_ref = host.key.parent._ref
303
+ if !clusters_in_ds.include?(cluster_ref)
304
+ clusters_in_ds[cluster_ref] = nil
305
+ # Try to locate cluster
306
+ # ref in cluster's pool
307
+ one_cluster =
308
+ VCenterDriver::VIHelper
309
+ .find_by_ref(
310
+ OpenNebula::HostPool,
311
+ 'TEMPLATE/VCENTER_CCR_REF',
312
+ cluster_ref,
313
+ vcenter_uuid,
314
+ hpool
315
+ )
316
+ if one_cluster
317
+ ds_objects[ds_ref][:cluster] <<
318
+ one_cluster['CLUSTER_ID'].to_i
319
+ clusters_in_ds[cluster_ref] =
320
+ one_cluster['CLUSTER_ID'].to_i
321
+ end
322
+ else
323
+ if clusters_in_ds[cluster_ref] &&
324
+ !ds_objects[ds_ref][:cluster]
325
+ .include?(
326
+ clusters_in_ds[cluster_ref]
327
+ )
328
+ ds_objects[ds_ref][:cluster] <<
329
+ clusters_in_ds[cluster_ref]
330
+ end
331
+ end
332
+ end
333
+ end
334
+
335
+ already_system_ds = VCenterDriver::Storage
336
+ .exists_one_by_ref_dc_and_type?(
337
+ ds_ref,
338
+ dc_ref,
339
+ vcenter_uuid,
340
+ 'SYSTEM_DS',
341
+ dpool
342
+ )
343
+
344
+ if !already_system_ds
345
+ ds_objects[ds_ref][:name] = "#{ds_name} \
346
+ [#{vcenter_instance_name} - #{dc_name}] (StorDRS)"
347
+ object = ds.to_one_template(
348
+ ds_objects[ds_ref],
349
+ vcenter_uuid,
350
+ dc_name,
351
+ dc_ref,
352
+ 'SYSTEM_DS'
353
+ )
354
+ ds_objects[ds_ref][:ds] << object unless object.nil?
355
+ end
356
+ end
357
+
358
+ if ds_objects[ds_ref][:ds].empty?
359
+ ds_objects.delete(ds_ref)
360
+ else
361
+ import_id += 1
362
+ end
363
+ end
364
+ end
365
+
366
+ { vcenter_instance_name => ds_objects }
367
+ end
368
+
369
+ def get_unimported_templates(vi_client, tpool)
370
+ template_objects = {}
371
+ import_id = 0
372
+ vcenter_uuid = vcenter_instance_uuid
373
+
374
+ vcenter_instance_name = vi_client.vim.host
375
+
376
+ fetch! if @items.empty? # Get datacenters
377
+
378
+ @items.values.each do |dc|
379
+ rp_cache = {}
380
+ dc_name = dc.item.name
381
+
382
+ view = vi_client
383
+ .vim
384
+ .serviceContent
385
+ .viewManager
386
+ .CreateContainerView(
387
+ {
388
+ :container => dc.item.vmFolder,
389
+ :type => ['VirtualMachine'],
390
+ :recursive => true
391
+ }
392
+ )
393
+
394
+ pc = vi_client.vim.serviceContent.propertyCollector
395
+
396
+ filter_spec = RbVmomi::VIM.PropertyFilterSpec(
397
+ :objectSet => [
398
+ {
399
+ :obj => view,
400
+ :skip => true,
401
+ :selectSet => [
402
+ RbVmomi::VIM.TraversalSpec(
403
+ :name => 'traverseEntities',
404
+ :type => 'ContainerView',
405
+ :path => 'view',
406
+ :skip => false
407
+ )
408
+ ]
409
+ }
410
+ ],
411
+ :propSet => [
412
+ {
413
+ :type => 'VirtualMachine',
414
+ :pathSet => ['config.template']
415
+ }
416
+ ]
417
+ )
418
+
419
+ result = pc.RetrieveProperties(
420
+ :specSet => [filter_spec]
421
+ )
422
+
423
+ vms = {}
424
+ result.each do |r|
425
+ if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
426
+ vms[r.obj._ref] = r.to_hash
427
+ end
428
+ end
429
+ templates = []
430
+ vms.each do |ref, value|
431
+ next unless value['config.template']
432
+
433
+ templates << VCenterDriver::Template
434
+ .new_from_ref(
435
+ ref,
436
+ vi_client
437
+ )
438
+ end
439
+
440
+ view.DestroyView # Destroy the view
441
+
442
+ templates.each do |template|
443
+ tref = template['_ref']
444
+ next if template_objects[tref]
445
+
446
+ one_template = VCenterDriver::VIHelper
447
+ .find_by_ref(
448
+ OpenNebula::TemplatePool,
449
+ 'TEMPLATE/VCENTER_TEMPLATE_REF',
450
+ tref,
451
+ vcenter_uuid,
452
+ tpool
453
+ )
454
+
455
+ # If the template has been already imported
456
+ next if one_template
457
+
458
+ one_template = VCenterDriver::Template
459
+ .get_xml_template(
460
+ template,
461
+ vcenter_uuid,
462
+ vi_client,
463
+ dc_name,
464
+ rp_cache
465
+ )
466
+
467
+ next if one_template.nil?
468
+
469
+ one_template[:import_id] = import_id
470
+ one_template[:vcenter] = vcenter_instance_name
471
+ import_id += 1
472
+ template_objects[tref] = one_template
473
+ end
474
+ end
475
+
476
+ {
477
+ vcenter_instance_name => template_objects
478
+ }
479
+ end
480
+
481
+ def cluster_networks(one_host)
482
+ ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']
483
+ cluster = VCenterDriver::ClusterComputeResource
484
+ .new_from_ref(ccr_ref, @vi_client)
485
+ # cluster = cluster_mob(one_host)
486
+ raise "Cluster with ref: #{ccr_ref} not found" if cluster.nil?
487
+
488
+ cluster.item.network
489
+ end
490
+
491
+ # Return ONE cluster ID
492
+ def one_cluster_id(one_host)
493
+ if !one_host || !one_host['CLUSTER_ID']
494
+ cluster_id = -1
495
+ else
496
+ cluster_id = one_host['CLUSTER_ID']
497
+ end
498
+
499
+ cluster_id.to_i
500
+ end
501
+
502
+ # Determine if a network must be excluded from the list
503
+ def exclude_network?(vc_network, one_host, args, vc_network_hash)
504
+ vc_network_name = vc_network_hash[:vc_network_name]
505
+ vc_network_host = vc_network_hash[:vc_network_host]
506
+ vc_network_tag = vc_network_hash[:vc_network_tag]
507
+
508
+ # Exclude some networks if filter = true
509
+ if args[:filter]
510
+ if one_host && one_host['TEMPLATE/NSX_PASSWORD'].nil?
511
+ network_types = [
512
+ VCenterDriver::Network::NETWORK_TYPE_NSXT,
513
+ VCenterDriver::Network::NETWORK_TYPE_NSXV
514
+ ]
515
+
516
+ # Only NSX-V and NSX-T can be excluded
517
+ network_type = VCenterDriver::Network
518
+ .get_network_type(
519
+ vc_network,
520
+ vc_network_name
521
+ )
522
+
523
+ return true if network_types.include? network_type
524
+ end
525
+ # Exclude networks without hosts
526
+ if vc_network_host.empty?
527
+ return true
528
+ end
529
+
530
+ # Exclude DVS uplinks
531
+ if !vc_network_tag.empty? &&
532
+ vc_network_tag[0][:key] == 'SYSTEM/DVS.UPLINKPG'
533
+ return true
534
+ end
535
+ # Exclude portgroup used for VXLAN communication in NSX
536
+ if vc_network['name'].match(/^vxw-vmknicPg-dvs-(.*)/)
537
+ return true
538
+ end
539
+
540
+ return false
541
+ end
542
+ false
543
+ end
544
+
545
        # Process one vCenter network and build the info hash consumed by
        # the network import tooling.
        #
        # @param params [Hash] with keys:
        #   :vc_network             vCenter network managed object
        #   :vcenter_instance_name  [String] vCenter instance name
        #   :vcenter_uuid           [String] vCenter instance uuid
        #   :_hpool                 host pool (currently unused here)
        #   :one_host               ONE host of the selected cluster
        #   :args                   CLI options (:short, :filter, ...)
        #
        # @return [Hash, nil] { vc_network_ref => attributes } or nil when
        #         the network is excluded by exclude_network?
        def process_network(params)
            vc_network = params[:vc_network]
            vcenter_instance_name = params[:vcenter_instance_name]
            vcenter_uuid = params[:vcenter_uuid]
            _hpool = params[:_hpool]
            one_host = params[:one_host]
            args = params[:args]

            # :short mode skips the expensive per-type attributes and the
            # final template generation
            full_process = !args[:short]

            vc_network_ref = vc_network._ref
            vc_network_name = VCenterDriver::VcImporter.sanitize(
                vc_network.name
            )
            vc_network_host = vc_network['host']
            vc_network_tag = vc_network['tag']

            # Bundle the raw attributes once; exclude_network? is called
            # several times with this same hash
            vc_network_hash = {}
            vc_network_hash[:vc_network_ref] = vc_network_ref
            vc_network_hash[:vc_network_name] = vc_network_name
            vc_network_hash[:vc_network_host] = vc_network_host
            vc_network_hash[:vc_network_tag] = vc_network_tag

            # Initialize network hash
            network = {}
            # Add name to network hash
            network[vc_network_ref] = { 'name' => vc_network_name }
            # By default no network is excluded
            network[vc_network_ref][:excluded] = false

            # Initialize opts hash used to inject data into one template
            opts = {}

            # Add network type to network hash
            network_type = \
                VCenterDriver::Network.get_network_type(
                    vc_network,
                    vc_network_name
                )
            network[vc_network_ref][:network_type] = network_type
            network[vc_network_ref][:type] = network_type

            # Determine if the network must be excluded
            network[vc_network_ref][:excluded] = exclude_network?(
                vc_network,
                one_host,
                args,
                vc_network_hash
            )

            # Excluded networks are not returned at all (caller skips nil)
            return if network[vc_network_ref][:excluded] == true

            if full_process
                case network[vc_network_ref][:network_type]
                # Distributed PortGroups
                when VCenterDriver::Network::NETWORK_TYPE_DPG
                    network[vc_network_ref][:sw_name] = \
                        vc_network.config.distributedVirtualSwitch.name
                    # For DistributedVirtualPortgroups there
                    # is networks and uplinks
                    network[vc_network_ref][:uplink] = \
                        vc_network.config.uplink
                # network[vc_network_ref][:uplink] = false
                # NSX-V PortGroups
                when VCenterDriver::Network::NETWORK_TYPE_NSXV
                    network[vc_network_ref][:sw_name] = \
                        vc_network.config.distributedVirtualSwitch.name
                    # For NSX-V ( is the same as DistributedVirtualPortgroups )
                    # there is networks and uplinks
                    # NOTE(review): :uplink is assigned and then immediately
                    # overwritten with false — presumably intentional, verify
                    network[vc_network_ref][:uplink] = \
                        vc_network.config.uplink
                    network[vc_network_ref][:uplink] = false
                # Standard PortGroups
                when VCenterDriver::Network::NETWORK_TYPE_PG
                    # There is no uplinks for standard portgroups,
                    # so all Standard
                    # PortGroups are networks and no uplinks
                    network[vc_network_ref][:uplink] = false
                    network[vc_network_ref][:sw_name] =
                        VCenterDriver::Network
                        .virtual_switch(
                            vc_network
                        )
                # NSX-T PortGroups
                when VCenterDriver::Network::NETWORK_TYPE_NSXT
                    network[vc_network_ref][:sw_name] = \
                        vc_network.summary.opaqueNetworkType
                    # There is no uplinks for NSX-T networks,
                    # so all NSX-T networks
                    # are networks and no uplinks
                    network[vc_network_ref][:uplink] = false
                else
                    raise 'Unknown network type: ' \
                          "#{network[vc_network_ref][:network_type]}"
                end
            end

            # Multicluster nets support
            network[vc_network_ref][:clusters] = {}
            network[vc_network_ref][:clusters][:refs] = []
            network[vc_network_ref][:clusters][:one_ids] = []
            network[vc_network_ref][:clusters][:names] = []

            # Get hosts related to this network and add them if is not
            # excluded
            vc_hosts = vc_network.host
            vc_hosts.each do |vc_host|
                # Get vCenter Cluster
                vc_cluster = vc_host.parent
                vc_cluster_ref = vc_cluster._ref
                vc_cluster_name = vc_cluster.name
                # Get one host from each vCenter cluster
                one_host = VCenterDriver::VIHelper
                           .find_by_ref(OpenNebula::HostPool,
                                        'TEMPLATE/VCENTER_CCR_REF',
                                        vc_cluster_ref,
                                        vcenter_uuid)
                # Check if network is excluded from each host
                next if exclude_network?(
                    vc_network,
                    one_host,
                    args,
                    vc_network_hash
                )

                # Insert vCenter cluster ref
                network[vc_network_ref][:clusters][:refs] << vc_cluster_ref
                # Insert OpenNebula cluster id
                cluster_id = one_cluster_id(one_host)
                network[vc_network_ref][:clusters][:one_ids] << cluster_id
                # Insert vCenter cluster name
                network[vc_network_ref][:clusters][:names] << vc_cluster_name
                # Last processed cluster name wins as dc_name
                opts[:dc_name] = vc_cluster_name
            end

            # Remove duplicate entries
            network[vc_network_ref][:clusters][:refs].uniq!
            network[vc_network_ref][:clusters][:one_ids].uniq!
            network[vc_network_ref][:clusters][:names].uniq!

            # Mark network as processed
            network[vc_network_ref][:processed] = true

            if full_process
                # General net_info related to datacenter
                opts[:vcenter_uuid] = vcenter_uuid
                opts[:vcenter_instance_name] = vcenter_instance_name
                opts[:network_name] = network[vc_network_ref]['name']
                opts[:network_ref] = network.keys.first
                opts[:network_type] = network[vc_network_ref][:network_type]
                opts[:sw_name] = network[vc_network_ref][:sw_name]

                # Merge the generated ONE template attributes into the entry
                network[vc_network_ref] = \
                    network[vc_network_ref]
                    .merge(VCenterDriver::Network
                    .to_one_template(opts))
            else
                # Short mode: only expose ref and name
                network[vc_network_ref][:ref] = \
                    vc_network_ref
                network[vc_network_ref][:name] = \
                    network[vc_network_ref]['name']
            end

            network
        end
711
+
712
+ # rubocop:disable Style/GlobalVars
713
+ def get_unimported_networks(npool, vcenter_instance_name, hpool, args)
714
+ vcenter_uuid = vcenter_instance_uuid
715
+ networks = {}
716
+
717
+ # Selected host in OpenNebula
718
+ if $conf.nil?
719
+ one_client = OpenNebula::Client.new
720
+ else
721
+ one_client = OpenNebula::Client.new(
722
+ nil,
723
+ $conf[:one_xmlrpc]
724
+ )
725
+ end
726
+
727
+ one_host = OpenNebula::Host.new_with_id(args[:host], one_client)
728
+ rc = one_host.info
729
+ raise rc.message if OpenNebula.is_error? rc
730
+
731
+ # Get all networks in vcenter cluster (one_host)
732
+ vc_cluster_networks = cluster_networks(one_host)
733
+
734
+ # Iterate over vcenter networks
735
+ vc_cluster_networks.each do |vc_cluster_network|
736
+ exist = VCenterDriver::VIHelper
737
+ .find_by_ref(OpenNebula::VirtualNetworkPool,
738
+ 'TEMPLATE/VCENTER_NET_REF',
739
+ vc_cluster_network._ref,
740
+ vcenter_uuid,
741
+ npool)
742
+
743
+ next if exist
744
+
745
+ params = {}
746
+
747
+ params[:vc_network]= vc_cluster_network
748
+ params[:vcenter_instance_name]= vcenter_instance_name
749
+ params[:vcenter_uuid]= vcenter_uuid
750
+ params[:_hpool]= hpool
751
+ params[:one_host]= one_host
752
+ params[:args] = args
753
+
754
+ network = process_network(params)
755
+
756
+ networks.merge!(network) unless network.nil?
757
+ end
758
+ # Added import id
759
+ imid = -1
760
+ networks.map {|_k, v| v[:import_id] = imid += 1 }
761
+ { vcenter_instance_name => networks }
762
+ end
763
+ # rubocop:enable Style/GlobalVars
764
+
765
+ end
766
+ # class DatatacenterFolder
767
+
768
+ ##########################################################################
769
+ # Class Datacenter
770
+ ##########################################################################
771
+ class Datacenter
772
+
773
+ attr_accessor :item
774
+
775
+ DPG_CREATE_TIMEOUT = 240
776
+
777
        # Wrap a vCenter datacenter managed object.
        #
        # @param item [RbVmomi::VIM::Datacenter] the wrapped vCenter object
        # @param vi_client [VCenterDriver::VIClient, nil] connection used
        #        for subsequent API calls
        def initialize(item, vi_client = nil)
            # Raises unless item is a RbVmomi::VIM::Datacenter
            check_item(item, RbVmomi::VIM::Datacenter)

            @vi_client = vi_client
            @item = item
            # Undo log consumed by network_rollback
            @net_rollback = []
            # File-based locking enabled by default (see lock/unlock)
            @locking = true
        end
785
+
786
        # @return [DatastoreFolder] wrapper around this DC's datastore folder
        def datastore_folder
            DatastoreFolder.new(@item.datastoreFolder)
        end
789
+
790
        # @return [HostFolder] wrapper around this DC's host folder
        def host_folder
            HostFolder.new(@item.hostFolder)
        end
793
+
794
        # @return [VirtualMachineFolder] wrapper around this DC's VM folder
        def vm_folder
            VirtualMachineFolder.new(@item.vmFolder)
        end
797
+
798
        # @return [NetworkFolder] wrapper around this DC's network folder
        def network_folder
            NetworkFolder.new(@item.networkFolder)
        end
801
+
802
+ # Locking function. Similar to flock
803
+ def lock
804
+ hostlockname = @item['name'].downcase.tr(' ', '_')
805
+ return unless @locking
806
+
807
+ @locking_file =
808
+ File
809
+ .open("/tmp/vcenter-dc-#{hostlockname}-lock", 'w')
810
+ @locking_file.flock(File::LOCK_EX)
811
+ end
812
+
813
+ # Unlock driver execution mutex
814
+ def unlock
815
+ return unless @locking
816
+
817
+ @locking_file.close
818
+ end
819
+
820
+ ########################################################################
821
+ # Check if distributed virtual switch exists in host
822
+ ########################################################################
823
+ def dvs_exists(switch_name, net_folder)
824
+ net_folder.items.values.select do |dvs|
825
+ dvs.instance_of?(VCenterDriver::DistributedVirtualSwitch) &&
826
+ dvs['name'] == switch_name
827
+ end.first rescue nil
828
+ end
829
+
830
+ ########################################################################
831
+ # Is the distributed switch for the distributed pg different?
832
+ ########################################################################
833
+ def pg_changes_sw?(dpg, switch_name)
834
+ dpg['config.distributedVirtualSwitch.name'] != switch_name
835
+ end
836
+
837
+ ########################################################################
838
+ # Create a distributed vcenter switch in a datacenter
839
+ ########################################################################
840
+ def create_dvs(switch_name, pnics, mtu = 1500)
841
+ # Prepare spec for DVS creation
842
+ spec = RbVmomi::VIM::DVSCreateSpec.new
843
+ spec.configSpec = RbVmomi::VIM::VMwareDVSConfigSpec.new
844
+ spec.configSpec.name = switch_name
845
+
846
+ # Specify number of uplinks port for dpg
847
+ if pnics
848
+ pnics = pnics.split(',')
849
+ if !pnics.empty?
850
+ spec.configSpec.uplinkPortPolicy =
851
+ RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
852
+ spec.configSpec.uplinkPortPolicy.uplinkPortName = []
853
+ (0..pnics.size-1).each do |index|
854
+ spec
855
+ .configSpec
856
+ .uplinkPortPolicy
857
+ .uplinkPortName[index]="dvUplink#{index+1}"
858
+ end
859
+ end
860
+ end
861
+
862
+ # Set maximum MTU
863
+ spec.configSpec.maxMtu = mtu
864
+
865
+ # The DVS must be created in the networkFolder of the datacenter
866
+ begin
867
+ dvs_creation_task = @item
868
+ .networkFolder
869
+ .CreateDVS_Task(
870
+ :spec => spec
871
+ )
872
+ dvs_creation_task.wait_for_completion
873
+
874
+ # If task finished successfuly we rename the uplink portgroup
875
+ dvs = nil
876
+ if dvs_creation_task.info.state == 'success'
877
+ dvs = dvs_creation_task.info.result
878
+ dvs
879
+ .config
880
+ .uplinkPortgroup[0]
881
+ .Rename_Task(
882
+ :newName => "#{switch_name}-uplink-pg"
883
+ ).wait_for_completion
884
+ else
885
+ raise "The Distributed vSwitch #{switch_name} \
886
+ could not be created. "
887
+ end
888
+ rescue StandardError => e
889
+ raise e
890
+ end
891
+
892
+ @net_rollback << {
893
+ :action => :delete_dvs,
894
+ :dvs => dvs,
895
+ :name => switch_name
896
+ }
897
+
898
+ VCenterDriver::DistributedVirtualSwitch.new(dvs, @vi_client)
899
+ end
900
+
901
+ ########################################################################
902
+ # Update a distributed vcenter switch
903
+ ########################################################################
904
+ def update_dvs(dvs, pnics, mtu)
905
+ # Prepare spec for DVS creation
906
+ spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
907
+ changed = false
908
+
909
+ orig_spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
910
+ orig_spec.maxMtu = dvs['config.maxMtu']
911
+ orig_spec.uplinkPortPolicy =
912
+ RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
913
+ orig_spec.uplinkPortPolicy.uplinkPortName = []
914
+ (0..dvs['config.uplinkPortgroup'].length-1).each do |index|
915
+ orig_spec
916
+ .uplinkPortPolicy
917
+ .uplinkPortName[index]="dvUplink#{index+1}"
918
+ end
919
+
920
+ # Add more uplinks to default uplink
921
+ # port group according to number of pnics
922
+ if pnics
923
+ pnics = pnics.split(',')
924
+ if !pnics.empty? && dvs['config.uplinkPortgroup']
925
+ .length != pnics.size
926
+ spec.uplinkPortPolicy =
927
+ RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
928
+ spec.uplinkPortPolicy.uplinkPortName = []
929
+ (dvs['config.uplinkPortgroup']
930
+ .length..num_pnics-1)
931
+ .each do |index|
932
+ spec
933
+ .uplinkPortPolicy
934
+ .uplinkPortName[index] =
935
+ "dvUplink#{index+1}"
936
+ end
937
+ changed = true
938
+ end
939
+ end
940
+
941
+ # Set maximum MTU
942
+ if mtu != dvs['config.maxMtu']
943
+ spec.maxMtu = mtu
944
+ changed = true
945
+ end
946
+
947
+ # The DVS must be created in the networkFolder of the datacenter
948
+ return unless changed
949
+
950
+ spec.configVersion = dvs['config.configVersion']
951
+
952
+ begin
953
+ dvs
954
+ .item
955
+ .ReconfigureDvs_Task(
956
+ :spec => spec
957
+ ).wait_for_completion
958
+ rescue StandardError => e
959
+ raise "The Distributed switch #{dvs['name']} could \
960
+ not be updated. "\
961
+ "Reason: #{e.message}"
962
+ end
963
+
964
+ @net_rollback << {
965
+ :action => :update_dvs,
966
+ :dvs => dvs.item,
967
+ :name => dvs['name'],
968
+ :spec => orig_spec
969
+ }
970
+ end
971
+
972
+ ########################################################################
973
+ # Remove a distributed vcenter switch in a datacenter
974
+ ########################################################################
975
+ def remove_dvs(dvs)
976
+ begin
977
+ dvs.item.Destroy_Task.wait_for_completion
978
+ rescue StandardError
979
+ # Ignore destroy task exception
980
+ end
981
+ end
982
+
983
+ ########################################################################
984
+ # Check if distributed port group exists in datacenter
985
+ ########################################################################
986
+ def dpg_exists(pg_name, net_folder)
987
+ net_folder.items.values.select do |dpg|
988
+ dpg.instance_of?(VCenterDriver::DistributedPortGroup) &&
989
+ dpg['name'] == pg_name
990
+ end.first rescue nil
991
+ end
992
+
993
        ########################################################################
        # Check if Opaque Network exists in datacenter
        #
        # Polls the datacenter network folder for up to 180 seconds until a
        # network matching the given NSX id appears, then returns its
        # vCenter managed object reference.
        #
        # @param nsx_id [String] NSX identifier of the network
        # @param pg_type [String] port group type (NSX-T or NSX-V)
        # @return [String, nil] the network ref, or nil on timeout
        # @raise [RuntimeError] for an unknown pg_type
        ########################################################################
        def nsx_network(nsx_id, pg_type)
            timeout = 180
            case pg_type
            when VCenterDriver::Network::NETWORK_TYPE_NSXT
                while timeout > 0
                    # Re-fetch the folder each second until the opaque
                    # network with the matching id shows up
                    net_folder = network_folder
                    net_folder.fetch!
                    net_folder.items.values.each do |net|
                        if net.instance_of?(VCenterDriver::OpaqueNetwork) &&
                           net.item.summary.opaqueNetworkId == nsx_id
                            return net.item._ref
                        end
                    end
                    sleep(1)
                    timeout -= 1
                end
            # Not used right now, but maybe neccesary in the future.
            when VCenterDriver::Network::NETWORK_TYPE_NSXV
                while timeout > 0
                    net_folder = network_folder
                    net_folder.fetch!
                    net_folder.items.values.each do |net|
                        # NSX-V networks appear as distributed port groups
                        # keyed by the NSX id
                        if net.instance_of?(
                            VCenterDriver::DistributedPortGroup
                        ) &&
                           net.item.key == nsx_id
                            return net.item._ref
                        end
                    end
                    sleep(1)
                    timeout -= 1
                end
            else
                raise "Unknown network Port Group type: #{pg_type}"
            end
        end
1032
+
1033
        ########################################################################
        # Create a distributed vcenter port group
        #
        # @param dvs [VCenterDriver::DistributedVirtualSwitch] switch the
        #        port group is added to
        # @param pg_name [String] name for the new port group
        # @param vlan_id [Integer] VLAN id assigned to the port group
        # @param num_ports [Integer, nil] initial port count (autoexpanded)
        #
        # @return [String] vCenter ref of the created port group
        # @raise [RuntimeError] when creation fails or the port group does
        #        not appear within DPG_CREATE_TIMEOUT seconds
        ########################################################################
        def create_dpg(dvs, pg_name, vlan_id, num_ports)
            spec = RbVmomi::VIM::DVPortgroupConfigSpec.new

            # OpenNebula use DVS static port binding with autoexpand
            if num_ports
                spec.autoExpand = true
                spec.numPorts = num_ports
            end

            # Distributed port group name
            spec.name = pg_name

            # Set VLAN information
            spec.defaultPortConfig =
                RbVmomi::VIM::VMwareDVSPortSetting.new
            spec.defaultPortConfig.vlan =
                RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
            spec.defaultPortConfig.vlan.vlanId =
                vlan_id
            spec.defaultPortConfig.vlan.inherited =
                false

            # earlyBinding. A free DistributedVirtualPort will be selected and
            # assigned to a VirtualMachine when
            # the virtual machine is reconfigured
            # to connect to the portgroup.
            spec.type = 'earlyBinding'

            begin
                dvs
                    .item
                    .AddDVPortgroup_Task(
                        :spec => [spec]
                    ).wait_for_completion
            rescue StandardError => e
                raise "The Distributed port group #{pg_name} \
                could not be created. "\
                "Reason: #{e.message}"
            end

            # wait until the network is ready and we have a reference
            portgroups = dvs['portgroup'].select do |dpg|
                dpg.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup) &&
                    dpg['name'] == pg_name
            end

            # Poll once per second (up to DPG_CREATE_TIMEOUT) until the new
            # port group shows up in the switch inventory
            (0..DPG_CREATE_TIMEOUT).each do
                break unless portgroups.empty?

                portgroups = dvs['portgroup'].select do |dpg|
                    dpg
                        .instance_of?(
                            RbVmomi::VIM::DistributedVirtualPortgroup
                        ) && dpg['name'] == pg_name
                end
                sleep 1
            end

            if portgroups.empty?
                raise 'Cannot get VCENTER_NET_REF \
                for new distributed port group'
            end

            # Register the creation so network_rollback can delete the
            # port group if a later step fails
            @net_rollback << {
                :action => :delete_dpg,
                :dpg => portgroups.first,
                :name => pg_name
            }

            portgroups.first._ref
        end
1107
+
1108
+ ########################################################################
1109
+ # Update a distributed vcenter port group
1110
+ ########################################################################
1111
+ def update_dpg(dpg, vlan_id, num_ports)
1112
+ spec = RbVmomi::VIM::DVPortgroupConfigSpec.new
1113
+
1114
+ changed = false
1115
+
1116
+ orig_spec =
1117
+ RbVmomi::VIM::DVPortgroupConfigSpec.new
1118
+ orig_spec.numPorts =
1119
+ dpg['config.numPorts']
1120
+ orig_spec.defaultPortConfig =
1121
+ RbVmomi::VIM::VMwareDVSPortSetting.new
1122
+ orig_spec.defaultPortConfig.vlan =
1123
+ RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
1124
+ orig_spec.defaultPortConfig.vlan.vlanId =
1125
+ dpg['config.defaultPortConfig.vlan.vlanId']
1126
+ orig_spec.defaultPortConfig.vlan.inherited =
1127
+ false
1128
+
1129
+ if num_ports && num_ports != orig_spec.numPorts
1130
+ spec.numPorts = num_ports
1131
+ changed = true
1132
+ end
1133
+
1134
+ # earlyBinding. A free DistributedVirtualPort
1135
+ # will be selected and
1136
+ # assigned to a VirtualMachine when
1137
+ # the virtual machine is reconfigured
1138
+ # to connect to the portgroup.
1139
+ spec.type = 'earlyBinding'
1140
+
1141
+ if vlan_id != orig_spec.defaultPortConfig.vlan.vlanId
1142
+ spec.defaultPortConfig =
1143
+ RbVmomi::VIM::VMwareDVSPortSetting.new
1144
+ spec.defaultPortConfig.vlan =
1145
+ RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
1146
+ spec.defaultPortConfig.vlan.vlanId = vlan_id
1147
+ spec.defaultPortConfig.vlan.inherited = false
1148
+ changed = true
1149
+ end
1150
+
1151
+ return unless changed
1152
+
1153
+ spec.configVersion = dpg['config.configVersion']
1154
+
1155
+ begin
1156
+ dpg
1157
+ .item
1158
+ .ReconfigureDVPortgroup_Task(
1159
+ :spec => spec
1160
+ ).wait_for_completion
1161
+ rescue StandardError => e
1162
+ raise "The Distributed port group #{dpg['name']} \
1163
+ could not be created. "\
1164
+ "Reason: #{e.message}"
1165
+ end
1166
+
1167
+ @net_rollback << {
1168
+ :action => :update_dpg,
1169
+ :dpg => dpg.item,
1170
+ :name => dpg['name'],
1171
+ :spec => orig_spec
1172
+ }
1173
+ end
1174
+
1175
+ ########################################################################
1176
+ # Remove distributed port group from datacenter
1177
+ ########################################################################
1178
+ def remove_dpg(dpg)
1179
+ begin
1180
+ dpg.item.Destroy_Task.wait_for_completion
1181
+ rescue RbVmomi::VIM::ResourceInUse
1182
+ STDERR.puts "The distributed portgroup \
1183
+ #{dpg['name']} is in use so it cannot be deleted"
1184
+ nil
1185
+ rescue StandardError => e
1186
+ raise "The Distributed portgroup #{dpg['name']} \
1187
+ could not be deleted. Reason: #{e.message} "
1188
+ end
1189
+ end
1190
+
1191
        ########################################################################
        # Perform vcenter network rollback operations
        #
        # Replays @net_rollback in reverse order, undoing each recorded
        # action (:update_dpg, :update_dvs, :delete_dvs, :delete_dpg).
        # Deletions ignore ResourceInUse / NotFound; any other failure
        # aborts the rollback with a RuntimeError.
        ########################################################################
        def network_rollback
            @net_rollback.reverse_each do |nr|
                case nr[:action]
                when :update_dpg
                    begin
                        # Restore the original port group spec
                        nr[:dpg].ReconfigureDVPortgroup_Task(:spec => nr[:spec])
                                .wait_for_completion
                    rescue StandardError => e
                        raise "A rollback operation for distributed \
                        port group #{nr[:name]} could not \
                        be performed. Reason: #{e.message}"
                    end
                when :update_dvs
                    begin
                        # Restore the original switch spec
                        nr[:dvs].ReconfigureDvs_Task(:spec => nr[:spec])
                                .wait_for_completion
                    rescue StandardError => e
                        raise "A rollback operation for distributed\
                        standard switch #{nr[:name]} could \
                        not be performed. Reason: #{e.message}"
                    end
                when :delete_dvs
                    begin
                        nr[:dvs].Destroy_Task.wait_for_completion
                    rescue RbVmomi::VIM::ResourceInUse
                        next # Ignore if switch in use
                    rescue RbVmomi::VIM::NotFound
                        next # Ignore if switch not found
                    rescue StandardError => e
                        raise "A rollback operation \
                        for standard switch #{nr[:name]} \
                        could not be performed. Reason: #{e.message}"
                    end
                when :delete_dpg
                    begin
                        nr[:dpg].Destroy_Task.wait_for_completion
                    rescue RbVmomi::VIM::ResourceInUse
                        next # Ignore if pg in use
                    rescue RbVmomi::VIM::NotFound
                        next # Ignore if pg not found
                    rescue StandardError => e
                        raise "A rollback operation for \
                        standard port group #{nr[:name]} could \
                        not be performed. Reason: #{e.message}"
                    end
                end
            end
        end
1242
+
1243
+ ########################################################################
1244
+ # PowerOn VMs
1245
+ ########################################################################
1246
+ def power_on_vm(vm)
1247
+ @item.PowerOnMultiVM_Task({ :vm => [vm] }).wait_for_completion
1248
+ end
1249
+
1250
        # Build a Datacenter wrapper from a vCenter managed object ref.
        #
        # @param ref [String] managed object reference of the datacenter
        # @param vi_client [VCenterDriver::VIClient] active connection
        # @return [Datacenter]
        def self.new_from_ref(ref, vi_client)
            new(RbVmomi::VIM::Datacenter.new(vi_client.vim, ref), vi_client)
        end
1253
+
1254
+ end
1255
+
1256
+ end
1257
+ # module VCenterDriver