opennebula 5.12.8 → 5.13.80.pre
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/ActionManager.rb +1 -1
- data/lib/CommandManager.rb +1 -1
- data/lib/DriverExecHelper.rb +44 -28
- data/lib/OpenNebulaDriver.rb +8 -4
- data/lib/VirtualMachineDriver.rb +9 -2
- data/lib/cloud/CloudClient.rb +3 -3
- data/lib/datacenter.rb +1258 -0
- data/lib/datastore.rb +1025 -0
- data/lib/distributed_firewall.rb +280 -0
- data/lib/file_helper.rb +370 -0
- data/lib/host.rb +1517 -0
- data/lib/logical_port.rb +50 -0
- data/lib/logical_switch.rb +77 -0
- data/lib/memoize.rb +74 -0
- data/lib/models.rb +32 -0
- data/lib/models/role.rb +1126 -0
- data/lib/models/service.rb +709 -0
- data/lib/network.rb +635 -0
- data/lib/nsx_client.rb +144 -0
- data/lib/nsx_component.rb +28 -0
- data/lib/nsx_constants.rb +149 -0
- data/lib/nsx_driver.rb +78 -0
- data/lib/nsx_error.rb +77 -0
- data/lib/nsx_rule.rb +193 -0
- data/lib/nsxt_client.rb +176 -0
- data/lib/nsxt_dfw.rb +196 -0
- data/lib/nsxt_logical_port.rb +94 -0
- data/lib/nsxt_rule.rb +188 -0
- data/lib/nsxt_tz.rb +38 -0
- data/lib/nsxv_client.rb +176 -0
- data/lib/nsxv_dfw.rb +202 -0
- data/lib/nsxv_logical_port.rb +107 -0
- data/lib/nsxv_rule.rb +172 -0
- data/lib/nsxv_tz.rb +41 -0
- data/lib/opaque_network.rb +134 -0
- data/lib/opennebula.rb +5 -2
- data/lib/opennebula/acl.rb +1 -1
- data/lib/opennebula/acl_pool.rb +1 -1
- data/lib/opennebula/client.rb +1 -1
- data/lib/opennebula/cluster.rb +1 -1
- data/lib/opennebula/cluster_pool.rb +1 -1
- data/lib/opennebula/datastore.rb +1 -1
- data/lib/opennebula/datastore_pool.rb +1 -1
- data/lib/opennebula/document.rb +8 -29
- data/lib/opennebula/document_json.rb +42 -12
- data/lib/opennebula/document_pool.rb +1 -1
- data/lib/opennebula/document_pool_json.rb +1 -1
- data/lib/opennebula/error.rb +4 -1
- data/lib/opennebula/flow.rb +23 -0
- data/lib/opennebula/flow/grammar.rb +1195 -0
- data/lib/opennebula/flow/service_pool.rb +190 -0
- data/lib/opennebula/flow/service_template.rb +572 -0
- data/lib/opennebula/flow/service_template_ext.rb +84 -0
- data/lib/opennebula/flow/service_template_pool.rb +32 -0
- data/lib/opennebula/flow/validator.rb +499 -0
- data/lib/opennebula/group.rb +1 -1
- data/lib/opennebula/group_pool.rb +1 -1
- data/lib/opennebula/hook.rb +5 -12
- data/lib/opennebula/hook_log.rb +1 -1
- data/lib/opennebula/hook_pool.rb +1 -1
- data/lib/opennebula/host.rb +1 -1
- data/lib/opennebula/host_pool.rb +1 -1
- data/lib/opennebula/image.rb +17 -14
- data/lib/opennebula/image_pool.rb +1 -1
- data/lib/opennebula/ldap_auth.rb +1 -1
- data/lib/opennebula/ldap_auth_spec.rb +1 -1
- data/lib/opennebula/lockable_ext.rb +163 -0
- data/lib/opennebula/marketplace.rb +1 -1
- data/lib/opennebula/marketplace_pool.rb +1 -1
- data/lib/opennebula/marketplaceapp.rb +9 -119
- data/lib/opennebula/marketplaceapp_ext.rb +522 -0
- data/lib/opennebula/marketplaceapp_pool.rb +1 -1
- data/lib/opennebula/oneflow_client.rb +4 -3
- data/lib/opennebula/pool.rb +4 -3
- data/lib/opennebula/pool_element.rb +1 -1
- data/lib/opennebula/security_group.rb +1 -1
- data/lib/opennebula/security_group_pool.rb +1 -1
- data/lib/opennebula/server_cipher_auth.rb +1 -1
- data/lib/opennebula/server_x509_auth.rb +1 -1
- data/lib/opennebula/ssh_auth.rb +1 -1
- data/lib/opennebula/system.rb +1 -1
- data/lib/opennebula/template.rb +4 -13
- data/lib/opennebula/template_ext.rb +325 -0
- data/lib/opennebula/template_pool.rb +1 -1
- data/lib/opennebula/user.rb +26 -2
- data/lib/opennebula/user_pool.rb +1 -1
- data/lib/opennebula/utils.rb +1 -1
- data/lib/opennebula/vdc.rb +1 -1
- data/lib/opennebula/vdc_pool.rb +1 -1
- data/lib/opennebula/virtual_machine.rb +25 -207
- data/lib/opennebula/virtual_machine_ext.rb +469 -0
- data/lib/opennebula/virtual_machine_pool.rb +1 -1
- data/lib/opennebula/virtual_network.rb +4 -10
- data/lib/opennebula/virtual_network_pool.rb +1 -1
- data/lib/opennebula/virtual_router.rb +4 -12
- data/lib/opennebula/virtual_router_pool.rb +1 -1
- data/lib/opennebula/vm_group.rb +4 -11
- data/lib/opennebula/vm_group_pool.rb +1 -1
- data/lib/opennebula/vntemplate.rb +4 -13
- data/lib/opennebula/vntemplate_pool.rb +1 -1
- data/lib/opennebula/wait_ext.rb +222 -0
- data/lib/opennebula/x509_auth.rb +1 -1
- data/lib/opennebula/xml_element.rb +1 -1
- data/lib/opennebula/xml_pool.rb +1 -1
- data/lib/opennebula/xml_utils.rb +1 -1
- data/lib/opennebula/zone.rb +1 -1
- data/lib/opennebula/zone_pool.rb +1 -1
- data/lib/rest_client.rb +201 -0
- data/lib/scripts_common.rb +180 -0
- data/lib/transport_zone.rb +43 -0
- data/lib/vcenter_driver.rb +13 -12
- data/lib/vcenter_importer.rb +616 -0
- data/lib/vi_client.rb +281 -0
- data/lib/vi_helper.rb +312 -0
- data/lib/virtual_machine.rb +3477 -0
- data/lib/virtual_wire.rb +158 -0
- data/lib/vm_device.rb +80 -0
- data/lib/vm_disk.rb +202 -0
- data/lib/vm_folder.rb +69 -0
- data/lib/vm_helper.rb +30 -0
- data/lib/vm_monitor.rb +303 -0
- data/lib/vm_nic.rb +70 -0
- data/lib/vm_template.rb +1961 -0
- data/lib/vmm_importer.rb +121 -0
- metadata +142 -29
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA1:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: cc16996ef3b6218367420d87066688a52ee86042
|
|
4
|
+
data.tar.gz: dae462f84f7ec7bfed459506c56981be1e27974e
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: d6a40c0cbf0a2125a07d18348d0d13471562d81581c06318126a7d671025bdcf34a30c5228af30bd37014af65329714ada099de91fcde70db29198b2731c0fc8
|
|
7
|
+
data.tar.gz: 4a53d311862746b7712066d355120306a5b3d4311315dabf24513f5e20dc509acb4254e5d58556090b768f3aa33e89df2311c10127d87c918f7c0befe2b6c8a1
|
data/lib/ActionManager.rb
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
# -------------------------------------------------------------------------- */
|
|
2
|
-
# Copyright 2002-
|
|
2
|
+
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
|
|
3
3
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
|
4
4
|
# not use this file except in compliance with the License. You may obtain */
|
|
5
5
|
# a copy of the License at */
|
data/lib/CommandManager.rb
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
# --------------------------------------------------------------------------
|
|
2
|
-
# Copyright 2002-
|
|
2
|
+
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems
|
|
3
3
|
#
|
|
4
4
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
5
5
|
# not use this file except in compliance with the License. You may obtain
|
data/lib/DriverExecHelper.rb
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
|
+
# rubocop:disable Naming/FileName
|
|
1
2
|
# -------------------------------------------------------------------------- #
|
|
2
|
-
# Copyright 2002-
|
|
3
|
+
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
|
|
3
4
|
# #
|
|
4
5
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
|
5
6
|
# not use this file except in compliance with the License. You may obtain #
|
|
@@ -18,14 +19,15 @@
|
|
|
18
19
|
# OpenNebula Drivers. The module has been designed to be included as part
|
|
19
20
|
# of a driver and not to be used standalone.
|
|
20
21
|
module DriverExecHelper
|
|
22
|
+
|
|
21
23
|
# Action result strings for messages
|
|
22
24
|
RESULT = {
|
|
23
|
-
:success =>
|
|
24
|
-
:failure =>
|
|
25
|
+
:success => 'SUCCESS',
|
|
26
|
+
:failure => 'FAILURE'
|
|
25
27
|
}
|
|
26
28
|
|
|
27
29
|
def self.failed?(rc_str)
|
|
28
|
-
|
|
30
|
+
rc_str == RESULT[:failure]
|
|
29
31
|
end
|
|
30
32
|
|
|
31
33
|
# Initialize module variables
|
|
@@ -34,6 +36,7 @@ module DriverExecHelper
|
|
|
34
36
|
@remote_scripts_base_path = @config['SCRIPTS_REMOTE_DIR']
|
|
35
37
|
|
|
36
38
|
@local_actions = options[:local_actions]
|
|
39
|
+
@per_drvr_local_actions = options[:per_drvr_local_actions] || []
|
|
37
40
|
|
|
38
41
|
if ENV['ONE_LOCATION'].nil?
|
|
39
42
|
@local_scripts_base_path = '/var/lib/one/remotes'
|
|
@@ -57,62 +60,74 @@ module DriverExecHelper
|
|
|
57
60
|
# actions is remote or local. If the local actions has defined an special
|
|
58
61
|
# script name this is used, otherwise the action name in downcase is
|
|
59
62
|
# used as the script name.
|
|
63
|
+
# When action is a String starting with '/' it's considered alreay full
|
|
64
|
+
# path command and no modification is performed apart from adding params.
|
|
60
65
|
#
|
|
61
66
|
# @param [String, Symbol] action name of the action
|
|
62
67
|
# @param [String] parameters arguments for the script
|
|
63
68
|
# @param [String, nil] default_name alternative name for the script
|
|
64
69
|
# @param [String, ''] directory to append to the scripts path for actions
|
|
65
70
|
# @return [String] command line needed to execute the action
|
|
66
|
-
def action_command_line(action, parameters,
|
|
67
|
-
|
|
71
|
+
def action_command_line(action, parameters,
|
|
72
|
+
default_name = nil, directory = '')
|
|
73
|
+
|
|
74
|
+
if action.is_a?(String) && action[0] == '/'
|
|
75
|
+
return action + ' ' + parameters if parameters
|
|
76
|
+
|
|
77
|
+
return action
|
|
78
|
+
elsif action_is_local?(action, directory)
|
|
68
79
|
script_path=File.join(@local_scripts_path, directory)
|
|
69
80
|
else
|
|
70
81
|
script_path=File.join(@remote_scripts_path, directory)
|
|
71
82
|
end
|
|
72
83
|
|
|
73
84
|
File.join(script_path, action_script_name(action, default_name))+
|
|
74
|
-
|
|
85
|
+
' '+parameters
|
|
75
86
|
end
|
|
76
87
|
|
|
77
88
|
# True if the action is meant to be executed locally
|
|
78
89
|
#
|
|
79
90
|
# @param [String, Symbol] action name of the action
|
|
80
|
-
|
|
81
|
-
|
|
91
|
+
# @param [String, Symbol] driver name
|
|
92
|
+
def action_is_local?(action, driver = '')
|
|
93
|
+
@local_actions.include? action.to_s.upcase if driver.empty?
|
|
94
|
+
|
|
95
|
+
@local_actions.include? action.to_s.upcase or
|
|
96
|
+
@per_drvr_local_actions.include? "#{driver}-#{action}"
|
|
82
97
|
end
|
|
83
98
|
|
|
84
99
|
# Name of the script file for the given action
|
|
85
100
|
#
|
|
86
101
|
# @param [String, Symbol] action name of the action
|
|
87
102
|
# @param [String, nil] default_name alternative name for the script
|
|
88
|
-
def action_script_name(action, default_name=nil)
|
|
103
|
+
def action_script_name(action, default_name = nil)
|
|
89
104
|
name=@local_actions[action.to_s.upcase]
|
|
90
105
|
|
|
91
|
-
|
|
92
|
-
name
|
|
93
|
-
else
|
|
94
|
-
default_name || action.to_s.downcase
|
|
95
|
-
end
|
|
106
|
+
name || default_name || action.to_s.downcase
|
|
96
107
|
end
|
|
97
108
|
|
|
98
109
|
#
|
|
99
110
|
# METHODS FOR LOGS & COMMAND OUTPUT
|
|
100
111
|
#
|
|
101
112
|
# Sends a message to the OpenNebula core through stdout
|
|
102
|
-
|
|
103
|
-
|
|
113
|
+
# rubocop:disable Metrics/ParameterLists
|
|
114
|
+
def send_message(action = '-', result = RESULT[:failure],
|
|
115
|
+
id = '-', info = '-')
|
|
116
|
+
|
|
117
|
+
@send_mutex.synchronize do
|
|
104
118
|
STDOUT.puts "#{action} #{result} #{id} #{info}"
|
|
105
119
|
STDOUT.flush
|
|
106
|
-
|
|
120
|
+
end
|
|
107
121
|
end
|
|
122
|
+
# rubocop:enable Metrics/ParameterLists
|
|
108
123
|
|
|
109
124
|
# Sends a log message to ONE. The +message+ can be multiline, it will
|
|
110
125
|
# be automatically splitted by lines.
|
|
111
|
-
def log(number, message, all=true)
|
|
126
|
+
def log(number, message, all = true)
|
|
112
127
|
in_error_message=false
|
|
113
128
|
msg=message.strip
|
|
114
|
-
msg.each_line
|
|
115
|
-
|
|
129
|
+
msg.each_line do |line|
|
|
130
|
+
all ? severity='I' : severity=nil
|
|
116
131
|
l=line.strip
|
|
117
132
|
|
|
118
133
|
if l=='ERROR MESSAGE --8<------'
|
|
@@ -125,8 +140,8 @@ module DriverExecHelper
|
|
|
125
140
|
if in_error_message
|
|
126
141
|
severity='E'
|
|
127
142
|
elsif line.match(/^(ERROR|DEBUG|INFO):(.*)$/)
|
|
128
|
-
line
|
|
129
|
-
case
|
|
143
|
+
line=Regexp.last_match(2)
|
|
144
|
+
case Regexp.last_match(1)
|
|
130
145
|
when 'ERROR'
|
|
131
146
|
severity='E'
|
|
132
147
|
when 'DEBUG'
|
|
@@ -137,19 +152,19 @@ module DriverExecHelper
|
|
|
137
152
|
end
|
|
138
153
|
end
|
|
139
154
|
|
|
140
|
-
send_message(
|
|
141
|
-
|
|
155
|
+
send_message('LOG', severity, number, line.strip) if severity
|
|
156
|
+
end
|
|
142
157
|
end
|
|
143
158
|
|
|
144
159
|
# Generates a proc with that calls log with a hardcoded number. It will
|
|
145
160
|
# be used to add loging to command actions
|
|
146
161
|
def log_method(num)
|
|
147
|
-
lambda {|message, all=true|
|
|
162
|
+
lambda {|message, all = true|
|
|
148
163
|
log(num, message, all)
|
|
149
164
|
}
|
|
150
165
|
end
|
|
151
166
|
|
|
152
|
-
#This method returns the result in terms
|
|
167
|
+
# This method returns the result in terms
|
|
153
168
|
def get_info_from_execution(command_exe)
|
|
154
169
|
if command_exe.code == 0
|
|
155
170
|
result = RESULT[:success]
|
|
@@ -159,7 +174,7 @@ module DriverExecHelper
|
|
|
159
174
|
info = command_exe.get_error_message
|
|
160
175
|
end
|
|
161
176
|
|
|
162
|
-
info =
|
|
177
|
+
info = '-' if info.nil? || info.empty?
|
|
163
178
|
|
|
164
179
|
[result, info]
|
|
165
180
|
end
|
|
@@ -211,3 +226,4 @@ module DriverExecHelper
|
|
|
211
226
|
end
|
|
212
227
|
|
|
213
228
|
end
|
|
229
|
+
# rubocop:enable Naming/FileName
|
data/lib/OpenNebulaDriver.rb
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
# -------------------------------------------------------------------------- #
|
|
2
|
-
# Copyright 2002-
|
|
2
|
+
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
|
|
3
3
|
# #
|
|
4
4
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
|
5
5
|
# not use this file except in compliance with the License. You may obtain #
|
|
@@ -90,13 +90,17 @@ class OpenNebulaDriver < ActionManager
|
|
|
90
90
|
:respond => true,
|
|
91
91
|
:ssh_stream => nil,
|
|
92
92
|
:base64 => false,
|
|
93
|
-
:zip => false
|
|
93
|
+
:zip => false,
|
|
94
|
+
:no_extra_params => false
|
|
94
95
|
}.merge(ops)
|
|
95
96
|
|
|
96
|
-
params
|
|
97
|
+
params = parameters
|
|
98
|
+
params = "#{params} #{id} #{host}" unless options[:no_extra_params]
|
|
97
99
|
command = action_command_line(aname, params, options[:script_name])
|
|
98
100
|
|
|
99
|
-
if
|
|
101
|
+
# if options[:is_local] is not specified (nil)
|
|
102
|
+
# we rely uniquely in actions_is_local?
|
|
103
|
+
if action_is_local?(aname) or options[:is_local]
|
|
100
104
|
stdin = Base64.strict_encode64(options[:stdin].to_s)
|
|
101
105
|
execution = LocalCommand.run(command,
|
|
102
106
|
log_method(id),
|
data/lib/VirtualMachineDriver.rb
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
# -------------------------------------------------------------------------- #
|
|
2
|
-
# Copyright 2002-
|
|
2
|
+
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
|
|
3
3
|
# #
|
|
4
4
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
|
5
5
|
# not use this file except in compliance with the License. You may obtain #
|
|
@@ -52,7 +52,8 @@ class VirtualMachineDriver < OpenNebulaDriver
|
|
|
52
52
|
:disk_snapshot_create => "DISKSNAPSHOTCREATE",
|
|
53
53
|
:resize_disk => "RESIZEDISK",
|
|
54
54
|
:update_sg => "UPDATESG",
|
|
55
|
-
:update_conf => "UPDATECONF"
|
|
55
|
+
:update_conf => "UPDATECONF",
|
|
56
|
+
:resize => "RESIZE"
|
|
56
57
|
}
|
|
57
58
|
|
|
58
59
|
POLL_ATTRIBUTE = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
|
|
@@ -98,6 +99,7 @@ class VirtualMachineDriver < OpenNebulaDriver
|
|
|
98
99
|
register_action(ACTION[:resize_disk].to_sym, method("resize_disk"))
|
|
99
100
|
register_action(ACTION[:update_sg].to_sym, method("update_sg"))
|
|
100
101
|
register_action(ACTION[:update_conf].to_sym, method("update_conf"))
|
|
102
|
+
register_action(ACTION[:resize].to_sym, method("resize"))
|
|
101
103
|
end
|
|
102
104
|
|
|
103
105
|
# Decodes the encoded XML driver message received from the core
|
|
@@ -227,6 +229,11 @@ class VirtualMachineDriver < OpenNebulaDriver
|
|
|
227
229
|
send_message(ACTION[:update_conf],RESULT[:failure],id,error)
|
|
228
230
|
end
|
|
229
231
|
|
|
232
|
+
def resize(id, drv_message)
|
|
233
|
+
error = "Action not implemented by driver #{self.class}"
|
|
234
|
+
send_message(ACTION[:resize],RESULT[:failure],id,error)
|
|
235
|
+
end
|
|
236
|
+
|
|
230
237
|
private
|
|
231
238
|
|
|
232
239
|
# Interface to handle the pending events from the ActionManager Interface
|
data/lib/cloud/CloudClient.rb
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
# -------------------------------------------------------------------------- #
|
|
2
|
-
# Copyright 2002-
|
|
2
|
+
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
|
|
3
3
|
# #
|
|
4
4
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
|
5
5
|
# not use this file except in compliance with the License. You may obtain #
|
|
@@ -51,7 +51,7 @@ end
|
|
|
51
51
|
module CloudClient
|
|
52
52
|
|
|
53
53
|
# OpenNebula version
|
|
54
|
-
VERSION = '5.
|
|
54
|
+
VERSION = '5.13.80'
|
|
55
55
|
|
|
56
56
|
# #########################################################################
|
|
57
57
|
# Default location for the authentication file
|
|
@@ -229,7 +229,7 @@ module CloudCLI
|
|
|
229
229
|
def version_text
|
|
230
230
|
version=<<EOT
|
|
231
231
|
OpenNebula #{CloudClient::VERSION}
|
|
232
|
-
Copyright 2002-
|
|
232
|
+
Copyright 2002-2021, OpenNebula Project, OpenNebula Systems
|
|
233
233
|
|
|
234
234
|
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
235
235
|
not use this file except in compliance with the License. You may obtain
|
data/lib/datacenter.rb
ADDED
|
@@ -0,0 +1,1258 @@
|
|
|
1
|
+
# -------------------------------------------------------------------------- #
|
|
2
|
+
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
|
|
3
|
+
# #
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
|
5
|
+
# not use this file except in compliance with the License. You may obtain #
|
|
6
|
+
# a copy of the License at #
|
|
7
|
+
# #
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0 #
|
|
9
|
+
# #
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software #
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS, #
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
|
13
|
+
# See the License for the specific language governing permissions and #
|
|
14
|
+
# limitations under the License. #
|
|
15
|
+
#--------------------------------------------------------------------------- #
|
|
16
|
+
|
|
17
|
+
require 'set'
|
|
18
|
+
require 'digest'
|
|
19
|
+
|
|
20
|
+
##############################################################################
|
|
21
|
+
# Module VCenterDriver
|
|
22
|
+
##############################################################################
|
|
23
|
+
module VCenterDriver
|
|
24
|
+
|
|
25
|
+
##########################################################################
|
|
26
|
+
# Class DatacenterFolder
|
|
27
|
+
##########################################################################
|
|
28
|
+
class DatacenterFolder
|
|
29
|
+
|
|
30
|
+
attr_accessor :items
|
|
31
|
+
|
|
32
|
+
def initialize(vi_client)
|
|
33
|
+
@vi_client = vi_client
|
|
34
|
+
@items = {}
|
|
35
|
+
end
|
|
36
|
+
|
|
37
|
+
########################################################################
|
|
38
|
+
# Builds a hash with Datacenter-Ref / Datacenter to be used as a cache
|
|
39
|
+
# @return [Hash] in the form
|
|
40
|
+
# { dc_ref [Symbol] => Datacenter object }
|
|
41
|
+
########################################################################
|
|
42
|
+
def fetch!
|
|
43
|
+
VIClient
|
|
44
|
+
.get_entities(
|
|
45
|
+
@vi_client.vim.root,
|
|
46
|
+
'Datacenter'
|
|
47
|
+
).each do |item|
|
|
48
|
+
item_name = item._ref
|
|
49
|
+
@items[item_name.to_sym] = Datacenter.new(item)
|
|
50
|
+
end
|
|
51
|
+
end
|
|
52
|
+
|
|
53
|
+
########################################################################
|
|
54
|
+
# Returns a Datacenter. Uses the cache if available.
|
|
55
|
+
# @param ref [Symbol] the vcenter ref
|
|
56
|
+
# @return Datacenter
|
|
57
|
+
########################################################################
|
|
58
|
+
def get(ref)
|
|
59
|
+
if !@items[ref.to_sym]
|
|
60
|
+
rbvmomi_dc = RbVmomi::VIM::Datacenter.new(@vi_client.vim, ref)
|
|
61
|
+
@items[ref.to_sym] = Datacenter.new(rbvmomi_dc)
|
|
62
|
+
end
|
|
63
|
+
|
|
64
|
+
@items[ref.to_sym]
|
|
65
|
+
end
|
|
66
|
+
|
|
67
|
+
def vcenter_instance_uuid
|
|
68
|
+
@vi_client.vim.serviceContent.about.instanceUuid
|
|
69
|
+
end
|
|
70
|
+
|
|
71
|
+
def vcenter_api_version
|
|
72
|
+
@vi_client.vim.serviceContent.about.apiVersion
|
|
73
|
+
end
|
|
74
|
+
|
|
75
|
+
def get_unimported_hosts(hpool, _vcenter_instance_name)
|
|
76
|
+
host_objects = {}
|
|
77
|
+
|
|
78
|
+
vcenter_uuid = vcenter_instance_uuid
|
|
79
|
+
vcenter_version = vcenter_api_version
|
|
80
|
+
|
|
81
|
+
fetch! if @items.empty? # Get datacenters
|
|
82
|
+
|
|
83
|
+
# Loop through datacenters
|
|
84
|
+
@items.values.each do |dc|
|
|
85
|
+
dc_name = dc.item.name
|
|
86
|
+
host_objects[dc_name] = []
|
|
87
|
+
|
|
88
|
+
# Get clusters inside a datacenter
|
|
89
|
+
host_folder = dc.host_folder
|
|
90
|
+
host_folder.fetch_clusters!
|
|
91
|
+
host_folder.items.values.each do |ccr|
|
|
92
|
+
# Check if the cluster is a host in OpenNebula's pool
|
|
93
|
+
one_host =
|
|
94
|
+
VCenterDriver::VIHelper
|
|
95
|
+
.find_by_ref(
|
|
96
|
+
OpenNebula::HostPool,
|
|
97
|
+
'TEMPLATE/VCENTER_CCR_REF',
|
|
98
|
+
ccr['_ref'],
|
|
99
|
+
vcenter_uuid,
|
|
100
|
+
hpool
|
|
101
|
+
)
|
|
102
|
+
next if one_host
|
|
103
|
+
|
|
104
|
+
# Get a ClusterComputeResource object
|
|
105
|
+
cluster =
|
|
106
|
+
VCenterDriver::ClusterComputeResource
|
|
107
|
+
.new_from_ref(
|
|
108
|
+
ccr['_ref'],
|
|
109
|
+
@vi_client
|
|
110
|
+
)
|
|
111
|
+
|
|
112
|
+
# Obtain a list of resource pools found in the cluster
|
|
113
|
+
rpools =
|
|
114
|
+
cluster
|
|
115
|
+
.get_resource_pool_list
|
|
116
|
+
.reject {|rp| rp[:name].empty? }
|
|
117
|
+
|
|
118
|
+
# Determine a host location (folder and subfolders)
|
|
119
|
+
item = cluster.item
|
|
120
|
+
folders = []
|
|
121
|
+
until item.instance_of? RbVmomi::VIM::Datacenter
|
|
122
|
+
item = item.parent
|
|
123
|
+
if !item.instance_of?(RbVmomi::VIM::Datacenter) &&
|
|
124
|
+
item.name != 'host'
|
|
125
|
+
folders << item.name
|
|
126
|
+
end
|
|
127
|
+
raise "Could not find the host's location" if item.nil?
|
|
128
|
+
end
|
|
129
|
+
location = folders.reverse.join('/')
|
|
130
|
+
location = '/' if location.empty?
|
|
131
|
+
|
|
132
|
+
# Setting host import name and
|
|
133
|
+
# replace spaces and weird characters
|
|
134
|
+
cluster_name = (ccr['name']).to_s.tr(' ', '_')
|
|
135
|
+
cluster_name =
|
|
136
|
+
VCenterDriver::VIHelper
|
|
137
|
+
.one_name(
|
|
138
|
+
OpenNebula::HostPool,
|
|
139
|
+
cluster_name,
|
|
140
|
+
ccr['_ref']+vcenter_uuid,
|
|
141
|
+
hpool
|
|
142
|
+
)
|
|
143
|
+
|
|
144
|
+
# Prepare hash for import tool
|
|
145
|
+
host_info = {}
|
|
146
|
+
host_info[:simple_name] = ccr['name']
|
|
147
|
+
host_info[:cluster_name] = cluster_name
|
|
148
|
+
host_info[:cluster_ref] = ccr['_ref']
|
|
149
|
+
host_info[:cluster_location] = location
|
|
150
|
+
host_info[:vcenter_uuid] = vcenter_uuid
|
|
151
|
+
host_info[:vcenter_version] = vcenter_version
|
|
152
|
+
host_info[:rp_list] = rpools
|
|
153
|
+
|
|
154
|
+
# Add the hash to current datacenter
|
|
155
|
+
host_objects[dc_name] << host_info
|
|
156
|
+
end
|
|
157
|
+
end
|
|
158
|
+
|
|
159
|
+
host_objects
|
|
160
|
+
end
|
|
161
|
+
|
|
162
|
+
def get_unimported_datastores(dpool, vcenter_instance_name, hpool)
|
|
163
|
+
import_id = 0
|
|
164
|
+
ds_objects = {}
|
|
165
|
+
vcenter_uuid = vcenter_instance_uuid
|
|
166
|
+
|
|
167
|
+
# Get datacenters
|
|
168
|
+
fetch! if @items.empty?
|
|
169
|
+
|
|
170
|
+
@items.values.each do |dc|
|
|
171
|
+
clusters_in_ds = {}
|
|
172
|
+
dc_name = dc.item.name
|
|
173
|
+
dc_ref = dc.item._ref
|
|
174
|
+
|
|
175
|
+
datastore_folder = dc.datastore_folder
|
|
176
|
+
datastore_folder.fetch!
|
|
177
|
+
|
|
178
|
+
datastore_folder.items.values.each do |ds|
|
|
179
|
+
name, capacity, free_space =
|
|
180
|
+
ds
|
|
181
|
+
.item
|
|
182
|
+
.collect(
|
|
183
|
+
'name',
|
|
184
|
+
'summary.capacity',
|
|
185
|
+
'summary.freeSpace'
|
|
186
|
+
)
|
|
187
|
+
|
|
188
|
+
ds_name = name.to_s
|
|
189
|
+
ds_total_mb = ((capacity.to_i / 1024) / 1024)
|
|
190
|
+
ds_free_mb = ((free_space.to_i / 1024) / 1024)
|
|
191
|
+
ds_ref = ds['_ref']
|
|
192
|
+
|
|
193
|
+
ds_objects[ds_ref] = {}
|
|
194
|
+
ds_objects[ds_ref][:ref] = ds_ref
|
|
195
|
+
ds_objects[ds_ref][:import_id] = import_id
|
|
196
|
+
ds_objects[ds_ref][:datacenter] = dc_name
|
|
197
|
+
ds_objects[ds_ref][:simple_name] = ds_name.to_s
|
|
198
|
+
ds_objects[ds_ref][:total_mb] = ds_total_mb
|
|
199
|
+
ds_objects[ds_ref][:free_mb] = ds_free_mb
|
|
200
|
+
ds_objects[ds_ref][:ds] = []
|
|
201
|
+
ds_objects[ds_ref][:cluster] = []
|
|
202
|
+
|
|
203
|
+
if ds.instance_of? VCenterDriver::Datastore
|
|
204
|
+
hosts = ds['host']
|
|
205
|
+
hosts.each do |host|
|
|
206
|
+
cluster_ref = host.key.parent._ref
|
|
207
|
+
if !clusters_in_ds.key?(cluster_ref)
|
|
208
|
+
clusters_in_ds[cluster_ref] = nil
|
|
209
|
+
|
|
210
|
+
# Try to locate cluster ref in host's pool
|
|
211
|
+
one_cluster =
|
|
212
|
+
VCenterDriver::VIHelper
|
|
213
|
+
.find_by_ref(
|
|
214
|
+
OpenNebula::HostPool,
|
|
215
|
+
'TEMPLATE/VCENTER_CCR_REF',
|
|
216
|
+
cluster_ref,
|
|
217
|
+
vcenter_uuid,
|
|
218
|
+
hpool
|
|
219
|
+
)
|
|
220
|
+
if one_cluster
|
|
221
|
+
ds_objects[ds_ref][:cluster] <<
|
|
222
|
+
one_cluster['CLUSTER_ID'].to_i
|
|
223
|
+
clusters_in_ds[cluster_ref] =
|
|
224
|
+
one_cluster['CLUSTER_ID'].to_i
|
|
225
|
+
end
|
|
226
|
+
else
|
|
227
|
+
if clusters_in_ds[cluster_ref] &&
|
|
228
|
+
!ds_objects[ds_ref][:cluster]
|
|
229
|
+
.include?(
|
|
230
|
+
clusters_in_ds[cluster_ref]
|
|
231
|
+
)
|
|
232
|
+
ds_objects[ds_ref][:cluster] <<
|
|
233
|
+
clusters_in_ds[cluster_ref]
|
|
234
|
+
end
|
|
235
|
+
end
|
|
236
|
+
end
|
|
237
|
+
|
|
238
|
+
already_image_ds = VCenterDriver::Storage
|
|
239
|
+
.exists_one_by_ref_dc_and_type?(
|
|
240
|
+
ds_ref,
|
|
241
|
+
dc_ref,
|
|
242
|
+
vcenter_uuid,
|
|
243
|
+
'IMAGE_DS',
|
|
244
|
+
dpool
|
|
245
|
+
)
|
|
246
|
+
|
|
247
|
+
key = ds_ref+vcenter_uuid
|
|
248
|
+
if !already_image_ds
|
|
249
|
+
ds_objects[ds_ref][:name] =
|
|
250
|
+
VCenterDriver::VIHelper
|
|
251
|
+
.one_name(
|
|
252
|
+
OpenNebula::DatastorePool,
|
|
253
|
+
"#{ds_name}(IMG)",
|
|
254
|
+
key
|
|
255
|
+
)
|
|
256
|
+
object =
|
|
257
|
+
ds
|
|
258
|
+
.to_one_template(
|
|
259
|
+
ds_objects[ds_ref],
|
|
260
|
+
vcenter_uuid,
|
|
261
|
+
dc_name,
|
|
262
|
+
dc_ref,
|
|
263
|
+
'IMAGE_DS'
|
|
264
|
+
)
|
|
265
|
+
ds_objects[ds_ref][:ds] << object unless object.nil?
|
|
266
|
+
end
|
|
267
|
+
|
|
268
|
+
already_system_ds =
|
|
269
|
+
VCenterDriver::Storage
|
|
270
|
+
.exists_one_by_ref_dc_and_type?(
|
|
271
|
+
ds_ref,
|
|
272
|
+
dc_ref,
|
|
273
|
+
vcenter_uuid,
|
|
274
|
+
'SYSTEM_DS',
|
|
275
|
+
dpool
|
|
276
|
+
)
|
|
277
|
+
|
|
278
|
+
if !already_system_ds
|
|
279
|
+
ds_objects[ds_ref][:name] =
|
|
280
|
+
VCenterDriver::VIHelper
|
|
281
|
+
.one_name(
|
|
282
|
+
OpenNebula::DatastorePool,
|
|
283
|
+
"#{ds_name}(SYS)",
|
|
284
|
+
key
|
|
285
|
+
)
|
|
286
|
+
object = ds
|
|
287
|
+
.to_one_template(
|
|
288
|
+
ds_objects[ds_ref],
|
|
289
|
+
vcenter_uuid,
|
|
290
|
+
dc_name,
|
|
291
|
+
dc_ref,
|
|
292
|
+
'SYSTEM_DS'
|
|
293
|
+
)
|
|
294
|
+
ds_objects[ds_ref][:ds] << object unless object.nil?
|
|
295
|
+
end
|
|
296
|
+
|
|
297
|
+
ds_objects[ds_ref][:name] = ds_name.to_s
|
|
298
|
+
elsif ds.instance_of? VCenterDriver::StoragePod
|
|
299
|
+
ds['children'].each do |sp_ds|
|
|
300
|
+
hosts = sp_ds.host
|
|
301
|
+
hosts.each do |host|
|
|
302
|
+
cluster_ref = host.key.parent._ref
|
|
303
|
+
if !clusters_in_ds.include?(cluster_ref)
|
|
304
|
+
clusters_in_ds[cluster_ref] = nil
|
|
305
|
+
# Try to locate cluster
|
|
306
|
+
# ref in cluster's pool
|
|
307
|
+
one_cluster =
|
|
308
|
+
VCenterDriver::VIHelper
|
|
309
|
+
.find_by_ref(
|
|
310
|
+
OpenNebula::HostPool,
|
|
311
|
+
'TEMPLATE/VCENTER_CCR_REF',
|
|
312
|
+
cluster_ref,
|
|
313
|
+
vcenter_uuid,
|
|
314
|
+
hpool
|
|
315
|
+
)
|
|
316
|
+
if one_cluster
|
|
317
|
+
ds_objects[ds_ref][:cluster] <<
|
|
318
|
+
one_cluster['CLUSTER_ID'].to_i
|
|
319
|
+
clusters_in_ds[cluster_ref] =
|
|
320
|
+
one_cluster['CLUSTER_ID'].to_i
|
|
321
|
+
end
|
|
322
|
+
else
|
|
323
|
+
if clusters_in_ds[cluster_ref] &&
|
|
324
|
+
!ds_objects[ds_ref][:cluster]
|
|
325
|
+
.include?(
|
|
326
|
+
clusters_in_ds[cluster_ref]
|
|
327
|
+
)
|
|
328
|
+
ds_objects[ds_ref][:cluster] <<
|
|
329
|
+
clusters_in_ds[cluster_ref]
|
|
330
|
+
end
|
|
331
|
+
end
|
|
332
|
+
end
|
|
333
|
+
end
|
|
334
|
+
|
|
335
|
+
already_system_ds = VCenterDriver::Storage
|
|
336
|
+
.exists_one_by_ref_dc_and_type?(
|
|
337
|
+
ds_ref,
|
|
338
|
+
dc_ref,
|
|
339
|
+
vcenter_uuid,
|
|
340
|
+
'SYSTEM_DS',
|
|
341
|
+
dpool
|
|
342
|
+
)
|
|
343
|
+
|
|
344
|
+
if !already_system_ds
|
|
345
|
+
ds_objects[ds_ref][:name] = "#{ds_name} \
|
|
346
|
+
[#{vcenter_instance_name} - #{dc_name}] (StorDRS)"
|
|
347
|
+
object = ds.to_one_template(
|
|
348
|
+
ds_objects[ds_ref],
|
|
349
|
+
vcenter_uuid,
|
|
350
|
+
dc_name,
|
|
351
|
+
dc_ref,
|
|
352
|
+
'SYSTEM_DS'
|
|
353
|
+
)
|
|
354
|
+
ds_objects[ds_ref][:ds] << object unless object.nil?
|
|
355
|
+
end
|
|
356
|
+
end
|
|
357
|
+
|
|
358
|
+
if ds_objects[ds_ref][:ds].empty?
|
|
359
|
+
ds_objects.delete(ds_ref)
|
|
360
|
+
else
|
|
361
|
+
import_id += 1
|
|
362
|
+
end
|
|
363
|
+
end
|
|
364
|
+
end
|
|
365
|
+
|
|
366
|
+
{ vcenter_instance_name => ds_objects }
|
|
367
|
+
end
|
|
368
|
+
|
|
369
|
+
# Collect vCenter VM templates that have not yet been imported into
# OpenNebula.
#
# @param vi_client [VCenterDriver::VIClient] connected vCenter client
# @param tpool     [OpenNebula::TemplatePool] pool used to detect
#                  already-imported templates
# @return [Hash] { vcenter_instance_name => { template_ref => template_data } }
def get_unimported_templates(vi_client, tpool)
    template_objects = {}
    import_id = 0
    vcenter_uuid = vcenter_instance_uuid

    vcenter_instance_name = vi_client.vim.host

    fetch! if @items.empty? # Get datacenters

    @items.values.each do |dc|
        # rp_cache is shared across templates of the same datacenter so
        # resource-pool lookups are only done once per DC
        rp_cache = {}
        dc_name = dc.item.name

        # Container view over every VirtualMachine below the DC vmFolder
        view = vi_client
               .vim
               .serviceContent
               .viewManager
               .CreateContainerView(
                   {
                       :container => dc.item.vmFolder,
                       :type => ['VirtualMachine'],
                       :recursive => true
                   }
               )

        pc = vi_client.vim.serviceContent.propertyCollector

        # Only the 'config.template' flag is retrieved; it is enough to
        # tell templates apart from regular VMs
        filter_spec = RbVmomi::VIM.PropertyFilterSpec(
            :objectSet => [
                {
                    :obj => view,
                    :skip => true,
                    :selectSet => [
                        RbVmomi::VIM.TraversalSpec(
                            :name => 'traverseEntities',
                            :type => 'ContainerView',
                            :path => 'view',
                            :skip => false
                        )
                    ]
                }
            ],
            :propSet => [
                {
                    :type => 'VirtualMachine',
                    :pathSet => ['config.template']
                }
            ]
        )

        result = pc.RetrieveProperties(
            :specSet => [filter_spec]
        )

        # Index retrieved property hashes by managed object reference
        vms = {}
        result.each do |r|
            if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
                vms[r.obj._ref] = r.to_hash
            end
        end
        templates = []
        vms.each do |ref, value|
            # Skip regular (non-template) virtual machines
            next unless value['config.template']

            templates << VCenterDriver::Template
                         .new_from_ref(
                             ref,
                             vi_client
                         )
        end

        view.DestroyView # Destroy the view

        templates.each do |template|
            tref = template['_ref']
            # Already processed in a previous datacenter iteration
            next if template_objects[tref]

            one_template = VCenterDriver::VIHelper
                           .find_by_ref(
                               OpenNebula::TemplatePool,
                               'TEMPLATE/VCENTER_TEMPLATE_REF',
                               tref,
                               vcenter_uuid,
                               tpool
                           )

            # If the template has been already imported
            next if one_template

            one_template = VCenterDriver::Template
                           .get_xml_template(
                               template,
                               vcenter_uuid,
                               vi_client,
                               dc_name,
                               rp_cache
                           )

            # presumably get_xml_template returns nil for templates that
            # cannot be represented — skip those (TODO confirm)
            next if one_template.nil?

            one_template[:import_id] = import_id
            one_template[:vcenter] = vcenter_instance_name
            import_id += 1
            template_objects[tref] = one_template
        end
    end

    {
        vcenter_instance_name => template_objects
    }
end
|
|
480
|
+
|
|
481
|
+
# Return the vCenter networks reachable from the cluster that backs the
# given OpenNebula host.
#
# @param one_host [OpenNebula::Host] host holding VCENTER_CCR_REF
# @return networks attached to the backing ClusterComputeResource
def cluster_networks(one_host)
    ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']

    # cluster = cluster_mob(one_host)
    cluster =
        VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref,
                                                           @vi_client)

    raise "Cluster with ref: #{ccr_ref} not found" if cluster.nil?

    cluster.item.network
end
|
|
490
|
+
|
|
491
|
+
# Return ONE cluster ID
|
|
492
|
+
# Return ONE cluster ID for a host, or -1 when the host (or its
# CLUSTER_ID attribute) is missing.
#
# @param one_host [#[], nil] object answering ['CLUSTER_ID']
# @return [Integer]
def one_cluster_id(one_host)
    raw_id = one_host && one_host['CLUSTER_ID']
    (raw_id || -1).to_i
end
|
|
501
|
+
|
|
502
|
+
# Determine if a network must be excluded from the list
|
|
503
|
+
# Determine if a network must be excluded from the import list.
#
# Nothing is ever excluded unless args[:filter] is set. With filtering
# on, a network is excluded when it is an NSX network on a host without
# NSX credentials, has no hosts attached, is a DVS uplink port group,
# or is the NSX VXLAN transport portgroup.
#
# @return [Boolean]
def exclude_network?(vc_network, one_host, args, vc_network_hash)
    return false unless args[:filter]

    name  = vc_network_hash[:vc_network_name]
    hosts = vc_network_hash[:vc_network_host]
    tags  = vc_network_hash[:vc_network_tag]

    # Only NSX-V and NSX-T networks are excluded, and only when the
    # selected host has no NSX password configured
    if one_host && one_host['TEMPLATE/NSX_PASSWORD'].nil?
        nsx_types = [
            VCenterDriver::Network::NETWORK_TYPE_NSXT,
            VCenterDriver::Network::NETWORK_TYPE_NSXV
        ]

        type = VCenterDriver::Network.get_network_type(vc_network, name)

        return true if nsx_types.include?(type)
    end

    # Exclude networks without hosts
    return true if hosts.empty?

    # Exclude DVS uplinks
    return true if !tags.empty? && tags[0][:key] == 'SYSTEM/DVS.UPLINKPG'

    # Exclude portgroup used for VXLAN communication in NSX
    return true if vc_network['name'].match(/^vxw-vmknicPg-dvs-(.*)/)

    false
end
|
|
544
|
+
|
|
545
|
+
# Proccess each network
|
|
546
|
+
# Proccess each network: build the import description for a single
# vCenter network, or return nil when the network is excluded.
#
# @param params [Hash] with keys :vc_network, :vcenter_instance_name,
#   :vcenter_uuid, :_hpool, :one_host, :args
# @return [Hash, nil] { vc_network_ref => network_data }, nil if excluded
def process_network(params)
    vc_network = params[:vc_network]
    vcenter_instance_name = params[:vcenter_instance_name]
    vcenter_uuid = params[:vcenter_uuid]
    _hpool = params[:_hpool]
    one_host = params[:one_host]
    args = params[:args]

    # :short mode skips network-type detection and template generation
    full_process = !args[:short]

    vc_network_ref = vc_network._ref
    vc_network_name = VCenterDriver::VcImporter.sanitize(
        vc_network.name
    )
    vc_network_host = vc_network['host']
    vc_network_tag = vc_network['tag']

    vc_network_hash = {}
    vc_network_hash[:vc_network_ref] = vc_network_ref
    vc_network_hash[:vc_network_name] = vc_network_name
    vc_network_hash[:vc_network_host] = vc_network_host
    vc_network_hash[:vc_network_tag] = vc_network_tag

    # Initialize network hash
    network = {}
    # Add name to network hash
    network[vc_network_ref] = { 'name' => vc_network_name }
    # By default no network is excluded
    network[vc_network_ref][:excluded] = false

    # Initialize opts hash used to inject data into one template
    opts = {}

    if full_process
        # Add network type to network hash
        network_type = \
            VCenterDriver::Network.get_network_type(
                vc_network,
                vc_network_name
            )
        network[vc_network_ref][:network_type] = network_type
    end

    # Determine if the network must be excluded
    network[vc_network_ref][:excluded] = exclude_network?(
        vc_network,
        one_host,
        args,
        vc_network_hash
    )

    return if network[vc_network_ref][:excluded] == true

    if full_process
        # Resolve switch name and uplink flag per network type
        case network[vc_network_ref][:network_type]
        # Distributed PortGroups
        when VCenterDriver::Network::NETWORK_TYPE_DPG
            network[vc_network_ref][:sw_name] = \
                vc_network.config.distributedVirtualSwitch.name
            # For DistributedVirtualPortgroups there
            # is networks and uplinks
            network[vc_network_ref][:uplink] = \
                vc_network.config.uplink
            # network[vc_network_ref][:uplink] = false
        # NSX-V PortGroups
        when VCenterDriver::Network::NETWORK_TYPE_NSXV
            network[vc_network_ref][:sw_name] = \
                vc_network.config.distributedVirtualSwitch.name
            # For NSX-V ( is the same as DistributedVirtualPortgroups )
            # there is networks and uplinks
            # NOTE(review): the uplink value read from vCenter is
            # immediately overwritten with false on the next line
            network[vc_network_ref][:uplink] = \
                vc_network.config.uplink
            network[vc_network_ref][:uplink] = false
        # Standard PortGroups
        when VCenterDriver::Network::NETWORK_TYPE_PG
            # There is no uplinks for standard portgroups,
            # so all Standard
            # PortGroups are networks and no uplinks
            network[vc_network_ref][:uplink] = false
            network[vc_network_ref][:sw_name] =
                VCenterDriver::Network
                .virtual_switch(
                    vc_network
                )
        # NSX-T PortGroups
        when VCenterDriver::Network::NETWORK_TYPE_NSXT
            network[vc_network_ref][:sw_name] = \
                vc_network.summary.opaqueNetworkType
            # There is no uplinks for NSX-T networks,
            # so all NSX-T networks
            # are networks and no uplinks
            network[vc_network_ref][:uplink] = false
        else
            raise 'Unknown network type: ' \
                  "#{network[vc_network_ref][:network_type]}"
        end
    end

    # Multicluster nets support
    network[vc_network_ref][:clusters] = {}
    network[vc_network_ref][:clusters][:refs] = []
    network[vc_network_ref][:clusters][:one_ids] = []
    network[vc_network_ref][:clusters][:names] = []

    # Get hosts related to this network and add them if is not
    # excluded
    vc_hosts = vc_network.host
    vc_hosts.each do |vc_host|
        # Get vCenter Cluster
        vc_cluster = vc_host.parent
        vc_cluster_ref = vc_cluster._ref
        vc_cluster_name = vc_cluster.name
        # Get one host from each vCenter cluster
        one_host = VCenterDriver::VIHelper
                   .find_by_ref(OpenNebula::HostPool,
                                'TEMPLATE/VCENTER_CCR_REF',
                                vc_cluster_ref,
                                vcenter_uuid)
        # Check if network is excluded from each host
        next if exclude_network?(
            vc_network,
            one_host,
            args,
            vc_network_hash
        )

        # Insert vCenter cluster ref
        network[vc_network_ref][:clusters][:refs] << vc_cluster_ref
        # Insert OpenNebula cluster id
        cluster_id = one_cluster_id(one_host)
        network[vc_network_ref][:clusters][:one_ids] << cluster_id
        # Insert vCenter cluster name
        network[vc_network_ref][:clusters][:names] << vc_cluster_name
        opts[:dc_name] = vc_cluster_name
    end

    # Remove duplicate entries
    network[vc_network_ref][:clusters][:refs].uniq!
    network[vc_network_ref][:clusters][:one_ids].uniq!
    network[vc_network_ref][:clusters][:names].uniq!

    # Mark network as processed
    network[vc_network_ref][:processed] = true

    if full_process
        # General net_info related to datacenter
        opts[:vcenter_uuid] = vcenter_uuid
        opts[:vcenter_instance_name] = vcenter_instance_name
        opts[:network_name] = network[vc_network_ref]['name']
        opts[:network_ref] = network.keys.first
        opts[:network_type] = network[vc_network_ref][:network_type]
        opts[:sw_name] = network[vc_network_ref][:sw_name]

        network[vc_network_ref] = \
            network[vc_network_ref]
            .merge(VCenterDriver::Network
            .to_one_template(opts))
    else
        network[vc_network_ref][:ref] = \
            vc_network_ref
        network[vc_network_ref][:name] = \
            network[vc_network_ref]['name']
    end

    network
end
|
|
712
|
+
|
|
713
|
+
# rubocop:disable Style/GlobalVars
|
|
714
|
+
# Collect vCenter networks visible from the selected host's cluster that
# are not yet imported as OpenNebula virtual networks.
#
# @param npool [OpenNebula::VirtualNetworkPool] existing vnets
# @param vcenter_instance_name [String] vCenter label used as result key
# @param hpool [OpenNebula::HostPool] host pool (forwarded to
#   process_network as :_hpool)
# @param args [Hash] importer CLI args; :host selects the ONE host,
#   :filter/:short tune processing
# @return [Hash] { vcenter_instance_name => { net_ref => net_data } }
def get_unimported_networks(npool, vcenter_instance_name, hpool, args)
    vcenter_uuid = vcenter_instance_uuid
    networks = {}

    # Selected host in OpenNebula
    # $conf is only set when running inside Sunstone; otherwise use the
    # default client endpoint
    if $conf.nil?
        one_client = OpenNebula::Client.new
    else
        one_client = OpenNebula::Client.new(
            nil,
            $conf[:one_xmlrpc]
        )
    end

    one_host = OpenNebula::Host.new_with_id(args[:host], one_client)
    rc = one_host.info
    raise rc.message if OpenNebula.is_error? rc

    # Get all networks in vcenter cluster (one_host)
    vc_cluster_networks = cluster_networks(one_host)

    # Iterate over vcenter networks
    vc_cluster_networks.each do |vc_cluster_network|
        # Skip networks already imported (matched by vCenter ref)
        exist = VCenterDriver::VIHelper
                .find_by_ref(OpenNebula::VirtualNetworkPool,
                             'TEMPLATE/VCENTER_NET_REF',
                             vc_cluster_network._ref,
                             vcenter_uuid,
                             npool)

        next if exist

        params = {}

        params[:vc_network] = vc_cluster_network
        params[:vcenter_instance_name] = vcenter_instance_name
        params[:vcenter_uuid] = vcenter_uuid
        params[:_hpool] = hpool
        params[:one_host] = one_host
        params[:args] = args

        # nil means the network was excluded by process_network
        network = process_network(params)

        networks.merge!(network) unless network.nil?
    end
    # Added import id
    imid = -1
    networks.map {|_k, v| v[:import_id] = imid += 1 }
    { vcenter_instance_name => networks }
end
|
|
764
|
+
# rubocop:enable Style/GlobalVars
|
|
765
|
+
|
|
766
|
+
end
|
|
767
|
+
# class DatatacenterFolder
|
|
768
|
+
|
|
769
|
+
##########################################################################
|
|
770
|
+
# Class Datacenter
|
|
771
|
+
##########################################################################
|
|
772
|
+
class Datacenter
|
|
773
|
+
|
|
774
|
+
attr_accessor :item
|
|
775
|
+
|
|
776
|
+
DPG_CREATE_TIMEOUT = 240
|
|
777
|
+
|
|
778
|
+
# Wrap an RbVmomi Datacenter managed object.
#
# @param item      [RbVmomi::VIM::Datacenter] the managed object
# @param vi_client [VCenterDriver::VIClient, nil] client for later calls
def initialize(item, vi_client = nil)
    check_item(item, RbVmomi::VIM::Datacenter)

    @item         = item
    @vi_client    = vi_client
    # Locking enabled by default; rollback list starts empty
    @locking      = true
    @net_rollback = []
end
|
|
786
|
+
|
|
787
|
+
# @return [DatastoreFolder] wrapper over this DC's datastore folder
def datastore_folder
    folder = @item.datastoreFolder
    DatastoreFolder.new(folder)
end
|
|
790
|
+
|
|
791
|
+
# @return [HostFolder] wrapper over this DC's host folder
def host_folder
    folder = @item.hostFolder
    HostFolder.new(folder)
end
|
|
794
|
+
|
|
795
|
+
# @return [VirtualMachineFolder] wrapper over this DC's VM folder
def vm_folder
    folder = @item.vmFolder
    VirtualMachineFolder.new(folder)
end
|
|
798
|
+
|
|
799
|
+
# @return [NetworkFolder] wrapper over this DC's network folder
def network_folder
    folder = @item.networkFolder
    NetworkFolder.new(folder)
end
|
|
802
|
+
|
|
803
|
+
# Locking function. Similar to flock
|
|
804
|
+
# Acquire an exclusive advisory file lock for this datacenter
# (flock-style). No-op when @locking is disabled.
#
# Fix: the original derived the lock-file name from @item BEFORE the
# @locking guard, doing needless work (and touching @item) even when
# locking is off — the guard now comes first.
def lock
    return unless @locking

    hostlockname = @item['name'].downcase.tr(' ', '_')

    @locking_file =
        File
        .open("/tmp/vcenter-dc-#{hostlockname}-lock", 'w')
    @locking_file.flock(File::LOCK_EX)
end
|
|
813
|
+
|
|
814
|
+
# Unlock driver execution mutex
|
|
815
|
+
# Release the driver execution mutex taken by #lock. No-op when
# @locking is disabled.
def unlock
    @locking_file.close if @locking
end
|
|
820
|
+
|
|
821
|
+
########################################################################
|
|
822
|
+
# Check if distributed virtual switch exists in host
|
|
823
|
+
########################################################################
|
|
824
|
+
# Check if a distributed virtual switch with the given name exists in
# the network folder.
#
# @return the matching switch wrapper, or nil (also nil on any error)
def dvs_exists(switch_name, net_folder)
    matches = net_folder.items.values.select do |entry|
        entry.instance_of?(VCenterDriver::DistributedVirtualSwitch) &&
            entry['name'] == switch_name
    end
    matches.first
rescue StandardError
    # Mirror the original modifier-rescue: any lookup failure means
    # "not found"
    nil
end
|
|
830
|
+
|
|
831
|
+
########################################################################
|
|
832
|
+
# Is the distributed switch for the distributed pg different?
|
|
833
|
+
########################################################################
|
|
834
|
+
# Is the distributed switch for the distributed port group different
# from the requested one?
#
# @return [Boolean] true when the DPG belongs to another switch
def pg_changes_sw?(dpg, switch_name)
    current_sw = dpg['config.distributedVirtualSwitch.name']
    current_sw != switch_name
end
|
|
837
|
+
|
|
838
|
+
########################################################################
|
|
839
|
+
# Create a distributed vcenter switch in a datacenter
|
|
840
|
+
########################################################################
|
|
841
|
+
# Create a distributed vcenter switch in the datacenter's networkFolder
# and register a rollback entry for it.
#
# Fix: removed the no-op `rescue StandardError => e; raise e` wrapper —
# re-raising the same exception object changes nothing.
#
# @param switch_name [String] name of the new DVS
# @param pnics [String, nil] comma-separated physical NICs; their count
#   determines how many uplink ports are declared
# @param mtu [Integer] maximum MTU for the switch
# @return [VCenterDriver::DistributedVirtualSwitch] wrapper for the DVS
def create_dvs(switch_name, pnics, mtu = 1500)
    # Prepare spec for DVS creation
    spec = RbVmomi::VIM::DVSCreateSpec.new
    spec.configSpec = RbVmomi::VIM::VMwareDVSConfigSpec.new
    spec.configSpec.name = switch_name

    # Specify number of uplinks port for dpg
    if pnics
        pnics = pnics.split(',')
        if !pnics.empty?
            spec.configSpec.uplinkPortPolicy =
                RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
            spec.configSpec.uplinkPortPolicy.uplinkPortName = []
            (0..pnics.size-1).each do |index|
                spec
                    .configSpec
                    .uplinkPortPolicy
                    .uplinkPortName[index]="dvUplink#{index+1}"
            end
        end
    end

    # Set maximum MTU
    spec.configSpec.maxMtu = mtu

    # The DVS must be created in the networkFolder of the datacenter
    dvs_creation_task = @item
                        .networkFolder
                        .CreateDVS_Task(
                            :spec => spec
                        )
    dvs_creation_task.wait_for_completion

    # If task finished successfuly we rename the uplink portgroup
    dvs = nil
    if dvs_creation_task.info.state == 'success'
        dvs = dvs_creation_task.info.result
        dvs
            .config
            .uplinkPortgroup[0]
            .Rename_Task(
                :newName => "#{switch_name}-uplink-pg"
            ).wait_for_completion
    else
        raise "The Distributed vSwitch #{switch_name} \
could not be created. "
    end

    @net_rollback << {
        :action => :delete_dvs,
        :dvs => dvs,
        :name => switch_name
    }

    VCenterDriver::DistributedVirtualSwitch.new(dvs, @vi_client)
end
|
|
901
|
+
|
|
902
|
+
########################################################################
|
|
903
|
+
# Update a distributed vcenter switch
|
|
904
|
+
########################################################################
|
|
905
|
+
# Update a distributed vcenter switch: grow the uplink port list to the
# number of given pnics and/or change the MTU. Registers a rollback
# entry with the original spec when a change is applied.
#
# Fix: the uplink-name loop referenced an undefined local `num_pnics`
# (guaranteed NameError whenever the uplink count differed); the
# intended bound is `pnics.size`.
#
# @param dvs   [VCenterDriver::DistributedVirtualSwitch] switch wrapper
# @param pnics [String, nil] comma-separated physical NICs
# @param mtu   [Integer] desired maximum MTU
def update_dvs(dvs, pnics, mtu)
    # Prepare spec for DVS creation
    spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
    changed = false

    # Snapshot of the current config, stored for rollback
    orig_spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
    orig_spec.maxMtu = dvs['config.maxMtu']
    orig_spec.uplinkPortPolicy =
        RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
    orig_spec.uplinkPortPolicy.uplinkPortName = []
    (0..dvs['config.uplinkPortgroup'].length-1).each do |index|
        orig_spec
            .uplinkPortPolicy
            .uplinkPortName[index]="dvUplink#{index+1}"
    end

    # Add more uplinks to default uplink
    # port group according to number of pnics
    if pnics
        pnics = pnics.split(',')
        if !pnics.empty? && dvs['config.uplinkPortgroup']
           .length != pnics.size
            spec.uplinkPortPolicy =
                RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
            spec.uplinkPortPolicy.uplinkPortName = []
            # was `num_pnics-1` (undefined local); use the pnic count
            (dvs['config.uplinkPortgroup']
                .length..pnics.size-1)
                .each do |index|
                spec
                    .uplinkPortPolicy
                    .uplinkPortName[index] =
                        "dvUplink#{index+1}"
            end
            changed = true
        end
    end

    # Set maximum MTU
    if mtu != dvs['config.maxMtu']
        spec.maxMtu = mtu
        changed = true
    end

    # Nothing to reconfigure
    return unless changed

    spec.configVersion = dvs['config.configVersion']

    begin
        dvs
            .item
            .ReconfigureDvs_Task(
                :spec => spec
            ).wait_for_completion
    rescue StandardError => e
        raise "The Distributed switch #{dvs['name']} could \
not be updated. "\
              "Reason: #{e.message}"
    end

    @net_rollback << {
        :action => :update_dvs,
        :dvs => dvs.item,
        :name => dvs['name'],
        :spec => orig_spec
    }
end
|
|
972
|
+
|
|
973
|
+
########################################################################
|
|
974
|
+
# Remove a distributed vcenter switch in a datacenter
|
|
975
|
+
########################################################################
|
|
976
|
+
# Best-effort removal of a distributed vcenter switch; any failure of
# the destroy task is deliberately ignored.
def remove_dvs(dvs)
    dvs.item.Destroy_Task.wait_for_completion
rescue StandardError
    # Ignore destroy task exception
end
|
|
983
|
+
|
|
984
|
+
########################################################################
|
|
985
|
+
# Check if distributed port group exists in datacenter
|
|
986
|
+
########################################################################
|
|
987
|
+
# Check if a distributed port group with the given name exists in the
# network folder.
#
# @return the matching port group wrapper, or nil (also nil on error)
def dpg_exists(pg_name, net_folder)
    matches = net_folder.items.values.select do |entry|
        entry.instance_of?(VCenterDriver::DistributedPortGroup) &&
            entry['name'] == pg_name
    end
    matches.first
rescue StandardError
    # Mirror the original modifier-rescue: any lookup failure means
    # "not found"
    nil
end
|
|
993
|
+
|
|
994
|
+
########################################################################
|
|
995
|
+
# Check if Opaque Network exists in datacenter
|
|
996
|
+
########################################################################
|
|
997
|
+
# Poll the datacenter network folder (up to ~180s) for the vCenter
# network backing a given NSX id and return its managed object ref.
#
# @param nsx_id  [String] opaqueNetworkId (NSX-T) or portgroup key (NSX-V)
# @param pg_type [String] one of the NETWORK_TYPE_NSX* constants
# @return [String] the network's _ref
# NOTE(review): returns nil implicitly when the timeout expires without
# a match — callers must handle that case.
def nsx_network(nsx_id, pg_type)
    timeout = 180
    case pg_type
    when VCenterDriver::Network::NETWORK_TYPE_NSXT
        while timeout > 0
            # Re-fetch the folder each iteration: the network appears
            # asynchronously after NSX creates it
            net_folder = network_folder
            net_folder.fetch!
            net_folder.items.values.each do |net|
                if net.instance_of?(VCenterDriver::OpaqueNetwork) &&
                   net.item.summary.opaqueNetworkId == nsx_id
                    return net.item._ref
                end
            end
            sleep(1)
            timeout -= 1
        end
    # Not used right now, but maybe neccesary in the future.
    when VCenterDriver::Network::NETWORK_TYPE_NSXV
        while timeout > 0
            net_folder = network_folder
            net_folder.fetch!
            net_folder.items.values.each do |net|
                if net.instance_of?(
                    VCenterDriver::DistributedPortGroup
                ) &&
                   net.item.key == nsx_id
                    return net.item._ref
                end
            end
            sleep(1)
            timeout -= 1
        end
    else
        raise "Unknown network Port Group type: #{pg_type}"
    end
end
|
|
1033
|
+
|
|
1034
|
+
########################################################################
|
|
1035
|
+
# Create a distributed vcenter port group
|
|
1036
|
+
########################################################################
|
|
1037
|
+
# Create a distributed vcenter port group on the given DVS and wait for
# it to appear, registering a rollback entry.
#
# @param dvs       [VCenterDriver::DistributedVirtualSwitch] target switch
# @param pg_name   [String] port group name
# @param vlan_id   [Integer] VLAN id for the port group
# @param num_ports [Integer, nil] initial port count (auto-expands)
# @return [String] managed object ref of the new port group
# @raise [RuntimeError] if creation fails or the ref never shows up
def create_dpg(dvs, pg_name, vlan_id, num_ports)
    spec = RbVmomi::VIM::DVPortgroupConfigSpec.new

    # OpenNebula use DVS static port binding with autoexpand
    if num_ports
        spec.autoExpand = true
        spec.numPorts = num_ports
    end

    # Distributed port group name
    spec.name = pg_name

    # Set VLAN information
    spec.defaultPortConfig =
        RbVmomi::VIM::VMwareDVSPortSetting.new
    spec.defaultPortConfig.vlan =
        RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
    spec.defaultPortConfig.vlan.vlanId =
        vlan_id
    spec.defaultPortConfig.vlan.inherited =
        false

    # earlyBinding. A free DistributedVirtualPort will be selected and
    # assigned to a VirtualMachine when
    # the virtual machine is reconfigured
    # to connect to the portgroup.
    spec.type = 'earlyBinding'

    begin
        dvs
            .item
            .AddDVPortgroup_Task(
                :spec => [spec]
            ).wait_for_completion
    rescue StandardError => e
        raise "The Distributed port group #{pg_name} \
could not be created. "\
              "Reason: #{e.message}"
    end

    # wait until the network is ready and we have a reference
    portgroups = dvs['portgroup'].select do |dpg|
        dpg.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup) &&
            dpg['name'] == pg_name
    end

    # Poll (up to DPG_CREATE_TIMEOUT seconds) for the new portgroup to
    # become visible on the switch
    (0..DPG_CREATE_TIMEOUT).each do
        break unless portgroups.empty?

        portgroups = dvs['portgroup'].select do |dpg|
            dpg
                .instance_of?(
                    RbVmomi::VIM::DistributedVirtualPortgroup
                ) && dpg['name'] == pg_name
        end
        sleep 1
    end

    if portgroups.empty?
        raise 'Cannot get VCENTER_NET_REF \
              for new distributed port group'
    end

    @net_rollback << {
        :action => :delete_dpg,
        :dpg => portgroups.first,
        :name => pg_name
    }

    portgroups.first._ref
end
|
|
1108
|
+
|
|
1109
|
+
########################################################################
|
|
1110
|
+
# Update a distributed vcenter port group
|
|
1111
|
+
########################################################################
|
|
1112
|
+
# Update a distributed vcenter port group (port count and/or VLAN id),
# registering a rollback entry with the original spec when changed.
#
# Fix: the rescue message claimed the port group "could not be created";
# this method updates an existing one — message corrected.
#
# @param dpg       [VCenterDriver::DistributedPortGroup] port group wrapper
# @param vlan_id   [Integer] desired VLAN id
# @param num_ports [Integer, nil] desired port count (nil keeps current)
def update_dpg(dpg, vlan_id, num_ports)
    spec = RbVmomi::VIM::DVPortgroupConfigSpec.new

    changed = false

    # Snapshot of the current config, stored for rollback
    orig_spec =
        RbVmomi::VIM::DVPortgroupConfigSpec.new
    orig_spec.numPorts =
        dpg['config.numPorts']
    orig_spec.defaultPortConfig =
        RbVmomi::VIM::VMwareDVSPortSetting.new
    orig_spec.defaultPortConfig.vlan =
        RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
    orig_spec.defaultPortConfig.vlan.vlanId =
        dpg['config.defaultPortConfig.vlan.vlanId']
    orig_spec.defaultPortConfig.vlan.inherited =
        false

    if num_ports && num_ports != orig_spec.numPorts
        spec.numPorts = num_ports
        changed = true
    end

    # earlyBinding. A free DistributedVirtualPort
    # will be selected and
    # assigned to a VirtualMachine when
    # the virtual machine is reconfigured
    # to connect to the portgroup.
    spec.type = 'earlyBinding'

    if vlan_id != orig_spec.defaultPortConfig.vlan.vlanId
        spec.defaultPortConfig =
            RbVmomi::VIM::VMwareDVSPortSetting.new
        spec.defaultPortConfig.vlan =
            RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
        spec.defaultPortConfig.vlan.vlanId = vlan_id
        spec.defaultPortConfig.vlan.inherited = false
        changed = true
    end

    # Nothing to reconfigure
    return unless changed

    spec.configVersion = dpg['config.configVersion']

    begin
        dpg
            .item
            .ReconfigureDVPortgroup_Task(
                :spec => spec
            ).wait_for_completion
    rescue StandardError => e
        raise "The Distributed port group #{dpg['name']} \
could not be updated. "\
              "Reason: #{e.message}"
    end

    @net_rollback << {
        :action => :update_dpg,
        :dpg => dpg.item,
        :name => dpg['name'],
        :spec => orig_spec
    }
end
|
|
1175
|
+
|
|
1176
|
+
########################################################################
|
|
1177
|
+
# Remove distributed port group from datacenter
|
|
1178
|
+
########################################################################
|
|
1179
|
+
# Remove a distributed port group from the datacenter.
#
# @param dpg [VCenterDriver::DistributedPortGroup] port group wrapper
# @return [nil] when the port group is in use (warning to stderr)
# @raise [RuntimeError] on any other destroy failure
def remove_dpg(dpg)
    begin
        dpg.item.Destroy_Task.wait_for_completion
    rescue RbVmomi::VIM::ResourceInUse
        # In-use port groups are reported but not treated as fatal
        STDERR.puts "The distributed portgroup \
#{dpg['name']} is in use so it cannot be deleted"
        nil
    rescue StandardError => e
        raise "The Distributed portgroup #{dpg['name']} \
could not be deleted. Reason: #{e.message} "
    end
end
|
|
1191
|
+
|
|
1192
|
+
########################################################################
|
|
1193
|
+
# Perform vcenter network rollback operations
|
|
1194
|
+
########################################################################
|
|
1195
|
+
# Perform vcenter network rollback operations recorded in @net_rollback,
# newest first. Each entry is a hash with :action plus the objects/specs
# needed to undo it. ResourceInUse/NotFound during deletions are
# deliberately ignored; other failures raise.
def network_rollback
    # reverse order: undo the most recent operation first
    @net_rollback.reverse_each do |nr|
        case nr[:action]
        when :update_dpg
            begin
                nr[:dpg].ReconfigureDVPortgroup_Task(:spec => nr[:spec])
                  .wait_for_completion
            rescue StandardError => e
                raise "A rollback operation for distributed \
port group #{nr[:name]} could not \
be performed. Reason: #{e.message}"
            end
        when :update_dvs
            begin
                nr[:dvs].ReconfigureDvs_Task(:spec => nr[:spec])
                  .wait_for_completion
            rescue StandardError => e
                raise "A rollback operation for distributed\
 standard switch #{nr[:name]} could \
not be performed. Reason: #{e.message}"
            end
        when :delete_dvs
            begin
                nr[:dvs].Destroy_Task.wait_for_completion
            rescue RbVmomi::VIM::ResourceInUse
                next # Ignore if switch in use
            rescue RbVmomi::VIM::NotFound
                next # Ignore if switch not found
            rescue StandardError => e
                raise "A rollback operation \
for standard switch #{nr[:name]} \
could not be performed. Reason: #{e.message}"
            end
        when :delete_dpg
            begin
                nr[:dpg].Destroy_Task.wait_for_completion
            rescue RbVmomi::VIM::ResourceInUse
                next # Ignore if pg in use
            rescue RbVmomi::VIM::NotFound
                next # Ignore if pg not found
            rescue StandardError => e
                raise "A rollback operation for \
standard port group #{nr[:name]} could \
not be performed. Reason: #{e.message}"
            end
        end
    end
end
|
|
1243
|
+
|
|
1244
|
+
########################################################################
|
|
1245
|
+
# PowerOn VMs
|
|
1246
|
+
########################################################################
|
|
1247
|
+
# Power on a virtual machine through the datacenter-level bulk task and
# wait for it to finish.
#
# @param vm [RbVmomi::VIM::VirtualMachine] machine to power on
def power_on_vm(vm)
    task = @item.PowerOnMultiVM_Task({ :vm => [vm] })
    task.wait_for_completion
end
|
|
1250
|
+
|
|
1251
|
+
# Build a Datacenter wrapper from a vCenter managed object reference.
#
# @param ref       [String] managed object reference of the datacenter
# @param vi_client [VCenterDriver::VIClient] connected client
# @return [Datacenter]
def self.new_from_ref(ref, vi_client)
    mob = RbVmomi::VIM::Datacenter.new(vi_client.vim, ref)
    new(mob, vi_client)
end
|
|
1254
|
+
|
|
1255
|
+
end
|
|
1256
|
+
|
|
1257
|
+
end
|
|
1258
|
+
# module VCenterDriver
|