opennebula 5.12.4 → 5.13.80.pre

Files changed (126)
  1. checksums.yaml +4 -4
  2. data/lib/ActionManager.rb +1 -1
  3. data/lib/CommandManager.rb +1 -1
  4. data/lib/DriverExecHelper.rb +44 -28
  5. data/lib/OpenNebulaDriver.rb +8 -4
  6. data/lib/VirtualMachineDriver.rb +9 -2
  7. data/lib/cloud/CloudClient.rb +3 -3
  8. data/lib/datacenter.rb +1258 -0
  9. data/lib/datastore.rb +1025 -0
  10. data/lib/distributed_firewall.rb +280 -0
  11. data/lib/file_helper.rb +370 -0
  12. data/lib/host.rb +1517 -0
  13. data/lib/logical_port.rb +50 -0
  14. data/lib/logical_switch.rb +77 -0
  15. data/lib/memoize.rb +74 -0
  16. data/lib/models.rb +32 -0
  17. data/lib/models/role.rb +1126 -0
  18. data/lib/models/service.rb +709 -0
  19. data/lib/network.rb +635 -0
  20. data/lib/nsx_client.rb +144 -0
  21. data/lib/nsx_component.rb +28 -0
  22. data/lib/nsx_constants.rb +149 -0
  23. data/lib/nsx_driver.rb +78 -0
  24. data/lib/nsx_error.rb +77 -0
  25. data/lib/nsx_rule.rb +193 -0
  26. data/lib/nsxt_client.rb +176 -0
  27. data/lib/nsxt_dfw.rb +196 -0
  28. data/lib/nsxt_logical_port.rb +94 -0
  29. data/lib/nsxt_rule.rb +188 -0
  30. data/lib/nsxt_tz.rb +38 -0
  31. data/lib/nsxv_client.rb +176 -0
  32. data/lib/nsxv_dfw.rb +202 -0
  33. data/lib/nsxv_logical_port.rb +107 -0
  34. data/lib/nsxv_rule.rb +172 -0
  35. data/lib/nsxv_tz.rb +41 -0
  36. data/lib/opaque_network.rb +134 -0
  37. data/lib/opennebula.rb +5 -2
  38. data/lib/opennebula/acl.rb +1 -1
  39. data/lib/opennebula/acl_pool.rb +1 -1
  40. data/lib/opennebula/client.rb +1 -1
  41. data/lib/opennebula/cluster.rb +1 -1
  42. data/lib/opennebula/cluster_pool.rb +1 -1
  43. data/lib/opennebula/datastore.rb +1 -1
  44. data/lib/opennebula/datastore_pool.rb +1 -1
  45. data/lib/opennebula/document.rb +8 -29
  46. data/lib/opennebula/document_json.rb +42 -12
  47. data/lib/opennebula/document_pool.rb +1 -1
  48. data/lib/opennebula/document_pool_json.rb +1 -1
  49. data/lib/opennebula/error.rb +4 -1
  50. data/lib/opennebula/flow.rb +23 -0
  51. data/lib/opennebula/flow/grammar.rb +1195 -0
  52. data/lib/opennebula/flow/service_pool.rb +190 -0
  53. data/lib/opennebula/flow/service_template.rb +572 -0
  54. data/lib/opennebula/flow/service_template_ext.rb +84 -0
  55. data/lib/opennebula/flow/service_template_pool.rb +32 -0
  56. data/lib/opennebula/flow/validator.rb +499 -0
  57. data/lib/opennebula/group.rb +1 -1
  58. data/lib/opennebula/group_pool.rb +1 -1
  59. data/lib/opennebula/hook.rb +5 -12
  60. data/lib/opennebula/hook_log.rb +1 -1
  61. data/lib/opennebula/hook_pool.rb +1 -1
  62. data/lib/opennebula/host.rb +1 -1
  63. data/lib/opennebula/host_pool.rb +1 -1
  64. data/lib/opennebula/image.rb +17 -14
  65. data/lib/opennebula/image_pool.rb +1 -1
  66. data/lib/opennebula/ldap_auth.rb +1 -1
  67. data/lib/opennebula/ldap_auth_spec.rb +1 -1
  68. data/lib/opennebula/lockable_ext.rb +163 -0
  69. data/lib/opennebula/marketplace.rb +1 -1
  70. data/lib/opennebula/marketplace_pool.rb +1 -1
  71. data/lib/opennebula/marketplaceapp.rb +9 -119
  72. data/lib/opennebula/marketplaceapp_ext.rb +522 -0
  73. data/lib/opennebula/marketplaceapp_pool.rb +1 -1
  74. data/lib/opennebula/oneflow_client.rb +4 -3
  75. data/lib/opennebula/pool.rb +17 -3
  76. data/lib/opennebula/pool_element.rb +1 -1
  77. data/lib/opennebula/security_group.rb +1 -1
  78. data/lib/opennebula/security_group_pool.rb +1 -1
  79. data/lib/opennebula/server_cipher_auth.rb +1 -1
  80. data/lib/opennebula/server_x509_auth.rb +1 -1
  81. data/lib/opennebula/ssh_auth.rb +1 -1
  82. data/lib/opennebula/system.rb +1 -1
  83. data/lib/opennebula/template.rb +4 -13
  84. data/lib/opennebula/template_ext.rb +325 -0
  85. data/lib/opennebula/template_pool.rb +1 -1
  86. data/lib/opennebula/user.rb +26 -2
  87. data/lib/opennebula/user_pool.rb +1 -1
  88. data/lib/opennebula/utils.rb +1 -1
  89. data/lib/opennebula/vdc.rb +1 -1
  90. data/lib/opennebula/vdc_pool.rb +1 -1
  91. data/lib/opennebula/virtual_machine.rb +25 -207
  92. data/lib/opennebula/virtual_machine_ext.rb +469 -0
  93. data/lib/opennebula/virtual_machine_pool.rb +28 -17
  94. data/lib/opennebula/virtual_network.rb +4 -10
  95. data/lib/opennebula/virtual_network_pool.rb +1 -1
  96. data/lib/opennebula/virtual_router.rb +4 -12
  97. data/lib/opennebula/virtual_router_pool.rb +1 -1
  98. data/lib/opennebula/vm_group.rb +4 -11
  99. data/lib/opennebula/vm_group_pool.rb +1 -1
  100. data/lib/opennebula/vntemplate.rb +4 -13
  101. data/lib/opennebula/vntemplate_pool.rb +1 -1
  102. data/lib/opennebula/wait_ext.rb +222 -0
  103. data/lib/opennebula/x509_auth.rb +1 -1
  104. data/lib/opennebula/xml_element.rb +1 -1
  105. data/lib/opennebula/xml_pool.rb +1 -1
  106. data/lib/opennebula/xml_utils.rb +1 -1
  107. data/lib/opennebula/zone.rb +1 -1
  108. data/lib/opennebula/zone_pool.rb +1 -1
  109. data/lib/rest_client.rb +201 -0
  110. data/lib/scripts_common.rb +180 -0
  111. data/lib/transport_zone.rb +43 -0
  112. data/lib/vcenter_driver.rb +13 -11
  113. data/lib/vcenter_importer.rb +616 -0
  114. data/lib/vi_client.rb +281 -0
  115. data/lib/vi_helper.rb +312 -0
  116. data/lib/virtual_machine.rb +3477 -0
  117. data/lib/virtual_wire.rb +158 -0
  118. data/lib/vm_device.rb +80 -0
  119. data/lib/vm_disk.rb +202 -0
  120. data/lib/vm_folder.rb +69 -0
  121. data/lib/vm_helper.rb +30 -0
  122. data/lib/vm_monitor.rb +303 -0
  123. data/lib/vm_nic.rb +70 -0
  124. data/lib/vm_template.rb +1961 -0
  125. data/lib/vmm_importer.rb +121 -0
  126. metadata +118 -5
data/lib/vm_helper.rb ADDED
@@ -0,0 +1,30 @@
1
+ # -------------------------------------------------------------------------- #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
+ # #
4
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
+ # not use this file except in compliance with the License. You may obtain #
6
+ # a copy of the License at #
7
+ # #
8
+ # http://www.apache.org/licenses/LICENSE-2.0 #
9
+ # #
10
+ # Unless required by applicable law or agreed to in writing, software #
11
+ # distributed under the License is distributed on an "AS IS" BASIS, #
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
13
+ # See the License for the specific language governing permissions and #
14
+ # limitations under the License. #
15
+ #--------------------------------------------------------------------------- #
16
+ module VirtualMachineHelper
17
+
18
+ # This method raises an exception if the timeout is reached
19
+ # The exception needs to be handled in the VMM drivers and any
20
+ # process that uses this method
21
+ def wait_timeout(action, timeout = 300)
22
+ time_start = Time.now
23
+ until send(action)
24
+ sleep(1)
25
+ condition = (Time.now - time_start).to_i >= timeout
26
+ raise 'Reached deploy timeout' if condition
27
+ end
28
+ end
29
+
30
+ end
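A minimal usage sketch of the helper above (illustrative only; assumes data/lib/vm_helper.rb has been loaded and that the including class defines the polled predicate):

# Hypothetical consumer of VirtualMachineHelper, not part of this diff.
class ExampleDeployer
  include VirtualMachineHelper

  def initialize
    @started = Time.now
  end

  # Predicate polled by wait_timeout; here it simply turns true after 3 seconds.
  def powered_on?
    Time.now - @started > 3
  end
end

begin
  ExampleDeployer.new.wait_timeout(:powered_on?, 60) # give up after 60 seconds
rescue RuntimeError => e
  STDERR.puts "Deploy failed: #{e.message}"
end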
data/lib/vm_monitor.rb ADDED
@@ -0,0 +1,303 @@
1
+ # -------------------------------------------------------------------------- #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
+ # #
4
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
+ # not use this file except in compliance with the License. You may obtain #
6
+ # a copy of the License at #
7
+ # #
8
+ # http://www.apache.org/licenses/LICENSE-2.0 #
9
+ # #
10
+ # Unless required by applicable law or agreed to in writing, software #
11
+ # distributed under the License is distributed on an "AS IS" BASIS, #
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
13
+ # See the License for the specific language governing permissions and #
14
+ # limitations under the License. #
15
+ #--------------------------------------------------------------------------- #
16
+ module VirtualMachineMonitor
17
+
18
+ POLL_ATTRIBUTE = OpenNebula::VirtualMachine::Driver::POLL_ATTRIBUTE
19
+ VM_STATE = OpenNebula::VirtualMachine::Driver::VM_STATE
20
+
21
+ # Converts the VI string state to OpenNebula state convention
22
+ # Guest states are:
23
+ # - poweredOff The virtual machine is currently powered off.
24
+ # - poweredOn The virtual machine is currently powered on.
25
+ # - suspended The virtual machine is currently suspended.
26
+ def state_to_c(state)
27
+ case state
28
+ when 'poweredOn'
29
+ VM_STATE[:active]
30
+ when 'suspended'
31
+ VM_STATE[:paused]
32
+ when 'poweredOff'
33
+ VM_STATE[:deleted]
34
+ else
35
+ VM_STATE[:unknown]
36
+ end
37
+ end
38
+
39
+ # Monitor function used when the poll action is called for all VMs
40
+ def monitor(stats)
41
+ reset_monitor
42
+
43
+ refresh_rate = 20 # 20 seconds between samples (realtime)
44
+
45
+ @state = state_to_c(@vm_info['summary.runtime.powerState'])
46
+
47
+ return if @state != VM_STATE[:active]
48
+
49
+ cpuMhz = @vm_info[:esx_host_cpu]
50
+
51
+ @monitor[:used_memory] = @vm_info['summary.quickStats.hostMemoryUsage']
52
+ .to_i * 1024
53
+
54
+ used_cpu = @vm_info['summary.quickStats.overallCpuUsage'].to_f / cpuMhz
55
+ used_cpu = (used_cpu * 100).to_s
56
+ @monitor[:used_cpu] = format('%.2f', used_cpu).to_s
57
+
58
+ # Check for negative values
59
+ @monitor[:used_memory] = 0 if @monitor[:used_memory].to_i < 0
60
+ @monitor[:used_cpu] = 0 if @monitor[:used_cpu].to_i < 0
61
+
62
+ guest_ip_addresses = []
63
+ unless self['guest.net'].empty?
64
+ @vm_info['guest.net'].each do |net|
65
+ next unless net.ipConfig
66
+ next if net.ipConfig.ipAddress.empty?
67
+
68
+ net.ipConfig.ipAddress.each do |ip|
69
+ guest_ip_addresses << ip.ipAddress
70
+ end
71
+ end
72
+ end
73
+
74
+ @guest_ip_addresses = guest_ip_addresses.join(',')
75
+
76
+ if stats.key?(@item)
77
+ metrics = stats[@item][:metrics]
78
+
79
+ nettx_kbpersec = 0
80
+ if metrics['net.transmitted']
81
+ metrics['net.transmitted'].each do |sample|
82
+ nettx_kbpersec += sample if sample > 0
83
+ end
84
+ end
85
+
86
+ netrx_kbpersec = 0
87
+ if metrics['net.bytesRx']
88
+ metrics['net.bytesRx'].each do |sample|
89
+ netrx_kbpersec += sample if sample > 0
90
+ end
91
+ end
92
+
93
+ read_kbpersec = 0
94
+ if metrics['virtualDisk.read']
95
+ metrics['virtualDisk.read'].each do |sample|
96
+ read_kbpersec += sample if sample > 0
97
+ end
98
+ end
99
+
100
+ read_iops = 0
101
+ if metrics['virtualDisk.numberReadAveraged']
102
+ metrics['virtualDisk.numberReadAveraged'].each do |sample|
103
+ read_iops += sample if sample > 0
104
+ end
105
+ end
106
+
107
+ write_kbpersec = 0
108
+ if metrics['virtualDisk.write']
109
+ metrics['virtualDisk.write'].each do |sample|
110
+ write_kbpersec += sample if sample > 0
111
+ end
112
+ end
113
+
114
+ write_iops = 0
115
+ if metrics['virtualDisk.numberWriteAveraged']
116
+ metrics['virtualDisk.numberWriteAveraged'].each do |sample|
117
+ write_iops += sample if sample > 0
118
+ end
119
+ end
120
+ else
121
+ nettx_kbpersec = 0
122
+ netrx_kbpersec = 0
123
+ read_kbpersec = 0
124
+ read_iops = 0
125
+ write_kbpersec = 0
126
+ write_iops = 0
127
+ end
128
+
129
+ # Accumulate values if present
130
+ if @one_item && @one_item['MONITORING/NETTX']
131
+ previous_nettx = @one_item['MONITORING/NETTX'].to_i
132
+ else
133
+ previous_nettx = 0
134
+ end
135
+
136
+ if @one_item && @one_item['MONITORING/NETRX']
137
+ previous_netrx = @one_item['MONITORING/NETRX'].to_i
138
+ else
139
+ previous_netrx = 0
140
+ end
141
+
142
+ if @one_item && @one_item['MONITORING/DISKRDIOPS']
143
+ previous_diskrdiops = @one_item['MONITORING/DISKRDIOPS'].to_i
144
+ else
145
+ previous_diskrdiops = 0
146
+ end
147
+
148
+ if @one_item && @one_item['MONITORING/DISKWRIOPS']
149
+ previous_diskwriops = @one_item['MONITORING/DISKWRIOPS'].to_i
150
+ else
151
+ previous_diskwriops = 0
152
+ end
153
+
154
+ if @one_item && @one_item['MONITORING/DISKRDBYTES']
155
+ previous_diskrdbytes = @one_item['MONITORING/DISKRDBYTES'].to_i
156
+ else
157
+ previous_diskrdbytes = 0
158
+ end
159
+
160
+ if @one_item && @one_item['MONITORING/DISKWRBYTES']
161
+ previous_diskwrbytes = @one_item['MONITORING/DISKWRBYTES'].to_i
162
+ else
163
+ previous_diskwrbytes = 0
164
+ end
165
+
166
+ @monitor[:nettx] = previous_nettx +
167
+ (nettx_kbpersec * 1024 * refresh_rate).to_i
168
+ @monitor[:netrx] = previous_netrx +
169
+ (netrx_kbpersec * 1024 * refresh_rate).to_i
170
+
171
+ @monitor[:diskrdiops] = previous_diskrdiops + read_iops
172
+ @monitor[:diskwriops] = previous_diskwriops + write_iops
173
+ @monitor[:diskrdbytes] = previous_diskrdbytes +
174
+ (read_kbpersec * 1024 * refresh_rate).to_i
175
+ @monitor[:diskwrbytes] = previous_diskwrbytes +
176
+ (write_kbpersec * 1024 * refresh_rate).to_i
177
+ end
178
+ # rubocop:enable Naming/VariableName
179
+ # rubocop:enable Style/FormatStringToken
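The stats argument consumed by #monitor above is keyed by the vCenter item and carries per-metric sample arrays. A sketch of the shape inferred from the lookups in this method (keys come from the code, values are illustrative):

# Illustrative shape of the `stats` hash passed to #monitor.
stats = {
  vm.item => {                                        # same object as @item
    :metrics => {
      'net.transmitted'                 => [12, 30],  # KB/s samples
      'net.bytesRx'                     => [40, 22],
      'virtualDisk.read'                => [5, 7],
      'virtualDisk.numberReadAveraged'  => [1, 2],
      'virtualDisk.write'               => [9, 3],
      'virtualDisk.numberWriteAveraged' => [2, 1]
    }
  }
}
vm.monitor(stats) # accumulates the samples into @monitor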
180
+
181
+ # Generates an OpenNebula IM Driver-compatible string with the monitor info
182
+ def info
183
+ # return 'STATE=d' if @state == 'd'
184
+
185
+ if @vm_info
186
+ guest_ip = @vm_info['guest.ipAddress']
187
+ else
188
+ guest_ip = self['guest.ipAddress']
189
+ end
190
+
191
+ used_cpu = @monitor[:used_cpu]
192
+ used_memory = @monitor[:used_memory]
193
+ netrx = @monitor[:netrx]
194
+ nettx = @monitor[:nettx]
195
+ diskrdbytes = @monitor[:diskrdbytes]
196
+ diskwrbytes = @monitor[:diskwrbytes]
197
+ diskrdiops = @monitor[:diskrdiops]
198
+ diskwriops = @monitor[:diskwriops]
199
+
200
+ if @vm_info
201
+ esx_host = @vm_info[:esx_host_name].to_s
202
+ else
203
+ esx_host = self['runtime.host.name'].to_s
204
+ end
205
+
206
+ if @vm_info
207
+ guest_state = @vm_info['guest.guestState'].to_s
208
+ else
209
+ guest_state = self['guest.guestState'].to_s
210
+ end
211
+
212
+ if @vm_info
213
+ vmware_tools = @vm_info['guest.toolsRunningStatus'].to_s
214
+ else
215
+ vmware_tools = self['guest.toolsRunningStatus'].to_s
216
+ end
217
+
218
+ if @vm_info
219
+ vm_name = @vm_info['name'].to_s
220
+ else
221
+ vm_name = self['name'].to_s
222
+ end
223
+
224
+ if @vm_info
225
+ vmtools_ver = @vm_info['guest.toolsVersion'].to_s
226
+ else
227
+ vmtools_ver = self['guest.toolsVersion'].to_s
228
+ end
229
+
230
+ if @vm_info
231
+ vmtools_verst = @vm_info['guest.toolsVersionStatus2'].to_s
232
+ else
233
+ vmtools_verst = self['guest.toolsVersionStatus2'].to_s
234
+ end
235
+
236
+ if @vm_info
237
+ rp_name = @vm_info[:rp_list]
238
+ .select do |item|
239
+ item[:ref] == @vm_info['resourcePool']._ref
240
+ end
241
+ .first[:name] rescue ''
242
+
243
+ rp_name = 'Resources' if rp_name.empty?
244
+ else
245
+ rp_name = self['resourcePool'].name
246
+ end
247
+
248
+ str_info = ''
249
+
250
+ str_info = 'GUEST_IP=' << guest_ip.to_s << "\n" if guest_ip
251
+
252
+ if @guest_ip_addresses && !@guest_ip_addresses.empty?
253
+ str_info << 'GUEST_IP_ADDRESSES="' << @guest_ip_addresses.to_s \
254
+ << '" '
255
+ end
256
+
257
+ str_info << "#{POLL_ATTRIBUTE[:cpu]}=" << used_cpu.to_s << "\n"
258
+ str_info << "#{POLL_ATTRIBUTE[:memory]}=" << used_memory.to_s << "\n"
259
+ str_info << "#{POLL_ATTRIBUTE[:netrx]}=" << netrx.to_s << "\n"
260
+ str_info << "#{POLL_ATTRIBUTE[:nettx]}=" << nettx.to_s << "\n"
261
+
262
+ str_info << 'DISKRDBYTES=' << diskrdbytes.to_s << "\n"
263
+ str_info << 'DISKWRBYTES=' << diskwrbytes.to_s << "\n"
264
+ str_info << 'DISKRDIOPS=' << diskrdiops.to_s << "\n"
265
+ str_info << 'DISKWRIOPS=' << diskwriops.to_s << "\n"
266
+
267
+ str_info << 'VCENTER_ESX_HOST="' << esx_host << '" ' << "\n"
268
+ str_info << 'VCENTER_GUEST_STATE=' << guest_state << "\n"
269
+ str_info << 'VCENTER_VM_NAME="' << vm_name << '" ' << "\n"
270
+ str_info << 'VCENTER_VMWARETOOLS_RUNNING_STATUS=' \
271
+ << vmware_tools << "\n"
272
+ str_info << 'VCENTER_VMWARETOOLS_VERSION=' << vmtools_ver << "\n"
273
+ str_info << 'VCENTER_VMWARETOOLS_VERSION_STATUS=' \
274
+ << vmtools_verst << "\n"
275
+ str_info << 'VCENTER_RP_NAME="' << rp_name << '" ' << "\n"
276
+
277
+ info_disks.each do |disk|
278
+ next if disk[1].no_exists?
279
+
280
+ # Delete special characters
281
+ name = disk[0].gsub(/[^0-9A-Za-z]/, '_')
282
+
283
+ str_info << "DISK_#{name}_ACTUAL_PATH=\"[" <<
284
+ disk[1].ds.name << '] ' << disk[1].path << '" ' << "\n"
285
+ end
286
+
287
+ str_info
288
+ end
289
+
290
+ def reset_monitor
291
+ @monitor = {
292
+ :used_cpu => 0,
293
+ :used_memory => 0,
294
+ :netrx => 0,
295
+ :nettx => 0,
296
+ :diskrdbytes => 0,
297
+ :diskwrbytes => 0,
298
+ :diskrdiops => 0,
299
+ :diskwriops => 0
300
+ }
301
+ end
302
+
303
+ end
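The string built by #info is a list of KEY=VALUE lines consumed by the IM driver. A representative output for an active VM might look like the following (values are made up; the first four keys are whatever POLL_ATTRIBUTE maps :cpu, :memory, :netrx and :nettx to):

# GUEST_IP=10.0.0.5
# CPU=12.50
# MEMORY=1048576
# NETRX=123456
# NETTX=654321
# DISKRDBYTES=0
# DISKWRBYTES=0
# DISKRDIOPS=0
# DISKWRIOPS=0
# VCENTER_ESX_HOST="esx-01.example.org"
# VCENTER_GUEST_STATE=running
# VCENTER_VM_NAME="one-42"
# VCENTER_VMWARETOOLS_RUNNING_STATUS=guestToolsRunning
# VCENTER_VMWARETOOLS_VERSION=11333
# VCENTER_VMWARETOOLS_VERSION_STATUS=guestToolsCurrent
# VCENTER_RP_NAME="Resources"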
data/lib/vm_nic.rb ADDED
@@ -0,0 +1,70 @@
1
+ # -------------------------------------------------------------------------- #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
+ # #
4
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
+ # not use this file except in compliance with the License. You may obtain #
6
+ # a copy of the License at #
7
+ # #
8
+ # http://www.apache.org/licenses/LICENSE-2.0 #
9
+ # #
10
+ # Unless required by applicable law or agreed to in writing, software #
11
+ # distributed under the License is distributed on an "AS IS" BASIS, #
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
13
+ # See the License for the specific language governing permissions and #
14
+ # limitations under the License. #
15
+ #--------------------------------------------------------------------------- #
16
+ module VirtualMachineDevice
17
+
18
+ # Nic class
19
+ class Nic < Device
20
+
21
+ # Create the OpenNebula nic representation
22
+ # Allows us to create the class without a vCenter representation
23
+ # example: attached nics not synced with vCenter
24
+ def self.one_nic(id, one_res)
25
+ new(id, one_res, nil)
26
+ end
27
+
28
+ # Create the vCenter nic representation
29
+ # Allows us to create the class without an OpenNebula representation
30
+ # example: detached nics that do not exist in OpenNebula
31
+ def self.vc_nic(vc_res)
32
+ new(nil, nil, vc_res)
33
+ end
34
+
35
+ def key
36
+ raise_if_no_exists_in_vcenter
37
+ @vc_res.key
38
+ end
39
+
40
+ def boot_dev
41
+ RbVmomi::VIM
42
+ .VirtualMachineBootOptionsBootableEthernetDevice(
43
+ :deviceKey => key
44
+ )
45
+ end
46
+
47
+ def self.nic_model_class(nicmodel)
48
+ case nicmodel
49
+ when 'virtuale1000', 'e1000'
50
+ RbVmomi::VIM::VirtualE1000
51
+ when 'virtuale1000e', 'e1000e'
52
+ RbVmomi::VIM::VirtualE1000e
53
+ when 'virtualpcnet32', 'pcnet32'
54
+ RbVmomi::VIM::VirtualPCNet32
55
+ when 'virtualsriovethernetcard', 'sriovethernetcard'
56
+ RbVmomi::VIM::VirtualSriovEthernetCard
57
+ when 'virtualvmxnetm', 'vmxnetm'
58
+ RbVmomi::VIM::VirtualVmxnetm
59
+ when 'virtualvmxnet2', 'vmnet2'
60
+ RbVmomi::VIM::VirtualVmxnet2
61
+ when 'virtualvmxnet3', 'vmxnet3'
62
+ RbVmomi::VIM::VirtualVmxnet3
63
+ else # If none matches, use vmxnet3
64
+ RbVmomi::VIM::VirtualVmxnet3
65
+ end
66
+ end
67
+
68
+ end
69
+
70
+ end
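Illustrative lookups for the model mapping above (the fallback is VirtualVmxnet3); the vc_device variable in the last two lines is hypothetical and stands for a NIC device fetched from vCenter:

# Illustrative only; assumes rbvmomi and this driver are loaded.
VirtualMachineDevice::Nic.nic_model_class('e1000')    # => RbVmomi::VIM::VirtualE1000
VirtualMachineDevice::Nic.nic_model_class('vmxnet3')  # => RbVmomi::VIM::VirtualVmxnet3
VirtualMachineDevice::Nic.nic_model_class('unknown')  # => RbVmomi::VIM::VirtualVmxnet3

# A nic that exists only in vCenter (e.g. detached, not yet in OpenNebula):
nic = VirtualMachineDevice::Nic.vc_nic(vc_device)     # vc_device: hypothetical vCenter NIC device
nic.key                                               # device key, used by boot_dev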
data/lib/vm_template.rb ADDED
@@ -0,0 +1,1961 @@
1
+ # -------------------------------------------------------------------------- #
2
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
3
+ # #
4
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
5
+ # not use this file except in compliance with the License. You may obtain #
6
+ # a copy of the License at #
7
+ # #
8
+ # http://www.apache.org/licenses/LICENSE-2.0 #
9
+ # #
10
+ # Unless required by applicable law or agreed to in writing, software #
11
+ # distributed under the License is distributed on an "AS IS" BASIS, #
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
13
+ # See the License for the specific language governing permissions and #
14
+ # limitations under the License. #
15
+ #--------------------------------------------------------------------------- #
16
+ require 'ipaddr'
17
+
18
+ ##############################################################################
19
+ # Module VCenterDriver
20
+ ##############################################################################
21
+ module VCenterDriver
22
+
23
+ ##########################################################################
24
+ # Class Template
25
+ ##########################################################################
26
+ class Template
27
+
28
+ attr_accessor :item
29
+
30
+ include Memoize
31
+
32
+ def initialize(item = nil, vi_client = nil)
33
+ @item = item
34
+ check_item(@item, nil) if @item
35
+ @vi_client = vi_client
36
+ @locking = true
37
+ end
38
+
39
+ # Locking function. Similar to flock
40
+ def lock
41
+ return unless @locking
42
+
43
+ @locking_file = File.open('/tmp/vcenter-importer-lock', 'w')
44
+ @locking_file.flock(File::LOCK_EX)
45
+ end
46
+
47
+ # Unlock driver execution mutex
48
+ def unlock
49
+ return unless @locking
50
+
51
+ @locking_file.close
52
+
53
+ return unless File.exist?('/tmp/vcenter-importer-lock')
54
+
55
+ File.delete('/tmp/vcenter-importer-lock')
56
+ end
57
+
58
+ def vm?
59
+ self.class == VCenterDriver::VirtualMachine
60
+ end
61
+
62
+ def online?
63
+ raise 'vcenter item not found!' unless @item
64
+
65
+ !@item['guest.net'].empty?
66
+ end
67
+
68
+ def datacenter
69
+ item = @item
70
+
71
+ trace = []
72
+ while item && !item.instance_of?(RbVmomi::VIM::Datacenter)
73
+ rp = item.resourcePool rescue nil
74
+ if rp && rp.instance_of?(RbVmomi::VIM::VirtualApp)
75
+ trace << 'rp:' + item.to_s
76
+ item = rp.parent rescue nil
77
+ else
78
+ trace << item.to_s
79
+ item = item.parent rescue nil
80
+ end
81
+ end
82
+
83
+ if item.nil?
84
+ trace = '[' + trace.join(', ') + ']'
85
+ raise "Could not find the parent Datacenter. Trace: #{trace}"
86
+ end
87
+
88
+ Datacenter.new(item)
89
+ end
90
+
91
+ def delete_template
92
+ @item.Destroy_Task.wait_for_completion
93
+ end
94
+
95
+ def vcenter_instance_uuid
96
+ @vi_client.vim.serviceContent.about.instanceUuid rescue nil
97
+ end
98
+
99
+ def create_template_copy(template_name)
100
+ error = nil
101
+ template_ref = nil
102
+
103
+ template_name = "one-#{self['name']}" if template_name.empty?
104
+
105
+ relocate_spec_params = {}
106
+ relocate_spec_params[:pool] = resource_pool
107
+ relocate_spec =
108
+ RbVmomi::VIM
109
+ .VirtualMachineRelocateSpec(
110
+ relocate_spec_params
111
+ )
112
+
113
+ clone_spec =
114
+ RbVmomi::VIM
115
+ .VirtualMachineCloneSpec(
116
+ {
117
+ :location => relocate_spec,
118
+ :powerOn => false,
119
+ :template => false
120
+ }
121
+ )
122
+
123
+ begin
124
+ template =
125
+ @item
126
+ .CloneVM_Task(
127
+ :folder => @item.parent,
128
+ :name => template_name,
129
+ :spec => clone_spec
130
+ ).wait_for_completion
131
+ template_ref = template._ref
132
+ rescue StandardError => e
133
+ if !e.message.start_with?('DuplicateName')
134
+ error = 'Could not create the template'\
135
+ " clone. Reason: #{e.message}"
136
+ return error, nil
137
+ end
138
+
139
+ dc = datacenter
140
+ vm_folder = dc.vm_folder
141
+ vm_folder.fetch!
142
+ vm = vm_folder.items
143
+ .select {|_k, v| v.item.name == template_name }
144
+ .values.first.item rescue nil
145
+
146
+ if vm
147
+ begin
148
+ vm.Destroy_Task.wait_for_completion
149
+ template =
150
+ @item
151
+ .CloneVM_Task(
152
+ :folder => @item.parent,
153
+ :name => template_name,
154
+ :spec => clone_spec
155
+ ).wait_for_completion
156
+ template_ref = template._ref
157
+ rescue StandardError
158
+ error = 'Could not delete the existing '\
159
+ 'template, please remove it manually'\
160
+ " from vCenter. Reason: #{e.message}"
161
+ end
162
+ else
163
+ error = 'Could not create the template '\
164
+ "clone. Reason: #{e.message}"
165
+ end
166
+ end
167
+
168
+ [error, template_ref]
169
+ end
170
+
171
+ # Linked Clone over existing template
172
+ def create_delta_disks
173
+ begin
174
+ disks =
175
+ @item['config.hardware.device']
176
+ .grep(RbVmomi::VIM::VirtualDisk)
177
+ disk_without_snapshots = disks.select do |x|
178
+ x.backing.parent.nil?
179
+ end
180
+ rescue StandardError
181
+ error = 'Cannot extract existing disks on template.'
182
+ use_linked_clones = false
183
+ return error, use_linked_clones
184
+ end
185
+
186
+ if !disk_without_snapshots.empty?
187
+
188
+ begin
189
+ if self['config.template']
190
+ @item.MarkAsVirtualMachine(
191
+ :pool => resource_pool,
192
+ :host => self['runtime.host']
193
+ )
194
+ end
195
+ rescue StandardError => e
196
+ @item.MarkAsTemplate()
197
+ error = 'Cannot mark the template as a VirtualMachine. '\
198
+ 'Not using linked clones. '\
199
+ "Reason: #{e.message}/#{e.backtrace}"
200
+ use_linked_clones = false
201
+ return error, use_linked_clones
202
+ end
203
+
204
+ begin
205
+ spec = {}
206
+ spec[:deviceChange] = []
207
+
208
+ disk_without_snapshots.each do |disk|
209
+ remove_disk_spec =
210
+ {
211
+ :operation => :remove,
212
+ :device => disk
213
+ }
214
+ spec[:deviceChange] << remove_disk_spec
215
+
216
+ add_disk_spec =
217
+ {
218
+ :operation => :add,
219
+ :fileOperation => :create,
220
+ :device => disk.dup.tap do |x|
221
+ x.backing =
222
+ x.backing.dup
223
+ x.backing.fileName =
224
+ "[#{disk.backing.datastore.name}]"
225
+ x.backing.parent =
226
+ disk.backing
227
+ end
228
+ }
229
+ spec[:deviceChange] << add_disk_spec
230
+ end
231
+
232
+ @item
233
+ .ReconfigVM_Task(
234
+ :spec => spec
235
+ ).wait_for_completion unless spec[:deviceChange].empty?
236
+ rescue StandardError => e
237
+ error = 'Cannot create the delta disks on top '\
238
+ "of the template. Reason: #{e.message}."
239
+
240
+ if VCenterDriver::CONFIG[:debug_information]
241
+ error += "\n\n#{e.backtrace}"
242
+ end
243
+
244
+ use_linked_clones = false
245
+ return error, use_linked_clones
246
+ end
247
+
248
+ begin
249
+ @item.MarkAsTemplate()
250
+ rescue StandardError => e
251
+ error = 'Cannot mark the VirtualMachine as '\
252
+ 'a template. Not using linked clones.' \
253
+ " Reason: #{e.message}."
254
+
255
+ if VCenterDriver::CONFIG[:debug_information]
256
+ error += "\n\n#{e.backtrace}"
257
+ end
258
+
259
+ use_linked_clones = false
260
+ return error, use_linked_clones
261
+ end
262
+ end
263
+
264
+ error = nil
265
+ use_linked_clones = true
266
+
267
+ [error, use_linked_clones]
268
+ end
269
+
270
+ ########################################################################
271
+ # Import vCenter disks
272
+ # @param type [object] contains the type of the object(:object) and
273
+ # identifier(:id)
274
+ # @return error, template_disks
275
+ ########################################################################
276
+ def import_vcenter_disks(vc_uuid, dpool, ipool, type)
277
+ disk_info = ''
278
+ error = ''
279
+ images = []
280
+
281
+ begin
282
+ # Lock import operation, to avoid concurrent creation of images
283
+ lock
284
+
285
+ dc = datacenter
286
+ dc_ref = dc.item._ref
287
+
288
+ # Get disks and info required
289
+ vc_disks = vcenter_disks_get
290
+ vc_disks.sort_by! {|d| d[:device].unitNumber }
291
+
292
+ # Track allocated images
293
+ allocated_images = []
294
+
295
+ vc_disks.each do |disk|
296
+ ds_ref = nil
297
+ begin
298
+ ds_ref = disk[:datastore]._ref
299
+ rescue StandardError
300
+ raise "The ISO #{disk[:path_wo_ds].name} cannot "\
301
+ 'be found because the datastore was '\
302
+ 'removed or deleted'
303
+ end
304
+ datastore_found =
305
+ VCenterDriver::Storage
306
+ .get_one_image_ds_by_ref_and_dc(
307
+ ds_ref,
308
+ dc_ref,
309
+ vc_uuid,
310
+ dpool
311
+ )
312
+
313
+ if datastore_found.nil?
314
+ error = "\n ERROR: datastore "\
315
+ "#{disk[:datastore].name}: "\
316
+ 'has to be imported first as'\
317
+ " an image datastore!\n"
318
+
319
+ # Rollback delete disk images
320
+ allocated_images.each do |i|
321
+ i.delete
322
+ end
323
+
324
+ break
325
+ end
326
+
327
+ params = {
328
+ :disk => disk,
329
+ :ipool => ipool,
330
+ :_type => type,
331
+ :ds_id => datastore_found['ID'],
332
+ :opts => {
333
+ :persistent => vm? ? 'YES':'NO'
334
+ },
335
+ :images => images
336
+ }
337
+
338
+ image_import, image_name =
339
+ VCenterDriver::Datastore
340
+ .get_image_import_template(
341
+ params
342
+ )
343
+ # Image is already in the datastore
344
+ if image_import[:one]
345
+ # This is the disk info
346
+ disk_tmp = ''
347
+ disk_tmp << "DISK=[\n"
348
+ disk_tmp <<
349
+ "IMAGE_ID=\"#{image_import[:one]['ID']}\",\n"
350
+ disk_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
351
+ disk_tmp << "]\n"
352
+ disk_info << disk_tmp
353
+
354
+ elsif !image_import[:template].empty?
355
+
356
+ # Then the image is created as it's not in the datastore
357
+ one_i =
358
+ VCenterDriver::VIHelper
359
+ .new_one_item(
360
+ OpenNebula::Image
361
+ )
362
+ allocated_images << one_i
363
+ rc = one_i.allocate(image_import[:template],
364
+ datastore_found['ID'].to_i, false)
365
+
366
+ if OpenNebula.is_error?(rc)
367
+ error = ' Error creating disk from '\
368
+ "template: #{rc.message}\n"
369
+ break
370
+ end
371
+
372
+ # Monitor image, we need READY state
373
+ one_i.info
374
+ start_time = Time.now
375
+
376
+ while (one_i.state_str != 'READY') &&
377
+ (Time.now - start_time < 300)
378
+ sleep 1
379
+ one_i.info
380
+ end
381
+
382
+ # Add info for One template
383
+ one_i.info
384
+ disk_info << "DISK=[\n"
385
+ disk_info << "IMAGE_ID=\"#{one_i['ID']}\",\n"
386
+ disk_info << "OPENNEBULA_MANAGED=\"NO\"\n"
387
+ disk_info << "]\n"
388
+
389
+ images.push(image_name)
390
+ end
391
+ end
392
+ rescue StandardError => e
393
+ error = "\n There was an error trying to create an "\
394
+ 'image for disk in vcenter template. '\
395
+ "Reason: #{e.message}"
396
+
397
+ if VCenterDriver::CONFIG[:debug_information]
398
+ error += "\n\n#{e.backtrace}"
399
+ end
400
+ ensure
401
+ unlock
402
+ if !error.empty? && allocated_images
403
+ # Rollback delete disk images
404
+ allocated_images.each do |i|
405
+ i.delete
406
+ end
407
+ end
408
+ end
409
+
410
+ [error, disk_info, allocated_images]
411
+ end
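A sketch of how this method is driven by an importer (the pools, the vCenter instance UUID and the template object are assumed to be obtained by the caller):

# Illustrative call; dpool and ipool are OpenNebula Datastore and Image pools,
# vc_uuid the vCenter instance UUID, `template` an instance of this class.
type = { :object => 'template', :id => one_template_id }
error, disk_info, allocated_images = template.import_vcenter_disks(
  vc_uuid, dpool, ipool, type
)
raise error unless error.empty?

# disk_info now contains the DISK=[ IMAGE_ID=..., OPENNEBULA_MANAGED="NO" ]
# sections ready to be appended to the OpenNebula template under construction.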
412
+
413
+ ########################################################################
414
+ # Create AR
415
+ # @param nic [object] contains properties of the nic
416
+ # @param with_id [Boolean] determine if AR will contains AR_ID
417
+ # @param ipv4 [string] create the AR with an IPv4 address
418
+ # @param ipv6 [string] create the AR with an IPv6 address
419
+ #
420
+ # * in case of IPv6 we use a standard PREFIX_LENGTH = 64
421
+ # * if we pass ipv4 we force the nic to use that IPv4
422
+ # * if we pass ipv6 we force the nic to use that IPv6
423
+ # @return ar_tmp
424
+ ########################################################################
425
+ def create_ar(nic, with_id = false, ipv4 = nil, ipv6 = nil)
426
+ ar_tmp = "AR=[\n"
427
+
428
+ # if ipv4 and ipv6 are defined, create an IPv4 address with a static
429
+ # IPv6 address
430
+ if ipv4 && ipv6
431
+ ar_tmp << "TYPE=\"IP4_6_STATIC\",\n"
432
+ ar_tmp << "IP=\"#{ipv4}\",\n"
433
+ ar_tmp << "IP6=\"#{ipv6}\",\n"
434
+ ar_tmp << "PREFIX_LENGTH=\"64\",\n"
435
+ # if just ipv4 is defined, create an AR with just an IPv4 address
436
+ elsif ipv4
437
+ ar_tmp << "TYPE=\"IP4\",\n"
438
+ ar_tmp << "IP=\"#{ipv4}\",\n"
439
+ # if just ipv6 is defined, create an AR with just an IPv6 address
440
+ elsif ipv6
441
+ ar_tmp << "TYPE=\"IP6_STATIC\",\n"
442
+ ar_tmp << "IP6=\"#{ipv6}\",\n"
443
+ ar_tmp << "PREFIX_LENGTH=\"64\",\n"
444
+ # in case the nic has mac, ipv4 and ipv6 defined, create an AR with
445
+ # this configuration
446
+ elsif nic[:mac] && nic[:ipv4] && nic[:ipv6]
447
+ ar_tmp << "AR_ID=0,\n" if with_id
448
+ ar_tmp << "TYPE=\"IP4_6_STATIC\",\n"
449
+ ar_tmp << "IP=\"#{nic[:ipv4]}\",\n"
450
+ ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
451
+ ar_tmp << "IP6=\"#{nic[:ipv6]}\",\n"
452
+ ar_tmp << "PREFIX_LENGTH=\"64\",\n"
453
+ # in case the nic has mac and ipv6 defined, create an AR with
454
+ # this configuration
455
+ elsif nic[:mac] && nic[:ipv6]
456
+ ar_tmp << "AR_ID=0,\n" if with_id
457
+ ar_tmp << "TYPE=\"IP6_STATIC\",\n"
458
+ ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
459
+ ar_tmp << "IP6=\"#{nic[:ipv6]}\",\n"
460
+ ar_tmp << "PREFIX_LENGTH=\"64\",\n"
461
+ # in case the nic has mac and ipv4 defined, create an AR with
462
+ # this configuration
463
+ elsif nic[:mac] && nic[:ipv4]
464
+ ar_tmp << "AR_ID=0,\n" if with_id
465
+ ar_tmp << "TYPE=\"IP4\",\n"
466
+ ar_tmp << "IP=\"#{nic[:ipv4]}\",\n"
467
+ ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
468
+ # in case the nic has no default configuration, create an ETHER AR
469
+ else
470
+ ar_tmp << "AR_ID=0,\n" if with_id
471
+ ar_tmp << "TYPE=\"ETHER\",\n"
472
+ ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
473
+ end
474
+
475
+ ar_tmp << "SIZE=\"1\"\n"
476
+ ar_tmp << "]\n"
477
+
478
+ ar_tmp
479
+ end
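As an example of the branches above, a wild-VM NIC carrying only a MAC and an IPv4 address yields an address range like this (illustrative values):

# create_ar({ :mac => '00:50:56:aa:bb:cc', :ipv4 => '10.0.0.5' }, true) returns:
#
#   AR=[
#   AR_ID=0,
#   TYPE="IP4",
#   IP="10.0.0.5",
#   MAC="00:50:56:aa:bb:cc",
#   SIZE="1"
#   ]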
480
+
481
+ def save_ar_ids(network_found, nic, ar_ids)
482
+ value = []
483
+ ars_new = network_found.to_hash['VNET']['AR_POOL']['AR']
484
+ ars_new = [ars_new] if ars_new.class.to_s.eql? 'Hash'
485
+ last_id = ars_new.last['AR_ID']
486
+ if ar_ids.key?(nic[:net_ref])
487
+ ref = nic[:net_ref]
488
+ value = ar_ids[ref.to_s]
489
+ end
490
+
491
+ value.insert(value.length, last_id)
492
+ ar_ids.store(nic[:net_ref], value)
493
+
494
+ last_id
495
+ end
496
+
497
+ def find_alias_ips_in_network(
498
+ network,
499
+ vm_object,
500
+ alias_ipv4 = nil,
501
+ alias_ipv6 = nil
502
+ )
503
+ ipv4 = ipv6 = ''
504
+ return unless vm_object.is_a?(VCenterDriver::VirtualMachine)
505
+
506
+ ip = nil
507
+
508
+ network.info
509
+
510
+ unless alias_ipv4.nil?
511
+ ip = IPAddr.new(alias_ipv4)
512
+ end
513
+
514
+ unless alias_ipv6.nil?
515
+ ip = IPAddr.new(alias_ipv6)
516
+ end
517
+
518
+ if ip.nil?
519
+ return [ipv4, ipv6]
520
+ end
521
+
522
+ ar_array = network.to_hash['VNET']['AR_POOL']['AR']
523
+ ar_array = [ar_array] if ar_array.is_a?(Hash)
524
+ ipv4, ipv6 = find_ip_in_ar(ip, ar_array) if ar_array
525
+
526
+ [ipv4, ipv6]
527
+ end
528
+
529
+ def find_ips_in_network(
530
+ network,
531
+ vm_object,
532
+ nic,
533
+ force = false,
534
+ first_ip = false
535
+ )
536
+ ipv4 = ipv6 = ''
537
+ ar_id = -1
538
+ return unless vm_object.is_a?(VCenterDriver::VirtualMachine)
539
+
540
+ network.info
541
+
542
+ # Iterate over the retrieved vCenter VM NICs
543
+ unless vm_object.item.guest.net.empty?
544
+ vm_object.item.guest.net.each do |net|
545
+ mac = net.macAddress
546
+ next unless nic[:mac] == mac
547
+ next unless net.ipConfig
548
+ next if net.ipConfig.ipAddress.empty?
549
+
550
+ net.ipConfig.ipAddress.each do |ip_config|
551
+ ip = IPAddr.new(ip_config.ipAddress)
552
+
553
+ if force
554
+ ipv4 = ip.to_s if ip.ipv4?
555
+ ipv6 = ip.to_s if ip.ipv6?
556
+ return [ipv4, ipv6]
557
+ end
558
+
559
+ ar_array = network.to_hash['VNET']['AR_POOL']['AR']
560
+ ar_array = [ar_array] if ar_array.is_a?(Hash)
561
+ ipv4, ipv6, ar_id = find_ip_in_ar(ip,
562
+ ar_array) if ar_array
563
+
564
+ if first_ip
565
+ return [ipv4, ipv6, ar_id]
566
+ end
567
+
568
+ break if (ipv4 !='') || (ipv6 != '')
569
+ end
570
+ break
571
+ end
572
+ end
573
+ [ipv4, ipv6, ar_id]
574
+ end
575
+
576
+ def find_ip_in_ar(ip, ar_array)
577
+ ipv4 = ipv6 = ''
578
+ ar_id = -1
579
+ ar_array.each do |ar|
580
+ first_condition = ar.key?('IP') && ar.key?('IP_END')
581
+ second_condition = ar.key?('IP6') && ar.key?('IP6_END')
582
+
583
+ next unless first_condition || second_condition
584
+
585
+ start_ip = IPAddr.new(ar['IP']) unless ar['IP'].nil?
586
+ end_ip = IPAddr.new(ar['IP_END']) unless ar['IP_END'].nil?
587
+ start_ip = IPAddr.new(ar['IP6']) unless ar['IP6'].nil?
588
+ end_ip = IPAddr.new(ar['IP6_END']) unless ar['IP6_END'].nil?
589
+
590
+ next unless ip.family == start_ip.family &&
591
+ ip.family == end_ip.family
592
+
593
+ next unless ip >= start_ip && ip <= end_ip
594
+
595
+ ipv4 = ip.to_s if ip.ipv4?
596
+ ipv6 = ip.to_s if ip.ipv6?
597
+ ar_id = ar['ID']
598
+ end
599
+ [ipv4, ipv6, ar_id]
600
+ end
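An illustrative call (from within this class) against a normalized AR list, with string keys as produced by OpenNebula's XML-to-hash conversion; note the returned AR ID keeps its string type:

ar_array = [
  { 'ID' => '0', 'IP'  => '10.0.0.1', 'IP_END'  => '10.0.0.254' },
  { 'ID' => '1', 'IP6' => 'fd00::1',  'IP6_END' => 'fd00::ff' }
]

find_ip_in_ar(IPAddr.new('10.0.0.42'), ar_array) # => ['10.0.0.42', '', '0']
find_ip_in_ar(IPAddr.new('fd00::10'),  ar_array) # => ['', 'fd00::10', '1']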
601
+
602
+ def nic_alias_from_nic(id, nic, nic_index, network_found, vm_object)
603
+ nic_tmp = ''
604
+
605
+ nic_alias_index = 1
606
+ if nic[:ipv4_additionals]
607
+ nic[:ipv4_additionals].split(',').each do |ipv4_additional|
608
+ ipv4, ipv6 =
609
+ find_alias_ips_in_network(
610
+ network_found,
611
+ vm_object,
612
+ ipv4_additional
613
+ )
614
+ if ipv4.empty? && ipv6.empty?
615
+ ar_tmp = create_ar(
616
+ nic,
617
+ false,
618
+ ipv4_additional
619
+ )
620
+ network_found.add_ar(ar_tmp)
621
+ end
622
+ network_found.info
623
+
624
+ nic_tmp << "NIC_ALIAS=[\n"
625
+ nic_tmp << "NETWORK_ID=\"#{id}\",\n"
626
+ nic_tmp << "IP=\"#{ipv4_additional}\",\n"
627
+ nic_tmp <<
628
+ "NAME=\"NIC#{nic_index}_ALIAS#{nic_alias_index}\",\n"
629
+ nic_tmp << "PARENT=\"NIC#{nic_index}\"\n"
630
+ nic_tmp << "]\n"
631
+ nic_alias_index += 1
632
+ end
633
+ end
634
+ if nic[:ipv6_additionals]
635
+ nic[:ipv6_additionals].split(',').each do |ipv6_additional|
636
+ ipv4, ipv6 = find_alias_ips_in_network(
637
+ network_found,
638
+ vm_object,
639
+ ipv6_additional
640
+ )
641
+ if ipv4.empty? && ipv6.empty?
642
+ ar_tmp = create_ar(
643
+ nic,
644
+ false,
645
+ nil,
646
+ ipv6_additional
647
+ )
648
+ network_found.add_ar(ar_tmp)
649
+ end
650
+ network_found.info
651
+
652
+ nic_tmp << "NIC_ALIAS=[\n"
653
+ nic_tmp << "NETWORK_ID=\"#{id}\",\n"
654
+ nic_tmp << "IP6=\"#{ipv6_additional}\",\n"
655
+ nic_tmp <<
656
+ "NAME=\"NIC#{nic_index}_ALIAS#{nic_alias_index}\",\n"
657
+ nic_tmp << "PARENT=\"NIC#{nic_index}\"\n"
658
+ nic_tmp << "]\n"
659
+ nic_alias_index += 1
660
+ end
661
+ end
662
+
663
+ nic_tmp
664
+ end
665
+
666
+ def nic_from_network_created(one_vn, nic, nic_index, vm_object, _ar_ids)
667
+ nic_tmp = "NIC=[\n"
668
+ nic_tmp << "NETWORK_ID=\"#{one_vn.id}\",\n"
669
+ nic_tmp << "NAME =\"NIC#{nic_index}\",\n"
670
+
671
+ if vm?
672
+ if nic[:mac]
673
+ nic_tmp << "MAC=\"#{nic[:mac]}\",\n"
674
+ end
675
+ if nic[:ipv4_additionals]
676
+ nic_tmp <<
677
+ 'VCENTER_ADDITIONALS_IP4'\
678
+ "=\"#{nic[:ipv4_additionals]}\",\n"
679
+ end
680
+ if nic[:ipv6]
681
+ nic_tmp <<
682
+ "VCENTER_IP6=\"#{nic[:ipv6]}\",\n"
683
+ end
684
+ if nic[:ipv6_global]
685
+ nic_tmp <<
686
+ "IP6_GLOBAL=\"#{nic[:ipv6_global]}\",\n"
687
+ end
688
+ if nic[:ipv6_ula]
689
+ nic_tmp <<
690
+ "IP6_ULA=\"#{nic[:ipv6_ula]}\",\n"
691
+ end
692
+ if nic[:ipv6_additionals]
693
+ nic_tmp <<
694
+ 'VCENTER_ADDITIONALS_IP6'\
695
+ "=\"#{nic[:ipv6_additionals]}\",\n"
696
+ end
697
+ end
698
+
699
+ nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
700
+ nic_tmp << "]\n"
701
+
702
+ if vm?
703
+ nic_tmp << nic_alias_from_nic(one_vn.id, nic, nic_index,
704
+ one_vn, vm_object)
705
+ end
706
+
707
+ nic_tmp
708
+ end
709
+
710
+ def nic_from_network_found(
711
+ network_found,
712
+ vm_object,
713
+ nic,
714
+ _ar_ids,
715
+ nic_index
716
+ )
717
+ nic_tmp = "NIC=[\n"
718
+ nic_tmp << "NETWORK_ID=\"#{network_found['ID']}\",\n"
719
+ nic_tmp << "NAME =\"NIC#{nic_index}\",\n"
720
+
721
+ if vm?
722
+ ipv4, ipv6 = find_ips_in_network(network_found, vm_object,
723
+ nic, false, true)
724
+ if ipv4.empty? && ipv6.empty?
725
+ ar_tmp = create_ar(nic)
726
+ network_found.add_ar(ar_tmp)
727
+ end
728
+ ipv4, ipv6 = find_ips_in_network(network_found, vm_object,
729
+ nic, true)
730
+ network_found.info
731
+
732
+ # This is the existing nic info
733
+ if nic[:mac] && ipv4.empty? && ipv6.empty?
734
+ nic_tmp << "MAC=\"#{nic[:mac]}\",\n"
735
+ end
736
+ nic_tmp << "IP=\"#{ipv4}\"," unless ipv4.empty?
737
+ nic_tmp << "IP6=\"#{ipv6}\"," unless ipv6.empty?
738
+ if nic[:ipv4_additionals]
739
+ nic_tmp <<
740
+ 'VCENTER_ADDITIONALS_IP4'\
741
+ "=\"#{nic[:ipv4_additionals]}\",\n"
742
+ end
743
+ if nic[:ipv6]
744
+ nic_tmp << "VCENTER_IP6=\"#{nic[:ipv6]}\",\n"
745
+ end
746
+
747
+ if nic[:ipv6_global]
748
+ nic_tmp << "IP6_GLOBAL=\"#{nic[:ipv6_global]}\",\n"
749
+ end
750
+
751
+ if nic[:ipv6_ula]
752
+ nic_tmp << "IP6_ULA=\"#{nic[:ipv6_ula]}\",\n"
753
+ end
754
+
755
+ if nic[:ipv6_additionals]
756
+ nic_tmp <<
757
+ 'VCENTER_ADDITIONALS_IP6'\
758
+ "=\"#{nic[:ipv6_additionals]}\",\n"
759
+ end
760
+ end
761
+
762
+ nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
763
+ nic_tmp << "]\n"
764
+
765
+ if vm?
766
+ nic_tmp <<
767
+ nic_alias_from_nic(
768
+ network_found['ID'],
769
+ nic,
770
+ nic_index,
771
+ network_found,
772
+ vm_object
773
+ )
774
+ end
775
+
776
+ nic_tmp
777
+ end
778
+
779
+ # Creates an OpenNebula Virtual Network as part of the VM Template
780
+ # import process. This only needs to happen if no VNET in OpenNebula
781
+ # is present that refers to the network where the NIC of the VM Template
782
+ # is hooked to.
783
+ def create_network_for_import(
784
+ opts
785
+ )
786
+ nic = opts[:nic]
787
+ ccr_ref = opts[:ccr_ref]
788
+ ccr_name = opts[:ccr_name]
789
+ vc_uuid = opts[:vc_uuid]
790
+ vcenter_instance_name = opts[:vcenter_instance_name]
791
+ dc_name = opts[:dc_name]
792
+ template_ref = opts[:template_ref]
793
+ dc_ref = opts[:dc_ref]
794
+ vm_id = opts[:vm_id]
795
+ hpool = opts[:hpool]
796
+ vi_client = opts[:vi_client]
797
+
798
+ config = {}
799
+ config[:refs] = nic[:refs]
800
+
801
+ # Let's get the OpenNebula hosts ids
802
+ # associated to the clusters references
803
+ config[:one_ids] = nic[:refs].map do |ref|
804
+ VCenterDriver::VIHelper
805
+ .find_by_ref(
806
+ OpenNebula::HostPool,
807
+ 'TEMPLATE/VCENTER_CCR_REF',
808
+ ref,
809
+ vc_uuid,
810
+ hpool
811
+ )['CLUSTER_ID'] rescue -1
812
+ end
813
+
814
+ if vm?
815
+ unmanaged = 'wild'
816
+ else
817
+ unmanaged = 'template'
818
+ end
819
+
820
+ net = VCenterDriver::Network
821
+ .new_from_ref(
822
+ nic[:net_ref],
823
+ vi_client
824
+ )
825
+ if net
826
+ vid = VCenterDriver::Network.retrieve_vlanid(net.item)
827
+ end
828
+ case nic[:pg_type]
829
+ # Distributed PortGroups
830
+ when VCenterDriver::Network::NETWORK_TYPE_DPG
831
+ config[:sw_name] =
832
+ nic[:network]
833
+ .config
834
+ .distributedVirtualSwitch
835
+ .name
836
+ # For DistributedVirtualPortgroups
837
+ # there are networks and uplinks
838
+ config[:uplink] = false
839
+ # NSX-V PortGroups
840
+ when VCenterDriver::Network::NETWORK_TYPE_NSXV
841
+ config[:sw_name] =
842
+ nic[:network]
843
+ .config
844
+ .distributedVirtualSwitch
845
+ .name
846
+ # For NSX-V ( is the same as
847
+ # DistributedVirtualPortgroups )
848
+ # there are networks and uplinks
849
+ config[:uplink] = false
850
+
851
+ host_id = vi_client.instance_variable_get '@host_id'
852
+
853
+ begin
854
+ nsx_client =
855
+ NSXDriver::NSXClient
856
+ .new_from_id(
857
+ host_id
858
+ )
859
+ rescue StandardError
860
+ nsx_client = nil
861
+ end
862
+
863
+ if !nsx_client.nil?
864
+ nsx_net =
865
+ NSXDriver::VirtualWire
866
+ .new_from_name(
867
+ nsx_client,
868
+ nic[:net_name]
869
+ )
870
+
871
+ config[:nsx_id] = nsx_net.ls_id
872
+ config[:nsx_vni] = nsx_net.ls_vni
873
+ config[:nsx_tz_id] = nsx_net.tz_id
874
+ end
875
+ # Standard PortGroups
876
+ when VCenterDriver::Network::NETWORK_TYPE_PG
877
+ # There are no uplinks for standard portgroups,
878
+ # so all Standard
879
+ # PortGroups are networks and no uplinks
880
+ config[:uplink] = false
881
+ config[:sw_name] =
882
+ VCenterDriver::Network
883
+ .virtual_switch(
884
+ nic[:network]
885
+ )
886
+ # NSX-T PortGroups
887
+ when VCenterDriver::Network::NETWORK_TYPE_NSXT
888
+ config[:sw_name] = \
889
+ nic[:network].summary.opaqueNetworkType
890
+ # There are no uplinks for NSX-T networks,
891
+ # so all NSX-T networks
892
+ # are networks and no uplinks
893
+ config[:uplink] = false
894
+
895
+ host_id = vi_client.instance_variable_get '@host_id'
896
+
897
+ begin
898
+ nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
899
+ rescue StandardError
900
+ nsx_client = nil
901
+ end
902
+
903
+ if !nsx_client.nil?
904
+ nsx_net =
905
+ NSXDriver::OpaqueNetwork
906
+ .new_from_name(
907
+ nsx_client,
908
+ nic[:net_name]
909
+ )
910
+
911
+ config[:nsx_id] = nsx_net.ls_id
912
+ config[:nsx_vni] = nsx_net.ls_vni
913
+ config[:nsx_tz_id] = nsx_net.tz_id
914
+ end
915
+ else
916
+ raise "Unknown network type: #{nic[:pg_type]}"
917
+ end
918
+
919
+ import_opts = {
920
+ :network_name=> nic[:net_name],
921
+ :sw_name=> config[:sw_name],
922
+ :network_ref=> nic[:net_ref],
923
+ :network_type=> nic[:pg_type],
924
+ :ccr_ref=> ccr_ref,
925
+ :ccr_name=> ccr_name,
926
+ :vcenter_uuid=> vc_uuid,
927
+ :vcenter_instance_name=> vcenter_instance_name,
928
+ :dc_name=> dc_name,
929
+ :unmanaged=> unmanaged,
930
+ :template_ref=> template_ref,
931
+ :dc_ref=> dc_ref,
932
+ :template_id=> vm_id
933
+ }
934
+
935
+ if nic[:pg_type] ==
936
+ VCenterDriver::Network::NETWORK_TYPE_NSXV ||
937
+ nic[:pg_type] ==
938
+ VCenterDriver::Network::NETWORK_TYPE_NSXT
939
+ import_opts[:nsx_id] = config[:nsx_id]
940
+ import_opts[:nsx_vni] = config[:nsx_vni]
941
+ import_opts[:nsx_tz_id] = config[:nsx_tz_id]
942
+ end
943
+
944
+ if vid
945
+ vlanid = VCenterDriver::Network.vlanid(vid)
946
+
947
+ # we have vlan id
948
+ if /\A\d+\z/.match(vlanid)
949
+ import_opts[:vlanid] = vlanid
950
+ end
951
+ end
952
+
953
+ # Prepare the Virtual Network template
954
+ one_vnet = VCenterDriver::Network.to_one_template(import_opts)
955
+
956
+ # an ETHER AR always has to be created because templates
957
+ # need it when they are instantiated
958
+ ar_tmp = ''
959
+ ar_tmp << "AR=[\n"
960
+ ar_tmp << "TYPE=\"ETHER\",\n"
961
+ ar_tmp << "SIZE=255\n"
962
+ ar_tmp << "]\n"
963
+
964
+ if vm?
965
+ ar_tmp << create_ar(nic, true)
966
+ end
967
+
968
+ one_vnet[:one] << ar_tmp
969
+ config[:one_object] = one_vnet[:one]
970
+ _cluster_id = VCenterDriver::VIHelper
971
+ .get_cluster_id(config[:one_ids])
972
+
973
+ one_vn = VCenterDriver::Network.create_one_network(config)
974
+ VCenterDriver::VIHelper.clean_ref_hash
975
+ one_vn.info
976
+
977
+ one_vn
978
+ end
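The opts hash unpacked at the top of this method is assembled by import_vcenter_nics below; a sketch of the expected keys (values are placeholders):

opts = {
  :nic                   => nic,                     # hash built by vcenter_nics_hash
  :ccr_ref               => 'domain-c7',             # cluster MOREF
  :ccr_name              => 'Cluster01',
  :vc_uuid               => vc_uuid,
  :vcenter_instance_name => 'vcenter.example.org',
  :dc_name               => 'Datacenter01',
  :template_ref          => 'vm-1234',               # template MOREF
  :dc_ref                => 'datacenter-2',
  :vm_id                 => one_template_id,
  :hpool                 => hpool,                   # OpenNebula::HostPool
  :vi_client             => vi_client                # VCenterDriver::VIClient
}
one_vn = create_network_for_import(opts)             # the created virtual network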
979
+
980
+ def import_vcenter_nics(
981
+ opts,
982
+ vm_id = nil,
983
+ dc_name = nil
984
+ )
985
+
986
+ vi_client = opts[:vi_client]
987
+ vc_uuid = opts[:vc_uuid]
988
+ npool = opts[:npool]
989
+ hpool = opts[:hpool]
990
+ vcenter_instance_name = opts[:vcenter]
991
+ template_ref = opts[:template_moref]
992
+ vm_object = opts[:vm_object]
993
+
994
+ nic_info = ''
995
+ error = ''
996
+ ar_ids = {}
997
+ begin
998
+ # Lock import operation, to avoid
999
+ # concurrent creation of networks
1000
+ lock
1001
+
1002
+ if !dc_name
1003
+ dc = datacenter
1004
+ dc_name = dc.item.name
1005
+ dc_ref = dc.item._ref
1006
+ end
1007
+
1008
+ ccr_ref = self['runtime.host.parent._ref']
1009
+ ccr_name = self['runtime.host.parent.name']
1010
+
1011
+ # Get nics and info required
1012
+ vc_nics = vcenter_nics_hash
1013
+
1014
+ # Track allocated networks for rollback
1015
+ allocated_networks = []
1016
+
1017
+ nic_index = 1
1018
+
1019
+ vc_nics.each do |nic|
1020
+ # Check if the network already exists
1021
+ network_found =
1022
+ VCenterDriver::VIHelper
1023
+ .find_by_ref(
1024
+ OpenNebula::VirtualNetworkPool,
1025
+ 'TEMPLATE/VCENTER_NET_REF',
1026
+ nic[:net_ref],
1027
+ vc_uuid,
1028
+ npool
1029
+ )
1030
+ # Network is already in OpenNebula
1031
+ if network_found
1032
+ nic_info << nic_from_network_found(network_found,
1033
+ vm_object,
1034
+ nic,
1035
+ ar_ids,
1036
+ nic_index.to_s)
1037
+ # Network not found
1038
+ else
1039
+ opts = {
1040
+ :nic => nic,
1041
+ :ccr_ref => ccr_ref,
1042
+ :ccr_name => ccr_name,
1043
+ :vc_uuid => vc_uuid,
1044
+ :vcenter_instance_name => vcenter_instance_name,
1045
+ :dc_name => dc_name,
1046
+ :template_ref => template_ref,
1047
+ :dc_ref => dc_ref,
1048
+ :vm_id => vm_id,
1049
+ :hpool => hpool,
1050
+ :vi_client => vi_client
1051
+ }
1052
+
1053
+ one_vn = create_network_for_import(
1054
+ opts
1055
+ )
1056
+
1057
+ allocated_networks << one_vn
1058
+
1059
+ nic_info << nic_from_network_created(
1060
+ one_vn,
1061
+ nic,
1062
+ nic_index.to_s,
1063
+ vm_object,
1064
+ ar_ids
1065
+ )
1066
+
1067
+ # Refresh npool
1068
+ npool.info_all
1069
+ end
1070
+ nic_index += 1
1071
+ end
1072
+ rescue StandardError => e
1073
+ error = "\n There was an error trying to create \
1074
+ a virtual network to represent a \
1075
+ vCenter network for a VM or VM Template. \
1076
+ Reason: #{e.message}"
1077
+ ensure
1078
+ unlock
1079
+ # Rollback, delete virtual networks
1080
+ if !error.empty? && allocated_networks
1081
+ allocated_networks.each do |n|
1082
+ n.delete
1083
+ end
1084
+ end
1085
+ end
1086
+
1087
+ [error, nic_info, ar_ids, allocated_networks]
1088
+ end
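A sketch of the calling convention for the NIC import (the pools and the client are assumed to be fetched beforehand by the caller):

opts = {
  :vi_client      => vi_client,
  :vc_uuid        => vc_uuid,
  :npool          => npool,                  # OpenNebula::VirtualNetworkPool
  :hpool          => hpool,                  # OpenNebula::HostPool
  :vcenter        => 'vcenter.example.org',
  :template_moref => 'vm-1234',
  :vm_object      => nil                     # or a VCenterDriver::VirtualMachine for wild VMs
}

error, nic_info, _ar_ids, _networks = template.import_vcenter_nics(opts, one_template_id)
raise error unless error.empty?
# nic_info carries the NIC=[...] and NIC_ALIAS=[...] sections for the template.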
1089
+
1090
+ def get_vcenter_disk_key(unit_number, controller_key)
1091
+ key = nil
1092
+
1093
+ @item['config.hardware.device'].each do |device|
1094
+ disk = {}
1095
+
1096
+ next unless disk_or_iso?(device)
1097
+
1098
+ disk[:device] = device
1099
+ next unless device.controllerKey == controller_key &&
1100
+ device.unitNumber == unit_number
1101
+
1102
+ key = device.key
1103
+ break
1104
+ end
1105
+
1106
+ key
1107
+ end
1108
+
1109
+ def vcenter_disks_get
1110
+ disks = []
1111
+ ide_controlled = []
1112
+ sata_controlled = []
1113
+ scsi_controlled = []
1114
+ controller = {}
1115
+
1116
+ @item['config.hardware.device'].each do |device|
1117
+ disk = {}
1118
+
1119
+ if device.is_a? RbVmomi::VIM::VirtualIDEController
1120
+ ide_controlled.concat(device.device)
1121
+ controller[device.key] = "ide#{device.busNumber}"
1122
+ end
1123
+
1124
+ if device.is_a? RbVmomi::VIM::VirtualSATAController
1125
+ sata_controlled.concat(device.device)
1126
+ controller[device.key] = "sata#{device.busNumber}"
1127
+ end
1128
+
1129
+ if device.is_a? RbVmomi::VIM::VirtualSCSIController
1130
+ scsi_controlled.concat(device.device)
1131
+ controller[device.key] = "scsi#{device.busNumber}"
1132
+ end
1133
+
1134
+ next unless disk_or_iso?(device)
1135
+
1136
+ disk[:device] = device
1137
+
1138
+ unless device.backing.datastore
1139
+ raise "datastore not found for VM's device"
1140
+ end
1141
+
1142
+ disk[:datastore] =
1143
+ device.backing.datastore
1144
+ disk[:path] =
1145
+ device.backing.fileName
1146
+ disk[:path_wo_ds]=
1147
+ disk[:path].sub(/^\[(.*?)\] /, '')
1148
+ disk?(device) ? disk[:type] = 'OS' : disk[:type] = 'CDROM'
1149
+ disk[:key] =
1150
+ device.key
1151
+ if ide_controlled.include?(device.key)
1152
+ disk[:prefix] = 'hd'
1153
+ end
1154
+ if scsi_controlled.include?(device.key)
1155
+ disk[:prefix] = 'sd'
1156
+ end
1157
+ if sata_controlled.include?(device.key)
1158
+ disk[:prefix] = 'sd'
1159
+ end
1160
+ disk[:tag] =
1161
+ "#{controller[device.controllerKey]}:#{device.unitNumber}"
1162
+
1163
+ disks << disk
1164
+ end
1165
+
1166
+ disks
1167
+ end
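Each element of the array returned above is a hash like the following (illustrative values):

# {
#   :device     => <RbVmomi::VIM::VirtualDisk>,
#   :datastore  => <RbVmomi::VIM::Datastore>,
#   :path       => '[nfs] one-42/disk.0.vmdk',
#   :path_wo_ds => 'one-42/disk.0.vmdk',
#   :type       => 'OS',                       # 'CDROM' for ISO backings
#   :key        => 2000,
#   :prefix     => 'sd',                       # 'hd' for IDE-controlled devices
#   :tag        => 'scsi0:0'
# }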
1168
+
1169
+ def vcenter_nics_list
1170
+ nics = []
1171
+ @item.config.hardware.device.each do |device|
1172
+ nics << device if VCenterDriver::Network.nic?(device)
1173
+ end
1174
+
1175
+ nics
1176
+ end
1177
+
1178
+ def identify_network(identifier, network)
1179
+ if network.class == RbVmomi::VIM::DistributedVirtualPortgroup
1180
+ return network if identifier == network.key
1181
+
1182
+ return
1183
+ end
1184
+
1185
+ if network.class == RbVmomi::VIM::Network
1186
+ return network if identifier == network
1187
+
1188
+ return
1189
+ end
1190
+
1191
+ return unless network.class == RbVmomi::VIM::OpaqueNetwork
1192
+
1193
+ if identifier == network.summary.opaqueNetworkId
1194
+ network
1195
+ else
1196
+ nil
1197
+ end
1198
+ end
1199
+
1200
+ def retrieve_from_device(device)
1201
+ device_network = nil
1202
+ device_network_id = nil
1203
+ # First search for the network corresponding to this device
1204
+ # Distributed Networks and NSX-V Networks
1205
+ if !device.backing[:port].nil?
1206
+ device_network_id = device.backing.port.portgroupKey
1207
+ # Standard Networks
1208
+ elsif !device.backing[:network].nil?
1209
+ device_network_id = device.backing[:network]
1210
+ # NSX-T Opaque Networks
1211
+ elsif !device.backing[:opaqueNetworkId].nil?
1212
+ device_network_id = device.backing[:opaqueNetworkId]
1213
+ end
1214
+
1215
+ # Check if networkId exists
1216
+ if device_network_id.nil?
1217
+ raise "Invalid or not supported network #{device.backing}"
1218
+ end
1219
+
1220
+ # Matching between device and network objects
1221
+ @item.network.each do |net|
1222
+ device_network = identify_network(device_network_id, net)
1223
+ break unless device_network.nil?
1224
+ end
1225
+
1226
+ # Check network matching
1227
+ if device_network.nil?
1228
+ raise "\"#{device.deviceInfo.label}\" \
1229
+ does not match any known network"
1230
+ end
1231
+
1232
+ res = {}
1233
+
1234
+ res[:refs] = device_network.host.map do |h|
1235
+ h.parent._ref if h.parent
1236
+ end
1237
+
1238
+ res[:net_name] =
1239
+ device_network.name
1240
+ res[:net_ref] =
1241
+ device_network._ref
1242
+ res[:pg_type] =
1243
+ VCenterDriver::Network
1244
+ .get_network_type(
1245
+ device_network,
1246
+ res[:net_name]
1247
+ )
1248
+ res[:network] =
1249
+ device_network
1250
+
1251
+ res
1252
+ end
1253
+
1254
+ def vcenter_nics_hash
1255
+ parse_live = lambda {|inets_raw|
1256
+ h = nil
1257
+ begin
1258
+ h = inets_raw.to_h
1259
+ rescue NoMethodError
1260
+ h = {}
1261
+ inets_raw.each do |nic_dev|
1262
+ h[nic_dev[0]] = nic_dev[1]
1263
+ end
1264
+ end
1265
+
1266
+ return h
1267
+ }
1268
+
1269
+ nics = []
1270
+ inets_raw = nil
1271
+ inets = {}
1272
+
1273
+ @item['config.hardware.device'].each do |device|
1274
+ next unless VCenterDriver::Network.nic?(device)
1275
+
1276
+ nic = retrieve_from_device(device)
1277
+ nic[:mac] = device.macAddress rescue nil
1278
+
1279
+ if vm? && online?
1280
+ inets_raw ||=
1281
+ @item['guest.net']
1282
+ .map
1283
+ .with_index {|x, _| [x.macAddress, x] }
1284
+ inets = parse_live.call(inets_raw) if inets.empty?
1285
+
1286
+ if !inets[nic[:mac]].nil?
1287
+ ip_addresses =
1288
+ inets[nic[:mac]]
1289
+ .ipConfig
1290
+ .ipAddress rescue nil
1291
+ end
1292
+
1293
+ if !ip_addresses.nil? && !ip_addresses.empty?
1294
+ nic[:ipv4],
1295
+ nic[:ipv4_additionals] = nil
1296
+ nic[:ipv6],
1297
+ nic[:ipv6_ula],
1298
+ nic[:ipv6_global],
1299
+ nic[:ipv6_additionals] = nil
1300
+ fill_nic(ip_addresses, nic)
1301
+ end
1302
+ end
1303
+ nics << nic
1304
+ end
1305
+
1306
+ nics
1307
+ end
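Each entry returned above looks roughly like this for a running wild VM (illustrative values; the IP fields are only filled when guest info is available):

# {
#   :refs             => ['domain-c7'],               # cluster MOREFs seeing the network
#   :net_name         => 'VM Network',
#   :net_ref          => 'network-13',
#   :pg_type          => VCenterDriver::Network::NETWORK_TYPE_PG,
#   :network          => <RbVmomi::VIM::Network>,
#   :mac              => '00:50:56:aa:bb:cc',
#   :ipv4             => '10.0.0.5',
#   :ipv4_additionals => nil,
#   :ipv6             => nil,
#   :ipv6_ula         => nil,
#   :ipv6_global      => nil,
#   :ipv6_additionals => nil
# }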
1308
+
1309
+ def fill_nic(ip_addresses, nic)
1310
+ (0...ip_addresses.length).each do |i|
1311
+ ip = ip_addresses[i].ipAddress
1312
+ if ip =~ Resolv::IPv4::Regex
1313
+ if nic[:ipv4]
1314
+ if nic[:ipv4_additionals]
1315
+ nic[:ipv4_additionals] += ',' + ip
1316
+ else
1317
+ nic[:ipv4_additionals] = ip
1318
+ end
1319
+ else
1320
+ nic[:ipv4] = ip
1321
+ end
1322
+ elsif ip_addresses[i].ipAddress =~ Resolv::IPv6::Regex
1323
+ if get_ipv6_prefix(ip, 10) == 'fe80'
1324
+ # we do not process this address (link-local)
1325
+ elsif get_ipv6_prefix(ip, 7) == 'fc00'
1326
+ nic[:ipv6_ula] = ip
1327
+ else
1328
+ if nic[:ipv6]
1329
+ if nic[:ipv6_additionals]
1330
+ nic[:ipv6_additionals] += ',' + ip
1331
+ else
1332
+ nic[:ipv6_additionals] = ip
1333
+ end
1334
+ else
1335
+ nic[:ipv6] = ip
1336
+ end
1337
+ end
1338
+ end
1339
+ end
1340
+ end
1341
+
1342
+ def get_ipv6_prefix(ipv6, prefix_length)
1343
+ ip_slice =
1344
+ ipv6
1345
+ .split(':')
1346
+ .map {|elem| elem.hex }
1347
+ .map do |elem|
1348
+ int, dec = elem.divmod(1)
1349
+ bin = int.to_s(2).to_s
1350
+
1351
+ while dec > 0
1352
+ int, dec = (dec * 2).divmod(1)
1353
+ bin << int.to_s
1354
+ end
1355
+
1356
+ bin
1357
+ end.map {|elem| elem.rjust(16, '0') } # rubocop:disable Style/MultilineBlockChain
1358
+
1359
+ ip_chain = ip_slice.join
1360
+ prefix = ip_chain[0, prefix_length]
1361
+
1362
+ cont = 0
1363
+ limit = prefix.length
1364
+ index = 0
1365
+ slices = []
1366
+
1367
+ while cont < limit
1368
+ slices[index] = prefix.slice(cont, 4)
1369
+ slices[index] = slices[index].ljust(4, '0')
1370
+ index +=1
1371
+ cont+=4
1372
+ end
1373
+
1374
+ slices.map {|elem| format('%0x', elem.to_i(2)) }
1375
+ .join.ljust(4, '0')
1376
+ end
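The two prefixes checked by fill_nic above resolve as follows (illustrative calls from within the class):

get_ipv6_prefix('fe80::1', 10)    # => 'fe80'  (link-local, skipped)
get_ipv6_prefix('fc00::1', 7)     # => 'fc00'  (unique local, stored in :ipv6_ula)
get_ipv6_prefix('2001:db8::1', 7) # => '2000'  (anything else is a regular IPv6 address)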
1377
+
1378
+ # Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom
1379
+ def disk_or_cdrom?(device)
1380
+ is_disk =
1381
+ !device
1382
+ .class
1383
+ .ancestors
1384
+ .index(RbVmomi::VIM::VirtualDisk).nil?
1385
+ is_cdrom =
1386
+ !device
1387
+ .class
1388
+ .ancestors
1389
+ .index(RbVmomi::VIM::VirtualCdrom).nil?
1390
+ is_disk || is_cdrom
1391
+ end
1392
+
1393
+ # Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
1394
+ def disk_or_iso?(device)
1395
+ is_disk =
1396
+ !device
1397
+ .class
1398
+ .ancestors
1399
+ .index(RbVmomi::VIM::VirtualDisk).nil?
1400
+ is_iso =
1401
+ device
1402
+ .backing
1403
+ .is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
1404
+ is_disk || is_iso
1405
+ end
1406
+
1407
+ # Checks if a RbVmomi::VIM::VirtualDevice is a disk
1408
+ def disk?(device)
1409
+ !device.class.ancestors.index(RbVmomi::VIM::VirtualDisk).nil?
1410
+ end
1411
+
1412
+ def cdrom?(device)
1413
+ device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
1414
+ end
1415
+
1416
+ # @return RbVmomi::VIM::ResourcePool, first resource pool in cluster
1417
+ def resource_pool
1418
+ self['runtime.host.parent.resourcePool']
1419
+ end
1420
+
1421
+ def esx_name
1422
+ self['runtime.host.name']
1423
+ end
1424
+
+ def vm_to_one(vm_name)
+ str = "NAME = \"#{vm_name}\"\n"\
+ "CPU = \"#{@vm_info['config.hardware.numCPU']}\"\n"\
+ "vCPU = \"#{@vm_info['config.hardware.numCPU']}\"\n"\
+ "MEMORY = \"#{@vm_info['config.hardware.memoryMB']}\"\n"\
+ "HYPERVISOR = \"vcenter\"\n"\
+ "CONTEXT = [\n"\
+ " NETWORK = \"YES\",\n"\
+ " SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
+ "]\n"\
+ "VCENTER_INSTANCE_ID =\"#{@vm_info[:vc_uuid]}\"\n"\
+ "VCENTER_CCR_REF =\"#{@vm_info[:cluster_ref]}\"\n"
+
+ str << "DEPLOY_ID =\"#{self['_ref']}\"\n"
+ @state = 'POWEROFF' if @state == 'd'
+ str << "IMPORT_STATE =\"#{@state}\"\n"
+
+ # Get DS information
+ if !@vm_info['datastore'].nil? &&
+ !@vm_info['datastore'].last.nil? &&
+ !@vm_info['datastore'].last._ref.nil?
+ ds_ref = vm_template_ds_ref
+ str << "VCENTER_DS_REF = \"#{ds_ref}\"\n"
+ end
+
+ vnc_port = nil
+ keymap =
+ VCenterDriver::VIHelper
+ .get_default(
+ 'VM/TEMPLATE/GRAPHICS/KEYMAP'
+ )
+
+ @vm_info['config.extraConfig'].each do |xtra|
+ if xtra[:key].downcase == 'remotedisplay.vnc.port'
+ vnc_port = xtra[:value]
+ end
+
+ if xtra[:key].downcase == 'remotedisplay.vnc.keymap'
+ keymap = xtra[:value]
+ end
+ end
+
+ if !@vm_info['config.extraConfig'].empty?
+ str << "GRAPHICS = [\n"\
+ " TYPE =\"vnc\",\n"
+ str << " PORT =\"#{vnc_port}\",\n" if vnc_port
+ str << " KEYMAP =\"#{keymap}\",\n" if keymap
+ str << " LISTEN =\"0.0.0.0\"\n"
+ str << "]\n"
+ end
+
+ if !@vm_info['config.annotation'] || @vm_info['config.annotation']
+ .empty?
+ str << 'DESCRIPTION = "vCenter Template \
+ imported by OpenNebula' \
+ " from Cluster #{@vm_info['cluster_name']}\"\n"
+ else
+ notes = @vm_info['config.annotation']
+ .gsub('\\', '\\\\')
+ .gsub('"', '\\"')
+ str << "DESCRIPTION = \"#{notes}\"\n"
+ end
+
+ case @vm_info['guest.guestFullName']
+ when /CentOS/i
+ str << "LOGO=images/logos/centos.png\n"
+ when /Debian/i
+ str << "LOGO=images/logos/debian.png\n"
+ when /Red Hat/i
+ str << "LOGO=images/logos/redhat.png\n"
+ when /Ubuntu/i
+ str << "LOGO=images/logos/ubuntu.png\n"
+ when /Windows XP/i
+ str << "LOGO=images/logos/windowsxp.png\n"
+ when /Windows/i
+ str << "LOGO=images/logos/windows8.png\n"
+ when /Linux/i
+ str << "LOGO=images/logos/linux.png\n"
+ end
+
+ str
+ end
+
+ # Gets MOREF from Datastore used by the VM. It validates
+ # the selected DS is not only used to host swap.
+ def vm_template_ds_ref
+ begin
+ ds_ref = nil
+ if @vm_info['datastore'].length > 1
+ swap_path = ''
+ @vm_info['config.extraConfig'].each do |element|
+ if element.key == 'sched.swap.derivedName'
+ swap_path = element.value
+ end
+ end
+ @vm_info['datastore'].each do |datastore|
+ path = datastore.summary.url.sub(%r{ds:///*}, '')
+ if !swap_path.include?(path) && !datastore._ref.nil?
+ ds_ref = datastore._ref
+ break
+ end
+ end
+ elsif @vm_info['datastore'].length == 1
+ if !@vm_info['datastore'].first._ref.nil?
+ ds_ref = @vm_info['datastore'].first._ref
+ end
+ end
+
+ ds_ref
+ rescue StandardError => e
+ "Could not find DATASTORE for this VM. Reason: #{e.message}"
+ end
+ end
+
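+ # Builds the base OpenNebula template string for a vCenter template
+ # (NAME, CPU, MEMORY, CONTEXT, GRAPHICS, DESCRIPTION and LOGO) from the
+ # collected vCenter properties.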
+ def self.template_to_one(
+ template,
+ vc_uuid,
+ ccr_ref,
+ ccr_name,
+ import_name
+ )
+ num_cpu, memory, annotation, guest_fullname =
+ template
+ .item
+ .collect(
+ 'config.hardware.numCPU',
+ 'config.hardware.memoryMB',
+ 'config.annotation',
+ 'guest.guestFullName'
+ )
+
+ str = "NAME = \"#{import_name}\"\n"\
+ "CPU = \"#{num_cpu}\"\n"\
+ "vCPU = \"#{num_cpu}\"\n"\
+ "MEMORY = \"#{memory}\"\n"\
+ "HYPERVISOR = \"vcenter\"\n"\
+ "CONTEXT = [\n"\
+ " NETWORK = \"YES\",\n"\
+ " SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
+ "]\n"\
+ "VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n"
+
+ str << "VCENTER_TEMPLATE_REF =\"#{template['_ref']}\"\n"
+ str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n"
+
+ str << "GRAPHICS = [\n"\
+ " TYPE =\"vnc\",\n"
+ str << " LISTEN =\"0.0.0.0\"\n"
+ str << "]\n"
+
+ if annotation.nil? || annotation.empty?
+ str << 'DESCRIPTION = "vCenter Template \
+ imported by OpenNebula' \
+ " from Cluster #{ccr_name}\"\n"
+ else
+ notes = annotation.gsub('\\', '\\\\').gsub('"', '\\"')
+ str << "DESCRIPTION = \"#{notes}\"\n"
+ end
+
+ case guest_fullname
+ when /CentOS/i
+ str << "LOGO=images/logos/centos.png\n"
+ when /Debian/i
+ str << "LOGO=images/logos/debian.png\n"
+ when /Red Hat/i
+ str << "LOGO=images/logos/redhat.png\n"
+ when /Ubuntu/i
+ str << "LOGO=images/logos/ubuntu.png\n"
+ when /Windows XP/i
+ str << "LOGO=images/logos/windowsxp.png\n"
+ when /Windows/i
+ str << "LOGO=images/logos/windows8.png\n"
+ when /Linux/i
+ str << "LOGO=images/logos/linux.png\n"
+ end
+
+ str
+ end
+
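+ # Prepares the hash used by the importers to display a vCenter
+ # template: resolves its cluster, resource pool list and folder
+ # location, generates the OpenNebula import name and the base template
+ # through template_to_one. Returns nil if any step fails.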
+ def self.get_xml_template(
+ template,
+ vcenter_uuid,
+ vi_client,
+ dc_name = nil,
+ rp_cache = {}
+ )
+ begin
+ template_ref = template['_ref']
+ template_name = template['name']
+ template_ccr = template['runtime.host.parent']
+ template_ccr_ref = template_ccr._ref
+ template_ccr_name = template_ccr.name
+
+ # Get datacenter info
+ if !dc_name
+ dc = datacenter
+ dc_name = dc.item.name
+ end
+
+ # Get resource pools and generate a list
+ if !rp_cache[template_ccr_name]
+ tmp_cluster =
+ VCenterDriver::ClusterComputeResource
+ .new_from_ref(
+ template_ccr_ref,
+ vi_client
+ )
+ rp_list = tmp_cluster.get_resource_pool_list
+ rp = ''
+ if !rp_list.empty?
+ rp_name_list = []
+ rp_list.each do |rp_hash|
+ rp_name_list << rp_hash[:name]
+ end
+ rp = 'O|list|Which resource pool \
+ do you want this VM to run in? '
+ rp << "|#{rp_name_list.join(',')}" # List of RP
+ rp << "|#{rp_name_list.first}" # Default RP
+ end
+ rp_cache[template_ccr_name] = {}
+ rp_cache[template_ccr_name][:rp] = rp
+ rp_cache[template_ccr_name][:rp_list] = rp_list
+ end
+ rp = rp_cache[template_ccr_name][:rp]
+ rp_list = rp_cache[template_ccr_name][:rp_list]
+
+ # Determine the location path for the template
+ vcenter_template =
+ VCenterDriver::VirtualMachine
+ .new_without_id(
+ vi_client,
+ template_ref
+ )
+ item = vcenter_template.item
+ folders = []
+ until item.instance_of? RbVmomi::VIM::Datacenter
+ item = item.parent
+ first_condition = item.instance_of? RbVmomi::VIM::Datacenter
+ second_condition = item.name != 'vm'
+
+ if !first_condition && second_condition
+ folders << item.name
+ end
+ if item.nil?
+ raise 'Could not find the template\'s parent location'
+ end
+ end
+ location = folders.reverse.join('/')
+ location = '/' if location.empty?
+
+ # Generate a crypto hash for the template
+ # name and take the first 12 chars
+ import_name =
+ VCenterDriver::VIHelper
+ .one_name(
+ OpenNebula::TemplatePool,
+ template_name,
+ template_ref + vcenter_uuid
+ )
+
+ template_name = template_name.tr("\u007F", '')
+ template_ccr_name = template_ccr_name.tr("\u007F", '')
+
+ # Prepare the Hash that will be used by importers to display
+ # the object being imported
+ one_tmp = {}
+ one_tmp[:name] = import_name
+ one_tmp[:ref] = template_ref
+ one_tmp[:dc_name] = dc_name
+ one_tmp[:template_name] = template_name
+ one_tmp[:sunstone_template_name] =
+ "#{template_name} [ Cluster: #{template_ccr_name} \
+ - Template location: #{location} ]"
+ one_tmp[:template_location] = location
+ one_tmp[:vcenter_ccr_ref] = template_ccr_ref
+ one_tmp[:vcenter_ref] = template_ref
+ one_tmp[:vcenter_instance_uuid] = vcenter_uuid
+ one_tmp[:cluster_name] = template_ccr_name
+ one_tmp[:rp] = rp
+ one_tmp[:rp_list] = rp_list
+ one_tmp[:template] = template
+ # By default we import disks and nics
+ one_tmp[:import_disks_and_nics] = true
+
+ # Get the host ID of the OpenNebula host
+ # which represents the vCenter Cluster
+ one_host =
+ VCenterDriver::VIHelper
+ .find_by_ref(
+ OpenNebula::HostPool,
+ 'TEMPLATE/VCENTER_CCR_REF',
+ template_ccr_ref,
+ vcenter_uuid
+ )
+ host_id = one_host['ID']
+ unless host_id
+ raise "Could not find the host's ID associated \
+ with the template being imported"
+ end
+
+ # Get the OpenNebula template hash
+ one_tmp[:one] =
+ template_to_one(
+ template,
+ vcenter_uuid,
+ template_ccr_ref,
+ template_ccr_name,
+ import_name
+ )
+ one_tmp
+ rescue StandardError
+ nil
+ end
+ end
+
+ # TODO: check with uuid
+ def self.new_from_ref(ref, vi_client)
+ new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
+ end
+
+ end
+
+ ##########################################################################
+ # Class VmImporter
+ ##########################################################################
+ class VmImporter < VCenterDriver::VcImporter
+
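+ # Importer of vCenter VM templates into OpenNebula templates.
+ # @defaults holds the per-template import options (linked_clone, copy,
+ # name, folder, resourcepool, type) expected by the import method.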
+ def initialize(one_client, vi_client)
+ super(one_client, vi_client)
+ @one_class = OpenNebula::Template
+
+ @defaults = {
+ :linked_clone => '0',
+ :copy => '0',
+ :name => '',
+ :folder => '',
+ :resourcepool => [],
+ :type => ''
+ }
+ end
+
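+ # Returns the list of vCenter templates that have not been imported
+ # yet, checked against OpenNebula's TemplatePool.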
+ def get_list(_args = {})
+ dc_folder = VCenterDriver::DatacenterFolder.new(@vi_client)
+
+ # Get OpenNebula's templates pool
+ tpool =
+ VCenterDriver::VIHelper
+ .one_pool(
+ OpenNebula::TemplatePool,
+ false
+ )
+ if tpool.respond_to?(:message)
+ raise "Could not get OpenNebula TemplatePool: #{tpool.message}"
+ end
+
+ @list = dc_folder.get_unimported_templates(@vi_client, tpool)
+ end
+
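+ # Builds the resource pool snippet of the template: an empty string for
+ # the 'default' type or when no resource pools are given, a fixed
+ # VCENTER_RESOURCE_POOL for 'fixed', and a USER_INPUTS list otherwise.
+ # Illustrative call, values not from the original source (output shown
+ # modulo the whitespace added by the line continuation):
+ #   rp_opts('list', ['rp1', 'rp2'])
+ #   # => USER_INPUTS=[VCENTER_RESOURCE_POOL="M|list|resource pool list|rp1,rp2|rp1"]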
+ def rp_opts(type, rps)
+ str = ''
+
+ return str if (type == 'default') || rps.empty?
+
+ if type == 'fixed'
+ str << "VCENTER_RESOURCE_POOL=\"#{rps}\"\n"
+ else
+ default = rps.first
+ rps_str = rps.join(',')
+
+ str << 'USER_INPUTS=['
+ str << "VCENTER_RESOURCE_POOL=\"M|list|resource \
+ pool list|#{rps_str}|#{default}\""
+ str << ']'
+ end
+
+ str
+ end
+
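+ # Imports the selected vCenter template into OpenNebula: optionally
+ # creates a linked-clone copy with delta disks, allocates the template,
+ # imports its disks and NICs (queueing rollback actions on failure) and
+ # appends the resource pool options.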
+ def import(selected)
+ opts = @info[selected[:ref]][:opts]
+ working_template = selected
+
+ vcenter = selected[:vcenter]
+ vc_uuid = selected[:vcenter_instance_uuid]
+ dc = selected[:dc_name]
+
+ linked_clone = opts[:linked_clone] == '1'
+ copy = opts[:copy] == '1'
+ deploy_in_folder = !opts[:folder].empty?
+
+ res = { :id => [], :name => selected[:name] }
+ dpool, ipool, npool, hpool = create_pools
+
+ template =
+ VCenterDriver::Template
+ .new_from_ref(
+ selected[:vcenter_ref],
+ @vi_client
+ )
+ # Linked clones and copy preparation
+ if linked_clone
+ # if we reach this point we need to delete
+ # the template if something goes wrong
+ if copy
+ error, template_copy_ref =
+ selected[:template]
+ .create_template_copy(
+ opts[:name]
+ )
+ unless template_copy_ref
+ raise 'There is a problem creating ' \
+ "your copy: #{error}"
+ end
+
+ template =
+ VCenterDriver::Template
+ .new_from_ref(
+ template_copy_ref,
+ @vi_client
+ )
+ @rollback <<
+ Raction
+ .new(
+ template,
+ :delete_template
+ )
+
+ one_template =
+ VCenterDriver::Template
+ .get_xml_template(
+ template,
+ vc_uuid,
+ @vi_client,
+ dc
+ )
+ unless one_template
+ raise 'There is a problem obtaining info '\
+ "from your template's copy"
+ end
+
+ working_template = one_template
+ end
+
+ lc_error, use_lc = template.create_delta_disks
+ if lc_error
+ raise 'Something went wrong with the create \
+ delta disk operation'
+ end
+
+ if use_lc
+ working_template[:one] <<
+ "\nVCENTER_LINKED_CLONES=\"YES\"\n"
+ end
+ end
+
+ if deploy_in_folder
+ working_template[:one] <<
+ "VCENTER_VM_FOLDER=\"#{opts[:folder]}\"\n"
+ end
+
+ working_template[:one] <<
+ "VCENTER_TEMPLATE_NAME=\"#{selected[:name]}\"\n"
+
+ create(working_template[:one]) do |one_object, id|
+ res[:id] << id
+
+ type = { :object => 'template', :id => id }
+ error, template_disks, allocated_images =
+ template
+ .import_vcenter_disks(
+ vc_uuid,
+ dpool,
+ ipool,
+ type
+ )
+
+ if allocated_images
+ # rollback stack
+ allocated_images.reverse.each do |i|
+ @rollback.unshift(Raction.new(i, :delete))
+ end
+ end
+ raise error unless error.empty?
+
+ working_template[:one] << template_disks
+
+ if template_copy_ref
+ template_moref = template_copy_ref
+ else
+ template_moref = selected[:vcenter_ref]
+ end
+
+ opts_nics = {
+ :vi_client => @vi_client,
+ :vc_uuid => vc_uuid,
+ :npool => npool,
+ :hpool => hpool,
+ :vcenter => vcenter,
+ :template_moref => template_moref,
+ :vm_object => nil
+ }
+
+ error, template_nics, _ar_ids, allocated_nets =
+ template
+ .import_vcenter_nics(
+ opts_nics,
+ id,
+ dc
+ )
+
+ if allocated_nets
+ # rollback stack
+ allocated_nets.reverse.each do |n|
+ @rollback.unshift(Raction.new(n, :delete))
+ end
+ end
+ raise error unless error.empty?
+
+ working_template[:one] << template_nics
+ working_template[:one] << rp_opts(
+ opts[:type],
+ opts[:resourcepool]
+ )
+
+ one_object.update(working_template[:one])
+ end
+
+ res
+ end
+
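+ # XPath of the attribute that links an imported OpenNebula template to
+ # its vCenter template reference.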
+ def attr
+ 'TEMPLATE/VCENTER_TEMPLATE_REF'
+ end
+
+ end
+
+ end