vmpooler-provider-vsphere 1.4.0
- checksums.yaml +7 -0
- data/lib/vmpooler/providers/vsphere.rb +1173 -0
- data/lib/vmpooler-provider-vsphere/version.rb +5 -0
- metadata +212 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA256:
  metadata.gz: c8065ff117266c13945c80a47fd98b0b7a8a4fe3ec8018b4ea2edd384cc733de
  data.tar.gz: 7f6ef47c263526a44208b36dbbaed4fca5b9c6985d68ab968d9bf258a133c96e
SHA512:
  metadata.gz: 4c774b0ffd019c106b1d69cb11aafd0c8acddb566e339b195d8a3bad373844855c9943823268b6ac34b5ea312d39b237093c707aaa46b72d22208fa25005ecb7
  data.tar.gz: aaaa1c54f356a1039a13c6c2b59b509f7a070349eeb02fe900acf527ae2b9bbd13f7819ec9a9283d53429137ec279c3f2dbcb24719fcbce058d3a71894283372
data/lib/vmpooler/providers/vsphere.rb
ADDED
@@ -0,0 +1,1173 @@
# frozen_string_literal: true

require 'bigdecimal'
require 'bigdecimal/util'
require 'rbvmomi'
require 'vmpooler/providers/base'

module Vmpooler
  class PoolManager
    class Provider
      class VSphere < Vmpooler::PoolManager::Provider::Base
        # The connection_pool method is normally used only for testing
        attr_reader :connection_pool

        def initialize(config, logger, metrics, redis_connection_pool, name, options)
          super(config, logger, metrics, redis_connection_pool, name, options)

          task_limit = global_config[:config].nil? || global_config[:config]['task_limit'].nil? ? 10 : global_config[:config]['task_limit'].to_i
          # The default connection pool size is:
          # Whatever is biggest from:
          #   - How many pools this provider services
          #   - Maximum number of cloning tasks allowed
          #   - Need at least 2 connections so that a pool can have inventory functions performed while cloning etc.
          default_connpool_size = [provided_pools.count, task_limit, 2].max
          connpool_size = provider_config['connection_pool_size'].nil? ? default_connpool_size : provider_config['connection_pool_size'].to_i
          # The default connection pool timeout should be quite large - 60 seconds
          connpool_timeout = provider_config['connection_pool_timeout'].nil? ? 60 : provider_config['connection_pool_timeout'].to_i
          logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}")
          @connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new(
            metrics: metrics,
            connpool_type: 'provider_connection_pool',
            connpool_provider: name,
            size: connpool_size,
            timeout: connpool_timeout
          ) do
            logger.log('d', "[#{name}] Connection Pool - Creating a connection object")
            # Need to wrap the vSphere connection object in another object. The generic connection pooler will preserve
            # the object reference for the connection, which means it cannot "reconnect" by creating an entirely new connection
            # object. Instead by wrapping it in a Hash, the Hash object reference itself never changes but the content of the
            # Hash can change, and is preserved across invocations.
            new_conn = connect_to_vsphere
            { connection: new_conn }
          end
          @provider_hosts = {}
          @provider_hosts_lock = Mutex.new
          @redis = redis_connection_pool
        end
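
        # Illustrative sketch of the provider settings read above and in
        # connect_to_vsphere. The key names come from the lookups in this file;
        # the values and the surrounding config layout are hypothetical:
        #
        #   :providers:
        #     :vsphere:
        #       server: 'vcenter.example.com'
        #       username: 'vmpooler-svc'
        #       password: 'secret'
        #       insecure: false
        #       connection_pool_size: 10
        #       connection_pool_timeout: 60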

        # name of the provider class
        def name
          'vsphere'
        end

        def folder_configured?(folder_title, base_folder, configured_folders, whitelist)
          return true if whitelist&.include?(folder_title)
          return false unless configured_folders.keys.include?(folder_title)
          return false unless configured_folders[folder_title] == base_folder

          true
        end

        def destroy_vm_and_log(vm_name, vm_object, pool, data_ttl)
          try = 0 if try.nil?
          max_tries = 3
          @redis.with_metrics do |redis|
            redis.multi
            redis.srem("vmpooler__completed__#{pool}", vm_name)
            redis.hdel("vmpooler__active__#{pool}", vm_name)
            redis.hset("vmpooler__vm__#{vm_name}", 'destroy', Time.now)

            # Auto-expire metadata key
            redis.expire("vmpooler__vm__#{vm_name}", (data_ttl * 60 * 60))
            redis.exec
          end

          start = Time.now

          if vm_object.is_a? RbVmomi::VIM::Folder
            logger.log('s', "[!] [#{pool}] '#{vm_name}' is a folder, bailing on destroying")
            raise('Expected VM, but received a folder object')
          end
          vm_object.PowerOffVM_Task.wait_for_completion if vm_object.runtime&.powerState && vm_object.runtime.powerState == 'poweredOn'
          vm_object.Destroy_Task.wait_for_completion

          finish = format('%<time>.2f', time: Time.now - start)
          logger.log('s', "[-] [#{pool}] '#{vm_name}' destroyed in #{finish} seconds")
          metrics.timing("destroy.#{pool}", finish)
        rescue RuntimeError
          raise
        rescue StandardError => e
          try += 1
          logger.log('s', "[!] [#{pool}] failed to destroy '#{vm_name}' with an error: #{e}")
          try >= max_tries ? raise : retry
        end

        def destroy_folder_and_children(folder_object)
          vms = {}
          data_ttl = $config[:redis]['data_ttl'].to_i
          folder_name = folder_object.name
          unless folder_object.childEntity.count == 0
            folder_object.childEntity.each do |vm|
              vms[vm.name] = vm
            end

            vms.each do |vm_name, vm_object|
              destroy_vm_and_log(vm_name, vm_object, folder_name, data_ttl)
            end
          end
          destroy_folder(folder_object)
        end

        def destroy_folder(folder_object)
          try = 0 if try.nil?
          max_tries = 3
          logger.log('s', "[-] [#{folder_object.name}] removing unconfigured folder")
          folder_object.Destroy_Task.wait_for_completion
        rescue StandardError
          try += 1
          try >= max_tries ? raise : retry
        end

        def purge_unconfigured_folders(base_folders, configured_folders, whitelist)
          @connection_pool.with_metrics do |pool_object|
            connection = ensured_vsphere_connection(pool_object)

            base_folders.each do |base_folder|
              folder_children = get_folder_children(base_folder, connection)
              next if folder_children.empty?

              folder_children.each do |folder_hash|
                folder_hash.each do |folder_title, folder_object|
                  destroy_folder_and_children(folder_object) unless folder_configured?(folder_title, base_folder, configured_folders, whitelist)
                end
              end
            end
          end
        end

        def get_folder_children(folder_name, connection)
          folders = []

          propSpecs = { # rubocop:disable Naming/VariableName
            entity: self,
            inventoryPath: folder_name
          }
          folder_object = connection.searchIndex.FindByInventoryPath(propSpecs) # rubocop:disable Naming/VariableName

          return folders if folder_object.nil?

          folder_object.childEntity.each do |folder|
            next unless folder.is_a? RbVmomi::VIM::Folder

            folders << { folder.name => folder }
          end

          folders
        end

        def vms_in_pool(pool_name)
          vms = []
          @connection_pool.with_metrics do |pool_object|
            connection = ensured_vsphere_connection(pool_object)
            folder_object = find_vm_folder(pool_name, connection)

            return vms if folder_object.nil?

            folder_object.childEntity.each do |vm|
              vms << { 'name' => vm.name } if vm.is_a? RbVmomi::VIM::VirtualMachine
            end
          end
          vms
        end
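
        # The host-selection methods below share a target hash (@provider_hosts)
        # keyed by "<datacenter>_<cluster>". Based on how it is written and read in
        # this file, each entry roughly looks like this (a sketch, not authoritative;
        # the architecture keys are whatever get_host_cpu_arch_version returns):
        #   {
        #     'hosts' => ['esx1.example.com', ...],
        #     'architectures' => { 'v4' => ['esx1.example.com', ...] },
        #     'check_time_finished' => Time
        #   }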

        def select_target_hosts(target, cluster, datacenter)
          percentage = 100
          dc = "#{datacenter}_#{cluster}"
          @provider_hosts_lock.synchronize do
            begin
              target[dc] = {} unless target.key?(dc)
              target[dc]['checking'] = true
              hosts_hash = find_least_used_hosts(cluster, datacenter, percentage)
              target[dc] = hosts_hash
            rescue StandardError
              target[dc] = {}
              raise
            ensure
              target[dc]['check_time_finished'] = Time.now
            end
          end
        end

        def run_select_hosts(pool_name, target)
          now = Time.now
          max_age = @config[:config]['host_selection_max_age'] || 60
          loop_delay = 5
          datacenter = get_target_datacenter_from_config(pool_name)
          cluster = get_target_cluster_from_config(pool_name)
          raise("cluster for pool #{pool_name} cannot be identified") if cluster.nil?
          raise("datacenter for pool #{pool_name} cannot be identified") if datacenter.nil?

          dc = "#{datacenter}_#{cluster}"
          unless target.key?(dc)
            select_target_hosts(target, cluster, datacenter)
            return
          end
          wait_for_host_selection(dc, target, loop_delay, max_age) if target[dc].key?('checking')
          select_target_hosts(target, cluster, datacenter) if target[dc].key?('check_time_finished') && now - target[dc]['check_time_finished'] > max_age
        end

        def wait_for_host_selection(dc, target, maxloop = 0, loop_delay = 1, max_age = 60)
          loop_count = 1
          until target.key?(dc) && target[dc].key?('check_time_finished')
            sleep(loop_delay)
            unless maxloop == 0
              break if loop_count >= maxloop

              loop_count += 1
            end
          end
          return unless target[dc].key?('check_time_finished')

          loop_count = 1
          while Time.now - target[dc]['check_time_finished'] > max_age
            sleep(loop_delay)
            unless maxloop == 0
              break if loop_count >= maxloop

              loop_count += 1
            end
          end
        end

        def select_next_host(pool_name, target, architecture = nil)
          datacenter = get_target_datacenter_from_config(pool_name)
          cluster = get_target_cluster_from_config(pool_name)
          raise("cluster for pool #{pool_name} cannot be identified") if cluster.nil?
          raise("datacenter for pool #{pool_name} cannot be identified") if datacenter.nil?

          dc = "#{datacenter}_#{cluster}"
          @provider_hosts_lock.synchronize do
            if architecture
              raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory") unless target[dc].key?('architectures')

              host = target[dc]['architectures'][architecture].shift
              target[dc]['architectures'][architecture] << host
              if target[dc]['hosts'].include?(host)
                target[dc]['hosts'].delete(host)
                target[dc]['hosts'] << host
              end
            else
              raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory") unless target[dc].key?('hosts')

              host = target[dc]['hosts'].shift
              target[dc]['hosts'] << host
              target[dc]['architectures'].each do |arch|
                target[dc]['architectures'][arch] = arch.partition { |v| v != host }.flatten if arch.include?(host)
              end
            end

            return host
          end
        end

        def vm_in_target?(pool_name, parent_host, architecture, target)
          datacenter = get_target_datacenter_from_config(pool_name)
          cluster = get_target_cluster_from_config(pool_name)
          raise("cluster for pool #{pool_name} cannot be identified") if cluster.nil?
          raise("datacenter for pool #{pool_name} cannot be identified") if datacenter.nil?

          dc = "#{datacenter}_#{cluster}"
          raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory") unless target[dc].key?('hosts')
          return true if target[dc]['hosts'].include?(parent_host)
          return true if target[dc]['architectures'][architecture].include?(parent_host)

          false
        end

        def get_vm(pool_name, vm_name)
          vm_hash = nil
          @connection_pool.with_metrics do |pool_object|
            connection = ensured_vsphere_connection(pool_object)
            vm_object = find_vm(pool_name, vm_name, connection)
            return vm_hash if vm_object.nil?

            vm_hash = generate_vm_hash(vm_object, pool_name)
          end
          vm_hash
        end

        def create_vm(pool_name, new_vmname)
          pool = pool_config(pool_name)
          raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?

          vm_hash = nil
          @connection_pool.with_metrics do |pool_object|
            connection = ensured_vsphere_connection(pool_object)
            # Assume all pool config is valid i.e. not missing
            template_path = pool['template']
            target_folder_path = pool['folder']
            target_datastore = pool['datastore']
            target_datacenter_name = get_target_datacenter_from_config(pool_name)

            # Get the template VM object
            raise("Pool #{pool_name} did not specify a full path for the template for the provider #{name}") unless valid_template_path? template_path

            template_vm_object = find_template_vm(pool, connection)

            extra_config = [
              { key: 'guestinfo.hostname', value: new_vmname }
            ]

            if pool.key?('snapshot_mainMem_ioBlockPages')
              ioblockpages = pool['snapshot_mainMem_ioBlockPages']
              extra_config.push(
                { key: 'mainMem.ioBlockPages', value: ioblockpages }
              )
            end
            if pool.key?('snapshot_mainMem_iowait')
              iowait = pool['snapshot_mainMem_iowait']
              extra_config.push(
                { key: 'mainMem.iowait', value: iowait }
              )
            end

            # Annotate with creation time, origin template, etc.
            # Add extraconfig options that can be queried by vmtools
            config_spec = create_config_spec(new_vmname, template_path, extra_config)

            # Check if alternate network configuration is specified and add configuration
            if pool.key?('network')
              template_vm_network_device = template_vm_object.config.hardware.device.grep(RbVmomi::VIM::VirtualEthernetCard).first
              network_name = pool['network']
              network_device = set_network_device(target_datacenter_name, template_vm_network_device, network_name, connection)
              config_spec.deviceChange = [{ operation: 'edit', device: network_device }]
            end

            # Put the VM in the specified folder and resource pool
            relocate_spec = create_relocate_spec(target_datastore, target_datacenter_name, pool_name, connection)

            # Create a clone spec
            clone_spec = create_clone_spec(relocate_spec, config_spec)

            begin
              vm_target_folder = find_vm_folder(pool_name, connection)
              vm_target_folder ||= create_folder(connection, target_folder_path, target_datacenter_name) if @config[:config].key?('create_folders') && (@config[:config]['create_folders'] == true)
            rescue StandardError
              if @config[:config].key?('create_folders') && (@config[:config]['create_folders'] == true)
                vm_target_folder = create_folder(connection, target_folder_path, target_datacenter_name)
              else
                raise
              end
            end
            raise ArgumentError, "Cannot find the configured folder for #{pool_name} #{target_folder_path}" unless vm_target_folder

            # Create the new VM
            new_vm_object = template_vm_object.CloneVM_Task(
              folder: vm_target_folder,
              name: new_vmname,
              spec: clone_spec
            ).wait_for_completion

            vm_hash = generate_vm_hash(new_vm_object, pool_name)
          end
          vm_hash
        end

        def create_config_spec(vm_name, template_name, extra_config)
          RbVmomi::VIM.VirtualMachineConfigSpec(
            annotation: JSON.pretty_generate(
              name: vm_name,
              created_by: provider_config['username'],
              base_template: template_name,
              creation_timestamp: Time.now.utc
            ),
            extraConfig: extra_config
          )
        end

        def create_relocate_spec(target_datastore, target_datacenter_name, pool_name, connection)
          pool = pool_config(pool_name)
          target_cluster_name = get_target_cluster_from_config(pool_name)

          relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
            datastore: find_datastore(target_datastore, connection, target_datacenter_name),
            diskMoveType: get_disk_backing(pool)
          )
          manage_host_selection = @config[:config]['manage_host_selection'] if @config[:config].key?('manage_host_selection')
          if manage_host_selection
            run_select_hosts(pool_name, @provider_hosts)
            target_host = select_next_host(pool_name, @provider_hosts)
            host_object = find_host_by_dnsname(connection, target_host)
            relocate_spec.host = host_object
          else
            # Choose a cluster/host to place the new VM on
            target_cluster_object = find_cluster(target_cluster_name, connection, target_datacenter_name)
            relocate_spec.pool = target_cluster_object.resourcePool
          end
          relocate_spec
        end

        def create_clone_spec(relocate_spec, config_spec)
          RbVmomi::VIM.VirtualMachineCloneSpec(
            location: relocate_spec,
            config: config_spec,
            powerOn: true,
            template: false
          )
        end

        def set_network_device(datacenter_name, template_vm_network_device, network_name, connection)
          # Retrieve network object
          datacenter = connection.serviceInstance.find_datacenter(datacenter_name)
          new_network = datacenter.network.find { |n| n.name == network_name }

          raise("Cannot find network #{network_name} in datacenter #{datacenter_name}") unless new_network

          # Determine network device type
          # All possible device type options here: https://vdc-download.vmware.com/vmwb-repository/dcr-public/98d63b35-d822-47fe-a87a-ddefd469df06/2e3c7b58-f2bd-486e-8bb1-a75eb0640bee/doc/vim.vm.device.VirtualEthernetCard.html
          network_device =
            if template_vm_network_device.instance_of? RbVmomi::VIM::VirtualVmxnet2
              RbVmomi::VIM.VirtualVmxnet2
            elsif template_vm_network_device.instance_of? RbVmomi::VIM::VirtualVmxnet3
              RbVmomi::VIM.VirtualVmxnet3
            elsif template_vm_network_device.instance_of? RbVmomi::VIM::VirtualE1000
              RbVmomi::VIM.VirtualE1000
            elsif template_vm_network_device.instance_of? RbVmomi::VIM::VirtualE1000e
              RbVmomi::VIM.VirtualE1000e
            elsif template_vm_network_device.instance_of? RbVmomi::VIM::VirtualSriovEthernetCard
              RbVmomi::VIM.VirtualSriovEthernetCard
            else
              RbVmomi::VIM.VirtualPCNet32
            end

          # Set up new network device attributes
          network_device.key = template_vm_network_device.key
          network_device.deviceInfo = RbVmomi::VIM.Description(
            label: template_vm_network_device.deviceInfo.label,
            summary: network_name
          )
          network_device.backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo(
            deviceName: network_name,
            network: new_network,
            useAutoDetect: false
          )
          network_device.addressType = 'assigned'
          network_device.connectable = RbVmomi::VIM.VirtualDeviceConnectInfo(
            allowGuestControl: true,
            startConnected: true,
            connected: true
          )
          network_device
        end

        def create_disk(pool_name, vm_name, disk_size)
          pool = pool_config(pool_name)
          raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?

          datastore_name = pool['datastore']
          raise("Pool #{pool_name} does not have a datastore defined for the provider #{name}") if datastore_name.nil?

          @connection_pool.with_metrics do |pool_object|
            connection = ensured_vsphere_connection(pool_object)
            vm_object = find_vm(pool_name, vm_name, connection)
            raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?

            add_disk(vm_object, disk_size, datastore_name, connection, get_target_datacenter_from_config(pool_name))
          end
          true
        end

        def create_snapshot(pool_name, vm_name, new_snapshot_name)
          @connection_pool.with_metrics do |pool_object|
            connection = ensured_vsphere_connection(pool_object)
            vm_object = find_vm(pool_name, vm_name, connection)
            raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?

            old_snap = find_snapshot(vm_object, new_snapshot_name)
            raise("Snapshot #{new_snapshot_name} for VM #{vm_name} in pool #{pool_name} already exists for the provider #{name}") unless old_snap.nil?

            vm_object.CreateSnapshot_Task(
              name: new_snapshot_name,
              description: 'vmpooler',
              memory: true,
              quiesce: true
            ).wait_for_completion
          end
          true
        end

        def revert_snapshot(pool_name, vm_name, snapshot_name)
          @connection_pool.with_metrics do |pool_object|
            connection = ensured_vsphere_connection(pool_object)
            vm_object = find_vm(pool_name, vm_name, connection)
            raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?

            snapshot_object = find_snapshot(vm_object, snapshot_name)
            raise("Snapshot #{snapshot_name} for VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if snapshot_object.nil?

            snapshot_object.RevertToSnapshot_Task.wait_for_completion
          end
          true
        end

        def destroy_vm(pool_name, vm_name)
          @connection_pool.with_metrics do |pool_object|
            connection = ensured_vsphere_connection(pool_object)
            vm_object = find_vm(pool_name, vm_name, connection)
            # If a VM doesn't exist then it is effectively deleted
            return true if vm_object.nil?

            # Poweroff the VM if it's running
            vm_object.PowerOffVM_Task.wait_for_completion if vm_object.runtime&.powerState && vm_object.runtime.powerState == 'poweredOn'

            # Kill it with fire
            vm_object.Destroy_Task.wait_for_completion
          end
          true
        end

        def vm_ready?(_pool_name, vm_name)
          begin
            open_socket(vm_name, global_config[:config]['domain'])
          rescue StandardError => _e
            return false
          end

          true
        end

        # VSphere Helper methods

        def get_target_cluster_from_config(pool_name)
          pool = pool_config(pool_name)
          return nil if pool.nil?

          return pool['clone_target'] unless pool['clone_target'].nil?
          return global_config[:config]['clone_target'] unless global_config[:config]['clone_target'].nil?

          nil
        end

        def get_target_datacenter_from_config(pool_name)
          pool = pool_config(pool_name)
          return nil if pool.nil?

          return pool['datacenter'] unless pool['datacenter'].nil?
          return provider_config['datacenter'] unless provider_config['datacenter'].nil?

          nil
        end

        # Return a hash of VM data
        # Provides vmname, hostname, template, poolname, boottime and powerstate information
        def generate_vm_hash(vm_object, pool_name)
          pool_configuration = pool_config(pool_name)
          return nil if pool_configuration.nil?

          hostname = vm_object.summary.guest.hostName if vm_object.summary&.guest && vm_object.summary.guest.hostName
          boottime = vm_object.runtime.bootTime if vm_object.runtime&.bootTime
          powerstate = vm_object.runtime.powerState if vm_object.runtime&.powerState

          {
            'name' => vm_object.name,
            'hostname' => hostname,
            'template' => pool_configuration['template'],
            'poolname' => pool_name,
            'boottime' => boottime,
            'powerstate' => powerstate
          }
        end

        # vSphere helper methods
        ADAPTER_TYPE = 'lsiLogic'
        DISK_TYPE = 'thin'
        DISK_MODE = 'persistent'

        def ensured_vsphere_connection(connection_pool_object)
          connection_pool_object[:connection] = connect_to_vsphere unless vsphere_connection_ok?(connection_pool_object[:connection])
          connection_pool_object[:connection]
        end

        def vsphere_connection_ok?(connection)
          _result = connection.serviceInstance.CurrentTime
          true
        rescue StandardError
          false
        end

        def connect_to_vsphere
          max_tries = global_config[:config]['max_tries'] || 3
          retry_factor = global_config[:config]['retry_factor'] || 10
          try = 1
          begin
            connection = RbVmomi::VIM.connect host: provider_config['server'],
                                              user: provider_config['username'],
                                              password: provider_config['password'],
                                              insecure: provider_config['insecure'] || false
            metrics.increment('connect.open')
            connection
          rescue StandardError => e
            metrics.increment('connect.fail')
            raise e if try >= max_tries

            sleep(try * retry_factor)
            try += 1
            retry
          end
        end

        # This should supersede the open_socket method in the Pool Manager
        def open_socket(host, domain = nil, timeout = 5, port = 22, &_block)
          Timeout.timeout(timeout) do
            target_host = host
            target_host = "#{host}.#{domain}" if domain
            sock = TCPSocket.new target_host, port
            begin
              yield sock if block_given?
            ensure
              sock.close
            end
          end
        end

        def get_vm_folder_path(vm_object)
          # This gives an array starting from the root Datacenters folder all the way to the VM
          # [ [Object, String], [Object, String ] ... ]
          # It's then reversed so that it now goes from the VM to the Datacenter
          full_path = vm_object.path.reverse

          # Find the Datacenter object
          dc_index = full_path.index { |p| p[0].is_a?(RbVmomi::VIM::Datacenter) }
          return nil if dc_index.nil?
          # The Datacenter should be at least 2 otherwise there's something
          # wrong with the array passed in
          # This is the minimum:
          # [ VM (0), VM ROOT FOLDER (1), DC (2)]
          return nil if dc_index <= 1

          # Remove the VM name (Starting position of 1 in the slice)
          # Up until the Root VM Folder of DataCenter Node (dc_index - 2)
          full_path = full_path.slice(1..dc_index - 2)

          # Reverse the array back to normal and
          # then convert the array of paths into a '/' separated string
          (full_path.reverse.map { |p| p[1] }).join('/')
        end

        def add_disk(vm, size, datastore, connection, datacentername)
          return false unless size.to_i > 0

          vmdk_datastore = find_datastore(datastore, connection, datacentername)
          raise("Datastore '#{datastore}' does not exist in datacenter '#{datacentername}'") if vmdk_datastore.nil?

          datacenter = connection.serviceInstance.find_datacenter(datacentername)
          controller = find_disk_controller(vm)
          disk_unit_number = find_disk_unit_number(vm, controller)
          disk_count = vm.config.hardware.device.grep(RbVmomi::VIM::VirtualDisk).count
          vmdk_file_name = "#{vm['name']}/#{vm['name']}_#{disk_count}.vmdk"

          vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
            capacityKb: size.to_i * 1024 * 1024,
            adapterType: ADAPTER_TYPE,
            diskType: DISK_TYPE
          )

          vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
            datastore: vmdk_datastore,
            diskMode: DISK_MODE,
            fileName: "[#{datastore}] #{vmdk_file_name}"
          )

          device = RbVmomi::VIM::VirtualDisk(
            backing: vmdk_backing,
            capacityInKB: size.to_i * 1024 * 1024,
            controllerKey: controller.key,
            key: -1,
            unitNumber: disk_unit_number
          )

          device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
            device: device,
            operation: RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
          )

          vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
            deviceChange: [device_config_spec]
          )

          connection.serviceContent.virtualDiskManager.CreateVirtualDisk_Task(
            datacenter: datacenter,
            name: "[#{datastore}] #{vmdk_file_name}",
            spec: vmdk_spec
          ).wait_for_completion

          vm.ReconfigVM_Task(spec: vm_config_spec).wait_for_completion

          true
        end

        def find_datastore(datastorename, connection, datacentername)
          datacenter = connection.serviceInstance.find_datacenter(datacentername)
          raise("Datacenter #{datacentername} does not exist") if datacenter.nil?

          datacenter.find_datastore(datastorename)
        end

        def find_device(vm, device_name)
          vm.config.hardware.device.each do |device|
            return device if device.deviceInfo.label == device_name
          end

          nil
        end

        def find_disk_controller(vm)
          devices = find_disk_devices(vm)

          devices.keys.sort.each do |device|
            return find_device(vm, devices[device]['device'].deviceInfo.label) if devices[device]['children'].length < 15
          end

          nil
        end

        def find_disk_devices(vm)
          devices = {}

          vm.config.hardware.device.each do |device|
            if device.is_a? RbVmomi::VIM::VirtualSCSIController
              if devices[device.controllerKey].nil?
                devices[device.key] = {}
                devices[device.key]['children'] = []
              end

              devices[device.key]['device'] = device
            end

            if device.is_a? RbVmomi::VIM::VirtualDisk
              if devices[device.controllerKey].nil?
                devices[device.controllerKey] = {}
                devices[device.controllerKey]['children'] = []
              end

              devices[device.controllerKey]['children'].push(device)
            end
          end

          devices
        end
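
        # Picks the lowest SCSI unit number (0-15) not already taken on the given
        # controller; the controller's own scsiCtlrUnitNumber is counted as used,
        # so the controller's reserved slot is never handed out to a new disk.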

        def find_disk_unit_number(vm, controller)
          used_unit_numbers = []
          available_unit_numbers = []

          devices = find_disk_devices(vm)

          devices.keys.sort.each do |c|
            next unless controller.key == devices[c]['device'].key

            used_unit_numbers.push(devices[c]['device'].scsiCtlrUnitNumber)
            devices[c]['children'].each do |disk|
              used_unit_numbers.push(disk.unitNumber)
            end
          end

          (0..15).each do |scsi_id|
            available_unit_numbers.push(scsi_id) if used_unit_numbers.grep(scsi_id).length <= 0
          end

          available_unit_numbers.min
        end

        # Finds a folder object by inventory path
        # Params:
        # +pool_name+:: the pool to find the folder for
        # +connection+:: the vsphere connection object
        # returns a ManagedObjectReference for the folder found or nil if not found
        def find_vm_folder(pool_name, connection)
          # Find a folder by its inventory path and return the object
          # Returns nil when the object found is not a folder
          pool_configuration = pool_config(pool_name)
          return nil if pool_configuration.nil?

          folder = pool_configuration['folder']
          datacenter = get_target_datacenter_from_config(pool_name)
          return nil if datacenter.nil?

          propSpecs = { # rubocop:disable Naming/VariableName
            entity: self,
            inventoryPath: "#{datacenter}/vm/#{folder}"
          }

          folder_object = connection.searchIndex.FindByInventoryPath(propSpecs) # rubocop:disable Naming/VariableName
          return nil unless folder_object.instance_of? RbVmomi::VIM::Folder

          folder_object
        end

        # Returns an array containing cumulative CPU and memory utilization of a host, and its object reference
        # Params:
        # +model+:: CPU arch version to match on
        # +limit+:: Hard limit for CPU or memory utilization beyond which a host is excluded for deployments
        # returns nil if one of these conditions is true:
        #    the model param is defined and cannot be found
        #    the host is in maintenance mode
        #    the host status is not 'green'
        #    the cpu or memory utilization is bigger than the limit param
        def get_host_utilization(host, model = nil, limit = 90)
          limit = @config[:config]['utilization_limit'] if @config[:config].key?('utilization_limit')
          return nil if model && !host_has_cpu_model?(host, model)
          return nil if host.runtime.inMaintenanceMode
          return nil unless host.overallStatus == 'green'
          return nil unless host.configIssue.empty?

          cpu_utilization = cpu_utilization_for host
          memory_utilization = memory_utilization_for host

          return nil if cpu_utilization.nil?
          return nil if cpu_utilization.to_d == 0.0.to_d
          return nil if memory_utilization.nil?
          return nil if memory_utilization.to_d == 0.0.to_d

          return nil if cpu_utilization > limit
          return nil if memory_utilization > limit

          [cpu_utilization, host]
        end

        def host_has_cpu_model?(host, model)
          get_host_cpu_arch_version(host) == model
        end
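
        # Derives an "architecture" label from the fifth whitespace-separated token
        # of the first CPU package description, e.g. the "v4" in a string such as
        # "Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz" (example string is illustrative).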

        def get_host_cpu_arch_version(host)
          cpu_model = host.hardware.cpuPkg[0].description
          cpu_model_parts = cpu_model.split
          cpu_model_parts[4]
        end

        def cpu_utilization_for(host)
          cpu_usage = host.summary.quickStats.overallCpuUsage
          return nil if cpu_usage.nil?

          cpu_size = host.summary.hardware.cpuMhz * host.summary.hardware.numCpuCores
          cpu_usage.fdiv(cpu_size) * 100
        end

        def memory_utilization_for(host)
          memory_usage = host.summary.quickStats.overallMemoryUsage
          return nil if memory_usage.nil?

          memory_size = host.summary.hardware.memorySize / 1024 / 1024
          memory_usage.fdiv(memory_size) * 100
        end

        def get_average_cluster_utilization(hosts)
          utilization_counts = hosts.map { |host| host[0] }
          utilization_counts.inject(:+) / hosts.count
        end

        def build_compatible_hosts_lists(hosts, percentage)
          hosts_with_arch_versions = hosts.map do |h|
            {
              'utilization' => h[0],
              'host_object' => h[1],
              'architecture' => get_host_cpu_arch_version(h[1])
            }
          end
          versions = hosts_with_arch_versions.map { |host| host['architecture'] }.uniq
          architectures = {}
          versions.each do |version|
            architectures[version] = []
          end

          hosts_with_arch_versions.each do |h|
            architectures[h['architecture']] << [h['utilization'], h['host_object'], h['architecture']]
          end

          versions.each do |version|
            targets = select_least_used_hosts(architectures[version], percentage)
            architectures[version] = targets
          end
          architectures
        end

        def select_least_used_hosts(hosts, percentage)
          raise('Provided hosts list to select_least_used_hosts is empty') if hosts.empty?

          average_utilization = get_average_cluster_utilization(hosts)
          least_used_hosts = []
          hosts.each do |host|
            least_used_hosts << host if host[0] <= average_utilization
          end
          hosts_to_select = (hosts.count * (percentage / 100.0)).to_int
          hosts_to_select = hosts.count - 1 if percentage == 100
          least_used_hosts.sort[0..hosts_to_select].map { |host| host[1].name }
        end

        def find_least_used_hosts(cluster, datacentername, percentage)
          @connection_pool.with_metrics do |pool_object|
            connection = ensured_vsphere_connection(pool_object)
            cluster_object = find_cluster(cluster, connection, datacentername)
            raise("Cluster #{cluster} cannot be found") if cluster_object.nil?

            target_hosts = get_cluster_host_utilization(cluster_object)
            raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory'") if target_hosts.empty?

            architectures = build_compatible_hosts_lists(target_hosts, percentage)
            least_used_hosts = select_least_used_hosts(target_hosts, percentage)
            {
              'hosts' => least_used_hosts,
              'architectures' => architectures
            }
          end
        end

        def find_host_by_dnsname(connection, dnsname)
          host_object = connection.searchIndex.FindByDnsName(dnsName: dnsname, vmSearch: false)
          return nil if host_object.nil?

          host_object
        end

        def find_least_used_host(cluster, connection, datacentername)
          cluster_object = find_cluster(cluster, connection, datacentername)
          target_hosts = get_cluster_host_utilization(cluster_object)
          raise("There is no host candidate in vcenter that meets all the required conditions, check that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory'") if target_hosts.empty?

          target_hosts.min[1]
        end

        def find_cluster(cluster, connection, datacentername)
          datacenter = connection.serviceInstance.find_datacenter(datacentername)
          raise("Datacenter #{datacentername} does not exist") if datacenter.nil?

          # In the event the cluster is not a direct descendent of the
          # datacenter, we use a ContainerView to leverage its recursive
          # search. This will find clusters which are, for example, in
          # folders under the datacenter. This will also find standalone
          # hosts which are not part of a cluster.
          cv = connection.serviceContent.viewManager.CreateContainerView(
            container: datacenter.hostFolder,
            type: ['ComputeResource', 'ClusterComputeResource'],
            recursive: true
          )
          cluster = cv.view.find { |cluster_object| cluster_object.name == cluster }
          cv.DestroyView
          cluster
        end

        def get_cluster_host_utilization(cluster, model = nil)
          cluster_hosts = []
          cluster.host.each do |host|
            host_usage = get_host_utilization(host, model)
            cluster_hosts << host_usage if host_usage
          end
          cluster_hosts
        end

        def find_least_used_vpshere_compatible_host(vm)
          source_host = vm.summary.runtime.host
          model = get_host_cpu_arch_version(source_host)
          cluster = source_host.parent
          target_hosts = get_cluster_host_utilization(cluster, model)
          raise("There is no host candidate in vcenter that meets all the required conditions, check that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory'") if target_hosts.empty?

          target_host = target_hosts.min[1]
          [target_host, target_host.name]
        end

        def find_snapshot(vm, snapshotname)
          get_snapshot_list(vm.snapshot.rootSnapshotList, snapshotname) if vm.snapshot
        end

        def build_propSpecs(datacenter, folder, vmname) # rubocop:disable Naming/MethodName
          {
            :entity => self,
            :inventoryPath => "#{datacenter}/vm/#{folder}/#{vmname}"
          }
        end

        def find_vm(pool_name, vmname, connection)
          # Find a VM by its inventory path and return the VM object
          # Returns nil when a VM, or pool configuration, cannot be found
          pool_configuration = pool_config(pool_name)
          return nil if pool_configuration.nil?

          folder = pool_configuration['folder']
          datacenter = get_target_datacenter_from_config(pool_name)
          return nil if datacenter.nil?

          propSpecs = { # rubocop:disable Naming/VariableName
            entity: self,
            inventoryPath: "#{datacenter}/vm/#{folder}/#{vmname}"
          }

          connection.searchIndex.FindByInventoryPath(propSpecs) # rubocop:disable Naming/VariableName
        end

        def get_base_vm_container_from(connection)
          view_manager = connection.serviceContent.viewManager
          view_manager.CreateContainerView(
            container: connection.serviceContent.rootFolder,
            recursive: true,
            type: ['VirtualMachine']
          )
        end
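
        # Depth-first search of a snapshot tree: returns the snapshot object of the
        # first node whose name matches, or nil if no node in the tree matches.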

        def get_snapshot_list(tree, snapshotname)
          snapshot = nil

          tree.each do |child|
            if child.name == snapshotname
              snapshot ||= child.snapshot
            else
              snapshot ||= get_snapshot_list(child.childSnapshotList, snapshotname)
            end
          end

          snapshot
        end

        def get_vm_details(pool_name, vm_name, connection)
          vm_object = find_vm(pool_name, vm_name, connection)
          return nil if vm_object.nil?

          parent_host_object = vm_object.summary.runtime.host if vm_object.summary&.runtime && vm_object.summary.runtime.host
          raise('Unable to determine which host the VM is running on') if parent_host_object.nil?

          parent_host = parent_host_object.name
          architecture = get_host_cpu_arch_version(parent_host_object)
          {
            'host_name' => parent_host,
            'object' => vm_object,
            'architecture' => architecture
          }
        end

        def migration_enabled?(config)
          migration_limit = config[:config]['migration_limit']
          return false unless migration_limit.is_a? Integer
          return true if migration_limit > 0

          false
        end

        def migrate_vm(pool_name, vm_name)
          @connection_pool.with_metrics do |pool_object|
            begin
              connection = ensured_vsphere_connection(pool_object)
              vm_hash = get_vm_details(pool_name, vm_name, connection)
              @redis.with_metrics do |redis|
                redis.hset("vmpooler__vm__#{vm_name}", 'host', vm_hash['host_name'])
                migration_count = redis.scard('vmpooler__migration')
                migration_limit = @config[:config]['migration_limit'] if @config[:config].key?('migration_limit')
                if migration_enabled? @config
                  if migration_count >= migration_limit
                    logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}. No migration will be evaluated since the migration_limit has been reached")
                    break
                  end
                  run_select_hosts(pool_name, @provider_hosts)
                  if vm_in_target?(pool_name, vm_hash['host_name'], vm_hash['architecture'], @provider_hosts)
                    logger.log('s', "[ ] [#{pool_name}] No migration required for '#{vm_name}' running on #{vm_hash['host_name']}")
                  else
                    migrate_vm_to_new_host(pool_name, vm_name, vm_hash, connection)
                  end
                else
                  logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}")
                end
              end
            rescue StandardError
              logger.log('s', "[!] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}")
              raise
            end
          end
        end

        def migrate_vm_to_new_host(pool_name, vm_name, vm_hash, connection)
          @redis.with_metrics do |redis|
            redis.sadd('vmpooler__migration', vm_name)
          end
          target_host_name = select_next_host(pool_name, @provider_hosts, vm_hash['architecture'])
          target_host_object = find_host_by_dnsname(connection, target_host_name)
          finish = migrate_vm_and_record_timing(pool_name, vm_name, vm_hash, target_host_object, target_host_name)
          @redis.with_metrics do |redis|
            redis.multi
            redis.hset("vmpooler__vm__#{vm_name}", 'host', target_host_name)
            redis.hset("vmpooler__vm__#{vm_name}", 'migrated', true)
            redis.exec
          end
          logger.log('s', "[>] [#{pool_name}] '#{vm_name}' migrated from #{vm_hash['host_name']} to #{target_host_name} in #{finish} seconds")
        ensure
          @redis.with_metrics do |redis|
            redis.srem('vmpooler__migration', vm_name)
          end
        end

        def migrate_vm_and_record_timing(pool_name, vm_name, vm_hash, target_host_object, dest_host_name)
          start = Time.now
          migrate_vm_host(vm_hash['object'], target_host_object)
          finish = format('%<time>.2f', time: Time.now - start)
          metrics.timing("migrate.#{pool_name}", finish)
          metrics.increment("migrate_from.#{vm_hash['host_name']}")
          metrics.increment("migrate_to.#{dest_host_name}")
          @redis.with_metrics do |redis|
            checkout_to_migration = format('%<time>.2f', time: Time.now - Time.parse(redis.hget("vmpooler__vm__#{vm_name}", 'checkout')))
            redis.multi
            redis.hset("vmpooler__vm__#{vm_name}", 'migration_time', finish)
            redis.hset("vmpooler__vm__#{vm_name}", 'checkout_to_migration', checkout_to_migration)
            redis.exec
          end
          finish
        end

        def migrate_vm_host(vm_object, host)
          relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
          vm_object.RelocateVM_Task(spec: relospec).wait_for_completion
        end

        def create_folder(connection, new_folder, datacenter)
          dc = connection.serviceInstance.find_datacenter(datacenter)
          folder_object = dc.vmFolder.traverse(new_folder, RbVmomi::VIM::Folder, true)
          raise("Cannot create folder #{new_folder}") if folder_object.nil?

          folder_object
        end

        def find_template_vm(pool, connection)
          datacenter = get_target_datacenter_from_config(pool['name'])
          raise('cannot find datacenter') if datacenter.nil?

          propSpecs = { # rubocop:disable Naming/VariableName
            entity: self,
            inventoryPath: "#{datacenter}/vm/#{pool['template']}"
          }

          template_vm_object = connection.searchIndex.FindByInventoryPath(propSpecs) # rubocop:disable Naming/VariableName
          raise("Pool #{pool['name']} specifies a template VM of #{pool['template']} which does not exist for the provider #{name}") if template_vm_object.nil?

          template_vm_object
        end

        def create_template_delta_disks(pool)
          @connection_pool.with_metrics do |pool_object|
            connection = ensured_vsphere_connection(pool_object)
            template_vm_object = find_template_vm(pool, connection)

            template_vm_object.add_delta_disk_layer_on_all_disks
          end
        end
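
        # A template path must contain at least one '/' and must not start or end
        # with one: 'Templates/debian-10-x86_64' passes, while 'debian-10-x86_64',
        # '/debian-10-x86_64' and 'Templates/' are rejected (example names are illustrative).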

        def valid_template_path?(template)
          return false unless template.include?('/')
          return false if template[0] == '/'
          return false if template[-1] == '/'

          true
        end

        def get_disk_backing(pool)
          return :moveChildMostDiskBacking if linked_clone?(pool)

          :moveAllDiskBackingsAndConsolidate
        end
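
        # Linked-clone precedence, as implemented below: an explicit
        # create_linked_clone: false on the pool wins, then a truthy pool-level
        # create_linked_clone, then the global create_linked_clones setting.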

        def linked_clone?(pool)
          return if pool['create_linked_clone'] == false
          return true if pool['create_linked_clone']
          return true if @config[:config]['create_linked_clones']
        end
      end
    end
  end
end
metadata
ADDED
@@ -0,0 +1,212 @@
--- !ruby/object:Gem::Specification
name: vmpooler-provider-vsphere
version: !ruby/object:Gem::Version
  version: 1.4.0
platform: ruby
authors:
- Puppet
autorequire:
bindir: bin
cert_chain: []
date: 2021-12-08 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: rbvmomi
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '2.1'
    - - "<"
      - !ruby/object:Gem::Version
        version: '4.0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '2.1'
    - - "<"
      - !ruby/object:Gem::Version
        version: '4.0'
- !ruby/object:Gem::Dependency
  name: vmpooler
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '2.0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '2.0'
- !ruby/object:Gem::Dependency
  name: climate_control
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.2.0
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.2.0
- !ruby/object:Gem::Dependency
  name: mock_redis
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.17.0
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.17.0
- !ruby/object:Gem::Dependency
  name: pry
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
- !ruby/object:Gem::Dependency
  name: rack-test
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0.6'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0.6'
- !ruby/object:Gem::Dependency
  name: rspec
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '3.2'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '3.2'
- !ruby/object:Gem::Dependency
  name: rubocop
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 1.1.0
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: 1.1.0
- !ruby/object:Gem::Dependency
  name: simplecov
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.11.2
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 0.11.2
- !ruby/object:Gem::Dependency
  name: thor
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
    - - ">="
      - !ruby/object:Gem::Version
        version: 1.0.1
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
    - - ">="
      - !ruby/object:Gem::Version
        version: 1.0.1
- !ruby/object:Gem::Dependency
  name: yarjuf
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '2.0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '2.0'
description:
email:
- support@puppet.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- lib/vmpooler-provider-vsphere/version.rb
- lib/vmpooler/providers/vsphere.rb
homepage: https://github.com/puppetlabs/vmpooler-provider-vsphere
licenses:
- Apache-2.0
metadata: {}
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: 2.3.0
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubyforge_project:
rubygems_version: 2.7.6.2
signing_key:
specification_version: 4
summary: VMware provider for VMPooler
test_files: []
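
To pull this provider into a vmpooler deployment it can be declared as a gem dependency, for example in a Gemfile (illustrative):

gem 'vmpooler-provider-vsphere', '~> 1.4'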