vmpooler 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/bin/vmpooler +54 -0
- data/lib/vmpooler.rb +161 -0
- data/lib/vmpooler/api.rb +53 -0
- data/lib/vmpooler/api/dashboard.rb +143 -0
- data/lib/vmpooler/api/helpers.rb +431 -0
- data/lib/vmpooler/api/reroute.rb +71 -0
- data/lib/vmpooler/api/v1.rb +938 -0
- data/lib/vmpooler/dashboard.rb +14 -0
- data/lib/vmpooler/dummy_statsd.rb +20 -0
- data/lib/vmpooler/generic_connection_pool.rb +53 -0
- data/lib/vmpooler/graphite.rb +42 -0
- data/lib/vmpooler/logger.rb +22 -0
- data/lib/vmpooler/pool_manager.rb +1029 -0
- data/lib/vmpooler/providers.rb +7 -0
- data/lib/vmpooler/providers/base.rb +231 -0
- data/lib/vmpooler/providers/dummy.rb +402 -0
- data/lib/vmpooler/providers/vsphere.rb +929 -0
- data/lib/vmpooler/public/bootstrap.min.css +5 -0
- data/lib/vmpooler/public/img/bg.png +0 -0
- data/lib/vmpooler/public/img/logo.gif +0 -0
- data/lib/vmpooler/public/img/spinner.svg +38 -0
- data/lib/vmpooler/public/img/subtle_dots.png +0 -0
- data/lib/vmpooler/public/img/textured_paper.png +0 -0
- data/lib/vmpooler/public/lib/bootstrap.min.js +7 -0
- data/lib/vmpooler/public/lib/d3.min.js +5 -0
- data/lib/vmpooler/public/lib/dashboard.js +738 -0
- data/lib/vmpooler/public/lib/jquery.min.js +4 -0
- data/lib/vmpooler/public/vmpooler.css +125 -0
- data/lib/vmpooler/statsd.rb +37 -0
- data/lib/vmpooler/version.rb +4 -0
- data/lib/vmpooler/views/dashboard.erb +63 -0
- data/lib/vmpooler/views/layout.erb +48 -0
- metadata +218 -0
@@ -0,0 +1,929 @@
|
|
1
|
+
module Vmpooler
|
2
|
+
class PoolManager
|
3
|
+
class Provider
|
4
|
+
class VSphere < Vmpooler::PoolManager::Provider::Base
|
5
|
+
# The connection_pool method is normally used only for testing
|
6
|
+
attr_reader :connection_pool
|
7
|
+
|
8
|
+
# Sets up the vSphere provider: builds a GenericConnectionPool whose entries
# are Hash-wrapped RbVmomi connections, plus per-provider host-selection state.
#
# @param config  [Hash]   global vmpooler configuration
# @param logger  [Object] logger responding to #log(level, message)
# @param metrics [Object] metrics sink responding to #increment
# @param name    [String] provider instance name, used in config keys and logs
# @param options [Hash]   provider options passed straight through to Base
def initialize(config, logger, metrics, name, options)
  super(config, logger, metrics, name, options)

  # Maximum number of concurrent clone tasks; defaults to 10 when unset.
  task_limit = global_config[:config].nil? || global_config[:config]['task_limit'].nil? ? 10 : global_config[:config]['task_limit'].to_i
  # The default connection pool size is:
  # Whatever is biggest from:
  #   - How many pools this provider services
  #   - Maximum number of cloning tasks allowed
  #   - Need at least 2 connections so that a pool can have inventory functions performed while cloning etc.
  default_connpool_size = [provided_pools.count, task_limit, 2].max
  connpool_size = provider_config['connection_pool_size'].nil? ? default_connpool_size : provider_config['connection_pool_size'].to_i
  # The default connection pool timeout should be quite large - 60 seconds
  connpool_timeout = provider_config['connection_pool_timeout'].nil? ? 60 : provider_config['connection_pool_timeout'].to_i
  logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}")
  @connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new(
    metrics: metrics,
    metric_prefix: "#{name}_provider_connection_pool",
    size: connpool_size,
    timeout: connpool_timeout
  ) do
    logger.log('d', "[#{name}] Connection Pool - Creating a connection object")
    # Need to wrap the vSphere connection object in another object. The generic connection pooler will preserve
    # the object reference for the connection, which means it cannot "reconnect" by creating an entirely new connection
    # object. Instead by wrapping it in a Hash, the Hash object reference itself never changes but the content of the
    # Hash can change, and is preserved across invocations.
    new_conn = connect_to_vsphere
    { connection: new_conn }
  end
  # Cached host-selection data keyed by "<datacenter>_<cluster>", guarded by
  # @provider_hosts_lock since multiple pool threads read/refresh it.
  @provider_hosts = {}
  @provider_hosts_lock = Mutex.new
end
|
39
|
+
|
40
|
+
# name of the provider class
|
41
|
+
# Identifier under which this provider class is registered in configuration.
#
# @return [String] always 'vsphere'
def name
  provider_identifier = 'vsphere'
  provider_identifier
end
|
44
|
+
|
45
|
+
# Lists the VMs currently living in the pool's configured folder.
#
# @param pool_name [String] pool whose folder is inspected
# @return [Array<Hash>] one { 'name' => vm_name } per VirtualMachine found;
#   empty when the folder is missing or contains no VMs
def vms_in_pool(pool_name)
  vms = []
  @connection_pool.with_metrics do |pool_object|
    connection = ensured_vsphere_connection(pool_object)
    folder_object = find_vm_folder(pool_name, connection)

    # NOTE: `return` exits vms_in_pool itself (not just the block), which
    # still releases the pooled connection on the way out.
    return vms if folder_object.nil?

    # Only direct VirtualMachine children count; sub-folders are ignored.
    folder_object.childEntity.each do |vm|
      vms << { 'name' => vm.name } if vm.is_a? RbVmomi::VIM::VirtualMachine
    end
  end
  vms
end
|
59
|
+
|
60
|
+
# Refreshes the cached least-used host data for a datacenter/cluster pair.
#
# While the refresh runs, target[dc] is replaced with { 'checking' => true }
# so concurrent callers (see run_select_hosts) can wait instead of issuing a
# duplicate query. On failure the cache entry is reset before re-raising.
#
# @param target [Hash] shared host-selection cache keyed by "<datacenter>_<cluster>"
# @param cluster [String] cluster name
# @param datacenter [String] datacenter name
def select_target_hosts(target, cluster, datacenter)
  percentage = 100
  dc = "#{datacenter}_#{cluster}"
  @provider_hosts_lock.synchronize do
    begin
      target[dc] = {} unless target.key?(dc)
      target[dc]['checking'] = true
      hosts_hash = find_least_used_hosts(cluster, datacenter, percentage)
      target[dc] = hosts_hash
    rescue => _err
      target[dc] = {}
      raise(_err)
    ensure
      # Stamp the refresh time even on failure so staleness checks still work.
      target[dc]['check_time_finished'] = Time.now
    end
  end
end
|
77
|
+
|
78
|
+
# Ensures the host-selection cache for this pool's datacenter/cluster is
# present and fresh, triggering a (re)selection when it is missing or older
# than 'host_selection_max_age' seconds.
#
# @param pool_name [String] pool to resolve datacenter/cluster from config
# @param target [Hash] shared host-selection cache keyed by "<datacenter>_<cluster>"
# @raise [RuntimeError] when the pool's cluster or datacenter cannot be determined
def run_select_hosts(pool_name, target)
  now = Time.now
  max_age = @config[:config]['host_selection_max_age'] || 60
  loop_delay = 5
  datacenter = get_target_datacenter_from_config(pool_name)
  cluster = get_target_cluster_from_config(pool_name)
  raise("cluster for pool #{pool_name} cannot be identified") if cluster.nil?
  raise("datacenter for pool #{pool_name} cannot be identified") if datacenter.nil?
  dc = "#{datacenter}_#{cluster}"
  unless target.key?(dc)
    select_target_hosts(target, cluster, datacenter)
    return
  end
  if target[dc].key?('checking')
    # BUGFIX: this call previously passed (loop_delay, max_age) positionally
    # into (maxloop, loop_delay), so the wait slept max_age (60s) per
    # iteration and capped at loop_delay iterations. Pass maxloop explicitly
    # so loop_delay and max_age land on the parameters they were meant for.
    wait_for_host_selection(dc, target, 0, loop_delay, max_age)
  end
  if target[dc].key?('check_time_finished')
    if now - target[dc]['check_time_finished'] > max_age
      select_target_hosts(target, cluster, datacenter)
    end
  end
end
|
100
|
+
|
101
|
+
# Blocks until another thread finishes host selection for +dc+, or until
# +maxloop+ iterations have elapsed (0 means no iteration cap).
#
# Phase 1 waits for target[dc]['check_time_finished'] to appear; phase 2
# waits for that timestamp to be younger than +max_age+ seconds.
#
# @param dc [String] "<datacenter>_<cluster>" cache key
# @param target [Hash] shared host-selection cache
# @param maxloop [Integer] max iterations per phase; 0 = unbounded
# @param loop_delay [Integer] seconds to sleep between checks
# @param max_age [Integer] acceptable staleness of the selection, in seconds
def wait_for_host_selection(dc, target, maxloop = 0, loop_delay = 1, max_age = 60)
  loop_count = 1
  # `&&` instead of `and` — identical here, avoids the low-precedence trap.
  until target.key?(dc) && target[dc].key?('check_time_finished')
    sleep(loop_delay)
    unless maxloop.zero?
      break if loop_count >= maxloop
      loop_count += 1
    end
  end
  # BUGFIX: re-check target.key?(dc) here — when the bounded loop above
  # breaks out before the entry ever appears, target[dc] is nil and the old
  # code raised NoMethodError on nil.
  return unless target.key?(dc) && target[dc].key?('check_time_finished')
  loop_count = 1
  while Time.now - target[dc]['check_time_finished'] > max_age
    sleep(loop_delay)
    unless maxloop.zero?
      break if loop_count >= maxloop
      loop_count += 1
    end
  end
end
|
120
|
+
|
121
|
+
# Picks the next host for a pool, round-robin style, from the cached
# host-selection data. With +architecture+ given, rotates that architecture's
# list; otherwise rotates the overall 'hosts' list. The chosen host is also
# moved to the back of the other list(s) to keep both rotations balanced.
#
# @param pool_name [String] pool to resolve datacenter/cluster from config
# @param target [Hash] shared host-selection cache
# @param architecture [String, nil] optional CPU architecture to restrict to
# @return [String] DNS name of the selected host
# @raise [RuntimeError] when config or cached candidates are missing
def select_next_host(pool_name, target, architecture = nil)
  datacenter = get_target_datacenter_from_config(pool_name)
  cluster = get_target_cluster_from_config(pool_name)
  raise("cluster for pool #{pool_name} cannot be identified") if cluster.nil?
  raise("datacenter for pool #{pool_name} cannot be identified") if datacenter.nil?
  dc = "#{datacenter}_#{cluster}"
  @provider_hosts_lock.synchronize do
    if architecture
      raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory") unless target[dc].key?('architectures')
      host = target[dc]['architectures'][architecture].shift
      target[dc]['architectures'][architecture] << host
      if target[dc]['hosts'].include?(host)
        target[dc]['hosts'].delete(host)
        target[dc]['hosts'] << host
      end
      return host
    else
      raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory") unless target[dc].key?('hosts')
      host = target[dc]['hosts'].shift
      target[dc]['hosts'] << host
      # BUGFIX: iterating a Hash with `each do |arch|` yields [key, value]
      # pairs, so the old code tested/partitioned the pair instead of the
      # host list (and then used the pair itself as a hash key). Iterate the
      # key and list separately and rotate the chosen host to the back of
      # its architecture list.
      target[dc]['architectures'].each do |arch, arch_hosts|
        if arch_hosts.include?(host)
          target[dc]['architectures'][arch] = arch_hosts.partition { |v| v != host }.flatten
        end
      end
      return host
    end
  end
end
|
150
|
+
|
151
|
+
# Reports whether +parent_host+ is still among the selected hosts for the
# pool's datacenter/cluster — either in the overall host list or in the list
# for the given architecture.
#
# @return [Boolean]
# @raise [RuntimeError] when config or cached candidates are missing
def vm_in_target?(pool_name, parent_host, architecture, target)
  datacenter = get_target_datacenter_from_config(pool_name)
  cluster = get_target_cluster_from_config(pool_name)
  raise("cluster for pool #{pool_name} cannot be identified") if cluster.nil?
  raise("datacenter for pool #{pool_name} cannot be identified") if datacenter.nil?
  selection = target["#{datacenter}_#{cluster}"]
  raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory") unless selection.key?('hosts')
  selection['hosts'].include?(parent_host) || selection['architectures'][architecture].include?(parent_host)
end
|
162
|
+
|
163
|
+
# Looks a VM up by name within a pool's folder.
#
# @param pool_name [String] pool whose folder is searched
# @param vm_name [String] VM name to find
# @return [Hash, nil] generate_vm_hash data for the VM, or nil when not found
def get_vm(pool_name, vm_name)
  vm_hash = nil
  @connection_pool.with_metrics do |pool_object|
    connection = ensured_vsphere_connection(pool_object)
    vm_object = find_vm(pool_name, vm_name, connection)
    # `return` exits get_vm itself, releasing the pooled connection.
    return vm_hash if vm_object.nil?

    vm_hash = generate_vm_hash(vm_object, pool_name)
  end
  vm_hash
end
|
174
|
+
|
175
|
+
# Clones the pool's template into a new VM named +new_vmname+.
#
# Placement is chosen either via vmpooler's own host selection (config key
# 'manage_host_selection') or by delegating to the target cluster's resource
# pool. The destination folder can be created on demand when
# 'create_folders' is enabled.
#
# @param pool_name [String] pool whose template/folder/datastore are used
# @param new_vmname [String] name for the cloned VM
# @return [Hash] generate_vm_hash data for the newly created VM
# @raise [RuntimeError] when the pool or its template path is invalid
def create_vm(pool_name, new_vmname)
  pool = pool_config(pool_name)
  raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
  vm_hash = nil
  @connection_pool.with_metrics do |pool_object|
    connection = ensured_vsphere_connection(pool_object)
    # Assume all pool config is valid i.e. not missing
    template_path = pool['template']
    target_folder_path = pool['folder']
    target_datastore = pool['datastore']
    target_cluster_name = get_target_cluster_from_config(pool_name)
    target_datacenter_name = get_target_datacenter_from_config(pool_name)

    # Get the template VM object
    raise("Pool #{pool_name} did not specify a full path for the template for the provider #{name}") unless valid_template_path? template_path

    template_vm_object = find_template_vm(pool, connection)

    # Annotate with creation time, origin template, etc.
    # Add extraconfig options that can be queried by vmtools
    config_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
      annotation: JSON.pretty_generate(
        name: new_vmname,
        created_by: provider_config['username'],
        base_template: template_path,
        creation_timestamp: Time.now.utc
      ),
      extraConfig: [
        { key: 'guestinfo.hostname', value: new_vmname }
      ]
    )

    # Put the VM in the specified folder and resource pool
    relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
      datastore: find_datastore(target_datastore, connection, target_datacenter_name),
      diskMoveType: :moveChildMostDiskBacking
    )

    manage_host_selection = @config[:config]['manage_host_selection'] if @config[:config].key?('manage_host_selection')
    if manage_host_selection
      # Use vmpooler's cached least-used-host selection and pin the clone
      # to a specific host.
      run_select_hosts(pool_name, @provider_hosts)
      target_host = select_next_host(pool_name, @provider_hosts)
      host_object = find_host_by_dnsname(connection, target_host)
      relocate_spec.host = host_object
    else
      # Choose a cluster/host to place the new VM on
      target_cluster_object = find_cluster(target_cluster_name, connection, target_datacenter_name)
      relocate_spec.pool = target_cluster_object.resourcePool
    end

    # Create a clone spec
    clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(
      location: relocate_spec,
      config: config_spec,
      powerOn: true,
      template: false
    )

    begin
      vm_target_folder = find_vm_folder(pool_name, connection)
      if vm_target_folder.nil? and @config[:config].key?('create_folders') and @config[:config]['create_folders'] == true
        vm_target_folder = create_folder(connection, target_folder_path, target_datacenter_name)
      end
    rescue => _err
      # On folder-lookup errors, fall back to creating the folder when
      # allowed; otherwise surface the original error.
      if @config[:config].key?('create_folders') and @config[:config]['create_folders'] == true
        vm_target_folder = create_folder(connection, target_folder_path, target_datacenter_name)
      else
        raise(_err)
      end
    end

    # Create the new VM
    new_vm_object = template_vm_object.CloneVM_Task(
      folder: vm_target_folder,
      name: new_vmname,
      spec: clone_spec
    ).wait_for_completion

    vm_hash = generate_vm_hash(new_vm_object, pool_name)
  end
  vm_hash
end
|
257
|
+
|
258
|
+
# Attaches an additional disk of +disk_size+ GB to a pool VM, backed by the
# pool's configured datastore.
#
# @return [true] on success
# @raise [RuntimeError] when the pool, its datastore, or the VM is missing
def create_disk(pool_name, vm_name, disk_size)
  pool = pool_config(pool_name)
  raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?

  datastore_name = pool['datastore']
  raise("Pool #{pool_name} does not have a datastore defined for the provider #{name}") if datastore_name.nil?

  @connection_pool.with_metrics do |pool_object|
    connection = ensured_vsphere_connection(pool_object)
    vm_object = find_vm(pool_name, vm_name, connection)
    raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?

    add_disk(vm_object, disk_size, datastore_name, connection, get_target_datacenter_from_config(pool_name))
  end
  true
end
|
274
|
+
|
275
|
+
# Takes a snapshot of a pool VM (including memory state, quiesced).
#
# @return [true] on success
# @raise [RuntimeError] when the VM is missing or the snapshot name exists
def create_snapshot(pool_name, vm_name, new_snapshot_name)
  @connection_pool.with_metrics do |pool_object|
    connection = ensured_vsphere_connection(pool_object)
    vm_object = find_vm(pool_name, vm_name, connection)
    raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?

    # Snapshot names must be unique per VM for later lookup by name.
    old_snap = find_snapshot(vm_object, new_snapshot_name)
    raise("Snapshot #{new_snapshot_name} for VM #{vm_name} in pool #{pool_name} already exists for the provider #{name}") unless old_snap.nil?

    vm_object.CreateSnapshot_Task(
      name: new_snapshot_name,
      description: 'vmpooler',
      memory: true,
      quiesce: true
    ).wait_for_completion
  end
  true
end
|
293
|
+
|
294
|
+
# Reverts a pool VM to a previously taken named snapshot.
#
# @return [true] on success
# @raise [RuntimeError] when the VM or the snapshot cannot be found
def revert_snapshot(pool_name, vm_name, snapshot_name)
  @connection_pool.with_metrics do |pool_object|
    connection = ensured_vsphere_connection(pool_object)
    vm_object = find_vm(pool_name, vm_name, connection)
    raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?

    snapshot_object = find_snapshot(vm_object, snapshot_name)
    raise("Snapshot #{snapshot_name} for VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if snapshot_object.nil?

    snapshot_object.RevertToSnapshot_Task.wait_for_completion
  end
  true
end
|
307
|
+
|
308
|
+
# Powers off (if running) and destroys a pool VM.
#
# @return [true] whether the VM was destroyed or already absent
def destroy_vm(pool_name, vm_name)
  @connection_pool.with_metrics do |pool_object|
    connection = ensured_vsphere_connection(pool_object)
    vm_object = find_vm(pool_name, vm_name, connection)
    # If a VM doesn't exist then it is effectively deleted
    # (`return` exits destroy_vm itself, releasing the pooled connection).
    return true if vm_object.nil?

    # Poweroff the VM if it's running
    vm_object.PowerOffVM_Task.wait_for_completion if vm_object.runtime && vm_object.runtime.powerState && vm_object.runtime.powerState == 'poweredOn'

    # Kill it with fire
    vm_object.Destroy_Task.wait_for_completion
  end
  true
end
|
323
|
+
|
324
|
+
# True when a TCP connection can be opened to the VM (SSH port by default);
# any error while connecting counts as "not ready".
#
# @param _pool_name [String] unused, kept for provider interface parity
# @param vm_name [String] hostname to probe
# @return [Boolean]
def vm_ready?(_pool_name, vm_name)
  open_socket(vm_name)
  true
rescue
  false
end
|
333
|
+
|
334
|
+
# VSphere Helper methods
|
335
|
+
|
336
|
+
# Resolves the clone-target cluster for a pool: the pool-level
# 'clone_target' wins, then the global one; nil when neither is set.
#
# @return [String, nil]
def get_target_cluster_from_config(pool_name)
  pool = pool_config(pool_name)
  return nil if pool.nil?

  cluster = pool['clone_target']
  cluster = global_config[:config]['clone_target'] if cluster.nil?
  cluster
end
|
345
|
+
|
346
|
+
# Resolves the datacenter for a pool: the pool-level 'datacenter' wins,
# then the provider-level one; nil when neither is set.
#
# @return [String, nil]
def get_target_datacenter_from_config(pool_name)
  pool = pool_config(pool_name)
  return nil if pool.nil?

  datacenter = pool['datacenter']
  datacenter = provider_config['datacenter'] if datacenter.nil?
  datacenter
end
|
355
|
+
|
356
|
+
# Return a hash of VM data
|
357
|
+
# Provides vmname, hostname, template, poolname, boottime and powerstate information
|
358
|
+
# Return a hash of VM data
# Provides vmname, hostname, template, poolname, boottime and powerstate information
#
# @param vm_object [Object] vSphere VirtualMachine object
# @param pool_name [String] pool the VM belongs to
# @return [Hash, nil] nil when the pool is not configured; 'hostname',
#   'boottime' and 'powerstate' are nil when the guest has not reported them
def generate_vm_hash(vm_object, pool_name)
  pool_configuration = pool_config(pool_name)
  return nil if pool_configuration.nil?

  # Guard each property chain: summary/runtime sub-objects may be absent
  # while the VM is still booting.
  hostname = vm_object.summary.guest.hostName if vm_object.summary && vm_object.summary.guest && vm_object.summary.guest.hostName
  boottime = vm_object.runtime.bootTime if vm_object.runtime && vm_object.runtime.bootTime
  powerstate = vm_object.runtime.powerState if vm_object.runtime && vm_object.runtime.powerState

  hash = {
    'name' => vm_object.name,
    'hostname' => hostname,
    'template' => pool_configuration['template'],
    'poolname' => pool_name,
    'boottime' => boottime,
    'powerstate' => powerstate,
  }

  hash
end
|
377
|
+
|
378
|
+
# vSphere helper methods
|
379
|
+
# Defaults used when creating additional virtual disks (see #add_disk).
ADAPTER_TYPE = 'lsiLogic'.freeze
DISK_TYPE = 'thin'.freeze
DISK_MODE = 'persistent'.freeze
|
382
|
+
|
383
|
+
# Returns a live vSphere connection from the pooled wrapper Hash,
# transparently reconnecting (and caching the new connection in the Hash)
# when the stored connection no longer responds.
#
# @param connection_pool_object [Hash] { connection: <RbVmomi connection> }
# @return [Object] a usable vSphere connection
def ensured_vsphere_connection(connection_pool_object)
  connection_pool_object[:connection] = connect_to_vsphere unless vsphere_connection_ok?(connection_pool_object[:connection])
  connection_pool_object[:connection]
end
|
387
|
+
|
388
|
+
# Probes a connection by asking the service instance for the current time;
# any failure (including a nil connection) is treated as "not OK".
#
# @return [Boolean]
def vsphere_connection_ok?(connection)
  connection.serviceInstance.CurrentTime
  true
rescue
  false
end
|
394
|
+
|
395
|
+
# Opens a new RbVmomi connection using provider credentials, retrying with
# linear backoff (try * retry_factor seconds) up to 'max_tries' attempts.
# Emits 'connect.open' / 'connect.fail' metrics per attempt.
#
# @return [Object] a connected RbVmomi::VIM service object
# @raise the last connection error once max_tries is exhausted
def connect_to_vsphere
  max_tries = global_config[:config]['max_tries'] || 3
  retry_factor = global_config[:config]['retry_factor'] || 10
  try = 1
  begin
    connection = RbVmomi::VIM.connect host: provider_config['server'],
                                      user: provider_config['username'],
                                      password: provider_config['password'],
                                      insecure: provider_config['insecure'] || false
    metrics.increment('connect.open')
    return connection
  rescue => err
    metrics.increment('connect.fail')
    raise err if try >= max_tries
    sleep(try * retry_factor)
    try += 1
    retry
  end
end
|
414
|
+
|
415
|
+
# This should supercede the open_socket method in the Pool Manager
|
416
|
+
# This should supercede the open_socket method in the Pool Manager
#
# Opens a TCP connection to +host+ (optionally qualified with +domain+),
# yields the socket to the caller's block if given, and always closes it.
# Raises Timeout::Error when the connection cannot be made within +timeout+.
def open_socket(host, domain = nil, timeout = 5, port = 22, &_block)
  Timeout.timeout(timeout) do
    target_host = domain ? "#{host}.#{domain}" : host
    sock = TCPSocket.new target_host, port
    begin
      yield sock if block_given?
    ensure
      sock.close
    end
  end
end
|
428
|
+
|
429
|
+
# Derives a VM's folder path (relative to the datacenter's root VM folder)
# from its inventory path.
#
# @param vm_object [Object] vSphere VirtualMachine; #path is an array of
#   [managed_object, name] pairs from the root Datacenters folder to the VM
# @return [String, nil] '/'-joined folder path, or nil when no Datacenter is
#   present in the path or the path is too short to contain folders
def get_vm_folder_path(vm_object)
  # This gives an array starting from the root Datacenters folder all the way to the VM
  # [ [Object, String], [Object, String ] ... ]
  # It's then reversed so that it now goes from the VM to the Datacenter
  full_path = vm_object.path.reverse

  # Find the Datacenter object
  dc_index = full_path.index { |p| p[0].is_a?(RbVmomi::VIM::Datacenter) }
  return nil if dc_index.nil?
  # The Datacenter should be at least 2 otherwise there's something
  # wrong with the array passed in
  # This is the minimum:
  # [ VM (0), VM ROOT FOLDER (1), DC (2)]
  return nil if dc_index <= 1

  # Remove the VM name (Starting position of 1 in the slice)
  # Up until the Root VM Folder of DataCenter Node (dc_index - 2)
  full_path = full_path.slice(1..dc_index - 2)

  # Reverse the array back to normal and
  # then convert the array of paths into a '/' separated string
  (full_path.reverse.map { |p| p[1] }).join('/')
end
|
452
|
+
|
453
|
+
# Creates a new VMDK on +datastore+ and attaches it to +vm+.
#
# @param vm [Object] vSphere VirtualMachine object
# @param size [String, Integer] disk size in GB; sizes <= 0 are rejected
# @param datastore [String] datastore name to place the VMDK on
# @param connection [Object] live vSphere connection
# @param datacentername [String] datacenter containing the datastore
# @return [Boolean] true on success, false when +size+ is not positive
# @raise [RuntimeError] when the datastore cannot be found
def add_disk(vm, size, datastore, connection, datacentername)
  return false unless size.to_i > 0

  vmdk_datastore = find_datastore(datastore, connection, datacentername)
  raise("Datastore '#{datastore}' does not exist in datacenter '#{datacentername}'") if vmdk_datastore.nil?

  datacenter = connection.serviceInstance.find_datacenter(datacentername)
  controller = find_disk_controller(vm)
  disk_unit_number = find_disk_unit_number(vm, controller)
  # Count existing disks to build a unique "<vm>/<vm>_<n>.vmdk" file name.
  disk_count = vm.config.hardware.device.grep(RbVmomi::VIM::VirtualDisk).count
  vmdk_file_name = "#{vm['name']}/#{vm['name']}_#{disk_count}.vmdk"

  # capacityKb: GB -> KB for the vSphere API.
  vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
    capacityKb: size.to_i * 1024 * 1024,
    adapterType: ADAPTER_TYPE,
    diskType: DISK_TYPE
  )

  vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
    datastore: vmdk_datastore,
    diskMode: DISK_MODE,
    fileName: "[#{datastore}] #{vmdk_file_name}"
  )

  # key: -1 tells vSphere to assign a fresh device key on reconfigure.
  device = RbVmomi::VIM::VirtualDisk(
    backing: vmdk_backing,
    capacityInKB: size.to_i * 1024 * 1024,
    controllerKey: controller.key,
    key: -1,
    unitNumber: disk_unit_number
  )

  device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
    device: device,
    operation: RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
  )

  vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
    deviceChange: [device_config_spec]
  )

  # Create the VMDK file first, then attach it to the VM.
  connection.serviceContent.virtualDiskManager.CreateVirtualDisk_Task(
    datacenter: datacenter,
    name: "[#{datastore}] #{vmdk_file_name}",
    spec: vmdk_spec
  ).wait_for_completion

  vm.ReconfigVM_Task(spec: vm_config_spec).wait_for_completion

  true
end
|
504
|
+
|
505
|
+
# Looks a datastore up by name inside a datacenter.
#
# @return [Object, nil] the datastore, or nil when the datacenter lacks it
# @raise [RuntimeError] when the datacenter itself does not exist
def find_datastore(datastorename, connection, datacentername)
  dc_object = connection.serviceInstance.find_datacenter(datacentername)
  raise("Datacenter #{datacentername} does not exist") if dc_object.nil?

  dc_object.find_datastore(datastorename)
end
|
510
|
+
|
511
|
+
# Finds a virtual device on +vm+ by its human-readable label.
#
# @return [Object, nil] the matching device, or nil when none matches
def find_device(vm, device_name)
  vm.config.hardware.device.find { |device| device.deviceInfo.label == device_name }
end
|
518
|
+
|
519
|
+
# Returns the first SCSI controller (in device-key order) that still has
# room for another disk (fewer than 15 children), or nil when all are full.
def find_disk_controller(vm)
  controllers = find_disk_devices(vm)

  controllers.keys.sort.each do |key|
    entry = controllers[key]
    return find_device(vm, entry['device'].deviceInfo.label) if entry['children'].length < 15
  end

  nil
end
|
530
|
+
|
531
|
+
# Builds a map of SCSI controllers and the disks attached to them.
#
# @param vm [Object] vSphere VirtualMachine object
# @return [Hash{Integer => Hash}] keyed by controller device key; each value
#   holds 'device' (the controller object, when seen) and 'children'
#   (the VirtualDisk objects attached to that controller)
def find_disk_devices(vm)
  devices = {}

  vm.config.hardware.device.each do |device|
    if device.is_a? RbVmomi::VIM::VirtualSCSIController
      # BUGFIX: this previously tested devices[device.controllerKey], so a
      # controller whose entry had already been created by one of its disks
      # was re-initialized and the collected children were lost. The entry
      # for a controller is keyed by its own device key.
      if devices[device.key].nil?
        devices[device.key] = {}
        devices[device.key]['children'] = []
      end

      devices[device.key]['device'] = device
    end

    if device.is_a? RbVmomi::VIM::VirtualDisk
      if devices[device.controllerKey].nil?
        devices[device.controllerKey] = {}
        devices[device.controllerKey]['children'] = []
      end

      devices[device.controllerKey]['children'].push(device)
    end
  end

  devices
end
|
556
|
+
|
557
|
+
# Finds the lowest free SCSI unit number (0..15) on +controller+, skipping
# the controller's own unit and every attached disk's unit.
#
# @return [Integer, nil] lowest free unit number, or nil when all 16 are taken
def find_disk_unit_number(vm, controller)
  taken = []

  find_disk_devices(vm).each_value do |entry|
    next unless controller.key == entry['device'].key
    taken << entry['device'].scsiCtlrUnitNumber
    entry['children'].each { |disk| taken << disk.unitNumber }
  end

  (0..15).find { |scsi_id| !taken.include?(scsi_id) }
end
|
579
|
+
|
580
|
+
# Finds a folder object by inventory path
|
581
|
+
# Params:
|
582
|
+
# +pool_name+:: the pool to find the folder for
|
583
|
+
# +connection+:: the vsphere connection object
|
584
|
+
# returns a ManagedObjectReference for the folder found or nil if not found
|
585
|
+
# Finds a folder object by inventory path
# Params:
# +pool_name+:: the pool to find the folder for
# +connection+:: the vsphere connection object
# returns a ManagedObjectReference for the folder found or nil if not found
def find_vm_folder(pool_name, connection)
  # Find a folder by its inventory path and return the object
  # Returns nil when the object found is not a folder
  pool_configuration = pool_config(pool_name)
  return nil if pool_configuration.nil?
  folder = pool_configuration['folder']
  datacenter = get_target_datacenter_from_config(pool_name)
  return nil if datacenter.nil?

  # NOTE(review): FindByInventoryPath documents only :inventoryPath; the
  # :entity key (set to the provider itself) looks unused — confirm against
  # the vSphere SearchIndex API before relying on or removing it.
  propSpecs = {
    :entity => self,
    :inventoryPath => "#{datacenter}/vm/#{folder}"
  }

  folder_object = connection.searchIndex.FindByInventoryPath(propSpecs)
  return nil unless folder_object.class == RbVmomi::VIM::Folder
  folder_object
end
|
603
|
+
|
604
|
+
# Returns an array containing cumulative CPU and memory utilization of a host, and its object reference
|
605
|
+
# Params:
|
606
|
+
# +model+:: CPU arch version to match on
|
607
|
+
# +limit+:: Hard limit for CPU or memory utilization beyond which a host is excluded for deployments
|
608
|
+
# returns nil if one on these conditions is true:
|
609
|
+
# the model param is defined and cannot be found
|
610
|
+
# the host is in maintenance mode
|
611
|
+
# the host status is not 'green'
|
612
|
+
# the cpu or memory utilization is bigger than the limit param
|
613
|
+
def get_host_utilization(host, model = nil, limit = 90)
  # NOTE(review): a configured 'utilization_limit' silently overrides the
  # +limit+ argument for every caller — confirm that is intentional.
  limit = @config[:config]['utilization_limit'] if @config[:config].key?('utilization_limit')
  if model
    return nil unless host_has_cpu_model?(host, model)
  end
  return nil if host.runtime.inMaintenanceMode
  return nil unless host.overallStatus == 'green'
  return nil unless host.configIssue.empty?

  cpu_utilization = cpu_utilization_for host
  memory_utilization = memory_utilization_for host

  # Exactly 0.0 utilization is rejected as well — presumably to filter out
  # hosts whose quick stats have not been populated yet.
  return nil if cpu_utilization.nil?
  return nil if cpu_utilization == 0.0
  return nil if memory_utilization.nil?
  return nil if memory_utilization == 0.0

  return nil if cpu_utilization > limit
  return nil if memory_utilization > limit

  [cpu_utilization, host]
end
|
635
|
+
|
636
|
+
# True when the host's CPU architecture token matches +model+.
def host_has_cpu_model?(host, model)
  model == get_host_cpu_arch_version(host)
end
|
639
|
+
|
640
|
+
# Extracts the architecture token — the 5th whitespace-separated word — from
# the first CPU package's description string (e.g. the "v2" in
# "Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz").
def get_host_cpu_arch_version(host)
  host.hardware.cpuPkg[0].description.split[4]
end
|
646
|
+
|
647
|
+
# Host CPU utilization as a percentage of total MHz capacity,
# or nil when quick stats are unavailable.
def cpu_utilization_for(host)
  used_mhz = host.summary.quickStats.overallCpuUsage
  return nil if used_mhz.nil?

  capacity_mhz = host.summary.hardware.cpuMhz * host.summary.hardware.numCpuCores
  (used_mhz.to_f / capacity_mhz.to_f) * 100
end
|
653
|
+
|
654
|
+
# Host memory utilization as a percentage of physical capacity (bytes are
# converted to MB to match overallMemoryUsage), or nil when stats are missing.
def memory_utilization_for(host)
  used_mb = host.summary.quickStats.overallMemoryUsage
  return nil if used_mb.nil?

  capacity_mb = host.summary.hardware.memorySize / 1024 / 1024
  (used_mb.to_f / capacity_mb.to_f) * 100
end
|
660
|
+
|
661
|
+
# Mean utilization across [utilization, host] pairs (element 0 of each pair).
# Integer inputs keep integer division, matching the original arithmetic.
def get_average_cluster_utilization(hosts)
  total = hosts.reduce(0) { |sum, entry| sum + entry[0] }
  total / hosts.count
end
|
665
|
+
|
666
|
+
# Groups candidate hosts by CPU architecture and reduces each group to its
# least-used members (as hostnames) via select_least_used_hosts.
#
# @param hosts [Array<Array(Float, Object)>] [utilization, host_object] pairs
# @param percentage [Integer] share of each group to keep
# @return [Hash{String => Array<String>}] architecture => hostnames
def build_compatible_hosts_lists(hosts, percentage)
  annotated = hosts.map do |utilization, host_object|
    {
      'utilization' => utilization,
      'host_object' => host_object,
      'architecture' => get_host_cpu_arch_version(host_object)
    }
  end

  architectures = {}
  annotated.each do |entry|
    triple = [entry['utilization'], entry['host_object'], entry['architecture']]
    (architectures[entry['architecture']] ||= []) << triple
  end

  architectures.keys.each do |version|
    architectures[version] = select_least_used_hosts(architectures[version], percentage)
  end
  architectures
end
|
691
|
+
|
692
|
+
# Picks the least-utilized hosts from [utilization, host] pairs and returns
# their DNS names.
#
# @param hosts [Array<Array(Float, Object)>] candidate [utilization, host] pairs
# @param percentage [Integer] share (0-100) of the cluster to consider
# @return [Array<String>] names of hosts at or below average utilization
# @raise [RuntimeError] when +hosts+ is empty
def select_least_used_hosts(hosts, percentage)
  raise('Provided hosts list to select_least_used_hosts is empty') if hosts.empty?
  average_utilization = get_average_cluster_utilization(hosts)
  least_used_hosts = []
  hosts.each do |host|
    least_used_hosts << host if host[0] <= average_utilization
  end
  hosts_to_select = (hosts.count * (percentage / 100.0)).to_int
  # percentage == 100 deliberately yields count - 1; with the inclusive
  # 0..hosts_to_select slice below that still covers the whole list.
  hosts_to_select = hosts.count - 1 if percentage == 100
  # NOTE(review): sorting [utilization, host] pairs falls back to comparing
  # host objects when utilizations tie, which may raise if those objects are
  # not comparable — confirm utilization values are effectively unique.
  least_used_hosts.sort[0..hosts_to_select].map { |host| host[1].name }
end
|
703
|
+
|
704
|
+
# Computes the least-used host data for a cluster, both overall and grouped
# by CPU architecture. This is the payload cached by select_target_hosts.
#
# @param cluster [String] cluster name
# @param datacentername [String] datacenter containing the cluster
# @param percentage [Integer] share of hosts to keep per list
# @return [Hash] { 'hosts' => [names], 'architectures' => {arch => [names]} }
# @raise [RuntimeError] when the cluster is missing or has no usable hosts
def find_least_used_hosts(cluster, datacentername, percentage)
  @connection_pool.with_metrics do |pool_object|
    connection = ensured_vsphere_connection(pool_object)
    cluster_object = find_cluster(cluster, connection, datacentername)
    raise("Cluster #{cluster} cannot be found") if cluster_object.nil?
    target_hosts = get_cluster_host_utilization(cluster_object)
    raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory'") if target_hosts.empty?
    architectures = build_compatible_hosts_lists(target_hosts, percentage)
    least_used_hosts = select_least_used_hosts(target_hosts, percentage)
    # The block's value propagates out of with_metrics as the return value.
    {
      'hosts' => least_used_hosts,
      'architectures' => architectures
    }
  end
end
|
719
|
+
|
720
|
+
# Look up an ESXi host by its DNS name via the vSphere search index.
#
# connection - an open RbVmomi connection
# dnsname    - String DNS name of the host
#
# Returns the host object, or nil when no host matches.
def find_host_by_dnsname(connection, dnsname)
  # FindByDnsName already returns nil on a miss; the previous explicit
  # `return nil if host_object.nil?` guard was redundant.
  connection.searchIndex.FindByDnsName(dnsName: dnsname, vmSearch: false)
end
|
725
|
+
|
726
|
+
# Find the single least-utilized host in a cluster.
#
# cluster        - String cluster name
# connection     - open vSphere connection
# datacentername - String datacenter name
#
# Returns the host object with the lowest utilization.
# Raises RuntimeError when no usable host candidates exist.
def find_least_used_host(cluster, connection, datacentername)
  cluster_object = find_cluster(cluster, connection, datacentername)
  candidates = get_cluster_host_utilization(cluster_object)
  raise("There is no host candidate in vcenter that meets all the required conditions, check that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory'") if candidates.empty?
  # Entries sort by utilization first, so the minimum tuple holds the
  # least-used host object in position 1
  candidates.min[1]
end
|
733
|
+
|
734
|
+
# Resolve a cluster object by name inside a datacenter.
#
# Returns the cluster object, or nil when no direct child of the host
# folder matches. Note: nested folders are not traversed.
# Raises RuntimeError when the datacenter itself does not exist.
def find_cluster(cluster, connection, datacentername)
  datacenter = connection.serviceInstance.find_datacenter(datacentername)
  raise("Datacenter #{datacentername} does not exist") if datacenter.nil?
  datacenter.hostFolder.children.find { |child| child.name == cluster }
end
|
739
|
+
|
740
|
+
# Collect per-host utilization entries for every usable host of a cluster.
#
# cluster - object responding to #host (list of host objects)
# model   - optional CPU model used by get_host_utilization to filter
#           compatible hosts
#
# Returns an Array of utilization entries; hosts for which
# get_host_utilization returns nil/false are skipped.
def get_cluster_host_utilization(cluster, model = nil)
  cluster.host.each_with_object([]) do |host, usable|
    usage = get_host_utilization(host, model)
    usable << usage if usage
  end
end
|
748
|
+
|
749
|
+
# Find the least-utilized host that shares the CPU model of the host the VM
# currently runs on, so a live migration stays vMotion-compatible.
# NOTE: the method name keeps the historical 'vpshere' typo; callers use it.
#
# vm - VirtualMachine-like object
#
# Returns [host_object, host_name].
# Raises RuntimeError when no compatible candidate host exists.
def find_least_used_vpshere_compatible_host(vm)
  current_host = vm.summary.runtime.host
  cpu_model = get_host_cpu_arch_version(current_host)
  candidates = get_cluster_host_utilization(current_host.parent, cpu_model)
  raise("There is no host candidate in vcenter that meets all the required conditions, check that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory'") if candidates.empty?
  # Minimum tuple (sorted by utilization) carries the target host object
  best = candidates.min[1]
  [best, best.name]
end
|
758
|
+
|
759
|
+
# Locate a named snapshot on a VM by searching its full snapshot tree.
# Returns the snapshot object, or nil when the VM has no snapshots or the
# name is not present.
def find_snapshot(vm, snapshotname)
  return nil unless vm.snapshot
  get_snapshot_list(vm.snapshot.rootSnapshotList, snapshotname)
end
|
762
|
+
|
763
|
+
# Build the property-spec hash used with searchIndex.FindByInventoryPath to
# locate a VM by datacenter/folder/name.
#
# datacenter - String datacenter name
# folder     - String VM folder name
# vmname     - String VM name
#
# Returns a Hash with :entity and :inventoryPath keys.
def build_propSpecs(datacenter, folder, vmname)
  # Was `entity => self`, a bareword that raised NameError whenever this
  # method ran; the key must be the :entity symbol, matching find_vm's spec.
  {
    :entity => self,
    :inventoryPath => "#{datacenter}/vm/#{folder}/#{vmname}"
  }
end
|
770
|
+
|
771
|
+
# Find a pooled VM by name through its inventory path.
#
# pool_name  - String pool the VM belongs to (supplies folder + datacenter)
# vmname     - String VM name
# connection - open vSphere connection
#
# Returns the VM object, or nil when the pool configuration, the target
# datacenter, or the VM itself cannot be found.
def find_vm(pool_name, vmname, connection)
  pool_configuration = pool_config(pool_name)
  return nil if pool_configuration.nil?
  datacenter = get_target_datacenter_from_config(pool_name)
  return nil if datacenter.nil?

  inventory_path = "#{datacenter}/vm/#{pool_configuration['folder']}/#{vmname}"
  connection.searchIndex.FindByInventoryPath(:entity => self, :inventoryPath => inventory_path)
end
|
787
|
+
|
788
|
+
# Create a recursive container view over every VirtualMachine beneath the
# root folder; used as the base for bulk VM property collection.
#
# Returns the ContainerView object created by the view manager.
def get_base_vm_container_from(connection)
  root_folder = connection.serviceContent.rootFolder
  connection.serviceContent.viewManager.CreateContainerView(
    container: root_folder,
    recursive: true,
    type: ['VirtualMachine']
  )
end
|
796
|
+
|
797
|
+
# Depth-first search of a snapshot tree for a snapshot with the given name.
#
# tree         - Array of snapshot tree nodes (name / snapshot /
#                childSnapshotList)
# snapshotname - String snapshot name to find
#
# Returns the first matching snapshot object in pre-order, or nil.
def get_snapshot_list(tree, snapshotname)
  tree.each do |node|
    found = if node.name == snapshotname
              node.snapshot
            else
              get_snapshot_list(node.childSnapshotList, snapshotname)
            end
    return found if found
  end
  nil
end
|
810
|
+
|
811
|
+
# Gather host placement details for a pooled VM.
#
# pool_name  - String pool name
# vm_name    - String VM name
# connection - open vSphere connection
#
# Returns a Hash with 'host_name', 'object' (the VM object) and
# 'architecture', or nil when the VM cannot be found.
# Raises RuntimeError when the running host cannot be determined.
def get_vm_details(pool_name, vm_name, connection)
  vm_object = find_vm(pool_name, vm_name, connection)
  return nil if vm_object.nil?

  summary = vm_object.summary
  parent_host_object = summary.runtime.host if summary && summary.runtime && summary.runtime.host
  raise('Unable to determine which host the VM is running on') if parent_host_object.nil?

  {
    'host_name' => parent_host_object.name,
    'object' => vm_object,
    'architecture' => get_host_cpu_arch_version(parent_host_object)
  }
end
|
824
|
+
|
825
|
+
# True when the global config sets a positive Integer 'migration_limit';
# false for nil, zero, negative, or non-Integer values.
def migration_enabled?(config)
  limit = config[:config]['migration_limit']
  limit.is_a?(Integer) && limit > 0
end
|
831
|
+
|
832
|
+
# Decide whether a checked-out VM should be live-migrated to a better-suited
# host, and perform the migration when appropriate.
#
# pool_name - String pool the VM belongs to
# vm_name   - String VM name
#
# Reads @config[:config]['migration_limit'] and the global
# 'vmpooler__migration' Redis set to throttle concurrent migrations.
# Logs the outcome in every branch and re-raises any error after logging.
def migrate_vm(pool_name, vm_name)
  @connection_pool.with_metrics do |pool_object|
    begin
      connection = ensured_vsphere_connection(pool_object)
      vm_hash = get_vm_details(pool_name, vm_name, connection)
      # migration_limit stays nil when the key is absent; migration_enabled?
      # then returns false, so the comparison below never sees nil
      migration_limit = @config[:config]['migration_limit'] if @config[:config].key?('migration_limit')
      migration_count = $redis.scard('vmpooler__migration')
      if migration_enabled? @config
        if migration_count >= migration_limit
          # Too many migrations in flight already; skip evaluation entirely
          logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}. No migration will be evaluated since the migration_limit has been reached")
          return
        end
        # Refresh the cached per-pool target host lists before placement check
        run_select_hosts(pool_name, @provider_hosts)
        if vm_in_target?(pool_name, vm_hash['host_name'], vm_hash['architecture'], @provider_hosts)
          logger.log('s', "[ ] [#{pool_name}] No migration required for '#{vm_name}' running on #{vm_hash['host_name']}")
        else
          migrate_vm_to_new_host(pool_name, vm_name, vm_hash, connection)
        end
      else
        # Migration disabled: just report where the VM runs
        logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}")
      end
    rescue => _err
      # NOTE(review): vm_hash may be nil here when get_vm_details itself
      # failed, which would raise NoMethodError from this log line — verify
      logger.log('s', "[!] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}")
      raise _err
    end
  end
end
|
859
|
+
|
860
|
+
# Migrate a VM to the next selected target host, tracking the VM in the
# 'vmpooler__migration' Redis set for the duration of the move.
#
# pool_name  - String pool name
# vm_name    - String VM name
# vm_hash    - Hash from get_vm_details ('host_name', 'object', 'architecture')
# connection - open vSphere connection
#
# The ensure clause guarantees the VM leaves the migration set even when
# the migration fails.
def migrate_vm_to_new_host(pool_name, vm_name, vm_hash, connection)
  $redis.sadd('vmpooler__migration', vm_name)
  target_host_name = select_next_host(pool_name, @provider_hosts, vm_hash['architecture'])
  target_host_object = find_host_by_dnsname(connection, target_host_name)
  # finish is the elapsed-seconds string returned by the timing helper
  finish = migrate_vm_and_record_timing(pool_name, vm_name, vm_hash, target_host_object, target_host_name)
  #logger.log('s', "Provider_hosts is: #{provider.provider_hosts}")
  logger.log('s', "[>] [#{pool_name}] '#{vm_name}' migrated from #{vm_hash['host_name']} to #{target_host_name} in #{finish} seconds")
ensure
  $redis.srem('vmpooler__migration', vm_name)
end
|
870
|
+
|
871
|
+
# Perform the host relocation and record timing metrics plus Redis
# bookkeeping for the migrated VM.
#
# pool_name          - String pool name (metric key)
# vm_name            - String VM name (Redis hash key)
# vm_hash            - Hash with the VM 'object' and source 'host_name'
# target_host_object - destination host object
# dest_host_name     - destination host name (metric key)
#
# Returns the migration duration as a String formatted to two decimals.
def migrate_vm_and_record_timing(pool_name, vm_name, vm_hash, target_host_object, dest_host_name)
  start = Time.now
  migrate_vm_host(vm_hash['object'], target_host_object)
  finish = format('%.2f', Time.now - start)
  metrics.timing("migrate.#{pool_name}", finish)
  metrics.increment("migrate_from.#{vm_hash['host_name']}")
  metrics.increment("migrate_to.#{dest_host_name}")
  # Elapsed time from the VM's original checkout to this migration; the
  # 'checkout' field is assumed to hold a Time.parse-able timestamp
  checkout_to_migration = format('%.2f', Time.now - Time.parse($redis.hget("vmpooler__vm__#{vm_name}", 'checkout')))
  $redis.hset("vmpooler__vm__#{vm_name}", 'migration_time', finish)
  $redis.hset("vmpooler__vm__#{vm_name}", 'checkout_to_migration', checkout_to_migration)
  finish
end
|
883
|
+
|
884
|
+
# Relocate a VM onto the given host and block until vSphere reports the
# relocation task as complete.
#
# vm_object - VirtualMachine object to move
# host      - destination HostSystem object
def migrate_vm_host(vm_object, host)
  relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
  vm_object.RelocateVM_Task(spec: relospec).wait_for_completion
end
|
888
|
+
|
889
|
+
# Create (or traverse to) a VM folder path inside a datacenter.
#
# connection - open vSphere connection
# new_folder - String folder path to create, e.g. 'parent/child'
# datacenter - String datacenter name
#
# Returns the Folder object.
# Raises RuntimeError when traverse yields nil.
def create_folder(connection, new_folder, datacenter)
  dc = connection.serviceInstance.find_datacenter(datacenter)
  # traverse takes positional (path, type, create); the original wrote
  # `type=RbVmomi::VIM::Folder, create=true`, which merely created throwaway
  # locals that mimicked keyword arguments — same values, clearer call.
  folder_object = dc.vmFolder.traverse(new_folder, RbVmomi::VIM::Folder, true)
  raise("Cannot create folder #{new_folder}") if folder_object.nil?
  folder_object
end
|
895
|
+
|
896
|
+
# Resolve the template VM configured for a pool via its inventory path.
#
# pool       - Hash pool configuration with 'name' and 'template' keys
# connection - open vSphere connection
#
# Returns the template VM object.
# Raises RuntimeError when the datacenter or the template cannot be found.
def find_template_vm(pool, connection)
  datacenter = get_target_datacenter_from_config(pool['name'])
  raise('cannot find datacenter') if datacenter.nil?

  template_path = "#{datacenter}/vm/#{pool['template']}"
  template_vm_object = connection.searchIndex.FindByInventoryPath(:entity => self, :inventoryPath => template_path)
  raise("Pool #{pool['name']} specifies a template VM of #{pool['template']} which does not exist for the provider #{name}") if template_vm_object.nil?

  template_vm_object
end
|
910
|
+
|
911
|
+
# Add a delta-disk layer on every disk of a pool's template VM so linked
# clones can be created from it.
#
# pool - Hash pool configuration (needs 'name' and 'template')
def create_template_delta_disks(pool)
  @connection_pool.with_metrics do |pool_object|
    vsphere = ensured_vsphere_connection(pool_object)
    find_template_vm(pool, vsphere).add_delta_disk_layer_on_all_disks
  end
end
|
919
|
+
|
920
|
+
# A template path is valid when it contains a '/' separator and neither
# starts nor ends with one, i.e. it looks like "folder/name".
def valid_template_path?(template)
  template.include?('/') && !template.start_with?('/') && !template.end_with?('/')
end
|
926
|
+
end
|
927
|
+
end
|
928
|
+
end
|
929
|
+
end
|