vagrant-rbvmomi 1.8.1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.yardopts +6 -0
- data/LICENSE +19 -0
- data/README.rdoc +78 -0
- data/Rakefile +31 -0
- data/VERSION +1 -0
- data/bin/rbvmomish +138 -0
- data/devel/analyze-vim-declarations.rb +213 -0
- data/devel/analyze-xml.rb +46 -0
- data/devel/benchmark.rb +117 -0
- data/devel/collisions.rb +18 -0
- data/devel/merge-internal-vmodl.rb +59 -0
- data/devel/merge-manual-vmodl.rb +32 -0
- data/examples/annotate.rb +54 -0
- data/examples/cached_ovf_deploy.rb +120 -0
- data/examples/clone_vm.rb +84 -0
- data/examples/create_vm-1.9.rb +93 -0
- data/examples/create_vm.rb +93 -0
- data/examples/extraConfig.rb +54 -0
- data/examples/lease_tool.rb +102 -0
- data/examples/logbundle.rb +63 -0
- data/examples/logtail.rb +60 -0
- data/examples/nfs_datastore.rb +95 -0
- data/examples/power.rb +59 -0
- data/examples/readme-1.rb +35 -0
- data/examples/readme-2.rb +51 -0
- data/examples/run.sh +41 -0
- data/examples/screenshot.rb +48 -0
- data/examples/vdf.rb +81 -0
- data/examples/vm_drs_behavior.rb +76 -0
- data/lib/rbvmomi.rb +12 -0
- data/lib/rbvmomi/basic_types.rb +375 -0
- data/lib/rbvmomi/connection.rb +270 -0
- data/lib/rbvmomi/deserialization.rb +248 -0
- data/lib/rbvmomi/fault.rb +17 -0
- data/lib/rbvmomi/pbm.rb +66 -0
- data/lib/rbvmomi/sms.rb +61 -0
- data/lib/rbvmomi/sms/SmsStorageManager.rb +7 -0
- data/lib/rbvmomi/trivial_soap.rb +114 -0
- data/lib/rbvmomi/trollop.rb +70 -0
- data/lib/rbvmomi/type_loader.rb +136 -0
- data/lib/rbvmomi/utils/admission_control.rb +398 -0
- data/lib/rbvmomi/utils/deploy.rb +336 -0
- data/lib/rbvmomi/utils/leases.rb +142 -0
- data/lib/rbvmomi/utils/perfdump.rb +628 -0
- data/lib/rbvmomi/vim.rb +128 -0
- data/lib/rbvmomi/vim/ComputeResource.rb +51 -0
- data/lib/rbvmomi/vim/Datacenter.rb +17 -0
- data/lib/rbvmomi/vim/Datastore.rb +68 -0
- data/lib/rbvmomi/vim/DynamicTypeMgrAllTypeInfo.rb +75 -0
- data/lib/rbvmomi/vim/DynamicTypeMgrDataTypeInfo.rb +20 -0
- data/lib/rbvmomi/vim/DynamicTypeMgrManagedTypeInfo.rb +46 -0
- data/lib/rbvmomi/vim/Folder.rb +207 -0
- data/lib/rbvmomi/vim/HostSystem.rb +174 -0
- data/lib/rbvmomi/vim/ManagedEntity.rb +57 -0
- data/lib/rbvmomi/vim/ManagedObject.rb +60 -0
- data/lib/rbvmomi/vim/ObjectContent.rb +23 -0
- data/lib/rbvmomi/vim/ObjectUpdate.rb +23 -0
- data/lib/rbvmomi/vim/OvfManager.rb +200 -0
- data/lib/rbvmomi/vim/PerfCounterInfo.rb +26 -0
- data/lib/rbvmomi/vim/PerformanceManager.rb +110 -0
- data/lib/rbvmomi/vim/PropertyCollector.rb +25 -0
- data/lib/rbvmomi/vim/ReflectManagedMethodExecuter.rb +30 -0
- data/lib/rbvmomi/vim/ResourcePool.rb +55 -0
- data/lib/rbvmomi/vim/ServiceInstance.rb +55 -0
- data/lib/rbvmomi/vim/Task.rb +65 -0
- data/lib/rbvmomi/vim/VirtualMachine.rb +74 -0
- data/test/test_deserialization.rb +383 -0
- data/test/test_emit_request.rb +128 -0
- data/test/test_exceptions.rb +14 -0
- data/test/test_helper.rb +14 -0
- data/test/test_misc.rb +24 -0
- data/test/test_parse_response.rb +69 -0
- data/test/test_serialization.rb +311 -0
- data/vmodl.db +0 -0
- metadata +163 -0
@@ -0,0 +1,398 @@
|
|
1
|
+
|
2
|
+
# An admission controlled resource scheduler for large scale vSphere deployments
|
3
|
+
#
|
4
|
+
# While DRS (Dynamic Resource Scheduler) in vSphere handles CPU and Memory
|
5
|
+
# allocations within a single vSphere cluster, larger deployments require
|
6
|
+
# another layer of scheduling to make the use of multiple clusters transparent.
|
7
|
+
# So this class doesn't replace DRS, but in fact works on top of it.
|
8
|
+
#
|
9
|
+
# The scheduler in this class performs admission control to make sure clusters
|
10
|
+
# don't get overloaded. It does so by adding additional metrics to the already
|
11
|
+
# existing CPU and Memory reservation system that DRS has. After admission
|
12
|
+
# control it also performs very basic initial placement. Note that in-cluster
|
13
|
+
# placement and load-balancing is left to DRS. Also note that no cross-cluster
|
14
|
+
# load balancing is done.
|
15
|
+
#
|
16
|
+
# This class uses the concept of a Pod: A set of clusters that share a set of
|
17
|
+
# datastores. From a datastore perspective, we are free to place a VM on any
|
18
|
+
# host or cluster. So admission control is done at the Pod level first. Pods
|
19
|
+
# are automatically discovered based on lists of clusters and datastores.
|
20
|
+
#
|
21
|
+
# Admission control covers the following metrics:
|
22
|
+
# - Host availability: If no hosts are available within a cluster or pod,
|
23
|
+
# admission is denied.
|
24
|
+
# - Minimum free space: If a datastore falls below this free space percentage,
|
25
|
+
# admission to it will be denied. Admission to a pod is granted as long at
|
26
|
+
# least one datastore passes admission control.
|
27
|
+
# - Maximum number of VMs: If a Pod exceeds a configured number of powered on
|
28
|
+
# VMs, admission is denied. This is a crude but effective catch-all metric
|
29
|
+
# in case users didn't set proper individual CPU or Memory reservations or
|
30
|
+
# if the scalability limit doesn't originate from CPU or Memory.
|
31
|
+
#
|
32
|
+
# Placement after admission control:
|
33
|
+
# - Cluster selection: A load metric based on a combination of CPU and Memory
|
34
|
+
# load is used to always select the "least loaded" cluster. The metric is very
|
35
|
+
# crude and only meant to do very rough load balancing. If DRS clusters are
|
36
|
+
# large enough, this is good enough in most cases though.
|
37
|
+
# - Datastore selection: Right now NO intelligence is implemented here.
|
38
|
+
#
|
39
|
+
# Usage:
|
40
|
+
# Instantiate the class, call make_placement_decision and then use the exposed
|
41
|
+
# computer (cluster), resource pool, vm_folder and datastore. Currently once
|
42
|
+
# computed, a new updated placement can't be generated.
|
43
|
+
class AdmissionControlledResourceScheduler
|
44
|
+
attr_reader :rp
|
45
|
+
|
46
|
+
# Builds a scheduler bound to one VIM connection.
# @param vim [VIM] Live VIM connection used for all lookups
# @param opts [Hash] Configuration; every key below is optional and defaults
#        to nil when absent:
#        :datacenter / :datacenter_path, :vm_folder / :vm_folder_path,
#        :rp_path, :computers / :computer_names,
#        :datastores / :datastore_paths, :max_vms_per_pod, :min_ds_free,
#        :service_docs_url, :logger
def initialize vim, opts = {}
  @vim = vim

  # Copy every recognized option verbatim into an ivar of the same name.
  %i[
    datacenter datacenter_path vm_folder vm_folder_path rp_path
    computers computer_names datastores datastore_paths
    max_vms_per_pod min_ds_free service_docs_url logger
  ].each do |key|
    instance_variable_set("@#{key}", opts[key])
  end

  # Cache frequently used service objects.
  @pc = @vim.serviceContent.propertyCollector
  @root_folder = @vim.serviceContent.rootFolder
end
|
68
|
+
|
69
|
+
# Emits one log line: through the injected logger when one was configured,
# otherwise to stdout with a timestamp prefix.
# @param x [String] Message to log
def log x
  return @logger.info(x) if @logger
  puts "#{Time.now}: #{x}"
end
|
76
|
+
|
77
|
+
# Returns the used VM folder. If not set yet, uses the vm_folder_path to
# lookup the folder. If it doesn't exist, it is created. Collisions between
# multiple clients concurrently creating the same folder are handled.
# @return [VIM::Folder] The VM folder
def vm_folder
  # One-retry budget: when two clients race to create the same folder, the
  # loser receives a DuplicateName fault and simply traverses again.
  retries = 1
  begin
    # traverse! creates missing path components (plain traverse does not).
    @vm_folder ||= datacenter.vmFolder.traverse!(@vm_folder_path, VIM::Folder)
    if !@vm_folder
      fail "VM folder #{@vm_folder_path} not found"
    end
  rescue RbVmomi::Fault => fault
    # Only the creation race is retried; any other fault propagates.
    if !fault.fault.is_a?(RbVmomi::VIM::DuplicateName)
      raise
    else
      retries -= 1
      retry if retries >= 0
      # NOTE(review): when the retry budget is exhausted this falls through
      # and may return nil instead of raising — confirm this is intended.
    end
  end
  @vm_folder
end
|
98
|
+
|
99
|
+
# Returns the used Datacenter. If not set yet, uses the datacenter_path to
# lookup the datacenter.
# @return [VIM::Datacenter] The datacenter
def datacenter
  # Memoized: the lookup only runs on the first call.
  return @datacenter if @datacenter

  found = @root_folder.traverse(@datacenter_path, VIM::Datacenter)
  fail "datacenter #{@datacenter_path} not found" unless found
  @datacenter = found
end
|
111
|
+
|
112
|
+
# Returns the candidate datastores. If not set yet, uses the datastore_paths
# to lookup the datastores under the datacenter.
# As a side effect, also looks up properties about all the datastores
# @return [Array] List of VIM::Datastore
def datastores
  @datastores ||= @datastore_paths.map do |path|
    found = datacenter.datastoreFolder.traverse(path, VIM::Datastore)
    fail "datastore #{path} not found" unless found
    found
  end
  # Cache summary/name properties for later admission-control decisions.
  @datastore_props ||= @pc.collectMultiple(@datastores, 'summary', 'name')
  @datastores
end
|
131
|
+
|
132
|
+
# Returns the candidate computers (aka clusters). If not set yet, uses the
# computer_names to look them up.
# @return [Array] List of [VIM::ClusterComputeResource, Hash] tuples, where
#         the Hash is a list of stats about the computer
def computers
  @computers ||= @computer_names.map do |name|
    cluster = datacenter.find_compute_resource(name)
    [cluster, cluster.stats]
  end
end
|
145
|
+
|
146
|
+
# Returns the candidate pods. If not set, automatically computes the pods
# based on the list of computers (aka clusters) and datastores.
# @return [Array] List of pods, where a pod is a list of VIM::ClusterComputeResource
def pods
  if !@pods
    # A pod is defined as a set of clusters (aka computers) that share the same
    # datastore accessibility. Computing pods is done automatically using simple
    # set theory math.
    computersProps = @pc.collectMultiple(computers.map{|x| x[0]}, 'datastore')
    # Step 1: compute each cluster's set of *candidate* datastores (its
    # visible datastores intersected with the configured ones), dedupe ...
    @pods = computers.map do |computer, stats|
      computersProps[computer]['datastore'] & self.datastores
    end.uniq.map do |ds_list|
      # ... Step 2: group together the clusters whose candidate set matches
      # that exact datastore list. Each group is one pod.
      computers.map{|x| x[0]}.select do |computer|
        (computer.datastore & self.datastores) == ds_list
      end
    end
  end
  @pods
end
|
165
|
+
|
166
|
+
# Returns all VMs residing within a pod. Doesn't account for templates. Does so
# very efficiently using a single API query.
# @param pod [Array] List of clusters making up the pod
# @return [Array] Pairs of [VIM::VirtualMachine, props Hash]
def pod_vms pod
  # This function retrieves all VMs residing inside a pod
  filterSpec = VIM.PropertyFilterSpec(
    # Start at each cluster's root resource pool and walk its RP tree.
    objectSet: pod.map do |computer, stats|
      {
        obj: computer.resourcePool,
        selectSet: [
          # Recurse through nested resource pools ...
          VIM.TraversalSpec(
            name: 'tsFolder',
            type: 'ResourcePool',
            path: 'resourcePool',
            skip: false,
            selectSet: [
              VIM.SelectionSpec(name: 'tsFolder'),
              VIM.SelectionSpec(name: 'tsVM'),
            ]
          ),
          # ... and collect the VMs attached to each resource pool.
          VIM.TraversalSpec(
            name: 'tsVM',
            type: 'ResourcePool',
            path: 'vm',
            skip: false,
            selectSet: [],
          )
        ]
      }
    end,
    propSet: [
      { type: 'ResourcePool', pathSet: ['name'] },
      { type: 'VirtualMachine', pathSet: %w(runtime.powerState) }
    ]
  )

  result = @vim.propertyCollector.RetrieveProperties(specSet: [filterSpec])

  # The query returns both ResourcePools and VMs; keep only the VMs.
  out = result.map { |x| [x.obj, Hash[x.propSet.map { |y| [y.name, y.val] }]] }
  out.select{|obj, props| obj.is_a?(VIM::VirtualMachine)}
end
|
207
|
+
|
208
|
+
# Returns all candidate datastores for a given pod.
# @return [Array] List of VIM::Datastore
def pod_datastores pod
  # Every cluster in a pod shares the same datastore visibility, so the
  # first cluster is representative for the whole pod.
  visible = pod.first.datastore
  visible & self.datastores
end
|
213
|
+
|
214
|
+
# Returns the list of pods that pass admission control. If not set yet, performs
# admission control to compute the list. If no pods passed the admission
# control, an exception is thrown.
# Checks applied per pod: powered-on VM count limit, minimum free datastore
# space, and host availability (see class comment).
# @return [Array] List of pods, where a pod is a list of VIM::ClusterComputeResource
# @raise [RuntimeError] "Admission denied" when every pod fails a check
def filtered_pods
  # This function applies admission control and returns those pods that have
  # passed admission control. An exception is thrown if access was denied to
  # all pods.
  if !@filtered_pods
    log "Performing admission control:"
    @filtered_pods = self.pods.select do |pod|
      # Gather some statistics about the pod ...
      on_vms = pod_vms(pod).select{|k,v| v['runtime.powerState'] == 'poweredOn'}
      num_pod_vms = on_vms.length
      pod_datastores = self.pod_datastores(pod)
      log "Pod: #{pod.map{|x| x.name}.join(', ')}"
      log " #{num_pod_vms} VMs"
      # Cache the free-space percentage alongside the collected props so the
      # checks below (and #datastore) can reuse it.
      pod_datastores.each do |ds|
        ds_sum = @datastore_props[ds]['summary']
        @datastore_props[ds]['free_percent'] = ds_sum.freeSpace.to_f * 100 / ds_sum.capacity
      end
      pod_datastores.each do |ds|
        ds_props = @datastore_props[ds]
        ds_name = ds_props['name']
        free = ds_props['free_percent']
        free_gb = ds_props['summary'].freeSpace.to_f / 1024**3
        free_str = "%.2f GB (%.2f%%)" % [free_gb, free]
        log " Datastore #{ds_name}: #{free_str} free"
      end

      # Admission check: VM limit
      # NOTE: later checks may overwrite err; only the last failure reason is
      # logged, but any single failure denies the pod.
      denied = false
      max_vms = @max_vms_per_pod
      if max_vms && max_vms > 0
        if num_pod_vms > max_vms
          err = "VM limit (#{max_vms}) exceeded on this Pod"
          denied = true
        end
      end

      # Admission check: Free space on datastores
      min_ds_free = @min_ds_free
      if min_ds_free && min_ds_free > 0
        # We need at least one datastore with enough free space
        low_list = pod_datastores.select do |ds|
          @datastore_props[ds]['free_percent'] <= min_ds_free
        end

        if low_list.length == pod_datastores.length
          dsNames = low_list.map{|ds| @datastore_props[ds]['name']}.join(", ")
          err = "Datastores #{dsNames} below minimum free disk space (#{min_ds_free}%)"
          denied = true
        end
      end

      # Admission check: Hosts are available
      if !denied
        hosts_available = pod.any? do |computer|
          stats = Hash[self.computers][computer]
          stats[:totalCPU] > 0 && stats[:totalMem] > 0
        end
        if !hosts_available
          # Fixed message grammar: was "current available".
          err = "No hosts are currently available in this pod"
          denied = true
        end
      end

      if denied
        log " Admission DENIED: #{err}"
      else
        log " Admission granted"
      end

      !denied
    end
  end
  if @filtered_pods.length == 0
    log "Couldn't find any Pod with enough resources."
    if @service_docs_url
      log "Check #{@service_docs_url} to see which other Pods you may be able to use"
    end
    fail "Admission denied"
  end
  @filtered_pods
end
|
299
|
+
|
300
|
+
# Returns the computer (aka cluster) to be used for placement. If not set yet,
# computes the least loaded cluster (using a metric that combines CPU and Memory
# load) that passes admission control. The result is memoized.
# @param placementhint [Integer, nil] When given, deterministically selects the
#        (hint mod eligible-count)-th eligible cluster instead of the least
#        loaded one — lets callers spread placements.
# @return [VIM::ClusterComputeResource] Chosen computer (aka cluster)
# @raise [RuntimeError] When no eligible cluster remains (should be prevented
#        by admission control)
def pick_computer placementhint = nil
  if !@computer
    # Out of the pods to which we have been granted access, pick the cluster
    # (aka computer) with the lowest CPU/Mem utilization for load balancing
    available = self.filtered_pods.flatten
    eligible = self.computers.select do |computer, stats|
      # Fixed: the original mixed `and` with `&&`; same boolean result here,
      # but `&&` avoids the precedence trap.
      available.member?(computer) && stats[:totalCPU] > 0 && stats[:totalMem] > 0
    end
    computer = nil
    if placementhint
      if eligible.length > 0
        computer = eligible.map{|x| x[0]}[placementhint % eligible.length]
      end
    else
      # Exponential weighting on CPU makes CPU pressure dominate the metric.
      computer, = eligible.min_by do |computer, stats|
        2**(stats[:usedCPU].to_f/stats[:totalCPU]) + (stats[:usedMem].to_f/stats[:totalMem])
      end
    end

    if !computer
      fail "No clusters available, should have been prevented by admission control"
    end
    @computer = computer
  end
  @computer
end
|
330
|
+
|
331
|
+
# Returns the datastore to be used for placement. If not set yet, picks a
# datastore without much intelligence, as long as it passes admission control.
# The result is memoized.
# @param placementHint [Integer, nil] When > 0, deterministically selects the
#        (hint mod eligible-count)-th eligible datastore; otherwise the first.
# @return [VIM::Datastore] Chosen datastore
# @raise [RuntimeError] When no datastore passes the free-space check
def datastore placementHint = nil
  return @datastore if @datastore

  # Candidate datastores: those visible from the chosen cluster AND configured.
  pod_datastores = pick_computer.datastore & datastores

  # Hoisted out of the select loop — the threshold is constant per call.
  min_ds_free = @min_ds_free
  check_free_space = min_ds_free && min_ds_free > 0
  eligible = pod_datastores.select do |ds|
    if check_free_space
      ds_sum = @datastore_props[ds]['summary']
      free_percent = ds_sum.freeSpace.to_f * 100 / ds_sum.capacity
      free_percent > min_ds_free
    else
      true
    end
  end

  if eligible.length == 0
    fail "Couldn't find any eligible datastore. Admission control should have prevented this"
  end

  if placementHint && placementHint > 0
    @datastore = eligible[placementHint % eligible.length]
  else
    @datastore = eligible.first
  end
  @datastore
end
|
363
|
+
|
364
|
+
# Runs the placement algorithm and populates all the various properties as
# a side effect. Run this first, before using the other functions of this
# class.
# Side effects: sets @computer, @rp, @datastore and @placement_hint, and logs
# the decision plus cluster/user statistics.
# @param opts [Hash] :placementHint forwarded to pick_computer;
#        :placement_hint (note the different spelling) seeds datastore choice,
#        defaulting to a random value in 1..100
def make_placement_decision opts = {}
  # Run admission control, then pick the least-loaded (or hinted) cluster.
  self.filtered_pods
  self.pick_computer opts[:placementHint]
  log "Selected compute resource: #{@computer.name}"

  @rp = @computer.resourcePool.traverse(@rp_path)
  if !@rp
    fail "Resource pool #{@rp_path} not found"
  end
  log "Resource pool: #{@rp.pretty_path}"

  stats = @computer.stats
  if stats[:totalMem] > 0 && stats[:totalCPU] > 0
    cpu_load = "#{(100*stats[:usedCPU])/stats[:totalCPU]}% cpu"
    mem_load = "#{(100*stats[:usedMem])/stats[:totalMem]}% mem"
    log "Cluster utilization: #{cpu_load}, #{mem_load}"
  end

  # Purely informational: report how many VMs the user already has and how
  # much unshared storage they consume.
  user_vms = vm_folder.inventory_flat('VirtualMachine' => %w(name storage)).select do |k, v|
    k.is_a?(RbVmomi::VIM::VirtualMachine)
  end
  numVms = user_vms.length
  unshared = user_vms.map do |vm, info|
    info['storage'].perDatastoreUsage.map{|x| x.unshared}.inject(0, &:+)
  end.inject(0, &:+)
  log "User stats: #{numVms} VMs using %.2fGB of storage" % [unshared.to_f / 1024**3]

  # Random default hint spreads datastore choices across callers.
  @placement_hint = opts[:placement_hint] || (rand(100) + 1)
  datastore = self.datastore @placement_hint
  log "Datastore: #{datastore.name}"
end
|
398
|
+
end
|
@@ -0,0 +1,336 @@
|
|
1
|
+
require 'open-uri'
|
2
|
+
require 'nokogiri'
|
3
|
+
require 'rbvmomi'
|
4
|
+
|
5
|
+
# The cached ovf deployer is an optimization on top of regular OVF deployment
|
6
|
+
# as it is offered by the VIM::OVFManager. Creating a VM becomes a multi-stage
|
7
|
+
# process: First the OVF is uploaded and instead of directly using it, it is
|
8
|
+
# prepared for linked cloning and marked as a template. It can then be cloned
|
9
|
+
# many times over, without the cost of repeated OVF deploys (network and storage
|
10
|
+
# IO) and the cost of storing the same base VM several times (storage space).
|
11
|
+
# Multiple concurrent users can try to follow this process and collisions are
|
12
|
+
# automatically detected and de-duplicated. One thread will win to create the
|
13
|
+
# OVF template, while the other will wait for the winning thread to finish the
|
14
|
+
# task. So even fully independent, distributed and unsynchronized clients using
|
15
|
+
# this call with be auto-synchronized just by talking to the same vCenter
|
16
|
+
# instance and using the same naming scheme for the templates.
|
17
|
+
#
|
18
|
+
# The caching concept above can be extended to multiple levels. Lets assume
|
19
|
+
# many VMs will share the same base OS, but are running different builds of the
|
20
|
+
# application running inside the VM. If it is expected that again many (but not
|
21
|
+
# all) VMs will share the same build of the application, a tree structure of
|
22
|
+
# templates becomes useful. At the root of the tree is the template with just
|
23
|
+
# the base OS. It is uploaded from an OVF if needed. Then, this base OS image
|
24
|
+
# is cloned, a particular build is installed and the resulting VM is again marked
|
25
|
+
# as a template. Users can then instantiate that particular build with very
|
26
|
+
# little extra overhead. This class supports such multi level templates via the
|
27
|
+
# :is_template parameter of linked_clone().
|
28
|
+
class CachedOvfDeployer
|
29
|
+
# Constructor. Gets the VIM connection and important VIM objects
# @param vim [VIM] VIM Connection
# @param network [VIM::Network] Network to attach templates and VMs to
# @param computer [VIM::ComputeResource] Host/Cluster to deploy templates/VMs to
# @param template_folder [VIM::Folder] Folder in which all templates are kept
# @param vm_folder [VIM::Folder] Folder into which to deploy VMs
# @param datastore [VIM::Datastore] Datastore to store template/VM in
# @param opts [Hash] Additional parameters (:logger — an object responding to
#        #info; when absent, log output goes to stdout)
def initialize vim, network, computer, template_folder, vm_folder, datastore, opts = {}
  @vim = vim
  @network = network
  @computer = computer
  # All deploys/clones go into the cluster's root resource pool.
  @rp = @computer.resourcePool
  @template_folder = template_folder
  @vmfolder = vm_folder
  @datastore = datastore
  @logger = opts[:logger]
end
|
47
|
+
|
48
|
+
# Emits one log line via the configured logger, or to stdout with a
# timestamp prefix when no logger was injected.
# @param x [String] Message to log
def log x
  return @logger.info(x) if @logger
  puts "#{Time.now}: #{x}"
end
|
55
|
+
|
56
|
+
# Internal helper method that executes the passed in block while disabling
# the handling of SIGINT and SIGTERM signals. Restores their handlers after
# the block is executed — including when the block raises, which the
# original implementation did not (it leaked the IGNORE handlers).
# @param enabled [Boolean] If false, this function is a no-op
# @return the value of the yielded block
def _run_without_interruptions enabled
  return yield unless enabled

  int_handler = Signal.trap("SIGINT", 'IGNORE')
  term_handler = Signal.trap("SIGTERM", 'IGNORE')
  begin
    yield
  ensure
    # Restore the previous handlers on every exit path.
    Signal.trap("SIGINT", int_handler)
    Signal.trap("SIGTERM", term_handler)
  end
end
|
73
|
+
|
74
|
+
# Uploads an OVF, prepares the resulting VM for linked cloning and then marks
# it as a template. If another thread happens to race to do the same task,
# the losing thread will not do the actual work, but instead wait for the
# winning thread to do the work by looking up the template VM and waiting for
# it to be marked as a template. This way, the cost of uploading and keeping
# the full size of the VM is only paid once.
# @param ovf_url [String] URL to the OVF to be deployed. Currently only http
#                         and https are supported
# @param template_name [String] Name of the template to be used. Should be the
#                               same name for the same URL. A cluster specific
#                               post-fix will automatically be added.
# @option opts [int] :run_without_interruptions Whether or not to disable
#                                               SIGINT and SIGTERM during
#                                               the OVF upload.
# @option opts [Hash] :config VM Config delta to apply after the OVF deploy is
#                             done. Allows the template to be customized, e.g.
#                             to set annotations.
# @option opts [Boolean] :simple_vm_name When true, the template name is used
#                                        as-is (no cluster suffix).
# @return [VIM::VirtualMachine] The template as a VIM::VirtualMachine instance
def upload_ovf_as_template ovf_url, template_name, opts = {}
  # Optimization: If there happens to be a fully prepared template, then
  # there is no need to do the complicated OVF upload dance.
  # Also takes care of adding the right suffix if not called with opts[:simple_vm_name]
  if opts[:simple_vm_name]
    template = lookup_template template_name
  else
    template = lookup_template template_name + "-#{@computer.name}"
  end

  if template
    return template
  end

  # The OVFManager expects us to know the names of the networks mentioned
  # in the OVF file so we can map them to VIM::Network objects. For
  # simplicity this function assumes we need to read the OVF file
  # ourselves to know the names, and we map all of them to the same
  # VIM::Network.

  # If we're handling a file:// URI we need to strip the scheme as open-uri
  # can't handle them.
  if URI(ovf_url).scheme == "file" && URI(ovf_url).host.nil?
    ovf_url = URI(ovf_url).path
  end

  # NOTE(review): Kernel#open on URL strings relies on open-uri and was
  # removed in Ruby 3.0 (URI.open is the replacement for remote URLs, while
  # local paths need File.open) — confirm the targeted Ruby version.
  ovf = open(ovf_url, 'r'){|io| Nokogiri::XML(io.read)}
  ovf.remove_namespaces!
  networks = ovf.xpath('//NetworkSection/Network').map{|x| x['name']}
  network_mappings = Hash[networks.map{|x| [x, @network]}]

  network_mappings_str = network_mappings.map{|k, v| "#{k} = #{v.name}"}
  log "networks: #{network_mappings_str.join(', ')}"

  pc = @vim.serviceContent.propertyCollector

  # OVFs need to be uploaded to a specific host. DRS won't just pick one
  # for us, so we need to pick one wisely. The host needs to be connected,
  # not be in maintenance mode and must have the destination datastore
  # accessible.
  hosts = @computer.host
  hosts_props = pc.collectMultiple(
    hosts,
    'datastore', 'runtime.connectionState',
    'runtime.inMaintenanceMode', 'name'
  )
  # shuffle spreads uploads across eligible hosts.
  host = hosts.shuffle.find do |x|
    host_props = hosts_props[x]
    is_connected = host_props['runtime.connectionState'] == 'connected'
    is_ds_accessible = host_props['datastore'].member?(@datastore)
    is_connected && is_ds_accessible && !host_props['runtime.inMaintenanceMode']
  end
  if !host
    fail "No host in the cluster available to upload OVF to"
  end

  log "Uploading OVF to #{hosts_props[host]['name']}..."
  property_mappings = {}

  # To work around the VMFS 8-host limit (existed until ESX 5.0), as
  # well as just for organization purposes, we create one template per
  # cluster. This also provides us with additional isolation.
  # This setting can be overriden by passing opts[:simple_vm_name].
  if opts[:simple_vm_name]
    vm_name = template_name
  else
    vm_name = template_name + "-#{@computer.name}"
  end

  vm = nil
  wait_for_template = false
  # If the user sets opts[:run_without_interruptions], we will block
  # signals from the user (SIGINT, SIGTERM) in order to not be interrupted.
  # This is desirable, as other threads depend on this thread finishing
  # its prepare job and thus interrupting it has impacts beyond this
  # single thread or process.
  _run_without_interruptions(opts[:run_without_interruptions]) do
    begin
      vm = @vim.serviceContent.ovfManager.deployOVF(
        uri: ovf_url,
        vmName: vm_name,
        vmFolder: @template_folder,
        host: host,
        resourcePool: @rp,
        datastore: @datastore,
        networkMappings: network_mappings,
        propertyMappings: property_mappings)
    rescue RbVmomi::Fault => fault
      # If two threads execute this script at the same time to upload
      # the same template under the same name, one will win and the other
      # will be rejected by VC. We catch those cases here, and handle
      # them by waiting for the winning thread to finish preparing the
      # template, see below ...
      is_duplicate = fault.fault.is_a?(RbVmomi::VIM::DuplicateName)
      is_duplicate ||= (fault.fault.is_a?(RbVmomi::VIM::InvalidState) &&
                        !fault.fault.is_a?(RbVmomi::VIM::InvalidHostState))
      if is_duplicate
        wait_for_template = true
      else
        raise fault
      end
    end

    # The winning thread succeeded in uploading the OVF. Now we need to
    # prepare it for (linked) cloning and mark it as a template to signal
    # we are done.
    if !wait_for_template
      config = opts[:config] || {}
      # Add a delta disk layer so clones can link against the base disks.
      config = vm.update_spec_add_delta_disk_layer_on_all_disks(config)
      # XXX: Should we add a version that does retries?
      vm.ReconfigVM_Task(:spec => config).wait_for_completion
      vm.MarkAsTemplate
    end
  end

  # The losing thread now needs to wait for the winning thread to finish
  # uploading and preparing the template
  if wait_for_template
    log "Template already exists, waiting for it to be ready"
    vm = _wait_for_template_ready @template_folder, vm_name
    log "Template fully prepared and ready to be cloned"
  end

  vm
end
|
217
|
+
|
218
|
+
# Looks up a template by name in the configured template_path. Should be used
# before uploading the VM via upload_ovf_as_template, although that is
# not strictly required, but a lot more efficient.
# @param template_name [String] Name of the template to be used. A cluster
#                               specific post-fix will automatically be added.
# @return [VIM::VirtualMachine] The template as a VIM::VirtualMachine instance
#                               or nil
def lookup_template template_name
  # This code used to be template_path = "#{template_name}-#{@computer.name}"
  # changed this as it should be reflected in the calling code and not here.
  template_path = "#{template_name}"

  candidate = @template_folder.traverse(template_path, RbVmomi::VIM::VirtualMachine)
  return nil unless candidate

  # Only return the VM if it has already been marked as a template;
  # otherwise another thread is still preparing it.
  cfg = candidate.config
  cfg && cfg.template ? candidate : nil
end
|
241
|
+
|
242
|
+
# Creates a linked clone of a template prepared with upload_ovf_as_template.
# The function waits for completion on the clone task. Optionally, in case
# two level templates are being used, this function can wait for another
# thread to finish creating the second level template. See class comments
# for the concept of multi level templates.
# @param template_vm [VIM::VirtualMachine] Template to clone from
# @param vm_name [String] Name of the new VM that is being created via cloning.
# @param config [Hash] VM Config delta to apply after the VM is cloned.
#                      Allows the template to be customized, e.g. to adjust
#                      CPU or Memory sizes or set annotations.
# @option opts [int] :is_template If true, the clone is assumed to be a template
#                                 again and collision and de-duping logic kicks
#                                 in.
# @option opts [Boolean] :simple_vm_name If true, the template name will not
#                                        include the cluster name suffix.
# @return [VIM::VirtualMachine] The VIM::VirtualMachine instance of the clone
def linked_clone template_vm, vm_name, config, opts = {}
  spec = {
    location: {
      pool: @rp,
      datastore: @datastore,
      # Link against the child-most disk layer of the template.
      diskMoveType: :moveChildMostDiskBacking,
    },
    powerOn: false,
    template: false,
    config: config,
  }
  if opts[:is_template]
    wait_for_template = false

    if opts[:simple_vm_name]
      template_name = "#{vm_name}"
    else
      template_name = "#{vm_name}-#{@computer.name}"
    end
    begin
      vm = template_vm.CloneVM_Task(
        folder: @template_folder,
        name: template_name,
        spec: spec
      ).wait_for_completion
    rescue RbVmomi::Fault => fault
      # Another thread won the race to create this template; wait for it.
      if fault.fault.is_a?(RbVmomi::VIM::DuplicateName)
        wait_for_template = true
      else
        raise
      end
    end

    if wait_for_template
      # Fixed: route through #log (which honors @logger) instead of raw
      # puts, consistent with the rest of this class.
      log "Template already exists, waiting for it to be ready"
      vm = _wait_for_template_ready @template_folder, template_name
      log "Template ready"
    end
  else
    vm = template_vm.CloneVM_Task(
      folder: @vmfolder,
      name: vm_name,
      spec: spec
    ).wait_for_completion
  end
  vm
end
|
306
|
+
|
307
|
+
# Internal helper method that waits for a template to be fully created. It
# polls until it finds the VM in the inventory, and once it is there, waits
# for it to be fully created and marked as a template. This function will
# block for forever if the template never gets created or marked as a
# template.
# @param vm_folder [VIM::Folder] Folder in which we expect the template to show up
# @param vm_name [String] Name of the VM we are waiting for
# @return [VIM::VirtualMachine] The VM we were waiting for when it is ready
def _wait_for_template_ready vm_folder, vm_name
  template = nil
  until template
    sleep 3
    # XXX: Optimize this
    template = vm_folder.children.find { |child| child.name == vm_name }
  end
  log "Template VM found"
  sleep 2
  loop do
    runtime, is_template = template.collect 'runtime', 'config.template'
    placed_and_off = runtime && runtime.host && runtime.powerState == "poweredOff"
    break if placed_and_off && is_template
    sleep 5
  end

  template
end
|
336
|
+
end
|