rbvmomi2 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +19 -0
- data/README.md +114 -0
- data/exe/rbvmomish +138 -0
- data/lib/rbvmomi/basic_types.rb +383 -0
- data/lib/rbvmomi/connection.rb +272 -0
- data/lib/rbvmomi/deserialization.rb +249 -0
- data/lib/rbvmomi/fault.rb +19 -0
- data/lib/rbvmomi/optimist.rb +72 -0
- data/lib/rbvmomi/pbm.rb +68 -0
- data/lib/rbvmomi/sms/SmsStorageManager.rb +10 -0
- data/lib/rbvmomi/sms.rb +63 -0
- data/lib/rbvmomi/sso.rb +313 -0
- data/lib/rbvmomi/trivial_soap.rb +122 -0
- data/lib/rbvmomi/type_loader.rb +138 -0
- data/lib/rbvmomi/utils/admission_control.rb +401 -0
- data/lib/rbvmomi/utils/deploy.rb +318 -0
- data/lib/rbvmomi/utils/leases.rb +145 -0
- data/lib/rbvmomi/utils/perfdump.rb +631 -0
- data/lib/rbvmomi/version.rb +6 -0
- data/lib/rbvmomi/vim/ComputeResource.rb +54 -0
- data/lib/rbvmomi/vim/Datacenter.rb +25 -0
- data/lib/rbvmomi/vim/Datastore.rb +72 -0
- data/lib/rbvmomi/vim/DynamicTypeMgrAllTypeInfo.rb +78 -0
- data/lib/rbvmomi/vim/DynamicTypeMgrDataTypeInfo.rb +23 -0
- data/lib/rbvmomi/vim/DynamicTypeMgrManagedTypeInfo.rb +54 -0
- data/lib/rbvmomi/vim/Folder.rb +214 -0
- data/lib/rbvmomi/vim/HostSystem.rb +177 -0
- data/lib/rbvmomi/vim/ManagedEntity.rb +60 -0
- data/lib/rbvmomi/vim/ManagedObject.rb +63 -0
- data/lib/rbvmomi/vim/ObjectContent.rb +26 -0
- data/lib/rbvmomi/vim/ObjectUpdate.rb +26 -0
- data/lib/rbvmomi/vim/OvfManager.rb +204 -0
- data/lib/rbvmomi/vim/PerfCounterInfo.rb +28 -0
- data/lib/rbvmomi/vim/PerformanceManager.rb +113 -0
- data/lib/rbvmomi/vim/PropertyCollector.rb +28 -0
- data/lib/rbvmomi/vim/ReflectManagedMethodExecuter.rb +33 -0
- data/lib/rbvmomi/vim/ResourcePool.rb +58 -0
- data/lib/rbvmomi/vim/ServiceInstance.rb +58 -0
- data/lib/rbvmomi/vim/Task.rb +68 -0
- data/lib/rbvmomi/vim/VirtualMachine.rb +75 -0
- data/lib/rbvmomi/vim.rb +157 -0
- data/lib/rbvmomi.rb +16 -0
- data/lib/rbvmomi2.rb +3 -0
- data/vmodl.db +0 -0
- metadata +214 -0
@@ -0,0 +1,401 @@
|
|
1
|
+
# Copyright (c) 2012-2017 VMware, Inc. All Rights Reserved.
|
2
|
+
# SPDX-License-Identifier: MIT
|
3
|
+
|
4
|
+
|
5
|
+
# An admission controlled resource scheduler for large scale vSphere deployments
|
6
|
+
#
|
7
|
+
# While DRS (Dynamic Resource Scheduler) in vSphere handles CPU and Memory
|
8
|
+
# allocations within a single vSphere cluster, larger deployments require
|
9
|
+
# another layer of scheduling to make the use of multiple clusters transparent.
|
10
|
+
# So this class doesn't replace DRS, but in fact works on top of it.
|
11
|
+
#
|
12
|
+
# The scheduler in this class performs admission control to make sure clusters
|
13
|
+
# don't get overloaded. It does so by adding additional metrics to the already
|
14
|
+
# existing CPU and Memory reservation system that DRS has. After admission
|
15
|
+
# control it also performs very basic initial placement. Note that in-cluster
|
16
|
+
# placement and load-balancing is left to DRS. Also note that no cross-cluster
|
17
|
+
# load balancing is done.
|
18
|
+
#
|
19
|
+
# This class uses the concept of a Pod: A set of clusters that share a set of
|
20
|
+
# datastores. From a datastore perspective, we are free to place a VM on any
|
21
|
+
# host or cluster. So admission control is done at the Pod level first. Pods
|
22
|
+
# are automatically discovered based on lists of clusters and datastores.
|
23
|
+
#
|
24
|
+
# Admission control covers the following metrics:
|
25
|
+
# - Host availability: If no hosts are available within a cluster or pod,
|
26
|
+
# admission is denied.
|
27
|
+
# - Minimum free space: If a datastore falls below this free space percentage,
|
28
|
+
# admission to it will be denied. Admission to a pod is granted as long as at
|
29
|
+
# least one datastore passes admission control.
|
30
|
+
# - Maximum number of VMs: If a Pod exceeds a configured number of powered on
|
31
|
+
# VMs, admission is denied. This is a crude but effective catch-all metric
|
32
|
+
# in case users didn't set proper individual CPU or Memory reservations or
|
33
|
+
# if the scalability limit doesn't originate from CPU or Memory.
|
34
|
+
#
|
35
|
+
# Placement after admission control:
|
36
|
+
# - Cluster selection: A load metric based on a combination of CPU and Memory
|
37
|
+
# load is used to always select the "least loaded" cluster. The metric is very
|
38
|
+
# crude and only meant to do very rough load balancing. If DRS clusters are
|
39
|
+
# large enough, this is good enough in most cases though.
|
40
|
+
# - Datastore selection: Right now NO intelligence is implemented here.
|
41
|
+
#
|
42
|
+
# Usage:
|
43
|
+
# Instantiate the class, call make_placement_decision and then use the exposed
|
44
|
+
# computer (cluster), resource pool, vm_folder and datastore. Currently once
|
45
|
+
# computed, a new updated placement can't be generated.
|
46
|
+
class AdmissionControlledResourceScheduler
  # The resource pool chosen by make_placement_decision (nil until then).
  attr_reader :rp

  # Creates a scheduler bound to a vSphere connection.
  #
  # @param vim [RbVmomi::VIM] Live vSphere connection.
  # @param opts [Hash] Configuration. Objects (:datacenter, :vm_folder,
  #   :computers, :datastores) may be passed directly, or lazily looked up
  #   via their *_path / *_names counterparts. :max_vms_per_pod and
  #   :min_ds_free (a percentage) drive admission control; :logger receives
  #   log lines via #info; :service_docs_url is referenced in denial output.
  def initialize vim, opts = {}
    @vim = vim

    @datacenter = opts[:datacenter]
    @datacenter_path = opts[:datacenter_path]
    @vm_folder = opts[:vm_folder]
    @vm_folder_path = opts[:vm_folder_path]
    @rp_path = opts[:rp_path]
    @computers = opts[:computers]
    @computer_names = opts[:computer_names]
    @datastores = opts[:datastores]
    @datastore_paths = opts[:datastore_paths]

    @max_vms_per_pod = opts[:max_vms_per_pod]
    @min_ds_free = opts[:min_ds_free]
    @service_docs_url = opts[:service_docs_url]

    @pc = @vim.serviceContent.propertyCollector
    @root_folder = @vim.serviceContent.rootFolder

    @logger = opts[:logger]
  end

  # Logs a message via the configured logger, or to stdout with a timestamp
  # when no logger was supplied.
  def log x
    if @logger
      @logger.info x
    else
      puts "#{Time.now}: #{x}"
    end
  end

  # Returns the used VM folder. If not set yet, uses the vm_folder_path to
  # lookup the folder. If it doesn't exist, it is created. Collisions between
  # multiple clients concurrently creating the same folder are handled by
  # retrying once on a DuplicateName fault.
  # @return [RbVmomi::VIM::Folder] The VM folder
  def vm_folder
    retries = 1
    begin
      @vm_folder ||= datacenter.vmFolder.traverse!(@vm_folder_path, RbVmomi::VIM::Folder)
      if !@vm_folder
        fail "VM folder #{@vm_folder_path} not found"
      end
    rescue RbVmomi::Fault => fault
      # Another client won the race creating the folder; retry the lookup.
      if !fault.fault.is_a?(RbVmomi::VIM::DuplicateName)
        raise
      else
        retries -= 1
        retry if retries >= 0
      end
    end
    @vm_folder
  end

  # Returns the used Datacenter. If not set yet, uses the datacenter_path to
  # lookup the datacenter.
  # @return [RbVmomi::VIM::Datacenter] The datacenter
  def datacenter
    if !@datacenter
      @datacenter = @root_folder.traverse(@datacenter_path, RbVmomi::VIM::Datacenter)
      if !@datacenter
        fail "datacenter #{@datacenter_path} not found"
      end
    end
    @datacenter
  end

  # Returns the candidate datastores. If not set yet, uses the datastore_paths
  # to lookup the datastores under the datacenter.
  # As a side effect, also collects 'summary' and 'name' properties for all
  # the datastores into @datastore_props.
  # @return [Array] List of RbVmomi::VIM::Datastore
  def datastores
    if !@datastores
      @datastores = @datastore_paths.map do |path|
        ds = datacenter.datastoreFolder.traverse(path, RbVmomi::VIM::Datastore)
        if !ds
          fail "datastore #{path} not found"
        end
        ds
      end
    end
    if !@datastore_props
      @datastore_props = @pc.collectMultiple(@datastores, 'summary', 'name')
    end
    @datastores
  end

  # Returns the candidate computers (aka clusters). If not set yet, uses the
  # computer_names to look them up.
  # @return [Array] List of [RbVmomi::VIM::ClusterComputeResource, Hash] tuples, where
  #   the Hash is a list of stats about the computer
  def computers
    if !@computers
      @computers = @computer_names.map do |name|
        computer = datacenter.find_compute_resource(name)
        [computer, computer.stats]
      end
    end
    @computers
  end

  # Returns the candidate pods. If not set, automatically computes the pods
  # based on the list of computers (aka clusters) and datastores.
  # @return [Array] List of pods, where a pod is a list of RbVmomi::VIM::ClusterComputeResource
  def pods
    if !@pods
      # A pod is defined as a set of clusters (aka computers) that share the same
      # datastore accessibility. Computing pods is done automatically using simple
      # set theory math: group clusters by the (deduplicated) set of candidate
      # datastores they can reach.
      computersProps = @pc.collectMultiple(computers.map{|x| x[0]}, 'datastore')
      @pods = computers.map do |computer, stats|
        computersProps[computer]['datastore'] & self.datastores
      end.uniq.map do |ds_list|
        computers.map{|x| x[0]}.select do |computer|
          (computer.datastore & self.datastores) == ds_list
        end
      end
    end
    @pods
  end

  # Returns all VMs residing with a pod. Doesn't account for templates. Does so
  # very efficiently using a single API query.
  # @return [Array] List of [RbVmomi::VIM::VirtualMachine, Hash] pairs, where the
  #   Hash maps collected property names to values.
  def pod_vms pod
    # Traverse each cluster's root resource pool recursively (tsFolder) and
    # collect the VMs hanging off every resource pool (tsVM).
    filterSpec = RbVmomi::VIM.PropertyFilterSpec(
      objectSet: pod.map do |computer, stats|
        {
          obj: computer.resourcePool,
          selectSet: [
            RbVmomi::VIM.TraversalSpec(
              name: 'tsFolder',
              type: 'ResourcePool',
              path: 'resourcePool',
              skip: false,
              selectSet: [
                RbVmomi::VIM.SelectionSpec(name: 'tsFolder'),
                RbVmomi::VIM.SelectionSpec(name: 'tsVM'),
              ]
            ),
            RbVmomi::VIM.TraversalSpec(
              name: 'tsVM',
              type: 'ResourcePool',
              path: 'vm',
              skip: false,
              selectSet: [],
            )
          ]
        }
      end,
      propSet: [
        { type: 'ResourcePool', pathSet: ['name'] },
        { type: 'VirtualMachine', pathSet: %w(runtime.powerState) }
      ]
    )

    result = @vim.propertyCollector.RetrieveProperties(specSet: [filterSpec])

    # The query also returns the traversed ResourcePools; keep only the VMs.
    out = result.map { |x| [x.obj, Hash[x.propSet.map { |y| [y.name, y.val] }]] }
    out.select{|obj, props| obj.is_a?(RbVmomi::VIM::VirtualMachine)}
  end

  # Returns all candidate datastores for a given pod.
  # @return [Array] List of RbVmomi::VIM::Datastore
  def pod_datastores pod
    pod.first.datastore & self.datastores
  end

  # Returns the list of pods that pass admission control. If not set yet, performs
  # admission control to compute the list. If no pods passed the admission
  # control, an exception is thrown.
  # @return [Array] List of pods, where a pod is a list of RbVmomi::VIM::ClusterComputeResource
  def filtered_pods
    # This function applies admission control and returns those pods that have
    # passed admission control. An exception is thrown if access was denied to
    # all pods.
    if !@filtered_pods
      log "Performing admission control:"
      @filtered_pods = self.pods.select do |pod|
        # Gather some statistics about the pod ...
        on_vms = pod_vms(pod).select{|k,v| v['runtime.powerState'] == 'poweredOn'}
        num_pod_vms = on_vms.length
        pod_datastores = self.pod_datastores(pod)
        log "Pod: #{pod.map{|x| x.name}.join(', ')}"
        log " #{num_pod_vms} VMs"
        pod_datastores.each do |ds|
          ds_sum = @datastore_props[ds]['summary']
          @datastore_props[ds]['free_percent'] = ds_sum.freeSpace.to_f * 100 / ds_sum.capacity
        end
        pod_datastores.each do |ds|
          ds_props = @datastore_props[ds]
          ds_name = ds_props['name']
          free = ds_props['free_percent']
          free_gb = ds_props['summary'].freeSpace.to_f / 1024**3
          free_str = "%.2f GB (%.2f%%)" % [free_gb, free]
          log " Datastore #{ds_name}: #{free_str} free"
        end

        # Admission check: VM limit
        denied = false
        max_vms = @max_vms_per_pod
        if max_vms && max_vms > 0
          if num_pod_vms > max_vms
            err = "VM limit (#{max_vms}) exceeded on this Pod"
            denied = true
          end
        end

        # Admission check: Free space on datastores. We need at least one
        # datastore with enough free space.
        min_ds_free = @min_ds_free
        if min_ds_free && min_ds_free > 0
          low_list = pod_datastores.select do |ds|
            @datastore_props[ds]['free_percent'] <= min_ds_free
          end

          if low_list.length == pod_datastores.length
            dsNames = low_list.map{|ds| @datastore_props[ds]['name']}.join(", ")
            err = "Datastores #{dsNames} below minimum free disk space (#{min_ds_free}%)"
            denied = true
          end
        end

        # Admission check: Hosts are available
        if !denied
          hosts_available = pod.any? do |computer|
            stats = Hash[self.computers][computer]
            stats[:totalCPU] > 0 && stats[:totalMem] > 0
          end
          if !hosts_available
            # Fixed grammar in the user-facing message ("current" -> "currently").
            err = "No hosts are currently available in this pod"
            denied = true
          end
        end

        if denied
          log " Admission DENIED: #{err}"
        else
          log " Admission granted"
        end

        !denied
      end
    end
    if @filtered_pods.length == 0
      log "Couldn't find any Pod with enough resources."
      if @service_docs_url
        log "Check #{@service_docs_url} to see which other Pods you may be able to use"
      end
      fail "Admission denied"
    end
    @filtered_pods
  end

  # Returns the computer (aka cluster) to be used for placement. If not set yet,
  # computes the least loaded cluster (using a metric that combines CPU and Memory
  # load) that passes admission control.
  # @param placementhint [Integer, nil] When given, deterministically indexes
  #   into the eligible clusters instead of picking the least loaded one.
  # @return [RbVmomi::VIM::ClusterComputeResource] Chosen computer (aka cluster)
  def pick_computer placementhint = nil
    if !@computer
      # Out of the pods to which we have been granted access, pick the cluster
      # (aka computer) with the lowest CPU/Mem utilization for load balancing
      available = self.filtered_pods.flatten
      # NOTE: the original mixed `&&` with low-precedence `and` here; use `&&`
      # consistently to avoid Ruby operator-precedence surprises.
      eligible = self.computers.select do |computer,stats|
        available.member?(computer) && stats[:totalCPU] > 0 && stats[:totalMem] > 0
      end
      computer = nil
      if placementhint
        if eligible.length > 0
          computer = eligible.map{|x| x[0]}[placementhint % eligible.length]
        end
      else
        # Exponentially weight CPU load so heavily CPU-loaded clusters are
        # penalized more than memory-loaded ones.
        computer, = eligible.min_by do |computer,stats|
          2**(stats[:usedCPU].to_f/stats[:totalCPU]) + (stats[:usedMem].to_f/stats[:totalMem])
        end
      end

      if !computer
        fail "No clusters available, should have been prevented by admission control"
      end
      @computer = computer
    end
    @computer
  end

  # Returns the datastore to be used for placement. If not set yet, picks a
  # datastore without much intelligence, as long as it passes admission control.
  # @param placementHint [Integer, nil] When given and positive, deterministically
  #   indexes into the eligible datastores instead of taking the first.
  # @return [RbVmomi::VIM::Datastore] Chosen datastore
  def datastore placementHint = nil
    if @datastore
      return @datastore
    end

    pod_datastores = pick_computer.datastore & datastores

    eligible = pod_datastores.select do |ds|
      min_ds_free = @min_ds_free
      if min_ds_free && min_ds_free > 0
        ds_sum = @datastore_props[ds]['summary']
        free_percent = ds_sum.freeSpace.to_f * 100 / ds_sum.capacity
        free_percent > min_ds_free
      else
        true
      end
    end

    if eligible.length == 0
      fail "Couldn't find any eligible datastore. Admission control should have prevented this"
    end

    if placementHint && placementHint > 0
      @datastore = eligible[placementHint % eligible.length]
    else
      @datastore = eligible.first
    end
    @datastore
  end

  # Runs the placement algorithm and populates all the various properties as
  # a side effect. Run this first, before using the other functions of this
  # class.
  # @param opts [Hash] :placementHint for cluster choice, :placement_hint for
  #   datastore choice (random 1..100 when omitted).
  def make_placement_decision opts = {}
    self.filtered_pods
    self.pick_computer opts[:placementHint]
    log "Selected compute resource: #{@computer.name}"

    @rp = @computer.resourcePool.traverse(@rp_path)
    if !@rp
      fail "Resource pool #{@rp_path} not found"
    end
    log "Resource pool: #{@rp.pretty_path}"

    stats = @computer.stats
    if stats[:totalMem] > 0 && stats[:totalCPU] > 0
      cpu_load = "#{(100*stats[:usedCPU])/stats[:totalCPU]}% cpu"
      mem_load = "#{(100*stats[:usedMem])/stats[:totalMem]}% mem"
      log "Cluster utilization: #{cpu_load}, #{mem_load}"
    end

    # Summarize the user's existing footprint (VM count and unshared storage).
    user_vms = vm_folder.inventory_flat('VirtualMachine' => %w(name storage)).select do |k, v|
      k.is_a?(RbVmomi::VIM::VirtualMachine)
    end
    numVms = user_vms.length
    unshared = user_vms.map do |vm, info|
      info['storage'].perDatastoreUsage.map{|x| x.unshared}.inject(0, &:+)
    end.inject(0, &:+)
    log "User stats: #{numVms} VMs using %.2fGB of storage" % [unshared.to_f / 1024**3]

    @placement_hint = opts[:placement_hint] || (rand(100) + 1)
    datastore = self.datastore @placement_hint
    log "Datastore: #{datastore.name}"
  end
end
|