rbvmomi2 3.0.0 → 3.0.1
- checksums.yaml +4 -4
- data/README.md +11 -25
- data/exe/rbvmomish +50 -48
- data/lib/rbvmomi/basic_types.rb +318 -294
- data/lib/rbvmomi/connection.rb +221 -216
- data/lib/rbvmomi/deserialization.rb +201 -205
- data/lib/rbvmomi/fault.rb +10 -9
- data/lib/rbvmomi/optimist.rb +51 -50
- data/lib/rbvmomi/pbm.rb +52 -50
- data/lib/rbvmomi/sms/SmsStorageManager.rb +2 -1
- data/lib/rbvmomi/sms.rb +48 -46
- data/lib/rbvmomi/sso.rb +13 -18
- data/lib/rbvmomi/trivial_soap.rb +9 -8
- data/lib/rbvmomi/type_loader.rb +100 -101
- data/lib/rbvmomi/utils/admission_control.rb +90 -106
- data/lib/rbvmomi/utils/deploy.rb +77 -85
- data/lib/rbvmomi/utils/leases.rb +31 -33
- data/lib/rbvmomi/utils/perfdump.rb +177 -207
- data/lib/rbvmomi/version.rb +2 -1
- data/lib/rbvmomi/vim/ComputeResource.rb +17 -15
- data/lib/rbvmomi/vim/Datacenter.rb +1 -0
- data/lib/rbvmomi/vim/Datastore.rb +18 -15
- data/lib/rbvmomi/vim/DynamicTypeMgrAllTypeInfo.rb +7 -6
- data/lib/rbvmomi/vim/DynamicTypeMgrDataTypeInfo.rb +3 -2
- data/lib/rbvmomi/vim/DynamicTypeMgrManagedTypeInfo.rb +7 -6
- data/lib/rbvmomi/vim/Folder.rb +37 -33
- data/lib/rbvmomi/vim/HostSystem.rb +139 -136
- data/lib/rbvmomi/vim/ManagedEntity.rb +15 -14
- data/lib/rbvmomi/vim/ManagedObject.rb +11 -10
- data/lib/rbvmomi/vim/ObjectContent.rb +3 -1
- data/lib/rbvmomi/vim/ObjectUpdate.rb +3 -1
- data/lib/rbvmomi/vim/OvfManager.rb +50 -57
- data/lib/rbvmomi/vim/PerfCounterInfo.rb +4 -3
- data/lib/rbvmomi/vim/PerformanceManager.rb +28 -31
- data/lib/rbvmomi/vim/PropertyCollector.rb +8 -7
- data/lib/rbvmomi/vim/ReflectManagedMethodExecuter.rb +22 -21
- data/lib/rbvmomi/vim/ResourcePool.rb +19 -18
- data/lib/rbvmomi/vim/ServiceInstance.rb +8 -7
- data/lib/rbvmomi/vim/Task.rb +6 -5
- data/lib/rbvmomi/vim/VirtualMachine.rb +8 -7
- data/lib/rbvmomi/vim.rb +112 -129
- data/lib/rbvmomi.rb +1 -0
- metadata +54 -10
--- 3.0.0/data/lib/rbvmomi/utils/admission_control.rb
+++ 3.0.1/data/lib/rbvmomi/utils/admission_control.rb
@@ -1,20 +1,21 @@
+# frozen_string_literal: true
 # Copyright (c) 2012-2017 VMware, Inc. All Rights Reserved.
 # SPDX-License-Identifier: MIT
 
 
 # An admission controlled resource scheduler for large scale vSphere deployments
 #
-# While DRS (Dynamic Resource Scheduler) in vSphere handles CPU and Memory
-# allocations within a single vSphere cluster, larger deployments require
+# While DRS (Dynamic Resource Scheduler) in vSphere handles CPU and Memory
+# allocations within a single vSphere cluster, larger deployments require
 # another layer of scheduling to make the use of multiple clusters transparent.
-# So this class doesn't replace DRS, but in fact works on top of it.
+# So this class doesn't replace DRS, but in fact works on top of it.
 #
 # The scheduler in this class performs admission control to make sure clusters
 # don't get overloaded. It does so by adding additional metrics to the already
-# existing CPU and Memory reservation system that DRS has. After admission
+# existing CPU and Memory reservation system that DRS has. After admission
 # control it also performs very basic initial placement. Note that in-cluster
 # placement and load-balancing is left to DRS. Also note that no cross-cluster
-# load balancing is done.
+# load balancing is done.
 #
 # This class uses the concept of a Pod: A set of clusters that share a set of
 # datastores. From a datastore perspective, we are free to place a VM on any
@@ -22,33 +23,33 @@
 # are automatically dicovered based on lists of clusters and datastores.
 #
 # Admission control covers the following metrics:
-# - Host availability: If no hosts are available within a cluster or pod,
+# - Host availability: If no hosts are available within a cluster or pod,
 #   admission is denied.
 # - Minimum free space: If a datastore falls below this free space percentage,
-#   admission to it will be denied. Admission to a pod is granted as long at
+#   admission to it will be denied. Admission to a pod is granted as long at
 #   least one datastore passes admission control.
 # - Maximum number of VMs: If a Pod exceeds a configured number of powered on
 #   VMs, admission is denied. This is a crude but effective catch-all metric
-#   in case users didn't set proper individual CPU or Memory reservations or
+#   in case users didn't set proper individual CPU or Memory reservations or
 #   if the scalability limit doesn't originate from CPU or Memory.
 #
 # Placement after admission control:
 # - Cluster selection: A load metric based on a combination of CPU and Memory
 #   load is used to always select the "least loaded" cluster. The metric is very
-#   crude and only meant to do very rough load balancing. If DRS clusters are
-#   large enough, this is good enough in most cases though.
-# - Datastore selection: Right now NO intelligence is implemented here.
+#   crude and only meant to do very rough load balancing. If DRS clusters are
+#   large enough, this is good enough in most cases though.
+# - Datastore selection: Right now NO intelligence is implemented here.
 #
 # Usage:
 # Instantiate the class, call make_placement_decision and then use the exposed
-# computer (cluster), resource pool, vm_folder and datastore. Currently once
+# computer (cluster), resource pool, vm_folder and datastore. Currently once
 # computed, a new updated placement can't be generated.
 class AdmissionControlledResourceScheduler
   attr_reader :rp
-
+
   def initialize vim, opts = {}
     @vim = vim
-
+
     @datacenter = opts[:datacenter]
     @datacenter_path = opts[:datacenter_path]
     @vm_folder = opts[:vm_folder]
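The options consumed by `initialize` above and in the following hunk map straight onto constructor keywords. A minimal construction sketch, assuming a reachable vCenter; the host, credentials, and inventory paths are placeholders, and a couple of keys (`vm_folder_path`, `rp_path`) come from initializer lines elided between hunks but are referenced later in the class:

```ruby
require 'rbvmomi'
require 'rbvmomi/utils/admission_control'

# Placeholder connection details.
vim = RbVmomi::VIM.connect host: 'vcenter.example.com',
                           user: 'administrator@vsphere.local',
                           password: 'secret',
                           insecure: true

scheduler = AdmissionControlledResourceScheduler.new(
  vim,
  datacenter_path: 'DC1',                      # looked up under the root folder
  vm_folder_path:  'my-vms',                   # created on demand if missing
  rp_path:         'my-pool',
  computer_names:  %w(Cluster1 Cluster2),      # candidate clusters
  datastore_paths: %w(shared-ds1 shared-ds2),  # candidate datastores
  max_vms_per_pod: 500,                        # admission limit on powered-on VMs
  min_ds_free:     10                          # minimum datastore free space, in percent
)
```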
@@ -58,17 +59,17 @@ class AdmissionControlledResourceScheduler
     @computer_names = opts[:computer_names]
     @datastores = opts[:datastores]
     @datastore_paths = opts[:datastore_paths]
-
+
     @max_vms_per_pod = opts[:max_vms_per_pod]
     @min_ds_free = opts[:min_ds_free]
     @service_docs_url = opts[:service_docs_url]
-
+
     @pc = @vim.serviceContent.propertyCollector
     @root_folder = @vim.serviceContent.rootFolder
-
+
     @logger = opts[:logger]
   end
-
+
   def log x
     if @logger
       @logger.info x
@@ -77,63 +78,56 @@ class AdmissionControlledResourceScheduler
     end
   end
 
-  # Returns the used VM folder. If not set yet, uses the vm_folder_path to
+  # Returns the used VM folder. If not set yet, uses the vm_folder_path to
   # lookup the folder. If it doesn't exist, it is created. Collisions between
   # multiple clients concurrently creating the same folder are handled.
   # @return [RbVmomi::VIM::Folder] The VM folder
-  def vm_folder
+  def vm_folder
     retries = 1
     begin
       @vm_folder ||= datacenter.vmFolder.traverse!(@vm_folder_path, RbVmomi::VIM::Folder)
-      if !@vm_folder
-        fail "VM folder #{@vm_folder_path} not found"
-      end
+      raise "VM folder #{@vm_folder_path} not found" if !@vm_folder
     rescue RbVmomi::Fault => fault
       if !fault.fault.is_a?(RbVmomi::VIM::DuplicateName)
         raise
       else
         retries -= 1
-        retry if retries >= 0
-      end
+        retry if retries >= 0
+      end
     end
-    @vm_folder
+    @vm_folder
   end
 
-  # Returns the used Datacenter. If not set yet, uses the datacenter_path to
-  # lookup the datacenter.
+  # Returns the used Datacenter. If not set yet, uses the datacenter_path to
+  # lookup the datacenter.
   # @return [RbVmomi::VIM::Datacenter] The datacenter
   def datacenter
     if !@datacenter
-      @datacenter = @root_folder.traverse(@datacenter_path, RbVmomi::VIM::Datacenter)
-      if !@datacenter
-        fail "datacenter #{@datacenter_path} not found"
-      end
+      @datacenter = @root_folder.traverse(@datacenter_path, RbVmomi::VIM::Datacenter)
+      raise "datacenter #{@datacenter_path} not found" if !@datacenter
     end
     @datacenter
   end
 
-  # Returns the candidate datastores. If not set yet, uses the datastore_paths
+  # Returns the candidate datastores. If not set yet, uses the datastore_paths
   # to lookup the datastores under the datacenter.
-  # As a side effect, also looks up properties about all the datastores
+  # As a side effect, also looks up properties about all the datastores
   # @return [Array] List of RbVmomi::VIM::Datastore
   def datastores
     if !@datastores
       @datastores = @datastore_paths.map do |path|
         ds = datacenter.datastoreFolder.traverse(path, RbVmomi::VIM::Datastore)
-        if !ds
-          fail "datastore #{path} not found"
-        end
+        raise "datastore #{path} not found" if !ds
+
         ds
       end
     end
-    if !@datastore_props
-      @datastore_props = @pc.collectMultiple(@datastores, 'summary', 'name')
-    end
+    @datastore_props = @pc.collectMultiple(@datastores, 'summary', 'name') if !@datastore_props
     @datastores
   end
 
-  # Returns the candidate computers (aka clusters). If not set yet, uses the
-  # computer_names to look them up.
+  # Returns the candidate computers (aka clusters). If not set yet, uses the
+  # computer_names to look them up.
   # @return [Array] List of [RbVmomi::VIM::ClusterComputeResource, Hash] tuples, where
   #   the Hash is a list of stats about the computer
   def computers
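The `vm_folder` hunk above keeps the create-or-reuse behaviour: `traverse!` creates missing folders, and a race with another client creating the same folder surfaces as a `DuplicateName` fault that is handled by retrying the lookup once. A standalone sketch of that pattern; the helper name and the final re-raise are illustrative, not part of the gem:

```ruby
# Hypothetical helper showing the retry-on-DuplicateName pattern from vm_folder.
def find_or_create_folder parent_folder, path
  retries = 1
  begin
    folder = parent_folder.traverse!(path, RbVmomi::VIM::Folder)
    raise "folder #{path} not found" if !folder

    folder
  rescue RbVmomi::Fault => fault
    # Only the concurrent-creation race is retried; other faults propagate.
    raise unless fault.fault.is_a?(RbVmomi::VIM::DuplicateName)

    retries -= 1
    retry if retries >= 0
    raise
  end
end
```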
@@ -146,26 +140,26 @@ class AdmissionControlledResourceScheduler
     @computers
   end
 
-  # Returns the candidate pods. If not set, automatically computes the pods
-  # based on the list of computers (aka clusters) and datastores.
+  # Returns the candidate pods. If not set, automatically computes the pods
+  # based on the list of computers (aka clusters) and datastores.
   # @return [Array] List of pods, where a pod is a list of RbVmomi::VIM::ClusterComputeResource
   def pods
     if !@pods
       # A pod is defined as a set of clusters (aka computers) that share the same
       # datastore accessibility. Computing pods is done automatically using simple
       # set theory math.
-      computersProps = @pc.collectMultiple(computers.map{|x| x[0]}, 'datastore')
+      computersProps = @pc.collectMultiple(computers.map{ |x| x[0] }, 'datastore')
       @pods = computers.map do |computer, stats|
         computersProps[computer]['datastore'] & self.datastores
       end.uniq.map do |ds_list|
-        computers.map{|x| x[0]}.select do |computer|
+        computers.map{ |x| x[0] }.select do |computer|
           (computer.datastore & self.datastores) == ds_list
         end
       end
     end
-    @pods
+    @pods
   end
-
+
   # Returns all VMs residing with a pod. Doesn't account for templates. Does so
   # very efficiently using a single API query.
   # @return [Hash] Hash of VMs as keys and their properties as values.
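The pod computation above groups clusters by the exact set of candidate datastores they can reach. A toy illustration of the same set arithmetic, with plain strings standing in for managed objects:

```ruby
# Made-up inventory: which datastores each cluster can see.
cluster_datastores = {
  'clusterA' => %w(ds1 ds2),
  'clusterB' => %w(ds1 ds2),
  'clusterC' => %w(ds3)
}
candidate_datastores = %w(ds1 ds2 ds3)

pods = cluster_datastores.values.map { |ds| ds & candidate_datastores }.uniq.map do |ds_list|
  cluster_datastores.keys.select do |cluster|
    (cluster_datastores[cluster] & candidate_datastores) == ds_list
  end
end
# => [["clusterA", "clusterB"], ["clusterC"]]
```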
@@ -201,35 +195,35 @@ class AdmissionControlledResourceScheduler
         { type: 'VirtualMachine', pathSet: %w(runtime.powerState) }
       ]
     )
-
+
     result = @vim.propertyCollector.RetrieveProperties(specSet: [filterSpec])
-
+
     out = result.map { |x| [x.obj, Hash[x.propSet.map { |y| [y.name, y.val] }]] }
-    out.select{|obj, props| obj.is_a?(RbVmomi::VIM::VirtualMachine)}
+    out.select{ |obj, props| obj.is_a?(RbVmomi::VIM::VirtualMachine) }
   end
-
+
   # Returns all candidate datastores for a given pod.
   # @return [Array] List of RbVmomi::VIM::Datastore
   def pod_datastores pod
     pod.first.datastore & self.datastores
   end
-
+
   # Returns the list of pods that pass admission control. If not set yet, performs
-  # admission control to compute the list. If no pods passed the admission
+  # admission control to compute the list. If no pods passed the admission
   # control, an exception is thrown.
   # @return [Array] List of pods, where a pod is a list of RbVmomi::VIM::ClusterComputeResource
   def filtered_pods
     # This function applies admission control and returns those pods that have
-    # passed admission control. An exception is thrown if access was denied to
+    # passed admission control. An exception is thrown if access was denied to
     # all pods.
     if !@filtered_pods
-      log "Performing admission control:"
+      log 'Performing admission control:'
       @filtered_pods = self.pods.select do |pod|
         # Gather some statistics about the pod ...
-        on_vms = pod_vms(pod).select{|k,v| v['runtime.powerState'] == 'poweredOn'}
+        on_vms = pod_vms(pod).select{ |k, v| v['runtime.powerState'] == 'poweredOn' }
         num_pod_vms = on_vms.length
         pod_datastores = self.pod_datastores(pod)
-        log "Pod: #{pod.map{|x| x.name}.join(', ')}"
+        log "Pod: #{pod.map{ |x| x.name }.join(', ')}"
         log "  #{num_pod_vms} VMs"
         pod_datastores.each do |ds|
           ds_sum = @datastore_props[ds]['summary']
@@ -240,10 +234,10 @@ class AdmissionControlledResourceScheduler
           ds_name = ds_props['name']
           free = ds_props['free_percent']
           free_gb = ds_props['summary'].freeSpace.to_f / 1024**3
-          free_str = "%.2f GB (%.2f%%)" % [free_gb, free]
+          free_str = '%.2f GB (%.2f%%)' % [free_gb, free]
           log "  Datastore #{ds_name}: #{free_str} free"
         end
-
+
         # Admission check: VM limit
         denied = false
         max_vms = @max_vms_per_pod
@@ -253,7 +247,7 @@ class AdmissionControlledResourceScheduler
             denied = true
           end
         end
-
+
         # Admission check: Free space on datastores
         min_ds_free = @min_ds_free
         if min_ds_free && min_ds_free > 0
@@ -261,14 +255,14 @@ class AdmissionControlledResourceScheduler
           low_list = pod_datastores.select do |ds|
             @datastore_props[ds]['free_percent'] <= min_ds_free
           end
-
+
           if low_list.length == pod_datastores.length
-            dsNames = low_list.map{|ds| @datastore_props[ds]['name']}.join(", ")
+            dsNames = low_list.map{ |ds| @datastore_props[ds]['name'] }.join(', ')
             err = "Datastores #{dsNames} below minimum free disk space (#{min_ds_free}%)"
             denied = true
           end
         end
-
+
         # Admission check: Hosts are available
         if !denied
           hosts_available = pod.any? do |computer|
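The free-space check above denies a pod only when every candidate datastore is at or below `min_ds_free` percent free; the percentage itself is computed earlier in `filtered_pods` as `freeSpace * 100 / capacity`. A worked example with made-up numbers:

```ruby
min_ds_free = 10.0
datastore_stats = {
  'ds1' => { capacity: 2000 * 1024**3, freeSpace: 150 * 1024**3 },  # 7.5% free
  'ds2' => { capacity: 1000 * 1024**3, freeSpace: 300 * 1024**3 }   # 30.0% free
}

low_list = datastore_stats.select do |_name, s|
  s[:freeSpace].to_f * 100 / s[:capacity] <= min_ds_free
end
denied = (low_list.length == datastore_stats.length)
# denied == false: ds2 still has more than 10% free, so the pod is admitted.
```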
@@ -276,26 +270,24 @@ class AdmissionControlledResourceScheduler
             stats[:totalCPU] > 0 && stats[:totalMem] > 0
           end
           if !hosts_available
-            err = "No hosts are current available in this pod"
+            err = 'No hosts are current available in this pod'
             denied = true
           end
         end
-
-        if denied
+
+        if denied
           log "  Admission DENIED: #{err}"
         else
-          log "  Admission granted"
+          log '  Admission granted'
         end
-
+
         !denied
       end
     end
     if @filtered_pods.length == 0
       log "Couldn't find any Pod with enough resources."
-      if @service_docs_url
-        log "Check #{@service_docs_url} to see which other Pods you may be able to use"
-      end
-      fail "Admission denied"
+      log "Check #{@service_docs_url} to see which other Pods you may be able to use" if @service_docs_url
+      raise 'Admission denied'
     end
     @filtered_pods
   end
@@ -309,23 +301,20 @@ class AdmissionControlledResourceScheduler
       # Out of the pods to which we have been granted access, pick the cluster
       # (aka computer) with the lowest CPU/Mem utilization for load balancing
       available = self.filtered_pods.flatten
-      eligible = self.computers.select do |computer,stats|
+      eligible = self.computers.select do |computer, stats|
        available.member?(computer) && stats[:totalCPU] > 0 and stats[:totalMem] > 0
       end
       computer = nil
       if placementhint
-        if eligible.length > 0
-          computer = eligible.map{|x| x[0]}[placementhint % eligible.length]
-        end
+        computer = eligible.map{ |x| x[0] }[placementhint % eligible.length] if eligible.length > 0
       else
-        computer, = eligible.min_by do |computer,stats|
+        computer, = eligible.min_by do |computer, stats|
           2**(stats[:usedCPU].to_f/stats[:totalCPU]) + (stats[:usedMem].to_f/stats[:totalMem])
         end
       end
-
-      if !computer
-        fail "No clusters available, should have been prevented by admission control"
-      end
+
+      raise 'No clusters available, should have been prevented by admission control' if !computer
+
       @computer = computer
     end
     @computer
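The "least loaded" metric in `pick_computer` above is `2**(usedCPU/totalCPU) + usedMem/totalMem`, so CPU pressure counts progressively more as a cluster approaches saturation. A toy run with made-up stats:

```ruby
cluster_stats = {
  'clusterA' => { usedCPU: 80, totalCPU: 100, usedMem: 60, totalMem: 100 },
  'clusterB' => { usedCPU: 40, totalCPU: 100, usedMem: 70, totalMem: 100 }
}

least_loaded, = cluster_stats.min_by do |_name, stats|
  2**(stats[:usedCPU].to_f / stats[:totalCPU]) + (stats[:usedMem].to_f / stats[:totalMem])
end
# clusterA scores 2**0.8 + 0.6 ≈ 2.34, clusterB scores 2**0.4 + 0.7 ≈ 2.02,
# so least_loaded == 'clusterB'.
```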
@@ -335,65 +324,60 @@ class AdmissionControlledResourceScheduler
   # datastore without much intelligence, as long as it passes admission control.
   # @return [RbVmomi::VIM::Datastore] Chosen datastore
   def datastore placementHint = nil
-    if @datastore
-      return @datastore
-    end
-
+    return @datastore if @datastore
+
     pod_datastores = pick_computer.datastore & datastores
-
+
     eligible = pod_datastores.select do |ds|
       min_ds_free = @min_ds_free
       if min_ds_free && min_ds_free > 0
         ds_sum = @datastore_props[ds]['summary']
-        free_percent = ds_sum.freeSpace.to_f * 100 / ds_sum.capacity
+        free_percent = ds_sum.freeSpace.to_f * 100 / ds_sum.capacity
         free_percent > min_ds_free
       else
         true
       end
     end
-
-    if eligible.length == 0
-      fail "Couldn't find any eligible datastore. Admission control should have prevented this"
-    end
-
+
+    raise "Couldn't find any eligible datastore. Admission control should have prevented this" if eligible.length == 0
+
     if placementHint && placementHint > 0
       @datastore = eligible[placementHint % eligible.length]
     else
       @datastore = eligible.first
-    end
+    end
     @datastore
   end
-
-  # Runs the placement algorithm and populates all the various properties as
+
+  # Runs the placement algorithm and populates all the various properties as
   # a side effect. Run this first, before using the other functions of this
   # class.
   def make_placement_decision opts = {}
     self.filtered_pods
     self.pick_computer opts[:placementHint]
     log "Selected compute resource: #{@computer.name}"
-
+
     @rp = @computer.resourcePool.traverse(@rp_path)
-    if !@rp
-      fail "Resource pool #{@rp_path} not found"
-    end
+    raise "Resource pool #{@rp_path} not found" if !@rp
+
     log "Resource pool: #{@rp.pretty_path}"
-
+
     stats = @computer.stats
     if stats[:totalMem] > 0 && stats[:totalCPU] > 0
       cpu_load = "#{(100*stats[:usedCPU])/stats[:totalCPU]}% cpu"
       mem_load = "#{(100*stats[:usedMem])/stats[:totalMem]}% mem"
       log "Cluster utilization: #{cpu_load}, #{mem_load}"
     end
-
-    user_vms = vm_folder.inventory_flat('VirtualMachine' => %w(name storage)).select do |k, v|
+
+    user_vms = vm_folder.inventory_flat('VirtualMachine' => %w(name storage)).select do |k, v|
       k.is_a?(RbVmomi::VIM::VirtualMachine)
     end
     numVms = user_vms.length
-    unshared = user_vms.map do |vm, info|
-      info['storage'].perDatastoreUsage.map{|x| x.unshared}.inject(0, &:+)
+    unshared = user_vms.map do |vm, info|
+      info['storage'].perDatastoreUsage.map{ |x| x.unshared }.inject(0, &:+)
     end.inject(0, &:+)
     log "User stats: #{numVms} VMs using %.2fGB of storage" % [unshared.to_f / 1024**3]
-
+
     @placement_hint = opts[:placement_hint] || (rand(100) + 1)
     datastore = self.datastore @placement_hint
     log "Datastore: #{datastore.name}"
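Per the usage notes at the top of the file, a placement is driven by `make_placement_decision` and then read back through the exposed accessors. A short sketch continuing the construction example above (the hint value is arbitrary):

```ruby
scheduler.make_placement_decision placement_hint: 42

cluster   = scheduler.pick_computer  # memoized ClusterComputeResource
datastore = scheduler.datastore      # memoized Datastore choice
pool      = scheduler.rp             # resource pool resolved under rp_path
folder    = scheduler.vm_folder      # VM folder, created on demand if missing
```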