rbvmomi 1.5.1 → 1.6.0
- data/Rakefile +2 -2
- data/VERSION +1 -1
- data/examples/cached_ovf_deploy.rb +120 -0
- data/examples/lease_tool.rb +102 -0
- data/lib/rbvmomi/connection.rb +2 -2
- data/lib/rbvmomi/utils/admission_control.rb +386 -0
- data/lib/rbvmomi/utils/deploy.rb +295 -0
- data/lib/rbvmomi/utils/leases.rb +142 -0
- data/lib/rbvmomi/vim/PerfCounterInfo.rb +26 -0
- data/lib/rbvmomi/vim/PerformanceManager.rb +85 -0
- data/lib/rbvmomi/vim/ReflectManagedMethodExecuter.rb +5 -1
- data/lib/rbvmomi/vim/Task.rb +34 -0
- data/lib/rbvmomi/vim/VirtualMachine.rb +46 -0
- data/test/test_serialization.rb +16 -0
- data/vmodl.db +0 -0
- metadata +68 -73
data/Rakefile
CHANGED
@@ -9,8 +9,8 @@ begin
     gem.summary = "Ruby interface to the VMware vSphere API"
     #gem.description = ""
     gem.email = "rlane@vmware.com"
-    gem.homepage = "https://github.com/
-    gem.authors = ["Rich Lane"]
+    gem.homepage = "https://github.com/vmware/rbvmomi"
+    gem.authors = ["Rich Lane", "Christian Dickmann"]
     gem.add_dependency 'nokogiri', '>= 1.4.1'
     gem.add_dependency 'builder'
     gem.add_dependency 'trollop'
data/VERSION
CHANGED
@@ -1 +1 @@
-1.
+1.6.0
data/examples/cached_ovf_deploy.rb
ADDED
@@ -0,0 +1,120 @@
+#!/usr/bin/env ruby
+require 'trollop'
+require 'rbvmomi'
+require 'rbvmomi/trollop'
+require 'rbvmomi/utils/deploy'
+require 'rbvmomi/utils/admission_control'
+require 'yaml'
+
+VIM = RbVmomi::VIM
+
+opts = Trollop.options do
+  banner <<-EOS
+Deploy an OVF to a cluster, using a cached template if available.
+
+Usage:
+cached_ovf_deploy.rb [options] <vmname> <ovfurl>
+
+VIM connection options:
+EOS
+
+  rbvmomi_connection_opts
+
+  text <<-EOS
+
+VM location options:
+EOS
+
+  rbvmomi_datacenter_opt
+  rbvmomi_datastore_opt
+
+  text <<-EOS
+
+Other options:
+EOS
+
+  opt :template_name, "Name to give to the (cached) template", :type => :string
+  opt :template_path, "Path where templates are stored", :default => 'templates', :type => :string
+  opt :computer_path, "Path to the cluster to deploy into", :type => :string
+  opt :network, "Name of the network to attach template to", :type => :string
+  opt :vm_folder_path, "Path to VM folder to deploy VM into", :type => :string
+  opt :lease, "Lease in days", :type => :int, :default => 3
+end
+
+Trollop.die("must specify host") unless opts[:host]
+Trollop.die("no cluster path given") unless opts[:computer_path]
+template_folder_path = opts[:template_path]
+template_name = opts[:template_name] or Trollop.die("no template name given")
+vm_name = ARGV[0] or Trollop.die("no VM name given")
+ovf_url = ARGV[1] or Trollop.die("No OVF URL given")
+
+vim = VIM.connect opts
+dc = vim.serviceInstance.find_datacenter(opts[:datacenter]) or abort "datacenter not found"
+
+root_vm_folder = dc.vmFolder
+vm_folder = root_vm_folder
+if opts[:vm_folder_path]
+  vm_folder = root_vm_folder.traverse(opts[:vm_folder_path], VIM::Folder)
+end
+template_folder = root_vm_folder.traverse!(template_folder_path, VIM::Folder)
+
+scheduler = AdmissionControlledResourceScheduler.new(
+  vim,
+  :datacenter => dc,
+  :computer_names => [opts[:computer_path]],
+  :vm_folder => vm_folder,
+  :rp_path => '/',
+  :datastore_paths => [opts[:datastore]],
+  :max_vms_per_pod => nil, # No limits
+  :min_ds_free => nil, # No limits
+)
+scheduler.make_placement_decision
+
+datastore = scheduler.datastore
+computer = scheduler.pick_computer
+# XXX: Do this properly
+if opts[:network]
+  network = computer.network.find{|x| x.name == opts[:network]}
+else
+  network = computer.network[0]
+end
+
+lease_tool = LeaseTool.new
+lease = opts[:lease] * 24 * 60 * 60
+deployer = CachedOvfDeployer.new(
+  vim, network, computer, template_folder, vm_folder, datastore
+)
+template = deployer.lookup_template template_name
+
+if !template
+  puts "#{Time.now}: Uploading/Preparing OVF template ..."
+
+  template = deployer.upload_ovf_as_template(
+    ovf_url, template_name,
+    :run_without_interruptions => true,
+    :config => lease_tool.set_lease_in_vm_config({}, lease)
+  )
+end
+
+puts "#{Time.now}: Cloning template ..."
+config = {
+  :numCPUs => opts[:cpus],
+  :memoryMB => opts[:memory],
+}
+config = lease_tool.set_lease_in_vm_config(config, lease)
+vm = deployer.linked_clone template, vm_name, config
+
+puts "#{Time.now}: Powering On VM ..."
+# XXX: Add a retrying version?
+vm.PowerOnVM_Task.wait_for_completion
+
+puts "#{Time.now}: Waiting for VM to be up ..."
+ip = nil
+while !(ip = vm.guest_ip)
+  sleep 5
+end
+
+puts "#{Time.now}: VM got IP: #{ip}"
+
+puts "#{Time.now}: Done"
+
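Note: the wait loop at the end of the example polls vm.guest_ip forever. A minimal variant with an overall deadline, assuming only the guest_ip helper that this release adds in data/lib/rbvmomi/vim/VirtualMachine.rb; the 10-minute limit is an arbitrary choice, not something the gem prescribes:

    require 'timeout'

    # Assumes `vm` is a powered-on RbVmomi::VIM::VirtualMachine with VMware Tools,
    # e.g. the VM created by the example above.
    ip = nil
    begin
      Timeout.timeout(600) do            # arbitrary 10-minute deadline
        sleep 5 until (ip = vm.guest_ip)
      end
    rescue Timeout::Error
      abort "#{Time.now}: VM never reported an IP address"
    end
    puts "#{Time.now}: VM got IP: #{ip}"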
data/examples/lease_tool.rb
ADDED
@@ -0,0 +1,102 @@
+#!/usr/bin/env ruby
+require 'trollop'
+require 'rbvmomi'
+require 'rbvmomi/trollop'
+require 'rbvmomi/utils/leases'
+require 'yaml'
+
+VIM = RbVmomi::VIM
+CMDS = ['set_lease_on_leaseless_vms', 'show_expired_vms',
+        'show_soon_expired_vms', 'kill_expired_vms']
+
+opts = Trollop.options do
+  banner <<-EOS
+Tool for managing leases on VMs where leases are stored in YAML on VM annotations.
+
+Usage:
+lease_tool.rb [options] <cmd>
+
+Commands: #{CMDS * ' '}
+
+VIM connection options:
+EOS
+
+  rbvmomi_connection_opts
+
+  text <<-EOS
+
+VM location options:
+EOS
+
+  rbvmomi_datacenter_opt
+
+  text <<-EOS
+
+Other options:
+EOS
+
+  opt :vm_folder_path, "Path to VM folder to deploy VM into", :type => :string
+  opt :force, "Really perform VMs. Used with kill_expired_vms"
+
+  stop_on CMDS
+end
+
+Trollop.die("must specify host") unless opts[:host]
+cmd = ARGV[0] or Trollop.die("no command given")
+Trollop.die("no vm folder path given") unless opts[:vm_folder_path]
+
+vim = VIM.connect opts
+dc = vim.serviceInstance.find_datacenter(opts[:datacenter]) or abort "datacenter not found"
+
+root_vm_folder = dc.vmFolder
+vm_folder = root_vm_folder.traverse(opts[:vm_folder_path], VIM::Folder)
+
+lease_tool = LeaseTool.new
+vms_props_list = (['runtime.powerState'] + lease_tool.vms_props_list).uniq
+inventory = vm_folder.inventory_flat('VirtualMachine' => vms_props_list)
+inventory = inventory.select{|obj, props| obj.is_a?(VIM::VirtualMachine)}
+case cmd
+when 'set_lease_on_leaseless_vms'
+  lease_tool.set_lease_on_leaseless_vms(
+    inventory.keys, inventory,
+    :lease_minutes => 3 * 24 * 60 * 60 # 3 days
+  )
+when 'show_expired_vms'
+  vms = lease_tool.filter_expired_vms inventory.keys, inventory
+  vms.each do |vm, time_to_expiration|
+    puts "VM '#{inventory[vm]['name']}' is expired"
+  end
+when 'kill_expired_vms'
+  vms = lease_tool.filter_expired_vms inventory.keys, inventory
+  vms.each do |vm, time_to_expiration|
+    puts "VM '#{inventory[vm]['name']}' is expired"
+    if !opts[:force]
+      puts "NOT killing VM '#{inventory[vm]['name']}' because --force not set"
+    else
+      puts "Killing expired VM '#{inventory[vm]['name']}'"
+      # Destroying VMs is very stressful for vCenter, and we aren't in a rush
+      # so do one VM at a time
+      if inventory[vm]['runtime.powerState'] == 'poweredOn'
+        vm.PowerOffVM_Task.wait_for_completion
+      end
+      vm.Destroy_Task.wait_for_completion
+    end
+  end
+when 'show_soon_expired_vms'
+  vms = lease_tool.filter_expired_vms(
+    inventory.keys, inventory,
+    :time_delta => 3.5 * 24 * 60 * 60, # 3.5 days
+  )
+  # We could send the user emails here, but for this example, just print the
+  # VMs that will expire within the next 3.5 days
+  vms.each do |vm, time_to_expiration|
+    if time_to_expiration > 0
+      hours_to_expiration = time_to_expiration / (60.0 * 60.0)
+      puts "VM '%s' expires in %.2fh" % [inventory[vm]['name'], hours_to_expiration]
+    else
+      puts "VM '#{inventory[vm]['name']}' is expired"
+    end
+  end
+else
+  abort "invalid command"
+end
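Note: the banner above says leases are stored as YAML on VM annotations. The exact annotation layout is defined in data/lib/rbvmomi/utils/leases.rb, which is not shown in this section, so the 'lease' key below is an assumption used only to illustrate parsing a lease out of an annotation:

    require 'yaml'

    # Hypothetical annotation body; the real key name(s) come from LeaseTool in
    # data/lib/rbvmomi/utils/leases.rb.
    annotation = "lease: 2012-12-01 12:00:00 Z\n"

    config = YAML.load(annotation)
    time_to_expiration = config['lease'] - Time.now
    if time_to_expiration > 0
      puts "expires in %.2fh" % (time_to_expiration / 3600.0)
    else
      puts "expired"
    end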
data/lib/rbvmomi/connection.rb
CHANGED
@@ -132,10 +132,10 @@ class Connection < TrivialSoap
         end
       end
     when BasicTypes::ManagedObject
-      fail "expected #{expected.wsdl_name}, got #{o.class.wsdl_name} for field #{name.inspect}" if expected and not expected >= o.class
+      fail "expected #{expected.wsdl_name}, got #{o.class.wsdl_name} for field #{name.inspect}" if expected and not expected >= o.class and not expected == BasicTypes::AnyType
       xml.tag! name, o._ref, :type => o.class.wsdl_name
     when BasicTypes::DataObject
-      fail "expected #{expected.wsdl_name}, got #{o.class.wsdl_name} for field #{name.inspect}" if expected and not expected >= o.class
+      fail "expected #{expected.wsdl_name}, got #{o.class.wsdl_name} for field #{name.inspect}" if expected and not expected >= o.class and not expected == BasicTypes::AnyType
       xml.tag! name, attrs.merge("xsi:type" => o.class.wsdl_name) do
         o.class.full_props_desc.each do |desc|
           if o.props.member? desc['name'].to_sym
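The functional change in both hunks is the added "expected == BasicTypes::AnyType" clause: a concrete managed or data object can now be serialized into a field whose declared type is xsd:anyType instead of raising. A standalone sketch of the predicate with stand-in classes (illustration only, not rbvmomi's internal code path):

    # Stand-in classes; in rbvmomi the real ones are RbVmomi::BasicTypes::AnyType
    # and the generated managed object classes.
    class AnyType; end
    class ManagedEntity; end
    class VirtualMachine < ManagedEntity; end

    def accepted?(expected, actual)
      # Module#>= is nil for unrelated classes, so without the AnyType clause an
      # anyType-typed field would reject every concrete object passed to it.
      expected.nil? || expected >= actual || expected == AnyType
    end

    puts accepted?(ManagedEntity, VirtualMachine)  # true: subtype, accepted before and after
    puts accepted?(AnyType, VirtualMachine)        # true only with the new 1.6.0 check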
data/lib/rbvmomi/utils/admission_control.rb
ADDED
@@ -0,0 +1,386 @@
+
+# An admission controlled resource scheduler for large scale vSphere deployments
+#
+# While DRS (Dynamic Resource Scheduler) in vSphere handles CPU and Memory
+# allocations within a single vSphere cluster, larger deployments require
+# another layer of scheduling to make the use of multiple clusters transparent.
+# So this class doesn't replace DRS, but in fact works on top of it.
+#
+# The scheduler in this class performs admission control to make sure clusters
+# don't get overloaded. It does so by adding additional metrics to the already
+# existing CPU and Memory reservation system that DRS has. After admission
+# control it also performs very basic initial placement. Note that in-cluster
+# placement and load-balancing is left to DRS. Also note that no cross-cluster
+# load balancing is done.
+#
+# This class uses the concept of a Pod: A set of clusters that share a set of
+# datastores. From a datastore perspective, we are free to place a VM on any
+# host or cluster. So admission control is done at the Pod level first. Pods
+# are automatically dicovered based on lists of clusters and datastores.
+#
+# Admission control covers the following metrics:
+# - Host availability: If no hosts are available within a cluster or pod,
+#   admission is denied.
+# - Minimum free space: If a datastore falls below this free space percentage,
+#   admission to it will be denied. Admission to a pod is granted as long at
+#   least one datastore passes admission control.
+# - Maximum number of VMs: If a Pod exceeds a configured number of powered on
+#   VMs, admission is denied. This is a crude but effective catch-all metric
+#   in case users didn't set proper individual CPU or Memory reservations or
+#   if the scalability limit doesn't originate from CPU or Memory.
+#
+# Placement after admission control:
+# - Cluster selection: A load metric based on a combination of CPU and Memory
+#   load is used to always select the "least loaded" cluster. The metric is very
+#   crude and only meant to do very rough load balancing. If DRS clusters are
+#   large enough, this is good enough in most cases though.
+# - Datastore selection: Right now NO intelligence is implemented here.
+#
+# Usage:
+# Instantiate the class, call make_placement_decision and then use the exposed
+# computer (cluster), resource pool, vm_folder and datastore. Currently once
+# computed, a new updated placement can't be generated.
+class AdmissionControlledResourceScheduler
+  def initialize vim, opts = {}
+    @vim = vim
+
+    @datacenter = opts[:datacenter]
+    @datacenter_path = opts[:datacenter_path]
+    @vm_folder = opts[:vm_folder]
+    @vm_folder_path = opts[:vm_folder_path]
+    @rp_path = opts[:rp_path]
+    @computers = opts[:computers]
+    @computer_names = opts[:computer_names]
+    @datastores = opts[:datastores]
+    @datastore_paths = opts[:datastore_paths]
+
+    @max_vms_per_pod = opts[:max_vms_per_pod]
+    @min_ds_free = opts[:min_ds_free]
+    @service_docs_url = opts[:service_docs_url]
+
+    @pc = @vim.serviceContent.propertyCollector
+    @root_folder = @vim.serviceContent.rootFolder
+  end
+
+  def log x
+    puts "#{Time.now}: #{x}"
+  end
+
+  # Returns the used VM folder. If not set yet, uses the vm_folder_path to
+  # lookup the folder. If it doesn't exist, it is created. Collisions between
+  # multiple clients concurrently creating the same folder are handled.
+  # @return [VIM::Folder] The VM folder
+  def vm_folder
+    retries = 1
+    begin
+      @vm_folder ||= datacenter.vmFolder.traverse!(@vm_folder_path, VIM::Folder)
+      if !@vm_folder
+        fail "VM folder #{@vm_folder_path} not found"
+      end
+    rescue RbVmomi::Fault => fault
+      if !fault.fault.is_a?(RbVmomi::VIM::DuplicateName)
+        raise
+      else
+        retries -= 1
+        retry if retries >= 0
+      end
+    end
+    @vm_folder
+  end
+
+  # Returns the used Datacenter. If not set yet, uses the datacenter_path to
+  # lookup the datacenter.
+  # @return [VIM::Datacenter] The datacenter
+  def datacenter
+    if !@datacenter
+      @datacenter = @root_folder.traverse(@datacenter_path, VIM::Datacenter)
+      if !@datacenter
+        fail "datacenter #{@datacenter_path} not found"
+      end
+    end
+    @datacenter
+  end
+
+  # Returns the candidate datastores. If not set yet, uses the datastore_paths
+  # to lookup the datastores under the datacenter.
+  # As a side effect, also looks up properties about all the datastores
+  # @return [Array] List of VIM::Datastore
+  def datastores
+    if !@datastores
+      @datastores = @datastore_paths.map do |path|
+        ds = datacenter.datastoreFolder.traverse(path, VIM::Datastore)
+        if !ds
+          fail "datastore #{path} not found"
+        end
+        ds
+      end
+    end
+    if !@datastore_props
+      @datastore_props = @pc.collectMultiple(@datastores, 'summary', 'name')
+    end
+    @datastores
+  end
+
+  # Returns the candidate computers (aka clusters). If not set yet, uses the
+  # computer_names to look them up.
+  # @return [Array] List of [VIM::ClusterComputeResource, Hash] tuples, where
+  #   the Hash is a list of stats about the computer
+  def computers
+    if !@computers
+      @computers = @computer_names.map do |name|
+        computer = datacenter.find_compute_resource(name)
+        [computer, computer.stats]
+      end
+    end
+    @computers
+  end
+
+  # Returns the candidate pods. If not set, automatically computes the pods
+  # based on the list of computers (aka clusters) and datastores.
+  # @return [Array] List of pods, where a pod is a list of VIM::ClusterComputeResource
+  def pods
+    if !@pods
+      # A pod is defined as a set of clusters (aka computers) that share the same
+      # datastore accessibility. Computing pods is done automatically using simple
+      # set theory math.
+      computersProps = @pc.collectMultiple(computers.map{|x| x[0]}, 'datastore')
+      @pods = computers.map do |computer, stats|
+        computersProps[computer]['datastore'] & self.datastores
+      end.uniq.map do |ds_list|
+        computers.map{|x| x[0]}.select do |computer|
+          (computer.datastore & self.datastores) == ds_list
+        end
+      end
+    end
+    @pods
+  end
+
+  # Returns all VMs residing with a pod. Doesn't account for templates. Does so
+  # very efficiently using a single API query.
+  # @return [Hash] Hash of VMs as keys and their properties as values.
+  def pod_vms pod
+    # This function retrieves all VMs residing inside a pod
+    filterSpec = VIM.PropertyFilterSpec(
+      objectSet: pod.map do |computer, stats|
+        {
+          obj: computer.resourcePool,
+          selectSet: [
+            VIM.TraversalSpec(
+              name: 'tsFolder',
+              type: 'ResourcePool',
+              path: 'resourcePool',
+              skip: false,
+              selectSet: [
+                VIM.SelectionSpec(name: 'tsFolder'),
+                VIM.SelectionSpec(name: 'tsVM'),
+              ]
+            ),
+            VIM.TraversalSpec(
+              name: 'tsVM',
+              type: 'ResourcePool',
+              path: 'vm',
+              skip: false,
+              selectSet: [],
+            )
+          ]
+        }
+      end,
+      propSet: [
+        { type: 'ResourcePool', pathSet: ['name'] },
+        { type: 'VirtualMachine', pathSet: %w(runtime.powerState) }
+      ]
+    )
+
+    result = @vim.propertyCollector.RetrieveProperties(specSet: [filterSpec])
+
+    out = result.map { |x| [x.obj, Hash[x.propSet.map { |y| [y.name, y.val] }]] }
+    out.select{|obj, props| obj.is_a?(VIM::VirtualMachine)}
+  end
+
+  # Returns all candidate datastores for a given pod.
+  # @return [Array] List of VIM::Datastore
+  def pod_datastores pod
+    pod.first.datastore & self.datastores
+  end
+
+  # Returns the list of pods that pass admission control. If not set yet, performs
+  # admission control to compute the list. If no pods passed the admission
+  # control, an exception is thrown.
+  # @return [Array] List of pods, where a pod is a list of VIM::ClusterComputeResource
+  def filtered_pods
+    # This function applies admission control and returns those pods that have
+    # passed admission control. An exception is thrown if access was denied to
+    # all pods.
+    if !@filtered_pods
+      log "Performing admission control:"
+      @filtered_pods = self.pods.select do |pod|
+        # Gather some statistics about the pod ...
+        on_vms = pod_vms(pod).select{|k,v| v['runtime.powerState'] == 'poweredOn'}
+        num_pod_vms = on_vms.length
+        pod_datastores = self.pod_datastores(pod)
+        log "Pod: #{pod.map{|x| x.name}.join(', ')}"
+        log " #{num_pod_vms} VMs"
+        pod_datastores.each do |ds|
+          ds_sum = @datastore_props[ds]['summary']
+          @datastore_props[ds]['free_percent'] = ds_sum.freeSpace.to_f * 100 / ds_sum.capacity
+        end
+        pod_datastores.each do |ds|
+          ds_props = @datastore_props[ds]
+          ds_name = ds_props['name']
+          free = ds_props['free_percent']
+          free_gb = ds_props['summary'].freeSpace.to_f / 1024**3
+          free_str = "%.2f GB (%.2f%%)" % [free_gb, free]
+          log " Datastore #{ds_name}: #{free_str} free"
+        end
+
+        # Admission check: VM limit
+        denied = false
+        max_vms = @max_vms_per_pod
+        if max_vms && max_vms > 0
+          if num_pod_vms > max_vms
+            err = "VM limit (#{max_vms}) exceeded on this Pod"
+            denied = true
+          end
+        end
+
+        # Admission check: Free space on datastores
+        min_ds_free = @min_ds_free
+        if min_ds_free && min_ds_free > 0
+          # We need at least one datastore with enough free space
+          low_list = pod_datastores.select do |ds|
+            @datastore_props[ds]['free_percent'] <= min_ds_free
+          end
+
+          if low_list.length == pod_datastores.length
+            dsNames = low_list.map{|ds| @datastore_props[ds]['name']}.join(", ")
+            err = "Datastores #{dsNames} below minimum free disk space (#{min_ds_free}%)"
+            denied = true
+          end
+        end
+
+        # Admission check: Hosts are available
+        if !denied
+          hosts_available = pod.any? do |computer|
+            stats = Hash[self.computers][computer]
+            stats[:totalCPU] > 0 && stats[:totalMem] > 0
+          end
+          if !hosts_available
+            err = "No hosts are current available in this pod"
+            denied = true
+          end
+        end
+
+        if denied
+          log " Admission DENIED: #{err}"
+        else
+          log " Admission granted"
+        end
+
+        !denied
+      end
+    end
+    if @filtered_pods.length == 0
+      log "Couldn't find any Pod with enough resources."
+      if @service_docs_url
+        log "Check #{@service_docs_url} to see which other Pods you may be able to use"
+      end
+      fail "Admission denied"
+    end
+    @filtered_pods
+  end
+
+  # Returns the computer (aka cluster) to be used for placement. If not set yet,
+  # computs the least loaded cluster (using a metric that combines CPU and Memory
+  # load) that passes admission control.
+  # @return [VIM::ClusterComputeResource] Chosen computer (aka cluster)
+  def pick_computer placementhint = nil
+    if !@computer
+      # Out of the pods to which we have been granted access, pick the cluster
+      # (aka computer) with the lowest CPU/Mem utilization for load balancing
+      available = self.filtered_pods.flatten
+      eligible = self.computers.select do |computer,stats|
+        available.member?(computer) && stats[:totalCPU] > 0 and stats[:totalMem] > 0
+      end
+      computer = nil
+      if placementhint
+        if eligible.length > 0
+          computer = eligible.map{|x| x[0]}[placementhint % eligible.length]
+        end
+      else
+        computer, = eligible.min_by do |computer,stats|
+          2**(stats[:usedCPU].to_f/stats[:totalCPU]) + (stats[:usedMem].to_f/stats[:totalMem])
+        end
+      end
+
+      if !computer
+        fail "No clusters available, should have been prevented by admission control"
+      end
+      @computer = computer
+    end
+    @computer
+  end
+
+  # Returns the datastore to be used for placement. If not set yet, picks a
+  # datastore without much intelligence, as long as it passes admission control.
+  # @return [VIM::Datastore] Chosen datastore
+  def datastore placementHint = nil
+    pod_datastores = pick_computer.datastore & datastores
+
+    eligible = pod_datastores.select do |ds|
+      min_ds_free = @min_ds_free
+      if min_ds_free && min_ds_free > 0
+        ds_sum = @datastore_props[ds]['summary']
+        free_percent = ds_sum.freeSpace.to_f * 100 / ds_sum.capacity
+        free_percent > min_ds_free
+      else
+        true
+      end
+    end
+
+    if eligible.length == 0
+      fail "Couldn't find any eligible datastore. Admission control should have prevented this"
+    end
+
+    if placementHint && placementHint > 0
+      @datastore = eligible[placementHint % eligible.length]
+    else
+      @datastore = eligible.first
+    end
+    @datastore
+  end
+
+  # Runs the placement algorithm and populates all the various properties as
+  # a side effect. Run this first, before using the other functions of this
+  # class.
+  def make_placement_decision opts = {}
+    self.filtered_pods
+    self.pick_computer opts[:placementHint]
+    log "Selected compute resource: #{@computer.name}"
+
+    @rp = @computer.resourcePool.traverse(@rp_path)
+    if !@rp
+      fail "Resource pool #{@rp_path} not found"
+    end
+    log "Resource pool: #{@rp.pretty_path}"
+
+    stats = @computer.stats
+    if stats[:totalMem] > 0 && stats[:totalCPU] > 0
+      cpu_load = "#{(100*stats[:usedCPU])/stats[:totalCPU]}% cpu"
+      mem_load = "#{(100*stats[:usedMem])/stats[:totalMem]}% mem"
+      log "Cluster utilization: #{cpu_load}, #{mem_load}"
+    end
+
+    user_vms = vm_folder.inventory_flat('VirtualMachine' => %w(name storage)).select do |k, v|
+      k.is_a?(RbVmomi::VIM::VirtualMachine)
+    end
+    numVms = user_vms.length
+    unshared = user_vms.map do |vm, info|
+      info['storage'].perDatastoreUsage.map{|x| x.unshared}.inject(0, &:+)
+    end.inject(0, &:+)
+    log "User stats: #{numVms} VMs using %.2fGB of storage" % [unshared.to_f / 1024**3]
+
+    @placement_hint = opts[:placement_hint] || (rand(100) + 1)
+    datastore = self.datastore @placement_hint
+    log "Datastore: #{datastore.name}"
+  end
+end
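The header comment above describes the intended usage: instantiate the class, call make_placement_decision, then read off the chosen cluster, resource pool and datastore. A minimal sketch along the lines of data/examples/cached_ovf_deploy.rb; the connection details, cluster and datastore names are placeholders, while the option names come from the initializer above:

    require 'rbvmomi'
    require 'rbvmomi/utils/admission_control'

    # Placeholder vCenter connection; substitute real host/user/password.
    vim = RbVmomi::VIM.connect host: 'vcenter.example.com', user: 'root',
                               password: 'secret', insecure: true
    dc = vim.serviceInstance.find_datacenter('DC1') or abort "datacenter not found"

    scheduler = AdmissionControlledResourceScheduler.new(
      vim,
      :datacenter      => dc,
      :computer_names  => ['Cluster1'],      # clusters eligible for placement
      :vm_folder       => dc.vmFolder,
      :rp_path         => '/',
      :datastore_paths => ['datastore1'],
      :max_vms_per_pod => 500,               # deny a pod with more powered-on VMs
      :min_ds_free     => 10                 # require more than 10% free space
    )
    scheduler.make_placement_decision

    puts "cluster:   #{scheduler.pick_computer.name}"
    puts "datastore: #{scheduler.datastore.name}"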