rbvmomi 1.5.1 → 1.6.0
- data/Rakefile +2 -2
- data/VERSION +1 -1
- data/examples/cached_ovf_deploy.rb +120 -0
- data/examples/lease_tool.rb +102 -0
- data/lib/rbvmomi/connection.rb +2 -2
- data/lib/rbvmomi/utils/admission_control.rb +386 -0
- data/lib/rbvmomi/utils/deploy.rb +295 -0
- data/lib/rbvmomi/utils/leases.rb +142 -0
- data/lib/rbvmomi/vim/PerfCounterInfo.rb +26 -0
- data/lib/rbvmomi/vim/PerformanceManager.rb +85 -0
- data/lib/rbvmomi/vim/ReflectManagedMethodExecuter.rb +5 -1
- data/lib/rbvmomi/vim/Task.rb +34 -0
- data/lib/rbvmomi/vim/VirtualMachine.rb +46 -0
- data/test/test_serialization.rb +16 -0
- data/vmodl.db +0 -0
- metadata +68 -73
data/lib/rbvmomi/utils/deploy.rb
ADDED
@@ -0,0 +1,295 @@
+require 'open-uri'
+require 'nokogiri'
+require 'rbvmomi'
+
+# The cached ovf deployer is an optimization on top of regular OVF deployment
+# as it is offered by the VIM::OVFManager. Creating a VM becomes a multi-stage
+# process: First the OVF is uploaded and instead of directly using it, it is
+# prepared for linked cloning and marked as a template. It can then be cloned
+# many times over, without the cost of repeated OVF deploys (network and storage
+# IO) and the cost of storing the same base VM several times (storage space).
+# Multiple concurrent users can try to follow this process and collisions are
+# automatically detected and de-duplicated. One thread will win to create the
+# OVF template, while the other will wait for the winning thread to finish the
+# task. So even fully independent, distributed and unsynchronized clients using
+# this call will be auto-synchronized just by talking to the same vCenter
+# instance and using the same naming scheme for the templates.
+#
+# The caching concept above can be extended to multiple levels. Let's assume
+# many VMs will share the same base OS, but are running different builds of the
+# application running inside the VM. If it is expected that again many (but not
+# all) VMs will share the same build of the application, a tree structure of
+# templates becomes useful. At the root of the tree is the template with just
+# the base OS. It is uploaded from an OVF if needed. Then, this base OS image
+# is cloned, a particular build is installed and the resulting VM is again marked
+# as a template. Users can then instantiate that particular build with very
+# little extra overhead. This class supports such multi level templates via the
+# :is_template parameter of linked_clone().
+class CachedOvfDeployer
+  # Constructor. Gets the VIM connection and important VIM objects
+  # @param vim [VIM] VIM Connection
+  # @param network [VIM::Network] Network to attach templates and VMs to
+  # @param computer [VIM::ComputeResource] Host/Cluster to deploy templates/VMs to
+  # @param template_folder [VIM::Folder] Folder in which all templates are kept
+  # @param vm_folder [VIM::Folder] Folder into which to deploy VMs
+  # @param datastore [VIM::Datastore] Datastore to store template/VM in
+  def initialize vim, network, computer, template_folder, vm_folder, datastore
+    @vim = vim
+    @network = network
+    @computer = computer
+    @rp = @computer.resourcePool
+    @template_folder = template_folder
+    @vmfolder = vm_folder
+    @datastore = datastore
+  end
+
+  def log x
+    # XXX: Should find a better way for users to customize how logging is done
+    puts "#{Time.now}: #{x}"
+  end
+
+  # Internal helper method that executes the passed in block while disabling
+  # the handling of SIGINT and SIGTERM signals. Restores their handlers after
+  # the block is executed.
+  # @param enabled [Boolean] If false, this function is a no-op
+  def _run_without_interruptions enabled
+    if enabled
+      int_handler = Signal.trap("SIGINT", 'IGNORE')
+      term_handler = Signal.trap("SIGTERM", 'IGNORE')
+    end
+
+    yield
+
+    if enabled
+      Signal.trap("SIGINT", int_handler)
+      Signal.trap("SIGTERM", term_handler)
+    end
+  end
+
+  # Uploads an OVF, prepares the resulting VM for linked cloning and then marks
+  # it as a template. If another thread happens to race to do the same task,
+  # the losing thread will not do the actual work, but instead wait for the
+  # winning thread to do the work by looking up the template VM and waiting for
+  # it to be marked as a template. This way, the cost of uploading and keeping
+  # the full size of the VM is only paid once.
+  # @param ovf_url [String] URL to the OVF to be deployed. Currently only http
+  #                         and https are supported
+  # @param template_name [String] Name of the template to be used. Should be the
+  #                               same name for the same URL. A cluster specific
+  #                               post-fix will automatically be added.
+  # @option opts [int] :run_without_interruptions Whether or not to disable
+  #                                               SIGINT and SIGTERM during
+  #                                               the OVF upload.
+  # @option opts [Hash] :config VM Config delta to apply after the OVF deploy is
+  #                             done. Allows the template to be customized, e.g.
+  #                             to set annotations.
+  # @return [VIM::VirtualMachine] The template as a VIM::VirtualMachine instance
+  def upload_ovf_as_template ovf_url, template_name, opts = {}
+    # The OVFManager expects us to know the names of the networks mentioned
+    # in the OVF file so we can map them to VIM::Network objects. For
+    # simplicity this function assumes we need to read the OVF file
+    # ourselves to know the names, and we map all of them to the same
+    # VIM::Network.
+    ovf = open(ovf_url, 'r'){|io| Nokogiri::XML(io.read)}
+    ovf.remove_namespaces!
+    networks = ovf.xpath('//NetworkSection/Network').map{|x| x['name']}
+    network_mappings = Hash[networks.map{|x| [x, @network]}]
+
+    network_mappings_str = network_mappings.map{|k, v| "#{k} = #{v.name}"}
+    log "networks: #{network_mappings_str.join(', ')}"
+
+    pc = @vim.serviceContent.propertyCollector
+
+    # OVFs need to be uploaded to a specific host. DRS won't just pick one
+    # for us, so we need to pick one wisely. The host needs to be connected,
+    # not be in maintenance mode and must have the destination datastore
+    # accessible.
+    hosts = @computer.host
+    hosts_props = pc.collectMultiple(
+      hosts,
+      'datastore', 'runtime.connectionState',
+      'runtime.inMaintenanceMode', 'name'
+    )
+    host = hosts.shuffle.find do |x|
+      host_props = hosts_props[x]
+      is_connected = host_props['runtime.connectionState'] == 'connected'
+      is_ds_accessible = host_props['datastore'].member?(@datastore)
+      is_connected && is_ds_accessible && !host_props['runtime.inMaintenanceMode']
+    end
+    if !host
+      fail "No host in the cluster available to upload OVF to"
+    end
+
+    log "Uploading OVF to #{hosts_props[host]['name']}..."
+    property_mappings = {}
+
+    # To work around the VMFS 8-host limit (existed until ESX 5.0), as
+    # well as just for organization purposes, we create one template per
+    # cluster. This also provides us with additional isolation.
+    vm_name = template_name+"-#{@computer.name}"
+
+    vm = nil
+    wait_for_template = false
+    # If the user sets opts[:run_without_interruptions], we will block
+    # signals from the user (SIGINT, SIGTERM) in order to not be interrupted.
+    # This is desirable, as other threads depend on this thread finishing
+    # its prepare job and thus interrupting it has impacts beyond this
+    # single thread or process.
+    _run_without_interruptions(opts[:run_without_interruptions]) do
+      begin
+        vm = @vim.serviceContent.ovfManager.deployOVF(
+          uri: ovf_url,
+          vmName: vm_name,
+          vmFolder: @template_folder,
+          host: host,
+          resourcePool: @rp,
+          datastore: @datastore,
+          networkMappings: network_mappings,
+          propertyMappings: property_mappings)
+      rescue RbVmomi::Fault => fault
+        # If two threads execute this script at the same time to upload
+        # the same template under the same name, one will win and the other
+        # will be rejected by VC. We catch those cases here, and handle
+        # them by waiting for the winning thread to finish preparing the
+        # template, see below ...
+        is_duplicate = fault.fault.is_a?(RbVmomi::VIM::DuplicateName)
+        is_duplicate ||= (fault.fault.is_a?(RbVmomi::VIM::InvalidState) &&
+                          !fault.fault.is_a?(RbVmomi::VIM::InvalidHostState))
+        if is_duplicate
+          wait_for_template = true
+        else
+          raise fault
+        end
+      end
+
+      # The winning thread succeeded in uploading the OVF. Now we need to
+      # prepare it for (linked) cloning and mark it as a template to signal
+      # we are done.
+      if !wait_for_template
+        vm.add_delta_disk_layer_on_all_disks
+        if opts[:config]
+          # XXX: Should we add a version that does retries?
+          vm.ReconfigVM_Task(:spec => opts[:config]).wait_for_completion
+        end
+        vm.MarkAsTemplate
+      end
+    end
+
+    # The losing thread now needs to wait for the winning thread to finish
+    # uploading and preparing the template
+    if wait_for_template
+      log "Template already exists, waiting for it to be ready"
+      vm = _wait_for_template_ready @template_folder, vm_name
+      log "Template fully prepared and ready to be cloned"
+    end
+
+    vm
+  end
+
+  # Looks up a template by name in the configured template_folder. Should be used
+  # before uploading the VM via upload_ovf_as_template, although that is
+  # not strictly required, but a lot more efficient.
+  # @param template_name [String] Name of the template to be used. A cluster
+  #                               specific post-fix will automatically be added.
+  # @return [VIM::VirtualMachine] The template as a VIM::VirtualMachine instance
+  #                               or nil
+  def lookup_template template_name
+    template_path = "#{template_name}-#{@computer.name}"
+    template = @template_folder.traverse(template_path, VIM::VirtualMachine)
+    if template
+      is_template = template.collect 'config.template'
+      if !is_template
+        template = nil
+      end
+    end
+    template
+  end
+
+  # Creates a linked clone of a template prepared with upload_ovf_as_template.
+  # The function waits for completion on the clone task. Optionally, in case
+  # two level templates are being used, this function can wait for another
+  # thread to finish creating the second level template. See class comments
+  # for the concept of multi level templates.
+  # @param template_vm [VIM::VirtualMachine] Template to clone, as returned by
+  #                                          lookup_template or upload_ovf_as_template.
+  # @param vm_name [String] Name of the new VM that is being created via cloning.
+  # @param config [Hash] VM Config delta to apply after the VM is cloned.
+  #                      Allows the template to be customized, e.g. to adjust
+  #                      CPU or Memory sizes or set annotations.
+  # @option opts [int] :is_template If true, the clone is assumed to be a template
+  #                                 again and collision and de-duping logic kicks
+  #                                 in.
+  # @return [VIM::VirtualMachine] The VIM::VirtualMachine instance of the clone
+  def linked_clone template_vm, vm_name, config, opts = {}
+    spec = {
+      location: {
+        pool: @rp,
+        datastore: @datastore,
+        diskMoveType: :moveChildMostDiskBacking,
+      },
+      powerOn: false,
+      template: false,
+      config: config,
+    }
+    if opts[:is_template]
+      wait_for_template = false
+      template_name = "#{vm_name}-#{@computer.name}"
+      begin
+        vm = template_vm.CloneVM_Task(
+          folder: @template_folder,
+          name: template_name,
+          spec: spec
+        ).wait_for_completion
+      rescue RbVmomi::Fault => fault
+        if fault.fault.is_a?(RbVmomi::VIM::DuplicateName)
+          wait_for_template = true
+        else
+          raise
+        end
+      end
+
+      if wait_for_template
+        puts "#{Time.now}: Template already exists, waiting for it to be ready"
+        vm = _wait_for_template_ready @template_folder, template_name
+        puts "#{Time.now}: Template ready"
+      end
+    else
+      vm = template_vm.CloneVM_Task(
+        folder: @vmfolder,
+        name: vm_name,
+        spec: spec
+      ).wait_for_completion
+    end
+    vm
+  end
+
+  # Internal helper method that waits for a template to be fully created. It
+  # polls until it finds the VM in the inventory, and once it is there, waits
+  # for it to be fully created and marked as a template. This function will
+  # block forever if the template never gets created or marked as a
+  # template.
+  # @param vm_folder [VIM::Folder] Folder in which we expect the template to show up
+  # @param vm_name [String] Name of the VM we are waiting for
+  # @return [VIM::VirtualMachine] The VM we were waiting for when it is ready
+  def _wait_for_template_ready vm_folder, vm_name
+    vm = nil
+    while !vm
+      sleep 3
+      # XXX: Optimize this
+      vm = vm_folder.children.find{|x| x.name == vm_name}
+    end
+    log "Template VM found"
+    sleep 2
+    while true
+      runtime, template = vm.collect 'runtime', 'config.template'
+      ready = runtime && runtime.host && runtime.powerState == "poweredOff"
+      ready = ready && template
+      if ready
+        break
+      end
+      sleep 5
+    end
+
+    vm
+  end
+end
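To illustrate how the new CachedOvfDeployer might be driven, here is a minimal usage sketch in Ruby (not code shipped in the gem). The connection settings, inventory names (DC1, VM Network, datastore1), template name and OVF URL are placeholders, and error handling is omitted.

require 'rbvmomi'
require 'rbvmomi/utils/deploy'

# Placeholder connection and inventory names -- adjust for a real environment.
vim = RbVmomi::VIM.connect host: 'vc.example.com', user: 'user', password: 'secret', insecure: true
dc = vim.serviceInstance.find_datacenter('DC1') or fail 'datacenter not found'
cluster = dc.hostFolder.childEntity.grep(RbVmomi::VIM::ComputeResource).first
network = dc.network.find { |n| n.name == 'VM Network' }
datastore = dc.datastore.find { |d| d.name == 'datastore1' }

# Keep templates and VMs in the datacenter's root VM folder for simplicity.
deployer = CachedOvfDeployer.new(vim, network, cluster, dc.vmFolder, dc.vmFolder, datastore)

# Reuse the template if a previous run already created it, otherwise upload it.
template = deployer.lookup_template('ubuntu-base') ||
           deployer.upload_ovf_as_template('http://example.com/ubuntu-base.ovf', 'ubuntu-base',
                                           :run_without_interruptions => true)

# Cheap linked clone off the prepared template, then power it on.
vm = deployer.linked_clone(template, 'web-01', :numCPUs => 2, :memoryMB => 2048)
vm.PowerOnVM_Task.wait_for_completion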
data/lib/rbvmomi/utils/leases.rb
ADDED
@@ -0,0 +1,142 @@
+require 'yaml'
+
+# A class to manage VM leases
+#
+# This class uses YAML encoded VM annotations (config.annotation) to manage a
+# lease system. It helps add such lease info onto new and existing VMs and to
+# find VMs that have expired leases or that are about to have expired leases.
+# The calling code can use those to generate emails with about-to-expire
+# notifications, suspend, power off or destroy VMs that have exceeded their
+# lease, etc.
+class LeaseTool
+  # List of VM properties the LeaseTool needs to do its job. Can be used to
+  # construct larger property collector calls that retrieve more info than just
+  # one subsystem needs.
+  # @return [Array] List of property names
+  def vms_props_list
+    ['name', 'config.annotation']
+  end
+
+  # Fetch all VM properties that the LeaseTool needs on all VMs passed in.
+  # @param vms [Array] List of VIM::VirtualMachine instances
+  # @return [Hash] Hash of VMs as keys and their properties as values
+  def get_vms_props vms
+    out = {}
+    if vms.length > 0
+      pc = vms.first._connection.serviceContent.propertyCollector
+      out = pc.collectMultiple(vms, 'name', 'config.annotation')
+    end
+    out
+  end
+
+  # Retrieve the current time as used by the lease tool.
+  # @return [Time] Current time as used by the lease tool
+  def current_time
+    # XXX: Should switch to time provided by VC
+    Time.now
+  end
+
+  # Helper function that sets the lease info in a passed in VM config. If there
+  # is no annotation, it is added. If there is an annotation, it is updated to
+  # include the lease info. Note that if the annotation isn't YAML, it is
+  # overwritten.
+  # @param vmconfig [Hash] Virtual Machine config spec
+  # @param lease_minutes [int] Time to lease expiration from now in minutes
+  # @return [Hash] Updated Virtual Machine config spec
+  def set_lease_in_vm_config vmconfig, lease_minutes
+    annotation = vmconfig[:annotation]
+    annotation ||= ""
+    note = YAML.load annotation
+    if !note.is_a?(Hash)
+      note = {}
+    end
+    lease = current_time + lease_minutes * 60
+    note['lease'] = lease
+    vmconfig[:annotation] = YAML.dump(note)
+    vmconfig
+  end
+
+  # Issue ReconfigVM_Task on the VM to update the lease. The user can pass in the
+  # current annotation, but if not, it is retrieved on demand. A task is returned,
+  # i.e. the function doesn't wait for completion.
+  # @param vm [VIM::VirtualMachine] Virtual Machine instance
+  # @param lease_minutes [int] Time to lease expiration from now in minutes
+  # @param annotation [String] 'config.annotation' property of the VM. Optional.
+  # @return [VIM::Task] VM reconfiguration task
+  def set_lease_on_vm_task vm, lease_minutes, annotation = nil
+    if !annotation
+      annotation = vm.collect 'config.annotation'
+    end
+    vmconfig = {:annotation => annotation}
+    vmconfig = set_lease_in_vm_config vmconfig, lease_minutes
+    # XXX: It may be a good idea to cite the VM version here to avoid
+    # concurrent writes to the annotation stepping on each other's toes
+    vm.ReconfigVM_Task(:spec => vmconfig)
+  end
+
+  # Issue ReconfigVM_Task to set the lease on all VMs that currently do not
+  # have a lease. All VM reconfigurations are done in parallel and the function
+  # waits for all of them to complete.
+  # @param vms [Array] List of VIM::VirtualMachine instances, may or may not have leases
+  # @param vmprops [Hash] Hash of VIM::VirtualMachine instances to their properties
+  # @option opts [int] :lease_minutes Time to lease expiration from now in minutes
+  # @return [Array] List of previously leaseless VMs that now have a lease
+  def set_lease_on_leaseless_vms vms, vmprops, opts = {}
+    lease_minutes = opts[:lease_minutes]
+    if !lease_minutes
+      raise "Expected lease_minutes to be specified"
+    end
+    vms = find_leaseless_vms vms, vmprops
+    if vms.length > 0
+      tasks = vms.map do |vm|
+        annotation = vmprops[vm]['config.annotation']
+        task = set_lease_on_vm_task(vm, lease_minutes, annotation)
+        task
+      end
+      si = vms.first._connection.serviceInstance
+      si.wait_for_multiple_tasks [], tasks
+    end
+    vms
+  end
+
+  # Filter the list of passed in Virtual Machines and find the ones that currently
+  # do not have a lease.
+  # @param vms [Array] List of VIM::VirtualMachine instances, may or may not have leases
+  # @param vmprops [Hash] Hash of VIM::VirtualMachine instances to their properties
+  # @return [Array] List of leaseless VMs
+  def find_leaseless_vms vms, vmprops
+    vms.reject do |vm|
+      props = vmprops[vm]
+      annotation = props['config.annotation']
+      if annotation
+        note = YAML.load annotation
+        note.is_a?(Hash) && note['lease']
+      end
+    end
+  end
+
+  # Filter the list of passed in Virtual Machines and find the ones that are
+  # expired. A time offset can be used to identify VMs that will expire at
+  # a certain point in the future.
+  # If a VM doesn't have a lease, it is treated as never expiring.
+  # @param vms [Array] List of VIM::VirtualMachine instances, may or may not have leases
+  # @param vmprops [Hash] Hash of VIM::VirtualMachine instances to their properties
+  # @option opts [int] :time_delta Time delta (seconds) to be added to current time
+  # @return [Hash] Hash of expired VMs to their time to expiration
+  def filter_expired_vms vms, vmprops, opts = {}
+    time_delta = opts[:time_delta] || 0
+    time = current_time + time_delta
+
+    out = vms.map do |vm|
+      props = vmprops[vm]
+      next unless annotation = props['config.annotation']
+      note = YAML.load annotation
+      next unless note.is_a?(Hash) && lease = note['lease']
+      next unless time > lease
+      time_to_expiration = ((lease - time) + time_delta)
+      [vm, time_to_expiration]
+    end.compact
+    out = Hash[out]
+    out
+  end
+end
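A rough sketch of how the LeaseTool might be wired up (illustration only, not part of the diff). It assumes vim is an existing RbVmomi::VIM connection and vm_folder a VIM::Folder holding the VMs of interest.

require 'rbvmomi'
require 'rbvmomi/utils/leases'

lease_tool = LeaseTool.new
vms = vm_folder.childEntity.grep(RbVmomi::VIM::VirtualMachine)
vm_props = lease_tool.get_vms_props(vms)

# Give every VM that doesn't have a lease yet a 24 hour lease ...
lease_tool.set_lease_on_leaseless_vms(vms, vm_props, :lease_minutes => 24 * 60)

# ... and report the VMs whose lease will have run out an hour from now.
expired = lease_tool.filter_expired_vms(vms, lease_tool.get_vms_props(vms), :time_delta => 3600)
expired.each_key { |vm| puts "#{vm_props[vm]['name']}: lease expired or about to expire" }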
data/lib/rbvmomi/vim/PerfCounterInfo.rb
ADDED
@@ -0,0 +1,26 @@
+# Copyright (c) 2011 VMware, Inc. All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+class RbVmomi::VIM::PerfCounterInfo
+  def name
+    "#{groupInfo.key}.#{nameInfo.key}"
+  end
+end
+
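The new PerfCounterInfo#name helper composes the counter's group and name keys into the familiar dotted form. A small sketch (not from the gem), assuming vim is an existing RbVmomi::VIM connection:

# Assumes vim is an existing RbVmomi::VIM connection.
pm = vim.serviceContent.perfManager
counter = pm.perfCounter.first
puts counter.name   # e.g. "cpu.usage" -- groupInfo.key joined with nameInfo.key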
data/lib/rbvmomi/vim/PerformanceManager.rb
ADDED
@@ -0,0 +1,85 @@
+require 'date'
+
+class Time
+  def to_datetime
+    # Convert seconds + microseconds into a fractional number of seconds
+    seconds = sec + Rational(usec, 10**6)
+
+    # Convert a UTC offset measured in seconds to one measured in a
+    # fraction of a day.
+    offset = Rational(utc_offset, 60 * 60 * 24)
+    DateTime.new(year, month, day, hour, min, seconds, offset)
+  end
+end
+
+RbVmomi::VIM::PerformanceManager
+class RbVmomi::VIM::PerformanceManager
+  def perfcounter_cached
+    @perfcounter ||= perfCounter
+  end
+
+  def perfcounter_hash
+    @perfcounter_hash ||= Hash[perfcounter_cached.map{|x| [x.name, x]}]
+  end
+
+  def perfcounter_idhash
+    @perfcounter_idhash ||= Hash[perfcounter_cached.map{|x| [x.key, x]}]
+  end
+
+  def provider_summary obj
+    @provider_summary ||= {}
+    @provider_summary[obj.class] ||= QueryPerfProviderSummary(:entity => obj)
+  end
+
+  def retrieve_stats objects, metrics, opts = {}
+    opts = opts.dup
+    max_samples = opts[:max_samples] || 1
+    realtime = false
+    if not opts[:interval]
+      provider = provider_summary objects.first
+      opts[:interval] = provider.refreshRate
+      realtime = true
+    else
+      provider = provider_summary objects.first
+      if opts[:interval] == provider.refreshRate
+        realtime = true
+      end
+    end
+
+    metric_ids = metrics.map do |x|
+      counter = perfcounter_hash[x]
+      if !counter
+        pp perfcounter_hash.keys
+        fail "Counter for #{x} couldn't be found"
+      end
+      RbVmomi::VIM::PerfMetricId(:counterId => counter.key, :instance => '*')
+    end
+    query_specs = objects.map do |obj|
+      RbVmomi::VIM::PerfQuerySpec({
+        :maxSample => max_samples,
+        :entity => obj,
+        :metricId => metric_ids,
+        :intervalId => opts[:interval],
+        :startTime => (realtime == false ? opts[:start_time].to_datetime : nil),
+      })
+    end
+    stats = QueryPerf(:querySpec => query_specs)
+
+    Hash[stats.map do |res|
+      [
+        res.entity,
+        {
+          :sampleInfo => res.sampleInfo,
+          :metrics => Hash[res.value.map do |metric|
+            [perfcounter_idhash[metric.id.counterId].name, metric.value]
+          end]
+        }
+      ]
+    end]
+  end
+
+  def active_intervals
+    intervals = historicalInterval
+    Hash[(1..4).map { |level| [level, intervals.select { |x| x.enabled && x.level >= level }] }]
+  end
+end
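A sketch of how retrieve_stats might be called for a set of virtual machines (illustration only). It assumes vim is an existing RbVmomi::VIM connection and vms a non-empty array of VIM::VirtualMachine objects; the counter names follow the group.name convention produced by PerfCounterInfo#name above.

# Assumes vim is an existing RbVmomi::VIM connection and vms a non-empty array
# of VIM::VirtualMachine objects.
pm = vim.serviceContent.perfManager
stats = pm.retrieve_stats(vms, ['cpu.usage', 'mem.usage'], :max_samples => 10)

stats.each do |vm, res|
  samples = res[:metrics]['cpu.usage']
  puts "#{vm.name}: last #{samples.length} cpu.usage samples: #{samples.join(', ')}"
end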
data/lib/rbvmomi/vim/ReflectManagedMethodExecuter.rb
CHANGED
@@ -18,7 +18,11 @@ class VIM::ReflectManagedMethodExecuter
     end
     result = ExecuteSoap(:moid => moid, :version => 'urn:vim25/5.0',
                          :method => method, :argument => soap_args)
-    _connection.deserializer.deserialize Nokogiri(result.response).root, nil
+    if result
+      _connection.deserializer.deserialize Nokogiri(result.response).root, nil
+    else
+      nil
+    end
   end
 end
 
data/lib/rbvmomi/vim/Task.rb
CHANGED
@@ -11,6 +11,26 @@ class RbVmomi::VIM::Task
       raise info.error
     end
   end
+
+  # Wait for all child tasks to finish. If any one child task failed,
+  # the exception of the first failing task is thrown.
+  # @return [Hash] Map of tasks to their +info.result+ on success.
+  # @raise +info.error+ on error.
+  def wait_for_childtask_completion
+    si = _connection.serviceInstance
+    tasks_props = si.wait_for_multiple_tasks(
+      ['info.state', 'info.result', 'info.error'],
+      self.child_tasks
+    )
+    Hash[tasks_props.map do |task, props|
+      case props['info.state']
+      when 'success'
+        [task, props['info.result']]
+      when 'error'
+        raise props['info.error']
+      end
+    end]
+  end
 
   # Wait for a task to finish, with progress notifications.
   # @return (see #wait_for_completion)
@@ -28,4 +48,18 @@ class RbVmomi::VIM::Task
       raise info.error
     end
   end
+
+  # Get child tasks of this task.
+  # @return [Array] List of VIM::Task objects
+  def child_tasks
+    tm = _connection.serviceContent.taskManager
+    col = tm.CreateCollectorForTasks(:filter => {
+      :rootTaskKey => self.info.key
+    })
+    # XXX: Likely this is not enough and we need to collect pages other
+    # than the latest.
+    tasks = col.latestPage.map{|x| x.task}
+    col.DestroyCollector()
+    tasks
+  end
 end
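A sketch of the new child-task helpers in use (not from the gem). It assumes dc is a VIM::Datacenter and vms an array of powered-off VIM::VirtualMachine objects, and uses PowerOnMultiVM_Task as an example of a vCenter operation that fans out into per-VM child tasks.

task = dc.PowerOnMultiVM_Task(:vm => vms)
task.wait_for_completion

# Map each child task to its info.result; the first child error, if any, is raised.
task.wait_for_childtask_completion.each do |child, result|
  puts "#{child.info.entityName}: #{result.inspect}"
end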
data/lib/rbvmomi/vim/VirtualMachine.rb
CHANGED
@@ -10,4 +10,50 @@ class RbVmomi::VIM::VirtualMachine
   def disks
     self.config.hardware.device.grep(RbVmomi::VIM::VirtualDisk)
   end
+
+  # Get the IP of the guest, but only if it is not stale
+  # @return [String] Current IP reported (as per VMware Tools) or nil
+  def guest_ip
+    g = self.guest
+    if g.ipAddress && (g.toolsStatus == "toolsOk" || g.toolsStatus == "toolsOld")
+      g.ipAddress
+    else
+      nil
+    end
+  end
+
+  # Add a layer of delta disks (redo logs) in front of every disk on the VM.
+  # This is similar to taking a snapshot and makes the VM a valid target for
+  # creating a linked clone.
+  #
+  # Background: The API for linked clones is quite strange. We can't create
+  # a linked clone straight from any VM. The disks of the VM for which we can
+  # create a linked clone need to be read-only and thus VC demands that the VM
+  # we are cloning from uses delta-disks. Only then will it allow us to
+  # share the base disk.
+  def add_delta_disk_layer_on_all_disks
+    devices, = self.collect 'config.hardware.device'
+    disks = devices.grep(RbVmomi::VIM::VirtualDisk)
+    # XXX: Should create a single reconfig spec instead of one per disk
+    disks.each do |disk|
+      spec = {
+        :deviceChange => [
+          {
+            :operation => :remove,
+            :device => disk
+          },
+          {
+            :operation => :add,
+            :fileOperation => :create,
+            :device => disk.dup.tap { |x|
+              x.backing = x.backing.dup
+              x.backing.fileName = "[#{disk.backing.datastore.name}]"
+              x.backing.parent = disk.backing
+            },
+          }
+        ]
+      }
+      self.ReconfigVM_Task(:spec => spec).wait_for_completion
+    end
+  end
 end
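The new guest_ip helper returns nil until VMware Tools reports a non-stale address, so callers typically poll. A minimal sketch (illustration only), assuming vm is a VIM::VirtualMachine that was just powered on:

# Assumes vm is a VIM::VirtualMachine that was just powered on.
ip = nil
60.times do
  ip = vm.guest_ip
  break if ip
  sleep 5
end
puts ip ? "VM reachable at #{ip}" : "no IP reported by VMware Tools yet"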
data/test/test_serialization.rb
CHANGED
@@ -292,4 +292,20 @@ class SerializationTest < Test::Unit::TestCase
       <root>2011-11-16T13:36:08-08:00</root>
     EOS
   end
+
+  # TODO test all types
+  def test_any_type
+    obj = 1
+    check <<-EOS, obj, 'xsd:anyType', false
+      <root xsi:type="xsd:long">1</root>
+    EOS
+
+    obj = VIM::HostAccountSpec(:id => 'root', :password => 'foo')
+    check <<-EOS, obj, 'xsd:anyType', false
+      <root xsi:type="HostAccountSpec">
+        <id>root</id>
+        <password>foo</password>
+      </root>
+    EOS
+  end
 end
data/vmodl.db
CHANGED
Binary file