mkuzmin-rbvmomi 1.8.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. checksums.yaml +7 -0
  2. data/.yardopts +6 -0
  3. data/LICENSE +19 -0
  4. data/README.rdoc +78 -0
  5. data/Rakefile +45 -0
  6. data/VERSION +1 -0
  7. data/bin/rbvmomish +138 -0
  8. data/devel/analyze-vim-declarations.rb +213 -0
  9. data/devel/analyze-xml.rb +46 -0
  10. data/devel/benchmark.rb +117 -0
  11. data/devel/collisions.rb +18 -0
  12. data/devel/merge-internal-vmodl.rb +59 -0
  13. data/devel/merge-manual-vmodl.rb +32 -0
  14. data/examples/annotate.rb +54 -0
  15. data/examples/cached_ovf_deploy.rb +120 -0
  16. data/examples/clone_vm.rb +84 -0
  17. data/examples/create_vm-1.9.rb +93 -0
  18. data/examples/create_vm.rb +93 -0
  19. data/examples/extraConfig.rb +54 -0
  20. data/examples/lease_tool.rb +102 -0
  21. data/examples/logbundle.rb +63 -0
  22. data/examples/logtail.rb +60 -0
  23. data/examples/nfs_datastore.rb +95 -0
  24. data/examples/power.rb +59 -0
  25. data/examples/readme-1.rb +35 -0
  26. data/examples/readme-2.rb +51 -0
  27. data/examples/run.sh +41 -0
  28. data/examples/screenshot.rb +48 -0
  29. data/examples/vdf.rb +81 -0
  30. data/examples/vm_drs_behavior.rb +76 -0
  31. data/lib/rbvmomi.rb +12 -0
  32. data/lib/rbvmomi/basic_types.rb +375 -0
  33. data/lib/rbvmomi/connection.rb +270 -0
  34. data/lib/rbvmomi/deserialization.rb +248 -0
  35. data/lib/rbvmomi/fault.rb +17 -0
  36. data/lib/rbvmomi/pbm.rb +66 -0
  37. data/lib/rbvmomi/sms.rb +61 -0
  38. data/lib/rbvmomi/sms/SmsStorageManager.rb +7 -0
  39. data/lib/rbvmomi/trivial_soap.rb +114 -0
  40. data/lib/rbvmomi/trollop.rb +70 -0
  41. data/lib/rbvmomi/type_loader.rb +136 -0
  42. data/lib/rbvmomi/utils/admission_control.rb +398 -0
  43. data/lib/rbvmomi/utils/deploy.rb +314 -0
  44. data/lib/rbvmomi/utils/leases.rb +142 -0
  45. data/lib/rbvmomi/utils/perfdump.rb +628 -0
  46. data/lib/rbvmomi/vim.rb +128 -0
  47. data/lib/rbvmomi/vim/ComputeResource.rb +51 -0
  48. data/lib/rbvmomi/vim/Datacenter.rb +17 -0
  49. data/lib/rbvmomi/vim/Datastore.rb +68 -0
  50. data/lib/rbvmomi/vim/DynamicTypeMgrAllTypeInfo.rb +75 -0
  51. data/lib/rbvmomi/vim/DynamicTypeMgrDataTypeInfo.rb +20 -0
  52. data/lib/rbvmomi/vim/DynamicTypeMgrManagedTypeInfo.rb +46 -0
  53. data/lib/rbvmomi/vim/Folder.rb +207 -0
  54. data/lib/rbvmomi/vim/HostSystem.rb +174 -0
  55. data/lib/rbvmomi/vim/ManagedEntity.rb +57 -0
  56. data/lib/rbvmomi/vim/ManagedObject.rb +60 -0
  57. data/lib/rbvmomi/vim/ObjectContent.rb +23 -0
  58. data/lib/rbvmomi/vim/ObjectUpdate.rb +23 -0
  59. data/lib/rbvmomi/vim/OvfManager.rb +200 -0
  60. data/lib/rbvmomi/vim/PerfCounterInfo.rb +26 -0
  61. data/lib/rbvmomi/vim/PerformanceManager.rb +110 -0
  62. data/lib/rbvmomi/vim/PropertyCollector.rb +25 -0
  63. data/lib/rbvmomi/vim/ReflectManagedMethodExecuter.rb +30 -0
  64. data/lib/rbvmomi/vim/ResourcePool.rb +55 -0
  65. data/lib/rbvmomi/vim/ServiceInstance.rb +55 -0
  66. data/lib/rbvmomi/vim/Task.rb +65 -0
  67. data/lib/rbvmomi/vim/VirtualMachine.rb +74 -0
  68. data/test/test_deserialization.rb +383 -0
  69. data/test/test_emit_request.rb +128 -0
  70. data/test/test_exceptions.rb +14 -0
  71. data/test/test_helper.rb +14 -0
  72. data/test/test_misc.rb +24 -0
  73. data/test/test_parse_response.rb +69 -0
  74. data/test/test_serialization.rb +311 -0
  75. data/vmodl.db +0 -0
  76. metadata +163 -0
@@ -0,0 +1,398 @@
1
# An admission controlled resource scheduler for large scale vSphere deployments
#
# While DRS (Dynamic Resource Scheduler) in vSphere handles CPU and Memory
# allocations within a single vSphere cluster, larger deployments require
# another layer of scheduling to make the use of multiple clusters transparent.
# So this class doesn't replace DRS, but in fact works on top of it.
#
# The scheduler in this class performs admission control to make sure clusters
# don't get overloaded. It does so by adding additional metrics to the already
# existing CPU and Memory reservation system that DRS has. After admission
# control it also performs very basic initial placement. Note that in-cluster
# placement and load-balancing is left to DRS. Also note that no cross-cluster
# load balancing is done.
#
# This class uses the concept of a Pod: A set of clusters that share a set of
# datastores. From a datastore perspective, we are free to place a VM on any
# host or cluster. So admission control is done at the Pod level first. Pods
# are automatically discovered based on lists of clusters and datastores.
#
# Admission control covers the following metrics:
# - Host availability: If no hosts are available within a cluster or pod,
#   admission is denied.
# - Minimum free space: If a datastore falls below this free space percentage,
#   admission to it will be denied. Admission to a pod is granted as long as at
#   least one datastore passes admission control.
# - Maximum number of VMs: If a Pod exceeds a configured number of powered on
#   VMs, admission is denied. This is a crude but effective catch-all metric
#   in case users didn't set proper individual CPU or Memory reservations or
#   if the scalability limit doesn't originate from CPU or Memory.
#
# Placement after admission control:
# - Cluster selection: A load metric based on a combination of CPU and Memory
#   load is used to always select the "least loaded" cluster. The metric is very
#   crude and only meant to do very rough load balancing. If DRS clusters are
#   large enough, this is good enough in most cases though.
# - Datastore selection: Right now NO intelligence is implemented here.
#
# Usage:
# Instantiate the class, call make_placement_decision and then use the exposed
# computer (cluster), resource pool, vm_folder and datastore. Currently once
# computed, a new updated placement can't be generated.
class AdmissionControlledResourceScheduler
  attr_reader :rp

  # @param vim [VIM] VIM connection used for all lookups and queries
  # @param opts [Hash] Configuration. Recognized keys: :datacenter,
  #   :datacenter_path, :vm_folder, :vm_folder_path, :rp_path, :computers,
  #   :computer_names, :datastores, :datastore_paths, :max_vms_per_pod,
  #   :min_ds_free, :service_docs_url, :logger
  def initialize vim, opts = {}
    @vim = vim

    @datacenter = opts[:datacenter]
    @datacenter_path = opts[:datacenter_path]
    @vm_folder = opts[:vm_folder]
    @vm_folder_path = opts[:vm_folder_path]
    @rp_path = opts[:rp_path]
    @computers = opts[:computers]
    @computer_names = opts[:computer_names]
    @datastores = opts[:datastores]
    @datastore_paths = opts[:datastore_paths]

    @max_vms_per_pod = opts[:max_vms_per_pod]
    @min_ds_free = opts[:min_ds_free]
    @service_docs_url = opts[:service_docs_url]

    @pc = @vim.serviceContent.propertyCollector
    @root_folder = @vim.serviceContent.rootFolder

    @logger = opts[:logger]
  end

  # Logs a message either via the configured logger or, if none was given,
  # to stdout prefixed with a timestamp.
  # @param x [String] Message to log
  def log x
    if @logger
      @logger.info x
    else
      puts "#{Time.now}: #{x}"
    end
  end

  # Returns the used VM folder. If not set yet, uses the vm_folder_path to
  # lookup the folder. If it doesn't exist, it is created. Collisions between
  # multiple clients concurrently creating the same folder are handled.
  # @return [VIM::Folder] The VM folder
  def vm_folder
    retries = 1
    begin
      @vm_folder ||= datacenter.vmFolder.traverse!(@vm_folder_path, VIM::Folder)
      if !@vm_folder
        fail "VM folder #{@vm_folder_path} not found"
      end
    rescue RbVmomi::Fault => fault
      # A concurrent client may have created the folder between our lookup
      # and creation attempt; retry once so we pick up the existing folder.
      if !fault.fault.is_a?(RbVmomi::VIM::DuplicateName)
        raise
      else
        retries -= 1
        retry if retries >= 0
      end
    end
    @vm_folder
  end

  # Returns the used Datacenter. If not set yet, uses the datacenter_path to
  # lookup the datacenter.
  # @return [VIM::Datacenter] The datacenter
  def datacenter
    if !@datacenter
      @datacenter = @root_folder.traverse(@datacenter_path, VIM::Datacenter)
      if !@datacenter
        fail "datacenter #{@datacenter_path} not found"
      end
    end
    @datacenter
  end

  # Returns the candidate datastores. If not set yet, uses the datastore_paths
  # to lookup the datastores under the datacenter.
  # As a side effect, also looks up properties about all the datastores
  # @return [Array] List of VIM::Datastore
  def datastores
    if !@datastores
      @datastores = @datastore_paths.map do |path|
        ds = datacenter.datastoreFolder.traverse(path, VIM::Datastore)
        if !ds
          fail "datastore #{path} not found"
        end
        ds
      end
    end
    if !@datastore_props
      @datastore_props = @pc.collectMultiple(@datastores, 'summary', 'name')
    end
    @datastores
  end

  # Returns the candidate computers (aka clusters). If not set yet, uses the
  # computer_names to look them up.
  # @return [Array] List of [VIM::ClusterComputeResource, Hash] tuples, where
  #   the Hash is a list of stats about the computer
  def computers
    if !@computers
      @computers = @computer_names.map do |name|
        computer = datacenter.find_compute_resource(name)
        [computer, computer.stats]
      end
    end
    @computers
  end

  # Returns the candidate pods. If not set, automatically computes the pods
  # based on the list of computers (aka clusters) and datastores.
  # @return [Array] List of pods, where a pod is a list of VIM::ClusterComputeResource
  def pods
    if !@pods
      # A pod is defined as a set of clusters (aka computers) that share the same
      # datastore accessibility. Computing pods is done automatically using simple
      # set theory math.
      computersProps = @pc.collectMultiple(computers.map{|x| x[0]}, 'datastore')
      @pods = computers.map do |computer, stats|
        computersProps[computer]['datastore'] & self.datastores
      end.uniq.map do |ds_list|
        computers.map{|x| x[0]}.select do |computer|
          (computer.datastore & self.datastores) == ds_list
        end
      end
    end
    @pods
  end

  # Returns all VMs residing with a pod. Doesn't account for templates. Does so
  # very efficiently using a single API query.
  # @param pod [Array] Pod (list of VIM::ClusterComputeResource) to inspect
  # @return [Hash] Hash of VMs as keys and their properties as values.
  def pod_vms pod
    # This function retrieves all VMs residing inside a pod by traversing
    # each cluster's resource pool tree down to the VMs in a single
    # PropertyCollector query.
    filterSpec = VIM.PropertyFilterSpec(
      objectSet: pod.map do |computer, stats|
        {
          obj: computer.resourcePool,
          selectSet: [
            VIM.TraversalSpec(
              name: 'tsFolder',
              type: 'ResourcePool',
              path: 'resourcePool',
              skip: false,
              selectSet: [
                VIM.SelectionSpec(name: 'tsFolder'),
                VIM.SelectionSpec(name: 'tsVM'),
              ]
            ),
            VIM.TraversalSpec(
              name: 'tsVM',
              type: 'ResourcePool',
              path: 'vm',
              skip: false,
              selectSet: [],
            )
          ]
        }
      end,
      propSet: [
        { type: 'ResourcePool', pathSet: ['name'] },
        { type: 'VirtualMachine', pathSet: %w(runtime.powerState) }
      ]
    )

    result = @vim.propertyCollector.RetrieveProperties(specSet: [filterSpec])

    out = result.map { |x| [x.obj, Hash[x.propSet.map { |y| [y.name, y.val] }]] }
    out.select{|obj, props| obj.is_a?(VIM::VirtualMachine)}
  end

  # Returns all candidate datastores for a given pod.
  # @param pod [Array] Pod (list of VIM::ClusterComputeResource)
  # @return [Array] List of VIM::Datastore
  def pod_datastores pod
    pod.first.datastore & self.datastores
  end

  # Returns the list of pods that pass admission control. If not set yet, performs
  # admission control to compute the list. If no pods passed the admission
  # control, an exception is thrown.
  # @return [Array] List of pods, where a pod is a list of VIM::ClusterComputeResource
  def filtered_pods
    # This function applies admission control and returns those pods that have
    # passed admission control. An exception is thrown if access was denied to
    # all pods.
    if !@filtered_pods
      log "Performing admission control:"
      @filtered_pods = self.pods.select do |pod|
        # Gather some statistics about the pod ...
        on_vms = pod_vms(pod).select{|k,v| v['runtime.powerState'] == 'poweredOn'}
        num_pod_vms = on_vms.length
        pod_datastores = self.pod_datastores(pod)
        log "Pod: #{pod.map{|x| x.name}.join(', ')}"
        log "  #{num_pod_vms} VMs"
        pod_datastores.each do |ds|
          ds_sum = @datastore_props[ds]['summary']
          @datastore_props[ds]['free_percent'] = ds_sum.freeSpace.to_f * 100 / ds_sum.capacity
        end
        pod_datastores.each do |ds|
          ds_props = @datastore_props[ds]
          ds_name = ds_props['name']
          free = ds_props['free_percent']
          free_gb = ds_props['summary'].freeSpace.to_f / 1024**3
          free_str = "%.2f GB (%.2f%%)" % [free_gb, free]
          log "  Datastore #{ds_name}: #{free_str} free"
        end

        # Admission check: VM limit
        denied = false
        max_vms = @max_vms_per_pod
        if max_vms && max_vms > 0
          if num_pod_vms > max_vms
            err = "VM limit (#{max_vms}) exceeded on this Pod"
            denied = true
          end
        end

        # Admission check: Free space on datastores
        min_ds_free = @min_ds_free
        if min_ds_free && min_ds_free > 0
          # We need at least one datastore with enough free space
          low_list = pod_datastores.select do |ds|
            @datastore_props[ds]['free_percent'] <= min_ds_free
          end

          if low_list.length == pod_datastores.length
            dsNames = low_list.map{|ds| @datastore_props[ds]['name']}.join(", ")
            err = "Datastores #{dsNames} below minimum free disk space (#{min_ds_free}%)"
            denied = true
          end
        end

        # Admission check: Hosts are available
        if !denied
          hosts_available = pod.any? do |computer|
            stats = Hash[self.computers][computer]
            stats[:totalCPU] > 0 && stats[:totalMem] > 0
          end
          if !hosts_available
            err = "No hosts are currently available in this pod"
            denied = true
          end
        end

        if denied
          log "  Admission DENIED: #{err}"
        else
          log "  Admission granted"
        end

        !denied
      end
    end
    if @filtered_pods.length == 0
      log "Couldn't find any Pod with enough resources."
      if @service_docs_url
        log "Check #{@service_docs_url} to see which other Pods you may be able to use"
      end
      fail "Admission denied"
    end
    @filtered_pods
  end

  # Returns the computer (aka cluster) to be used for placement. If not set yet,
  # computes the least loaded cluster (using a metric that combines CPU and Memory
  # load) that passes admission control.
  # @param placementhint [Integer, nil] Optional deterministic selector; when
  #   given, picks eligible[placementhint % eligible.length] instead of the
  #   least loaded cluster.
  # @return [VIM::ClusterComputeResource] Chosen computer (aka cluster)
  def pick_computer placementhint = nil
    if !@computer
      # Out of the pods to which we have been granted access, pick the cluster
      # (aka computer) with the lowest CPU/Mem utilization for load balancing
      available = self.filtered_pods.flatten
      eligible = self.computers.select do |computer,stats|
        # NOTE: was `... && ... and ...`; normalized to `&&` throughout to
        # avoid the low-precedence `and` operator (same boolean result here).
        available.member?(computer) && stats[:totalCPU] > 0 && stats[:totalMem] > 0
      end
      computer = nil
      if placementhint
        if eligible.length > 0
          computer = eligible.map{|x| x[0]}[placementhint % eligible.length]
        end
      else
        computer, = eligible.min_by do |computer,stats|
          # CPU load is weighted exponentially so a CPU-saturated cluster is
          # penalized much harder than a memory-loaded one.
          2**(stats[:usedCPU].to_f/stats[:totalCPU]) + (stats[:usedMem].to_f/stats[:totalMem])
        end
      end

      if !computer
        fail "No clusters available, should have been prevented by admission control"
      end
      @computer = computer
    end
    @computer
  end

  # Returns the datastore to be used for placement. If not set yet, picks a
  # datastore without much intelligence, as long as it passes admission control.
  # @param placementHint [Integer, nil] Optional deterministic selector; when
  #   given and > 0, picks eligible[placementHint % eligible.length].
  # @return [VIM::Datastore] Chosen datastore
  def datastore placementHint = nil
    if @datastore
      return @datastore
    end

    pod_datastores = pick_computer.datastore & datastores

    eligible = pod_datastores.select do |ds|
      min_ds_free = @min_ds_free
      if min_ds_free && min_ds_free > 0
        ds_sum = @datastore_props[ds]['summary']
        free_percent = ds_sum.freeSpace.to_f * 100 / ds_sum.capacity
        free_percent > min_ds_free
      else
        true
      end
    end

    if eligible.length == 0
      fail "Couldn't find any eligible datastore. Admission control should have prevented this"
    end

    if placementHint && placementHint > 0
      @datastore = eligible[placementHint % eligible.length]
    else
      @datastore = eligible.first
    end
    @datastore
  end

  # Runs the placement algorithm and populates all the various properties as
  # a side effect. Run this first, before using the other functions of this
  # class.
  # @param opts [Hash] Options. :placementHint selects the cluster,
  #   :placement_hint selects the datastore (randomized when absent).
  def make_placement_decision opts = {}
    self.filtered_pods
    self.pick_computer opts[:placementHint]
    log "Selected compute resource: #{@computer.name}"

    @rp = @computer.resourcePool.traverse(@rp_path)
    if !@rp
      fail "Resource pool #{@rp_path} not found"
    end
    log "Resource pool: #{@rp.pretty_path}"

    stats = @computer.stats
    if stats[:totalMem] > 0 && stats[:totalCPU] > 0
      cpu_load = "#{(100*stats[:usedCPU])/stats[:totalCPU]}% cpu"
      mem_load = "#{(100*stats[:usedMem])/stats[:totalMem]}% mem"
      log "Cluster utilization: #{cpu_load}, #{mem_load}"
    end

    user_vms = vm_folder.inventory_flat('VirtualMachine' => %w(name storage)).select do |k, v|
      k.is_a?(RbVmomi::VIM::VirtualMachine)
    end
    numVms = user_vms.length
    unshared = user_vms.map do |vm, info|
      info['storage'].perDatastoreUsage.map{|x| x.unshared}.inject(0, &:+)
    end.inject(0, &:+)
    log "User stats: #{numVms} VMs using %.2fGB of storage" % [unshared.to_f / 1024**3]

    @placement_hint = opts[:placement_hint] || (rand(100) + 1)
    datastore = self.datastore @placement_hint
    log "Datastore: #{datastore.name}"
  end
end
@@ -0,0 +1,314 @@
1
require 'open-uri'
require 'nokogiri'
require 'rbvmomi'

# The cached ovf deployer is an optimization on top of regular OVF deployment
# as it is offered by the VIM::OVFManager. Creating a VM becomes a multi-stage
# process: First the OVF is uploaded and instead of directly using it, it is
# prepared for linked cloning and marked as a template. It can then be cloned
# many times over, without the cost of repeated OVF deploys (network and storage
# IO) and the cost of storing the same base VM several times (storage space).
# Multiple concurrent users can try to follow this process and collisions are
# automatically detected and de-duplicated. One thread will win to create the
# OVF template, while the other will wait for the winning thread to finish the
# task. So even fully independent, distributed and unsynchronized clients using
# this call will be auto-synchronized just by talking to the same vCenter
# instance and using the same naming scheme for the templates.
#
# The caching concept above can be extended to multiple levels. Lets assume
# many VMs will share the same base OS, but are running different builds of the
# application running inside the VM. If it is expected that again many (but not
# all) VMs will share the same build of the application, a tree structure of
# templates becomes useful. At the root of the tree is the template with just
# the base OS. It is uploaded from an OVF if needed. Then, this base OS image
# is cloned, a particular build is installed and the resulting VM is again marked
# as a template. Users can then instantiate that particular build with very
# little extra overhead. This class supports such multi level templates via the
# :is_template parameter of linked_clone().
class CachedOvfDeployer
  # Constructor. Gets the VIM connection and important VIM objects
  # @param vim [VIM] VIM Connection
  # @param network [VIM::Network] Network to attach templates and VMs to
  # @param computer [VIM::ComputeResource] Host/Cluster to deploy templates/VMs to
  # @param template_folder [VIM::Folder] Folder in which all templates are kept
  # @param vm_folder [VIM::Folder] Folder into which to deploy VMs
  # @param datastore [VIM::Folder] Datastore to store template/VM in
  # @param opts [Hash] Additional parameters (:logger)
  def initialize vim, network, computer, template_folder, vm_folder, datastore, opts = {}
    @vim = vim
    @network = network
    @computer = computer
    @rp = @computer.resourcePool
    @template_folder = template_folder
    @vmfolder = vm_folder
    @datastore = datastore
    @logger = opts[:logger]
  end

  # Logs a message either via the configured logger or, if none was given,
  # to stdout prefixed with a timestamp.
  # @param x [String] Message to log
  def log x
    if @logger
      @logger.info x
    else
      puts "#{Time.now}: #{x}"
    end
  end

  # Internal helper method that executes the passed in block while disabling
  # the handling of SIGINT and SIGTERM signals. Restores their handlers after
  # the block is executed, even if the block raises.
  # @param enabled [Boolean] If false, this function is a no-op
  def _run_without_interruptions enabled
    if enabled
      int_handler = Signal.trap("SIGINT", 'IGNORE')
      term_handler = Signal.trap("SIGTERM", 'IGNORE')
    end

    begin
      yield
    ensure
      # Restore the previous handlers in an ensure block: the original code
      # leaked the 'IGNORE' handlers if the block raised an exception.
      if enabled
        Signal.trap("SIGINT", int_handler)
        Signal.trap("SIGTERM", term_handler)
      end
    end
  end

  # Uploads an OVF, prepares the resulting VM for linked cloning and then marks
  # it as a template. If another thread happens to race to do the same task,
  # the losing thread will not do the actual work, but instead wait for the
  # winning thread to do the work by looking up the template VM and waiting for
  # it to be marked as a template. This way, the cost of uploading and keeping
  # the full size of the VM is only paid once.
  # @param ovf_url [String] URL to the OVF to be deployed. Currently only http
  #                         and https are supported
  # @param template_name [String] Name of the template to be used. Should be the
  #                               same name for the same URL. A cluster specific
  #                               post-fix will automatically be added.
  # @option opts [int] :run_without_interruptions Whether or not to disable
  #                                               SIGINT and SIGTERM during
  #                                               the OVF upload.
  # @option opts [Hash] :config VM Config delta to apply after the OVF deploy is
  #                             done. Allows the template to be customized, e.g.
  #                             to set annotations.
  # @return [VIM::VirtualMachine] The template as a VIM::VirtualMachine instance
  def upload_ovf_as_template ovf_url, template_name, opts = {}
    # Optimization: If there happens to be a fully prepared template, then
    # there is no need to do the complicated OVF upload dance
    template = lookup_template template_name
    if template
      return template
    end

    # The OVFManager expects us to know the names of the networks mentioned
    # in the OVF file so we can map them to VIM::Network objects. For
    # simplicity this function assumes we need to read the OVF file
    # ourselves to know the names, and we map all of them to the same
    # VIM::Network.

    # If we're handling a file:// URI we need to strip the scheme as open-uri
    # can't handle them.
    uri = URI(ovf_url)
    if uri.scheme == "file" && uri.host.nil?
      ovf_url = uri.path
      uri = nil
    end

    # Kernel#open no longer dispatches URLs to open-uri as of Ruby 3.0, so
    # explicitly use URI.open for remote URLs and File.open for local paths.
    read_xml = lambda { |io| Nokogiri::XML(io.read) }
    ovf =
      if uri && uri.scheme
        URI.open(ovf_url, 'r', &read_xml)
      else
        File.open(ovf_url, 'r', &read_xml)
      end
    ovf.remove_namespaces!
    networks = ovf.xpath('//NetworkSection/Network').map{|x| x['name']}
    network_mappings = Hash[networks.map{|x| [x, @network]}]

    network_mappings_str = network_mappings.map{|k, v| "#{k} = #{v.name}"}
    log "networks: #{network_mappings_str.join(', ')}"

    pc = @vim.serviceContent.propertyCollector

    # OVFs need to be uploaded to a specific host. DRS won't just pick one
    # for us, so we need to pick one wisely. The host needs to be connected,
    # not be in maintenance mode and must have the destination datastore
    # accessible.
    hosts = @computer.host
    hosts_props = pc.collectMultiple(
      hosts,
      'datastore', 'runtime.connectionState',
      'runtime.inMaintenanceMode', 'name'
    )
    host = hosts.shuffle.find do |x|
      host_props = hosts_props[x]
      is_connected = host_props['runtime.connectionState'] == 'connected'
      is_ds_accessible = host_props['datastore'].member?(@datastore)
      is_connected && is_ds_accessible && !host_props['runtime.inMaintenanceMode']
    end
    if !host
      fail "No host in the cluster available to upload OVF to"
    end

    log "Uploading OVF to #{hosts_props[host]['name']}..."
    property_mappings = {}

    # To work around the VMFS 8-host limit (existed until ESX 5.0), as
    # well as just for organization purposes, we create one template per
    # cluster. This also provides us with additional isolation.
    vm_name = template_name+"-#{@computer.name}"

    vm = nil
    wait_for_template = false
    # If the user sets opts[:run_without_interruptions], we will block
    # signals from the user (SIGINT, SIGTERM) in order to not be interrupted.
    # This is desirable, as other threads depend on this thread finishing
    # its prepare job and thus interrupting it has impacts beyond this
    # single thread or process.
    _run_without_interruptions(opts[:run_without_interruptions]) do
      begin
        vm = @vim.serviceContent.ovfManager.deployOVF(
          uri: ovf_url,
          vmName: vm_name,
          vmFolder: @template_folder,
          host: host,
          resourcePool: @rp,
          datastore: @datastore,
          networkMappings: network_mappings,
          propertyMappings: property_mappings)
      rescue RbVmomi::Fault => fault
        # If two threads execute this script at the same time to upload
        # the same template under the same name, one will win and the other
        # will be rejected by VC. We catch those cases here, and handle
        # them by waiting for the winning thread to finish preparing the
        # template, see below ...
        is_duplicate = fault.fault.is_a?(RbVmomi::VIM::DuplicateName)
        is_duplicate ||= (fault.fault.is_a?(RbVmomi::VIM::InvalidState) &&
                          !fault.fault.is_a?(RbVmomi::VIM::InvalidHostState))
        if is_duplicate
          wait_for_template = true
        else
          raise fault
        end
      end

      # The winning thread succeeded in uploading the OVF. Now we need to
      # prepare it for (linked) cloning and mark it as a template to signal
      # we are done.
      if !wait_for_template
        config = opts[:config] || {}
        config = vm.update_spec_add_delta_disk_layer_on_all_disks(config)
        # XXX: Should we add a version that does retries?
        vm.ReconfigVM_Task(:spec => config).wait_for_completion
        vm.MarkAsTemplate
      end
    end

    # The losing thread now needs to wait for the winning thread to finish
    # uploading and preparing the template
    if wait_for_template
      log "Template already exists, waiting for it to be ready"
      vm = _wait_for_template_ready @template_folder, vm_name
      log "Template fully prepared and ready to be cloned"
    end

    vm
  end

  # Looks up a template by name in the configured template_path. Should be used
  # before uploading the VM via upload_ovf_as_template, although that is
  # not strictly required, but a lot more efficient.
  # @param template_name [String] Name of the template to be used. A cluster
  #                               specific post-fix will automatically be added.
  # @return [VIM::VirtualMachine] The template as a VIM::VirtualMachine instance
  #                               or nil
  def lookup_template template_name
    template_path = "#{template_name}-#{@computer.name}"
    template = @template_folder.traverse(template_path, RbVmomi::VIM::VirtualMachine)
    if template
      config = template.config
      # A VM with the right name that hasn't been marked as a template yet is
      # still being prepared by another thread, so treat it as "not found".
      is_template = config && config.template
      if !is_template
        template = nil
      end
    end
    template
  end

  # Creates a linked clone of a template prepared with upload_ovf_as_template.
  # The function waits for completion on the clone task. Optionally, in case
  # two level templates are being used, this function can wait for another
  # thread to finish creating the second level template. See class comments
  # for the concept of multi level templates.
  # @param template_vm [VIM::VirtualMachine] Template VM to clone from.
  # @param vm_name [String] Name of the new VM that is being created via cloning.
  # @param config [Hash] VM Config delta to apply after the VM is cloned.
  #                      Allows the template to be customized, e.g. to adjust
  #                      CPU or Memory sizes or set annotations.
  # @option opts [int] :is_template If true, the clone is assumed to be a template
  #                                 again and collision and de-duping logic kicks
  #                                 in.
  # @return [VIM::VirtualMachine] The VIM::VirtualMachine instance of the clone
  def linked_clone template_vm, vm_name, config, opts = {}
    spec = {
      location: {
        pool: @rp,
        datastore: @datastore,
        diskMoveType: :moveChildMostDiskBacking,
      },
      powerOn: false,
      template: false,
      config: config,
    }
    if opts[:is_template]
      wait_for_template = false
      template_name = "#{vm_name}-#{@computer.name}"
      begin
        vm = template_vm.CloneVM_Task(
          folder: @template_folder,
          name: template_name,
          spec: spec
        ).wait_for_completion
      rescue RbVmomi::Fault => fault
        if fault.fault.is_a?(RbVmomi::VIM::DuplicateName)
          wait_for_template = true
        else
          raise
        end
      end

      if wait_for_template
        puts "#{Time.now}: Template already exists, waiting for it to be ready"
        vm = _wait_for_template_ready @template_folder, template_name
        puts "#{Time.now}: Template ready"
      end
    else
      vm = template_vm.CloneVM_Task(
        folder: @vmfolder,
        name: vm_name,
        spec: spec
      ).wait_for_completion
    end
    vm
  end

  # Internal helper method that waits for a template to be fully created. It
  # polls until it finds the VM in the inventory, and once it is there, waits
  # for it to be fully created and marked as a template. This function will
  # block for forever if the template never gets created or marked as a
  # template.
  # @param vm_folder [VIM::Folder] Folder in which we expect the template to show up
  # @param vm_name [String] Name of the VM we are waiting for
  # @return [VIM::VirtualMachine] The VM we were waiting for when it is ready
  def _wait_for_template_ready vm_folder, vm_name
    vm = nil
    while !vm
      sleep 3
      # XXX: Optimize this
      vm = vm_folder.children.find{|x| x.name == vm_name}
    end
    log "Template VM found"
    sleep 2
    while true
      runtime, template = vm.collect 'runtime', 'config.template'
      ready = runtime && runtime.host && runtime.powerState == "poweredOff"
      ready = ready && template
      if ready
        break
      end
      sleep 5
    end

    vm
  end
end