rbvmomi 1.6.0 → 1.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/Rakefile +13 -13
- data/VERSION +1 -1
- data/devel/analyze-vim-declarations.rb +17 -4
- data/devel/merge-internal-vmodl.rb +1 -1
- data/devel/merge-manual-vmodl.rb +32 -0
- data/lib/rbvmomi/basic_types.rb +19 -3
- data/lib/rbvmomi/connection.rb +22 -8
- data/lib/rbvmomi/deserialization.rb +8 -2
- data/lib/rbvmomi/pbm.rb +66 -0
- data/lib/rbvmomi/sms.rb +61 -0
- data/lib/rbvmomi/sms/SmsStorageManager.rb +7 -0
- data/lib/rbvmomi/trivial_soap.rb +6 -1
- data/lib/rbvmomi/type_loader.rb +21 -1
- data/lib/rbvmomi/utils/admission_control.rb +13 -1
- data/lib/rbvmomi/utils/deploy.rb +21 -9
- data/lib/rbvmomi/utils/perfdump.rb +628 -0
- data/lib/rbvmomi/vim.rb +36 -3
- data/lib/rbvmomi/vim/Folder.rb +10 -0
- data/lib/rbvmomi/vim/ManagedObject.rb +6 -1
- data/lib/rbvmomi/vim/OvfManager.rb +95 -6
- data/lib/rbvmomi/vim/PerformanceManager.rb +38 -13
- data/lib/rbvmomi/vim/Task.rb +1 -1
- data/lib/rbvmomi/vim/VirtualMachine.rb +33 -18
- data/test/test_deserialization.rb +3 -0
- data/test/test_serialization.rb +2 -2
- data/vmodl.db +0 -0
- metadata +7 -2
data/lib/rbvmomi/type_loader.rb
CHANGED
@@ -47,11 +47,18 @@ class TypeLoader
 
   def has? name
     fail unless name.is_a? String
+
     @db.member?(name) or BasicTypes::BUILTIN.member?(name)
   end
 
   def get name
-    fail unless name.is_a? String
+    fail "name '#{name}' is #{name.class} expecting String" unless name.is_a? String
+
+    first_char = name[0].chr
+    if first_char.downcase == first_char
+      name = "%s%s" % [first_char.upcase, name[1..-1]]
+    end
+
     return @loaded[name] if @loaded.member? name
     @lock.synchronize do
       return @loaded[name] if @loaded.member? name
@@ -65,6 +72,16 @@ class TypeLoader
   def add_types types
     @lock.synchronize do
       @db.merge! types
+      @db = Hash[@db.map do |name, value|
+        if value
+          value['wsdl_name'] ||= name
+        end
+        first_char = name[0].chr
+        if first_char.downcase == first_char
+          name = "%s%s" % [first_char.upcase, name[1..-1]]
+        end
+        [name, value]
+      end]
     end
   end
 
@@ -96,6 +113,7 @@ class TypeLoader
     superclass = get desc['wsdl_base']
     Class.new(superclass).tap do |klass|
       klass.init name, desc['props']
+      klass.wsdl_name = desc['wsdl_name']
     end
   end
 
@@ -103,12 +121,14 @@ class TypeLoader
     superclass = get desc['wsdl_base']
     Class.new(superclass).tap do |klass|
       klass.init name, desc['props'], desc['methods']
+      klass.wsdl_name = desc['wsdl_name']
    end
  end

  def make_enum_type name, desc
    Class.new(BasicTypes::Enum).tap do |klass|
      klass.init name, desc['values']
+      klass.wsdl_name = desc['wsdl_name']
    end
  end
end
data/lib/rbvmomi/utils/admission_control.rb
CHANGED
@@ -41,6 +41,8 @@
 # computer (cluster), resource pool, vm_folder and datastore. Currently once
 # computed, a new updated placement can't be generated.
 class AdmissionControlledResourceScheduler
+  attr_reader :rp
+
   def initialize vim, opts = {}
     @vim = vim
 
@@ -60,10 +62,16 @@ class AdmissionControlledResourceScheduler
 
     @pc = @vim.serviceContent.propertyCollector
     @root_folder = @vim.serviceContent.rootFolder
+
+    @logger = opts[:logger]
   end
 
   def log x
-    puts "#{Time.now}: #{x}"
+    if @logger
+      @logger.info x
+    else
+      puts "#{Time.now}: #{x}"
+    end
   end
 
   # Returns the used VM folder. If not set yet, uses the vm_folder_path to
@@ -324,6 +332,10 @@ class AdmissionControlledResourceScheduler
   # datastore without much intelligence, as long as it passes admission control.
   # @return [VIM::Datastore] Chosen datastore
   def datastore placementHint = nil
+    if @datastore
+      return @datastore
+    end
+
     pod_datastores = pick_computer.datastore & datastores
 
     eligible = pod_datastores.select do |ds|
data/lib/rbvmomi/utils/deploy.rb
CHANGED
@@ -33,7 +33,8 @@ class CachedOvfDeployer
   # @param template_folder [VIM::Folder] Folder in which all templates are kept
   # @param vm_folder [VIM::Folder] Folder into which to deploy VMs
   # @param datastore [VIM::Folder] Datastore to store template/VM in
-  def initialize vim, network, computer, template_folder, vm_folder, datastore
+  # @param opts [Hash] Additional parameters
+  def initialize vim, network, computer, template_folder, vm_folder, datastore, opts = {}
    @vim = vim
    @network = network
    @computer = computer
@@ -41,11 +42,15 @@ class CachedOvfDeployer
     @template_folder = template_folder
     @vmfolder = vm_folder
     @datastore = datastore
+    @logger = opts[:logger]
   end
 
   def log x
-    # XXX: Should find a way for users to customize how logging is done
-    puts "#{Time.now}: #{x}"
+    if @logger
+      @logger.info x
+    else
+      puts "#{Time.now}: #{x}"
+    end
   end
 
   # Internal helper method that executes the passed in block while disabling
@@ -85,6 +90,13 @@ class CachedOvfDeployer
   # to set annotations.
   # @return [VIM::VirtualMachine] The template as a VIM::VirtualMachine instance
   def upload_ovf_as_template ovf_url, template_name, opts = {}
+    # Optimization: If there happens to be a fully prepared template, then
+    # there is no need to do the complicated OVF upload dance
+    template = lookup_template template_name
+    if template
+      return template
+    end
+
     # The OVFManager expects us to know the names of the networks mentioned
     # in the OVF file so we can map them to VIM::Network objects. For
     # simplicity this function assumes we need to read the OVF file
@@ -166,11 +178,10 @@ class CachedOvfDeployer
     # prepare it for (linked) cloning and mark it as a template to signal
     # we are done.
     if !wait_for_template
-      vm.add_delta_disk_layer_on_all_disks
-      if opts[:config]
-        # XXX: Should we add a version that does retries?
-        vm.ReconfigVM_Task(:spec => opts[:config]).wait_for_completion
-      end
+      config = opts[:config] || {}
+      config = vm.update_spec_add_delta_disk_layer_on_all_disks(config)
+      # XXX: Should we add a version that does retries?
+      vm.ReconfigVM_Task(:spec => config).wait_for_completion
      vm.MarkAsTemplate
    end
  end
@@ -197,7 +208,8 @@ class CachedOvfDeployer
     template_path = "#{template_name}-#{@computer.name}"
     template = @template_folder.traverse(template_path, VIM::VirtualMachine)
     if template
-      is_template = template.config && template.config.template
+      config = template.config
+      is_template = config && config.template
      if !is_template
        template = nil
      end
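
A rough usage sketch of the reworked CachedOvfDeployer API, for orientation only: vim, network, cluster, template_folder, vm_folder and datastore are placeholder inventory objects looked up elsewhere, and the OVF URL and template name are hypothetical.

    require 'rbvmomi'
    require 'rbvmomi/utils/deploy'
    require 'logger'

    # The new opts hash accepts :logger; without it the class falls back to
    # puts-based logging, as before.
    deployer = CachedOvfDeployer.new(
      vim, network, cluster, template_folder, vm_folder, datastore,
      :logger => Logger.new(STDOUT)
    )

    # With the new short-circuit, this returns an already prepared template
    # instead of repeating the OVF upload.
    template = deployer.upload_ovf_as_template(
      'http://example.com/app.ovf', 'app-template'
    )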
data/lib/rbvmomi/utils/perfdump.rb
ADDED
@@ -0,0 +1,628 @@
+require 'set'
+require 'yaml'
+
+# PerfAggregator is a class that, given connections to a list of vCenter
+# Servers, will fetch the entire VM folder and ResourcePool hierarchies,
+# including all VIM::VirtualMachine objects and aggregate VM stats along
+# the tree hierarchies. The PerfAggregator class allows for users to
+# perform post processing on the data returned by vCenter, e.g. to augment
+# it with addtional data that was obtained using a combination of
+# VM annotations (or custom values) and an external DB. Post processing
+# can also define additional tree structures that may be completely
+# independent of the VM folder and ResourcePool hirarchies provided by
+# vCenter, e.g. one based on VMs used for testing of a set of source code
+# branches.
+class PerfAggregator
+  attr_accessor :path_types
+
+  def initialize logger = nil
+    @logger = logger
+    @path_types = Set.new
+    @path_types << 'rp'
+    @path_types << 'vmfolder'
+
+    # XXX: Rename this variable
+    @perf_metrics = {
+      'virtualDisk.read' => :sum,
+      'virtualDisk.write' => :sum,
+      'virtualDisk.numberReadAveraged' => :sum,
+      'virtualDisk.numberWriteAveraged' => :sum,
+      'virtualDisk.totalReadLatency.avg' => :avg_ignore_zero,
+      'virtualDisk.totalWriteLatency.avg' => :avg_ignore_zero,
+      'virtualDisk.totalReadLatency.max' => :max,
+      'virtualDisk.totalWriteLatency.max' => :max,
+      'num.vm' => :sum,
+      'num.poweredonvm' => :sum,
+      'summary.quickStats.hostMemoryUsage' => :sum,
+      'summary.quickStats.guestMemoryUsage' => :sum,
+      'summary.quickStats.overallCpuUsage' => :sum,
+      'summary.config.memorySizeMB' => :sum,
+      'summary.config.numCpu' => :sum,
+      'storage.space.committed' => :sum,
+      'storage.space.uncommitted' => :sum,
+      'storage.space.unshared' => :sum,
+    }
+  end
+
+  def log text
+    if @logger
+      @logger.info text
+    else
+      puts "#{Time.now}: #{text}"
+    end
+  end
+
+  def set_vm_processing_callback &block
+    @vm_processing_callback = block
+  end
+
+  def add_node_unless_exists inventory, id, props
+    if !inventory[id]
+      inventory[id] = props.merge({'children' => []})
+    end
+  end
+
+  # Method that extracts the entire VM folder and ResourcePool hierarchy
+  # from vCenter with a single API call. It generates a flat list of
+  # VIM objects which will include VIM::Folder, VIM::Datacenter,
+  # VIM::ClusterComputeResource, VIM::ResourcePool and VIM::VirtualMachine.
+  #
+  # Post processing is done (using helper methods) to populate full paths,
+  # lists of parents (ancestry) so that the tree structure can be understood.
+  # Information about two seperate sub-trees is gathered: The tree following
+  # the VM folders and one tree following the clusters and resource pools.
+  # In the vSphere Client there are called the "VM/Template View" and the
+  # "Host and Clusters View".
+  #
+  # @param rootFolder [VIM::Folder] Expected to be the rootFolder of the VC
+  # @param vm_prop_names [Array] List of VM properties to fetch
+  def all_inventory_flat rootFolder, vm_prop_names = ['name']
+    conn = rootFolder._connection
+    pc = conn.propertyCollector
+
+    filterSpec = RbVmomi::VIM.PropertyFilterSpec(
+      :objectSet => [
+        :obj => rootFolder,
+        :selectSet => [
+          RbVmomi::VIM.TraversalSpec(
+            :name => 'tsFolder',
+            :type => 'Folder',
+            :path => 'childEntity',
+            :skip => false,
+            :selectSet => [
+              RbVmomi::VIM.SelectionSpec(:name => 'tsFolder'),
+              RbVmomi::VIM.SelectionSpec(:name => 'tsDatacenterVmFolder'),
+              RbVmomi::VIM.SelectionSpec(:name => 'tsDatacenterHostFolder'),
+              RbVmomi::VIM.SelectionSpec(:name => 'tsClusterRP'),
+              RbVmomi::VIM.SelectionSpec(:name => 'tsClusterHost'),
+            ]
+          ),
+          RbVmomi::VIM.TraversalSpec(
+            :name => 'tsDatacenterVmFolder',
+            :type => 'Datacenter',
+            :path => 'vmFolder',
+            :skip => false,
+            :selectSet => [
+              RbVmomi::VIM.SelectionSpec(:name => 'tsFolder')
+            ]
+          ),
+          RbVmomi::VIM.TraversalSpec(
+            :name => 'tsDatacenterHostFolder',
+            :type => 'Datacenter',
+            :path => 'hostFolder',
+            :skip => false,
+            :selectSet => [
+              RbVmomi::VIM.SelectionSpec(:name => 'tsFolder')
+            ]
+          ),
+          RbVmomi::VIM.TraversalSpec(
+            :name => 'tsClusterRP',
+            :type => 'ClusterComputeResource',
+            :path => 'resourcePool',
+            :skip => false,
+            :selectSet => [
+              RbVmomi::VIM.SelectionSpec(:name => 'tsRP'),
+            ]
+          ),
+          RbVmomi::VIM.TraversalSpec(
+            :name => 'tsClusterHost',
+            :type => 'ClusterComputeResource',
+            :path => 'host',
+            :skip => false,
+            :selectSet => []
+          ),
+          RbVmomi::VIM.TraversalSpec(
+            :name => 'tsRP',
+            :type => 'ResourcePool',
+            :path => 'resourcePool',
+            :skip => false,
+            :selectSet => [
+              RbVmomi::VIM.SelectionSpec(:name => 'tsRP'),
+            ]
+          ),
+        ]
+      ],
+      :propSet => [
+        { :type => 'Folder', :pathSet => ['name', 'parent'] },
+        { :type => 'Datacenter', :pathSet => ['name', 'parent'] },
+        { :type => 'ClusterComputeResource',
+          :pathSet => ['name', 'parent', 'summary.effectiveCpu', 'summary.effectiveMemory']
+        },
+        { :type => 'ResourcePool', :pathSet => ['name', 'parent'] },
+        { :type => 'HostSystem', :pathSet => ['name', 'parent', 'runtime.connectionState'] },
+        { :type => 'VirtualMachine', :pathSet => vm_prop_names },
+      ]
+    )
+
+    result = pc.RetrieveProperties(:specSet => [filterSpec])
+    inventory = {}
+    vms = {}
+    result.each do |r|
+      if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
+        vms[r.obj] = r.to_hash
+      else
+        inventory[r.obj] = r.to_hash
+      end
+    end
+    inventory['root'] = {
+      'name' => 'root',
+      'path' => 'root',
+      'parent' => nil,
+      'parents' => [],
+    }
+    inventory[conn.host] = {
+      'name' => conn.host,
+      'path' => "root/#{conn.host}",
+      'parent' => 'root',
+      'parents' => ['root'],
+    }
+    _compute_vmfolders_and_rp_paths conn.host, inventory
+    _compute_parents_and_children inventory
+    [vms, inventory]
+  end
+
+  # Helper method that computes full paths and parent lists out of a
+  # flat list of objects. Operates recursively and doesn't yet split
+  # the paths into different tree types.
+  # @param obj [Hash] Property hash of current element
+  # @param objs [Array] Flat list of tree elements
+  def _compute_vmfolder_and_rp_path_and_parents vc, obj, objs
+    if obj['path']
+      return
+    end
+    if !obj['parent']
+      obj['parent'] = vc
+      obj['path'] = "root/#{vc}/#{obj['name']}"
+      obj['parents'] = ['root', vc]
+      return
+    end
+    parent = objs[obj['parent']]
+    _compute_vmfolder_and_rp_path_and_parents(vc, parent, objs)
+    obj['path'] = "%s/%s" % [parent['path'], obj['name']]
+    obj['parents'] = [obj['parent']] + parent['parents']
+    nil
+  end
+
+  # Helper method that computes full paths and parent lists out of a
+  # flat list of objects. Full paths are tracked seperately per type
+  # of tree, i.e. seperately for the ResourcePool tree and the VM folder
+  # tree.
+  # @param objs [Array] Flat list of tree elements
+  def _compute_vmfolders_and_rp_paths vc, objs
+    objs.each do |obj, props|
+      _compute_vmfolder_and_rp_path_and_parents(vc, props, objs)
+
+      props['paths'] = {}
+      obj_with_parents = [obj] + props['parents']
+      dc = obj_with_parents.find{|x| x.is_a?(RbVmomi::VIM::Datacenter)}
+      # Everything above and including a VIM::Datacenter is part of
+      # both the rp and vmfolder tree. Anything below depends on the
+      # folder of the datacenter it is under: The hostFolder is called
+      # "host" while the root vm folder is called "vm".
+      if !dc || obj.is_a?(RbVmomi::VIM::Datacenter)
+        props['paths']['rp'] = props['path']
+        props['paths']['vmfolder'] = props['path']
+      else
+        dc_index = obj_with_parents.index dc
+        folder = obj_with_parents[dc_index - 1]
+        if objs[folder]['name'] == 'host'
+          props['paths']['rp'] = props['path']
+        else
+          props['paths']['vmfolder'] = props['path']
+        end
+      end
+
+      props['children'] = []
+    end
+  end
+
+  # Helper method that computes children references and parent paths on
+  # all objects, if not computed yet. Assumes that full paths of each
+  # object have been calculated already.
+  # @param objs [Array] Flat list of tree elements
+  def _compute_parents_and_children objs
+    objs.each do |obj, props|
+      if props['parent_paths']
+        next
+      end
+      props['parent_paths'] = {}
+      if !props['parent']
+        next
+      end
+      parent = objs[props['parent']]
+      props['paths'].keys.each do |type|
+        props['parent_paths'][type] = parent['paths'][type]
+      end
+      parent['children'] << obj
+    end
+  end
+
+  def _aggregate_metrics vms_stats, perf_metrics
+    out = Hash[perf_metrics.keys.map{|x| [x, 0]}]
+    avg_counter = Hash[perf_metrics.keys.map{|x| [x, 0]}]
+
+    vms_stats.each do |vm_stats|
+      perf_metrics.each do |key, type|
+        values = vm_stats[key]
+        if !values.is_a?(Array)
+          values = [values]
+        end
+        values.compact.each do |val|
+          if type == :sum
+            out[key] += val
+          elsif type == :max
+            out[key] = [out[key], val].max
+          elsif type == :avg
+            out[key] += val.to_f
+            avg_counter[key] += 1
+          elsif type == :avg_ignore_zero
+            if val > 0
+              out[key] += val.to_f
+              avg_counter[key] += 1
+            end
+          end
+        end
+      end
+    end
+
+    perf_metrics.each do |key, type|
+      if type == :avg_ignore_zero || type == :avg
+        if avg_counter[key] > 0
+          out[key] = out[key] / avg_counter[key]
+        end
+      end
+    end
+
+    out
+  end
+
+  def _collect_info_on_all_vms_single root_folder, opts = {}
+    prop_names = opts[:prop_names]
+    if !prop_names
+      prop_names = [
+        'name',
+        'config.template',
+        'runtime.powerState', 'datastore', 'config.annotation',
+        'parent', 'resourcePool', 'storage.perDatastoreUsage',
+        'summary.config.memorySizeMB',
+        'summary.config.numCpu',
+        'summary.quickStats.hostMemoryUsage',
+        'summary.quickStats.guestMemoryUsage',
+        'summary.quickStats.overallCpuUsage',
+        'runtime.connectionState',
+        'config.instanceUuid',
+        'customValue',
+      ]
+    end
+    perf_metrics = opts[:perf_metrics]
+    if !perf_metrics
+      perf_metrics = {
+        'virtualDisk.read' => :avg,
+        'virtualDisk.write' => :avg,
+        'virtualDisk.numberReadAveraged' => :avg,
+        'virtualDisk.numberWriteAveraged' => :avg,
+        'virtualDisk.totalReadLatency' => :avg_ignore_zero,
+        'virtualDisk.totalWriteLatency' => :avg_ignore_zero,
+      }
+    end
+    host_perf_metrics = opts[:host_perf_metrics]
+    if !host_perf_metrics
+      host_perf_metrics = {
+        'cpu.usage' => :avg,
+        'mem.usage' => :avg,
+      }
+    end
+
+    vms_props, inventory = all_inventory_flat root_folder, prop_names
+    vms = vms_props.keys
+
+    hosts_props = inventory.select{|k, v| k.is_a?(VIM::HostSystem)}
+
+    conn = root_folder._connection
+    sc = conn.serviceContent
+    pc = sc.propertyCollector
+    pm = sc.perfManager
+    vc_uuid = conn.instanceUuid
+
+    connected_vms = vms_props.select do |vm, props|
+      is_connected = props['runtime.connectionState'] != "disconnected"
+      is_template = props['config.template']
+      is_connected && !is_template
+    end.keys
+
+    begin
+      # XXX: Need to find a good way to get the "right" samples
+      if connected_vms.length == 0
+        {}
+      else
+        vms_stats = pm.retrieve_stats(
+          connected_vms, perf_metrics.keys,
+          :max_samples => 3
+        )
+      end
+    rescue RbVmomi::Fault => ex
+      if ex.fault.is_a? RbVmomi::VIM::ManagedObjectNotFound
+        connected_vms -= [ex.fault.obj]
+        retry
+      end
+      raise
+    end
+
+    connected_hosts = hosts_props.select do |k,v|
+      v['runtime.connectionState'] != "disconnected"
+    end
+    if connected_hosts.length > 0
+      hosts_stats = pm.retrieve_stats(
+        connected_hosts.keys, host_perf_metrics.keys,
+        :max_samples => 3
+      )
+    end
+    hosts_props.each do |host, props|
+      if !connected_hosts[host]
+        next
+      end
+
+      stats = hosts_stats[host] || {}
+      stats = stats[:metrics] || {}
+      stats = _aggregate_metrics [stats], host_perf_metrics
+      props.merge!(stats)
+    end
+
+    vms_props.each do |vm, props|
+      if !connected_vms.member?(vm)
+        next
+      end
+      props['num.vm'] = 1
+      powered_on = (props['runtime.powerState'] == 'poweredOn')
+      props['num.poweredonvm'] = powered_on ? 1 : 0
+
+      stats = vms_stats[vm] || {}
+      stats = stats[:metrics] || {}
+      stats = _aggregate_metrics [stats], perf_metrics
+      props.merge!(stats)
+      props['virtualDisk.totalReadLatency.avg'] = props['virtualDisk.totalReadLatency']
+      props['virtualDisk.totalWriteLatency.avg'] = props['virtualDisk.totalWriteLatency']
+      props['virtualDisk.totalReadLatency.max'] = props['virtualDisk.totalReadLatency']
+      props['virtualDisk.totalWriteLatency.max'] = props['virtualDisk.totalWriteLatency']
+      props.delete('virtualDisk.totalReadLatency')
+      props.delete('virtualDisk.totalWriteLatency')
+
+      per_ds_usage = props['storage.perDatastoreUsage']
+      props['storage.space.committed'] = per_ds_usage.map{|x| x.committed}.inject(0, &:+)
+      props['storage.space.uncommitted'] = per_ds_usage.map{|x| x.uncommitted}.inject(0, &:+)
+      props['storage.space.unshared'] = per_ds_usage.map{|x| x.unshared}.inject(0, &:+)
+
+      props['parent_paths'] = {}
+      if inventory[props['parent']]
+        props['parent_paths']['vmfolder'] = inventory[props['parent']]['path']
+      end
+      if !props['config.template']
+        rp_props = inventory[props['resourcePool']]
+        props['parent_paths']['rp'] = rp_props['path']
+      end
+
+      props['annotation_yaml'] = YAML.load(props['config.annotation'] || '')
+      if !props['annotation_yaml'].is_a?(Hash)
+        props['annotation_yaml'] = {}
+      end
+
+      props['customValue'] = Hash[props['customValue'].map do |x|
+        [x.key, x.value]
+      end]
+
+      props['vc_uuid'] = vc_uuid
+    end
+
+    [vms_props, inventory, hosts_props]
+  end
+
+  def collect_info_on_all_vms root_folders, opts = {}
+    log "Fetching information from all VCs ..."
+    vms_props = {}
+    hosts_props = {}
+    inventory = {}
+    lock = Mutex.new
+    root_folders.map do |root_folder|
+      Thread.new do
+        begin
+          single_vms_props, single_inventory, single_hosts_props =
+            _collect_info_on_all_vms_single(root_folder, opts)
+
+          lock.synchronize do
+            vms_props.merge!(single_vms_props)
+            if inventory['root']
+              single_inventory['root']['children'] += inventory['root']['children']
+            end
+            inventory.merge!(single_inventory)
+            hosts_props.merge!(single_hosts_props)
+          end
+        rescue Exception => ex
+          log "#{ex.class}: #{ex.message}"
+          ex.backtrace.each do |line|
+            log line
+          end
+          raise
+        end
+      end
+    end.each{|t| t.join}
+
+    log "Make data marshal friendly ..."
+    inventory = _make_marshal_friendly(inventory)
+    vms_props = _make_marshal_friendly(vms_props)
+    hosts_props = _make_marshal_friendly(hosts_props)
+
+    log "Perform external post processing ..."
+    if @vm_processing_callback
+      @vm_processing_callback.call(self, vms_props, inventory)
+    end
+
+    log "Perform data aggregation ..."
+    # Processing the annotations may have added new nodes to the
+    # inventory list, hence we need to run _compute_parents_and_children
+    # again to calculate the parents and children for the newly
+    # added nodes.
+    _compute_parents_and_children inventory
+
+    # Now that we have all VMs and a proper inventory tree built, we can
+    # aggregate the VM stats along all trees and tree nodes. This
+    # de-normalizes the data heavily, but thats fine
+    path_types = opts[:path_types] || @path_types
+    inventory = _aggregate_vms path_types, vms_props, inventory
+
+    log "Done collecting and aggregating stats"
+
+    @inventory = inventory
+    @vms_props = vms_props
+
+    {
+      'inventory' => inventory,
+      'vms_props' => vms_props,
+      'hosts_props' => hosts_props,
+    }
+  end
+
+  def _make_marshal_friendly hash
+    hash = Hash[hash.map do |k, v|
+      if v['parent']
+        v['parent'] = _mo2str(v['parent'])
+      end
+      if v['resourcePool']
+        v['resourcePool'] = _mo2str(v['resourcePool'])
+      end
+      if v['children']
+        v['children'] = v['children'].map{|x| _mo2str(x)}
+      end
+      if v['parents']
+        v['parents'] = v['parents'].map{|x| _mo2str(x)}
+      end
+      if v['datastore']
+        v['datastore'] = v['datastore'].map{|x| _mo2str(x)}
+      end
+      v['type'] = k.class.name
+      [_mo2str(k), v]
+    end]
+    # Marhsal hash to JSON and back. This is just debug code to ensure
+    # that all further processing can be done on a serialized dump of
+    # the data.
+    hash = JSON.load(JSON.dump(hash))
+  end
+
+  def _mo2str mo
+    if !mo.is_a?(RbVmomi::VIM::ManagedObject)
+      mo
+    else
+      "vim-#{mo._connection.instanceUuid}-#{mo._ref}"
+    end
+  end
+
+  # Helper method that aggregates the VM stats along all trees and
+  # tree nodes. This de-normalizes the data heavily, but thats fine.
+  def _aggregate_vms path_types, vms_props, inventory
+    # XXX: Opimtization:
+    # This function is currently quite wasteful. It computes all VMs
+    # at each level and then aggregates the VMs for each node individually
+    # Instead, the aggregation itself should explot the tree structure.
+    path_types.each do |path_type|
+      index = {}
+      reverse_index = {}
+      inventory.each do |k, v|
+        if v['paths'] && v['paths'][path_type]
+          path = v['paths'][path_type]
+          index[path] = v
+          reverse_index[path] = k
+        end
+      end
+
+      paths_vms = {}
+
+      vms_props.each do |vm, props|
+        if !props['parent_paths'] || !props['parent_paths'][path_type]
+          next
+        end
+        parent_path = props['parent_paths'][path_type]
+        while parent_path
+          parent = index[parent_path]
+          if !parent
+            puts "Parent is nil, so dumping some stuff"
+            puts path_type
+            puts "parent path: #{parent_path}"
+            pp index.keys
+            pp props
+          end
+          paths_vms[parent_path] ||= []
+          paths_vms[parent_path] << vm
+          parent_path = parent['parent_paths'][path_type]
+        end
+      end
+
+      paths_vms.each do |k, vms|
+        inventory[reverse_index[k]]['vms'] ||= {}
+        inventory[reverse_index[k]]['vms'][path_type] = vms
+        vms_stats = vms_props.select{|k, v| vms.member?(k)}.values
+        stats = _aggregate_metrics vms_stats, @perf_metrics
+        inventory[reverse_index[k]]['stats'] ||= {}
+        inventory[reverse_index[k]]['stats'][path_type] = stats
+      end
+
+      #pp paths_vms.map{|k, v| [k, reverse_index[k], v.length, index[k]['stats'][path_type].length]}
+    end
+
+    inventory
+  end
+
+  def visualize_vm_props
+    path_types_rows = construct_tree_rows_from_vm_props
+    path_types_rows.each do |path_type, rows|
+      puts "Path type #{path_type}:"
+      rows.each do |row|
+        indent, name, stats = row
+        puts "#{' ' * indent}#{name}: #{stats['num.vm']}"
+      end
+      puts ""
+    end
+  end
+
+  def construct_tree_rows_from_vm_props path_types = nil
+    path_types ||= @path_types
+    def visualize_node path_type, node, inventory, indent = 0
+      rows = []
+      if !node || !node['stats'] || !node['stats'][path_type]
+        stats = {}
+        return []
+      else
+        stats = node['stats'][path_type]
+      end
+      rows << [indent, node['name'], stats]
+      node['children'].each do |child|
+        rows += visualize_node path_type, inventory[child], inventory, indent + 1
+      end
+      rows
+    end
+
+    Hash[path_types.map do |path_type|
+      key, root = @inventory.find{|k, v| v['paths'][path_type] == 'root'}
+      rows = visualize_node path_type, root, @inventory
+      [path_type, rows]
+    end]
+  end
+end