bosh_vsphere_cpi 0.4.8
Sign up to get free protection for your applications and to get access to all the features.
- data/README +1 -0
- data/Rakefile +50 -0
- data/lib/cloud/vsphere/client.rb +431 -0
- data/lib/cloud/vsphere/cloud.rb +1085 -0
- data/lib/cloud/vsphere/lease_updater.rb +40 -0
- data/lib/cloud/vsphere/models/disk.rb +8 -0
- data/lib/cloud/vsphere/resources.rb +530 -0
- data/lib/cloud/vsphere/version.rb +7 -0
- data/lib/cloud/vsphere.rb +29 -0
- data/spec/spec_helper.rb +31 -0
- data/spec/unit/vsphere_resource_spec.rb +254 -0
- metadata +118 -0
@@ -0,0 +1,40 @@
|
|
1
|
+
module VSphereCloud

  # Keeps an NFC lease alive from a background thread by reporting progress
  # once a second. The vSphere API drops a lease that stops receiving
  # progress updates, so the owner sets #progress while the thread ticks,
  # then calls #finish or #abort when done.
  class LeaseUpdater
    # Progress percentage (0-100) reported on the next tick; writable by
    # the owner while the background thread runs.
    attr_accessor :progress

    # @param client [Object] vSphere client (retained for parity with
    #   callers; not used directly here)
    # @param lease [Object] lease responding to #progress, #abort, #complete
    def initialize(client, lease)
      @progress = 0
      @client = client
      @lease = lease
      @state = :running
      @lock = Mutex.new
      @thread = Thread.new { run }
    end

    # Background loop: report the current progress once a second while the
    # updater is in the :running state.
    #
    # BUGFIX: the original used `break` inside the synchronize block, which
    # only exits the synchronize call — not the surrounding `loop` — so the
    # thread kept spinning (sleep/retry) forever after abort/finish. A
    # non-local `return` exits the method, terminating the thread.
    def run
      loop do
        @lock.synchronize do
          return if @state != :running
          @lease.progress(@progress)
        end
        sleep(1)
      end
    end

    # Abort the lease and stop the updater thread.
    def abort
      @lock.synchronize do
        @state = :abort
        @lease.abort
      end
    end

    # Mark the lease 100% complete and stop the updater thread.
    def finish
      @lock.synchronize do
        @state = :finish
        @lease.progress(100)
        @lease.complete
      end
    end
  end
end
|
@@ -0,0 +1,530 @@
|
|
1
|
+
module VSphereCloud
|
2
|
+
|
3
|
+
class Resources
|
4
|
+
include VimSdk
|
5
|
+
|
6
|
+
MEMORY_THRESHOLD = 128
|
7
|
+
DISK_THRESHOLD = 512
|
8
|
+
|
9
|
+
class Datacenter
|
10
|
+
attr_accessor :mob
|
11
|
+
attr_accessor :name
|
12
|
+
attr_accessor :clusters
|
13
|
+
attr_accessor :vm_folder
|
14
|
+
attr_accessor :vm_folder_name
|
15
|
+
attr_accessor :template_folder
|
16
|
+
attr_accessor :template_folder_name
|
17
|
+
attr_accessor :disk_path
|
18
|
+
attr_accessor :datastore_pattern
|
19
|
+
attr_accessor :persistent_datastore_pattern
|
20
|
+
attr_accessor :allow_mixed_datastores
|
21
|
+
attr_accessor :spec
|
22
|
+
|
23
|
+
def inspect
|
24
|
+
"<Datacenter: #{@mob} / #{@name}>"
|
25
|
+
end
|
26
|
+
end
|
27
|
+
|
28
|
+
class Datastore
|
29
|
+
attr_accessor :mob
|
30
|
+
attr_accessor :name
|
31
|
+
attr_accessor :total_space
|
32
|
+
attr_accessor :free_space
|
33
|
+
attr_accessor :unaccounted_space
|
34
|
+
|
35
|
+
def real_free_space
|
36
|
+
@free_space - @unaccounted_space
|
37
|
+
end
|
38
|
+
|
39
|
+
def inspect
|
40
|
+
"<Datastore: #{@mob} / #{@name}>"
|
41
|
+
end
|
42
|
+
end
|
43
|
+
|
44
|
+
class Cluster
|
45
|
+
attr_accessor :mob
|
46
|
+
attr_accessor :name
|
47
|
+
attr_accessor :datacenter
|
48
|
+
attr_accessor :resource_pool
|
49
|
+
attr_accessor :datastores
|
50
|
+
attr_accessor :persistent_datastores
|
51
|
+
attr_accessor :idle_cpu
|
52
|
+
attr_accessor :total_memory
|
53
|
+
attr_accessor :free_memory
|
54
|
+
attr_accessor :unaccounted_memory
|
55
|
+
attr_accessor :mem_over_commit
|
56
|
+
|
57
|
+
def real_free_memory
|
58
|
+
@free_memory - @unaccounted_memory * @mem_over_commit
|
59
|
+
end
|
60
|
+
|
61
|
+
def inspect
|
62
|
+
"<Cluster: #{@mob} / #{@name}>"
|
63
|
+
end
|
64
|
+
end
|
65
|
+
|
66
|
+
def initialize(client, vcenter, mem_over_commit = 1.0)
|
67
|
+
@client = client
|
68
|
+
@vcenter = vcenter
|
69
|
+
@datacenters = {}
|
70
|
+
@timestamp = 0
|
71
|
+
@lock = Monitor.new
|
72
|
+
@logger = Bosh::Clouds::Config.logger
|
73
|
+
@mem_over_commit = mem_over_commit
|
74
|
+
end
|
75
|
+
|
76
|
+
def resource_pools_in_use?(datacenter_spec)
|
77
|
+
cluster_spec = get_cluster_spec(datacenter_spec["clusters"])
|
78
|
+
cluster_spec.find {|_, properties| properties["resource_pool"] != nil }
|
79
|
+
end
|
80
|
+
|
81
|
+
def setup_folder(datacenter, base_folder_name)
|
82
|
+
base_folder = @client.find_by_inventory_path([datacenter.name, "vm", base_folder_name])
|
83
|
+
raise "Missing folder #{base_folder_name}" if base_folder.nil?
|
84
|
+
return base_folder_name, base_folder unless datacenter.spec["use_sub_folder"] || resource_pools_in_use?(datacenter.spec)
|
85
|
+
|
86
|
+
# Create unique folder
|
87
|
+
sub_folder_name = [base_folder_name, Bosh::Clouds::Config.uuid]
|
88
|
+
@logger.debug("Searching for folder #{sub_folder_name.join("/")}")
|
89
|
+
sub_folder = @client.find_by_inventory_path([datacenter.name, "vm", sub_folder_name])
|
90
|
+
if sub_folder.nil?
|
91
|
+
@logger.info("Creating folder #{sub_folder_name.join("/")}")
|
92
|
+
sub_folder = base_folder.create_folder(Bosh::Clouds::Config.uuid)
|
93
|
+
else
|
94
|
+
@logger.debug("Found folder #{sub_folder_name.join("/")}")
|
95
|
+
end
|
96
|
+
[sub_folder_name, sub_folder]
|
97
|
+
end
|
98
|
+
|
99
|
+
def fetch_datacenters
|
100
|
+
datacenters = @client.get_managed_objects(Vim::Datacenter)
|
101
|
+
properties = @client.get_properties(datacenters, Vim::Datacenter, ["name"])
|
102
|
+
datacenter_specs = {}
|
103
|
+
|
104
|
+
@vcenter["datacenters"].each { |spec| datacenter_specs[spec["name"]] = spec }
|
105
|
+
properties.delete_if { |_, datacenter_properties| !datacenter_specs.has_key?(datacenter_properties["name"]) }
|
106
|
+
|
107
|
+
datacenters = {}
|
108
|
+
properties.each_value do |datacenter_properties|
|
109
|
+
datacenter = Datacenter.new
|
110
|
+
datacenter.mob = datacenter_properties[:obj]
|
111
|
+
datacenter.name = datacenter_properties["name"]
|
112
|
+
|
113
|
+
@logger.debug("Found datacenter: #{datacenter.name} @ #{datacenter.mob}")
|
114
|
+
|
115
|
+
datacenter.spec = datacenter_specs[datacenter.name]
|
116
|
+
|
117
|
+
# Setup folders
|
118
|
+
datacenter.template_folder_name, datacenter.template_folder = setup_folder(datacenter,
|
119
|
+
datacenter.spec["template_folder"])
|
120
|
+
datacenter.vm_folder_name, datacenter.vm_folder = setup_folder(datacenter, datacenter.spec["vm_folder"])
|
121
|
+
|
122
|
+
datacenter.disk_path = datacenter.spec["disk_path"]
|
123
|
+
datacenter.datastore_pattern = Regexp.new(datacenter.spec["datastore_pattern"])
|
124
|
+
raise "Missing persistent_datastore_pattern in director config" if datacenter.spec["persistent_datastore_pattern"].nil?
|
125
|
+
datacenter.persistent_datastore_pattern = Regexp.new(datacenter.spec["persistent_datastore_pattern"])
|
126
|
+
|
127
|
+
datacenter.allow_mixed_datastores = !!datacenter.spec["allow_mixed_datastores"]
|
128
|
+
|
129
|
+
datacenter.clusters = fetch_clusters(datacenter)
|
130
|
+
datacenters[datacenter.name] = datacenter
|
131
|
+
end
|
132
|
+
datacenters
|
133
|
+
end
|
134
|
+
|
135
|
+
# Allow clusters to be specified as
|
136
|
+
#
|
137
|
+
# clusters: clusters:
|
138
|
+
# - CLUSTER1 - CLUSTER1:
|
139
|
+
# - CLUSTER2 OR resource_pool: SOME_RP
|
140
|
+
# - CLUSTER3 - CLUSTER2
|
141
|
+
# - CLUSTER3
|
142
|
+
def get_cluster_spec(clusters)
|
143
|
+
cluster_spec = {}
|
144
|
+
clusters.each do |cluster|
|
145
|
+
case cluster
|
146
|
+
when String
|
147
|
+
cluster_spec[cluster] = {}
|
148
|
+
when Hash
|
149
|
+
cluster_spec[cluster.keys.first] = {"resource_pool" => cluster[cluster.keys.first]["resource_pool"]}
|
150
|
+
else
|
151
|
+
raise "Bad cluster information in datacenter spec #{clusters.pretty_inspect}"
|
152
|
+
end
|
153
|
+
end
|
154
|
+
cluster_spec
|
155
|
+
end
|
156
|
+
|
157
|
+
def fetch_clusters(datacenter)
|
158
|
+
datacenter_spec = datacenter.spec
|
159
|
+
cluster_mobs = @client.get_managed_objects(Vim::ClusterComputeResource, :root => datacenter.mob)
|
160
|
+
properties = @client.get_properties(cluster_mobs, Vim::ClusterComputeResource,
|
161
|
+
["name", "datastore", "resourcePool", "host"], :ensure_all => true)
|
162
|
+
|
163
|
+
cluster_spec = get_cluster_spec(datacenter_spec["clusters"])
|
164
|
+
cluster_names = Set.new(cluster_spec.keys)
|
165
|
+
properties.delete_if { |_, cluster_properties| !cluster_names.include?(cluster_properties["name"]) }
|
166
|
+
|
167
|
+
clusters = []
|
168
|
+
properties.each_value do |cluster_properties|
|
169
|
+
requested_resource_pool = cluster_spec[cluster_properties["name"]]["resource_pool"]
|
170
|
+
cluster_resource_pool = fetch_resource_pool(requested_resource_pool, cluster_properties)
|
171
|
+
next if cluster_resource_pool.nil?
|
172
|
+
|
173
|
+
cluster = Cluster.new
|
174
|
+
cluster.mem_over_commit = @mem_over_commit
|
175
|
+
cluster.mob = cluster_properties[:obj]
|
176
|
+
cluster.name = cluster_properties["name"]
|
177
|
+
|
178
|
+
@logger.debug("Found cluster: #{cluster.name} @ #{cluster.mob}")
|
179
|
+
|
180
|
+
cluster.resource_pool = cluster_resource_pool
|
181
|
+
cluster.datacenter = datacenter
|
182
|
+
cluster.datastores = fetch_datastores(datacenter, cluster_properties["datastore"],
|
183
|
+
datacenter.datastore_pattern)
|
184
|
+
cluster.persistent_datastores = fetch_datastores(datacenter, cluster_properties["datastore"],
|
185
|
+
datacenter.persistent_datastore_pattern)
|
186
|
+
|
187
|
+
# make sure datastores and persistent_datastores are mutually exclusive
|
188
|
+
datastore_names = cluster.datastores.map { |ds|
|
189
|
+
ds.name
|
190
|
+
}
|
191
|
+
persistent_datastore_names = cluster.persistent_datastores.map { |ds|
|
192
|
+
ds.name
|
193
|
+
}
|
194
|
+
if (datastore_names & persistent_datastore_names).length != 0 && !datacenter.allow_mixed_datastores
|
195
|
+
raise("datastore patterns are not mutually exclusive non-persistent are " +
|
196
|
+
"#{datastore_names.pretty_inspect}\n persistent are #{persistent_datastore_names.pretty_inspect}, " +
|
197
|
+
"please use allow_mixed_datastores director configuration parameter to allow this")
|
198
|
+
end
|
199
|
+
@logger.debug("non-persistent datastores are " + "#{datastore_names.pretty_inspect}\n " +
|
200
|
+
"persistent datastores are #{persistent_datastore_names.pretty_inspect}")
|
201
|
+
|
202
|
+
if requested_resource_pool.nil?
|
203
|
+
# Ideally we would just get the utilization for the root resource pool, but
|
204
|
+
# VC does not really have "real time" updates for utilization so for
|
205
|
+
# now we work around that by querying the cluster hosts directly.
|
206
|
+
fetch_cluster_utilization(cluster, cluster_properties["host"])
|
207
|
+
else
|
208
|
+
fetch_resource_pool_utilization(requested_resource_pool, cluster)
|
209
|
+
end
|
210
|
+
|
211
|
+
clusters << cluster
|
212
|
+
end
|
213
|
+
clusters
|
214
|
+
end
|
215
|
+
|
216
|
+
def fetch_resource_pool(requested_resource_pool, cluster_properties)
|
217
|
+
root_resource_pool = cluster_properties["resourcePool"]
|
218
|
+
|
219
|
+
return root_resource_pool if requested_resource_pool.nil?
|
220
|
+
|
221
|
+
# Get list of resource pools under this cluster
|
222
|
+
properties = @client.get_properties(root_resource_pool, Vim::ResourcePool, ["resourcePool"])
|
223
|
+
if properties && properties["resourcePool"] && properties["resourcePool"].size != 0
|
224
|
+
|
225
|
+
# Get the name of each resource pool under this cluster
|
226
|
+
child_properties = @client.get_properties(properties["resourcePool"], Vim::ResourcePool, ["name"])
|
227
|
+
if child_properties
|
228
|
+
child_properties.each_value do | resource_pool |
|
229
|
+
if resource_pool["name"] == requested_resource_pool
|
230
|
+
@logger.info("Found requested resource pool #{requested_resource_pool} under cluster #{cluster_properties["name"]}")
|
231
|
+
return resource_pool[:obj]
|
232
|
+
end
|
233
|
+
end
|
234
|
+
end
|
235
|
+
end
|
236
|
+
@logger.info("Could not find requested resource pool #{requested_resource_pool} under cluster #{cluster_properties["name"]}")
|
237
|
+
nil
|
238
|
+
end
|
239
|
+
|
240
|
+
def fetch_datastores(datacenter, datastore_mobs, match_pattern)
|
241
|
+
properties = @client.get_properties(datastore_mobs, Vim::Datastore,
|
242
|
+
["summary.freeSpace", "summary.capacity", "name"])
|
243
|
+
properties.delete_if { |_, datastore_properties| datastore_properties["name"] !~ match_pattern }
|
244
|
+
|
245
|
+
datastores = []
|
246
|
+
properties.each_value do |datastore_properties|
|
247
|
+
datastore = Datastore.new
|
248
|
+
datastore.mob = datastore_properties[:obj]
|
249
|
+
datastore.name = datastore_properties["name"]
|
250
|
+
|
251
|
+
@logger.debug("Found datastore: #{datastore.name} @ #{datastore.mob}")
|
252
|
+
|
253
|
+
datastore.free_space = datastore_properties["summary.freeSpace"].to_i / (1024 * 1024)
|
254
|
+
datastore.total_space = datastore_properties["summary.capacity"].to_i / (1024 * 1024)
|
255
|
+
datastore.unaccounted_space = 0
|
256
|
+
datastores << datastore
|
257
|
+
end
|
258
|
+
datastores
|
259
|
+
end
|
260
|
+
|
261
|
+
def fetch_cluster_utilization(cluster, host_mobs)
|
262
|
+
properties = @client.get_properties(host_mobs, Vim::HostSystem,
|
263
|
+
["hardware.memorySize", "runtime.inMaintenanceMode"], :ensure_all => true)
|
264
|
+
properties.delete_if { |_, host_properties| host_properties["runtime.inMaintenanceMode"] == "true" }
|
265
|
+
|
266
|
+
samples = 0
|
267
|
+
total_memory = 0
|
268
|
+
free_memory = 0
|
269
|
+
cpu_usage = 0
|
270
|
+
|
271
|
+
perf_counters = @client.get_perf_counters(host_mobs, ["cpu.usage.average", "mem.usage.average"], :max_sample => 5)
|
272
|
+
perf_counters.each do |host_mob, perf_counter|
|
273
|
+
host_properties = properties[host_mob]
|
274
|
+
next if host_properties.nil?
|
275
|
+
host_total_memory = host_properties["hardware.memorySize"].to_i
|
276
|
+
host_percent_memory_used = average_csv(perf_counter["mem.usage.average"]) / 10000
|
277
|
+
host_free_memory = (1.0 - host_percent_memory_used) * host_total_memory
|
278
|
+
|
279
|
+
samples += 1
|
280
|
+
total_memory += host_total_memory
|
281
|
+
free_memory += host_free_memory.to_i
|
282
|
+
cpu_usage += average_csv(perf_counter["cpu.usage.average"]) / 100
|
283
|
+
end
|
284
|
+
|
285
|
+
cluster.idle_cpu = (100 - cpu_usage / samples) / 100
|
286
|
+
cluster.total_memory = total_memory/(1024 * 1024)
|
287
|
+
cluster.free_memory = free_memory/(1024 * 1024)
|
288
|
+
cluster.unaccounted_memory = 0
|
289
|
+
end
|
290
|
+
|
291
|
+
def fetch_resource_pool_utilization(resource_pool, cluster)
|
292
|
+
properties = @client.get_properties(cluster.resource_pool, Vim::ResourcePool, ["summary"])
|
293
|
+
raise "Failed to get utilization for resource pool #{resource_pool}" if properties.nil?
|
294
|
+
|
295
|
+
if properties["summary"].runtime.overall_status == "green"
|
296
|
+
runtime_info = properties["summary"].runtime
|
297
|
+
cluster.idle_cpu = ((runtime_info.cpu.max_usage - runtime_info.cpu.overall_usage) * 1.0)/runtime_info.cpu.max_usage
|
298
|
+
cluster.total_memory = (runtime_info.memory.reservation_used + runtime_info.memory.unreserved_for_vm)/(1024 * 1024)
|
299
|
+
cluster.free_memory = [runtime_info.memory.unreserved_for_vm, runtime_info.memory.max_usage - runtime_info.memory.overall_usage].min/(1024 * 1024)
|
300
|
+
cluster.unaccounted_memory = 0
|
301
|
+
else
|
302
|
+
@logger.warn("Ignoring cluster: #{cluster.name} resource_pool: #{resource_pool} as its state" +
|
303
|
+
"is unreliable #{properties["summary"].runtime.overall_status}")
|
304
|
+
# resource pool is in an unreliable state
|
305
|
+
cluster.idle_cpu = 0
|
306
|
+
cluster.total_memory = 0
|
307
|
+
cluster.free_memory = 0
|
308
|
+
cluster.unaccounted_memory = 0
|
309
|
+
end
|
310
|
+
end
|
311
|
+
|
312
|
+
def average_csv(csv)
|
313
|
+
values = csv.split(",")
|
314
|
+
result = 0
|
315
|
+
values.each { |v| result += v.to_f }
|
316
|
+
result / values.size
|
317
|
+
end
|
318
|
+
|
319
|
+
def datacenters
|
320
|
+
@lock.synchronize do
|
321
|
+
if Time.now.to_i - @timestamp > 60
|
322
|
+
@datacenters = fetch_datacenters
|
323
|
+
@timestamp = Time.now.to_i
|
324
|
+
end
|
325
|
+
end
|
326
|
+
@datacenters
|
327
|
+
end
|
328
|
+
|
329
|
+
def filter_used_resources(memory, vm_disk_size, persistent_disks_size, cluster_affinity)
|
330
|
+
resources = []
|
331
|
+
datacenters.each_value do |datacenter|
|
332
|
+
datacenter.clusters.each do |cluster|
|
333
|
+
next unless cluster_affinity.nil? || cluster.mob == cluster_affinity.mob
|
334
|
+
next unless cluster.real_free_memory - memory > MEMORY_THRESHOLD
|
335
|
+
next if pick_datastore(cluster.persistent_datastores, persistent_disks_size).nil?
|
336
|
+
next if (datastore = pick_datastore(cluster.datastores, vm_disk_size)).nil?
|
337
|
+
resources << [cluster, datastore]
|
338
|
+
end
|
339
|
+
end
|
340
|
+
resources
|
341
|
+
end
|
342
|
+
|
343
|
+
def get_cluster(dc_name, cluster_name)
|
344
|
+
datacenter = datacenters[dc_name]
|
345
|
+
return nil if datacenter.nil?
|
346
|
+
|
347
|
+
cluster = nil
|
348
|
+
datacenter.clusters.each do |c|
|
349
|
+
if c.name == cluster_name
|
350
|
+
cluster = c
|
351
|
+
break
|
352
|
+
end
|
353
|
+
end
|
354
|
+
cluster
|
355
|
+
end
|
356
|
+
|
357
|
+
def validate_persistent_datastore(dc_name, datastore_name)
|
358
|
+
datacenter = datacenters[dc_name]
|
359
|
+
raise "Invalid datacenter #{dc_name} #{datacenters.pretty_inspect}" if datacenter.nil?
|
360
|
+
|
361
|
+
return datastore_name =~ datacenter.persistent_datastore_pattern
|
362
|
+
end
|
363
|
+
|
364
|
+
def get_persistent_datastore(dc_name, cluster_name, persistent_datastore_name)
|
365
|
+
cluster = get_cluster(dc_name, cluster_name)
|
366
|
+
return nil if cluster.nil?
|
367
|
+
|
368
|
+
datastore = nil
|
369
|
+
cluster.persistent_datastores.each { |ds|
|
370
|
+
if ds.name == persistent_datastore_name
|
371
|
+
datastore = ds
|
372
|
+
break
|
373
|
+
end
|
374
|
+
}
|
375
|
+
datastore
|
376
|
+
end
|
377
|
+
|
378
|
+
def pick_datastore(datastores, disk_space)
|
379
|
+
selected_datastores = {}
|
380
|
+
datastores.each { |ds|
|
381
|
+
if ds.real_free_space - disk_space > DISK_THRESHOLD
|
382
|
+
selected_datastores[ds] = score_datastore(ds, disk_space)
|
383
|
+
end
|
384
|
+
}
|
385
|
+
return nil if selected_datastores.empty?
|
386
|
+
pick_random_with_score(selected_datastores)
|
387
|
+
end
|
388
|
+
|
389
|
+
def get_datastore_cluster(datacenter, datastore)
|
390
|
+
datacenter = datacenters[datacenter]
|
391
|
+
if !datacenter.nil?
|
392
|
+
datacenter.clusters.each do |c|
|
393
|
+
c.persistent_datastores.select do |ds|
|
394
|
+
yield c if ds.name == datastore
|
395
|
+
end
|
396
|
+
end
|
397
|
+
end
|
398
|
+
end
|
399
|
+
|
400
|
+
def find_persistent_datastore(dc_name, cluster_name, disk_space)
|
401
|
+
cluster = get_cluster(dc_name, cluster_name)
|
402
|
+
return nil if cluster.nil?
|
403
|
+
|
404
|
+
chosen_datastore = nil
|
405
|
+
@lock.synchronize do
|
406
|
+
chosen_datastore = pick_datastore(cluster.persistent_datastores, disk_space)
|
407
|
+
break if chosen_datastore.nil?
|
408
|
+
|
409
|
+
chosen_datastore.unaccounted_space += disk_space
|
410
|
+
end
|
411
|
+
chosen_datastore
|
412
|
+
end
|
413
|
+
|
414
|
+
def find_resources(memory, disk_size, persistent_disks_size, cluster_affinity)
|
415
|
+
cluster = nil
|
416
|
+
datastore = nil
|
417
|
+
|
418
|
+
# account for swap
|
419
|
+
disk_size += memory
|
420
|
+
|
421
|
+
@lock.synchronize do
|
422
|
+
resources = filter_used_resources(memory, disk_size, persistent_disks_size, cluster_affinity)
|
423
|
+
break if resources.empty?
|
424
|
+
|
425
|
+
scored_resources = {}
|
426
|
+
resources.each do |resource|
|
427
|
+
cluster, datastore = resource
|
428
|
+
scored_resources[resource] = score_resource(cluster, datastore, memory, disk_size)
|
429
|
+
end
|
430
|
+
|
431
|
+
scored_resources = scored_resources.sort_by { |resource| 1 - resource.last }
|
432
|
+
scored_resources = scored_resources[0..2]
|
433
|
+
|
434
|
+
scored_resources.each do |resource, score|
|
435
|
+
cluster, datastore = resource
|
436
|
+
@logger.debug("Cluster: #{cluster.inspect} Datastore: #{datastore.inspect} score: #{score}")
|
437
|
+
end
|
438
|
+
|
439
|
+
cluster, datastore = pick_random_with_score(scored_resources)
|
440
|
+
|
441
|
+
@logger.debug("Picked: #{cluster.inspect} / #{datastore.inspect}")
|
442
|
+
|
443
|
+
cluster.unaccounted_memory += memory
|
444
|
+
datastore.unaccounted_space += disk_size
|
445
|
+
end
|
446
|
+
|
447
|
+
return [] if cluster.nil?
|
448
|
+
[cluster, datastore]
|
449
|
+
end
|
450
|
+
|
451
|
+
def get_resources(memory_size=1, disks=[])
|
452
|
+
# Sort out the persistent and non persistent disks
|
453
|
+
non_persistent_disks_size = 0
|
454
|
+
persistent_disks = {}
|
455
|
+
persistent_disks_size = 0
|
456
|
+
disks.each do |disk|
|
457
|
+
if disk["persistent"]
|
458
|
+
if !disk["datastore"].nil?
|
459
|
+
# sanity check the persistent disks
|
460
|
+
raise "Invalid persistent disk #{disk.pretty_inspect}" unless validate_persistent_datastore(disk["datacenter"], disk["datastore"])
|
461
|
+
|
462
|
+
# sort the persistent disks into clusters they belong to
|
463
|
+
get_datastore_cluster(disk["datacenter"], disk["datastore"]) { |cluster|
|
464
|
+
persistent_disks[cluster] ||= 0
|
465
|
+
persistent_disks[cluster] += disk["size"]
|
466
|
+
}
|
467
|
+
end
|
468
|
+
persistent_disks_size += disk["size"]
|
469
|
+
else
|
470
|
+
non_persistent_disks_size += disk["size"]
|
471
|
+
end
|
472
|
+
end
|
473
|
+
non_persistent_disks_size = 1 if non_persistent_disks_size == 0
|
474
|
+
persistent_disks_size = 1 if persistent_disks_size == 0
|
475
|
+
|
476
|
+
if !persistent_disks.empty?
|
477
|
+
# Sort clusters by largest persistent disk footprint
|
478
|
+
persistent_disks_by_size = persistent_disks.sort { |a, b| b[1] <=> a [1] }
|
479
|
+
|
480
|
+
# Search for resources near the desired cluster
|
481
|
+
persistent_disks_by_size.each do |cluster, size|
|
482
|
+
resources = find_resources(memory_size, non_persistent_disks_size, persistent_disks_size - size, cluster)
|
483
|
+
return resources unless resources.empty?
|
484
|
+
end
|
485
|
+
@logger.info("Ignoring datastore locality as we could not find any resources near persistent disks" +
|
486
|
+
"#{persistent_disks.pretty_inspect}")
|
487
|
+
end
|
488
|
+
|
489
|
+
resources = find_resources(memory_size, non_persistent_disks_size, persistent_disks_size, nil)
|
490
|
+
raise "No available resources" if resources.empty?
|
491
|
+
resources
|
492
|
+
end
|
493
|
+
|
494
|
+
def score_datastore(datastore, disk)
|
495
|
+
percent_of_free_disk = 1 - (disk.to_f / datastore.real_free_space)
|
496
|
+
percent_of_total_disk = 1 - (disk.to_f / datastore.total_space)
|
497
|
+
percent_of_free_disk * 0.67 + percent_of_total_disk * 0.33
|
498
|
+
end
|
499
|
+
|
500
|
+
def score_resource(cluster, datastore, memory, disk)
|
501
|
+
percent_of_free_mem = 1 - (memory.to_f / cluster.real_free_memory)
|
502
|
+
percent_of_total_mem = 1 - (memory.to_f / cluster.total_memory)
|
503
|
+
percent_free_mem_left = (cluster.real_free_memory.to_f - memory) / cluster.total_memory
|
504
|
+
memory_score = percent_of_free_mem * 0.5 + percent_of_total_mem * 0.25 + percent_free_mem_left * 0.25
|
505
|
+
|
506
|
+
cpu_score = cluster.idle_cpu
|
507
|
+
disk_score = score_datastore(datastore, disk)
|
508
|
+
memory_score * 0.5 + cpu_score * 0.25 + disk_score * 0.25
|
509
|
+
end
|
510
|
+
|
511
|
+
def pick_random_with_score(elements)
|
512
|
+
score_sum = 0
|
513
|
+
elements.each { |element| score_sum += element[1] }
|
514
|
+
|
515
|
+
random_score = rand * score_sum
|
516
|
+
base_score = 0
|
517
|
+
|
518
|
+
elements.each do |element|
|
519
|
+
score = element[1]
|
520
|
+
return element[0] if base_score + score > random_score
|
521
|
+
base_score += score
|
522
|
+
end
|
523
|
+
|
524
|
+
# fall through
|
525
|
+
elements.last[0]
|
526
|
+
end
|
527
|
+
|
528
|
+
end
|
529
|
+
|
530
|
+
end
|
@@ -0,0 +1,29 @@
|
|
1
|
+
require "forwardable"

require "common/thread_pool"
require "common/thread_formatter"
require "uuidtools"

autoload :VSphereCloud, "cloud/vsphere/cloud"
|
6
|
+
|
7
|
+
module Bosh
  module Clouds

    # Plugin shim: exposes the vSphere CPI under the Bosh::Clouds namespace
    # by forwarding every CPI call to a VSphereCloud::Cloud instance.
    class VSphere
      extend Forwardable

      # The full CPI surface, forwarded verbatim to the delegate.
      [:create_stemcell, :delete_stemcell,
       :create_vm, :delete_vm, :reboot_vm,
       :configure_networks,
       :create_disk, :delete_disk,
       :attach_disk, :detach_disk,
       :validate_deployment].each do |cpi_method|
        def_delegator :@delegate, cpi_method
      end

      # @param options [Hash] CPI configuration, passed through unchanged.
      def initialize(options)
        @delegate = VSphereCloud::Cloud.new(options)
      end
    end

    Vsphere = VSphere # alias name for dynamic plugin loading
  end
end
|
data/spec/spec_helper.rb
ADDED
@@ -0,0 +1,31 @@
|
|
1
|
+
# Copyright (c) 2009-2012 VMware, Inc.

# Test bootstrap: load the bundle, migrate an in-memory SQLite database,
# and hand the CPI a minimal director-config stand-in.

ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../../Gemfile", __FILE__)

require "rubygems"
require "bundler"
Bundler.setup(:default, :test)

require "rspec"

require "sequel"
require "sequel/adapters/sqlite"

# Run the CPI schema migrations against a throwaway in-memory database.
Sequel.extension :migration
db = Sequel.sqlite(':memory:')
migration_dir = File.expand_path("../../db/migrations/vsphere_cpi", __FILE__)
Sequel::TimestampMigrator.new(db, migration_dir, :table => "vsphere_cpi_schema").run

require 'cloud'
require 'cloud/vsphere'

# Minimal stand-in for the director config object the CPI expects.
class VSphereSpecConfig
  attr_accessor :db, :logger, :uuid
end

spec_config = VSphereSpecConfig.new
spec_config.db = db
spec_config.logger = Logger.new(STDOUT)
spec_config.logger.level = Logger::ERROR

Bosh::Clouds::Config.configure(spec_config)