bosh_vsphere_cpi 0.4.9 → 0.5.0

This diff shows the changes between two publicly released versions of this package, as published to its public registry. It is provided for informational purposes only.
@@ -1,570 +1,220 @@
+ # Copyright (c) 2009-2012 VMware, Inc.
+
  module VSphereCloud

+ # Resources model.
  class Resources
- include VimSdk
-
  MEMORY_THRESHOLD = 128
- DISK_THRESHOLD = 512
-
- class Datacenter
- attr_accessor :mob
- attr_accessor :name
- attr_accessor :clusters
- attr_accessor :vm_folder
- attr_accessor :vm_folder_name
- attr_accessor :template_folder
- attr_accessor :template_folder_name
- attr_accessor :disk_path
- attr_accessor :datastore_pattern
- attr_accessor :persistent_datastore_pattern
- attr_accessor :allow_mixed_datastores
- attr_accessor :spec
-
- def inspect
- "<Datacenter: #{@mob} / #{@name}>"
- end
- end
-
- class Datastore
- attr_accessor :mob
- attr_accessor :name
- attr_accessor :total_space
- attr_accessor :free_space
- attr_accessor :unaccounted_space
+ DISK_THRESHOLD = 1024
+ STALE_TIMEOUT = 60
+ BYTES_IN_MB = 1024 * 1024

- def real_free_space
- @free_space - @unaccounted_space
- end
-
- def inspect
- "<Datastore: #{@mob} / #{@name}>"
- end
+ # Creates a new resources model.
+ def initialize
+ @client = Config.client
+ @logger = Config.logger
+ @last_update = 0
+ @lock = Monitor.new
  end

- class Cluster
- attr_accessor :mob
- attr_accessor :name
- attr_accessor :datacenter
- attr_accessor :resource_pool
- attr_accessor :datastores
- attr_accessor :persistent_datastores
- attr_accessor :idle_cpu
- attr_accessor :total_memory
- attr_accessor :free_memory
- attr_accessor :unaccounted_memory
- attr_accessor :mem_over_commit
-
- def real_free_memory
- @free_memory - @unaccounted_memory * @mem_over_commit
- end
-
- def inspect
- "<Cluster: #{@mob} / #{@name}>"
+ # Returns the list of datacenters available for placement.
+ #
+ # Will lazily load them and reload the data when it's stale.
+ #
+ # @return [List<Resources::Datacenter>] datacenters.
+ def datacenters
+ @lock.synchronize do
+ update if Time.now.to_i - @last_update > STALE_TIMEOUT
  end
+ @datacenters
  end

- def initialize(client, vcenter, mem_over_commit = 1.0)
- @client = client
- @vcenter = vcenter
- @datacenters = {}
- @timestamp = 0
- @lock = Monitor.new
- @logger = Bosh::Clouds::Config.logger
- @mem_over_commit = mem_over_commit
- end
-
- def resource_pools_in_use?(datacenter_spec)
- cluster_spec = get_cluster_spec(datacenter_spec["clusters"])
- cluster_spec.find {|_, properties| properties["resource_pool"] != nil }
+ # Returns the persistent datastore for the requested context.
+ #
+ # @param [String] dc_name datacenter name.
+ # @param [String] cluster_name cluster name.
+ # @param [String] datastore_name datastore name.
+ # @return [Resources::Datastore] persistent datastore.
+ def persistent_datastore(dc_name, cluster_name, datastore_name)
+ datacenter = datacenters[dc_name]
+ return nil if datacenter.nil?
+ cluster = datacenter.clusters[cluster_name]
+ return nil if cluster.nil?
+ cluster.persistent(datastore_name)
  end

- def setup_folder(datacenter, base_folder_name)
- base_folder = @client.find_by_inventory_path([datacenter.name, "vm", base_folder_name])
- raise "Missing folder #{base_folder_name}" if base_folder.nil?
- return base_folder_name, base_folder unless datacenter.spec["use_sub_folder"] || resource_pools_in_use?(datacenter.spec)
-
- # Create unique folder
- sub_folder_name = [base_folder_name, Bosh::Clouds::Config.uuid]
- @logger.debug("Searching for folder #{sub_folder_name.join("/")}")
- sub_folder = @client.find_by_inventory_path([datacenter.name, "vm", sub_folder_name])
- if sub_folder.nil?
- @logger.info("Creating folder #{sub_folder_name.join("/")}")
- sub_folder = base_folder.create_folder(Bosh::Clouds::Config.uuid)
- else
- @logger.debug("Found folder #{sub_folder_name.join("/")}")
+ # Validate that the persistent datastore is still valid so we don't have to
+ # move the disk.
+ #
+ # @param [String] dc_name datacenter name.
+ # @param [String] datastore_name datastore name.
+ # @return [true, false] true iff the datastore still exists and is in the
+ # persistent pool.
+ def validate_persistent_datastore(dc_name, datastore_name)
+ datacenter = datacenters[dc_name]
+ # TODO: should actually check vCenter since we can move disks across
+ # datacenters.
+ if datacenter.nil?
+ raise "Invalid datacenter #{dc_name} #{datacenters.inspect}"
  end
- [sub_folder_name, sub_folder]
- end
-
- def fetch_datacenters
- datacenters = @client.get_managed_objects(Vim::Datacenter)
- properties = @client.get_properties(datacenters, Vim::Datacenter, ["name"])
- datacenter_specs = {}
-
- @vcenter["datacenters"].each { |spec| datacenter_specs[spec["name"]] = spec }
- properties.delete_if { |_, datacenter_properties| !datacenter_specs.has_key?(datacenter_properties["name"]) }
-
- datacenters = {}
- properties.each_value do |datacenter_properties|
- datacenter = Datacenter.new
- datacenter.mob = datacenter_properties[:obj]
- datacenter.name = datacenter_properties["name"]
-
- @logger.debug("Found datacenter: #{datacenter.name} @ #{datacenter.mob}")
-
- datacenter.spec = datacenter_specs[datacenter.name]
-
- # Setup folders
- datacenter.template_folder_name, datacenter.template_folder = setup_folder(datacenter,
- datacenter.spec["template_folder"])
- datacenter.vm_folder_name, datacenter.vm_folder = setup_folder(datacenter, datacenter.spec["vm_folder"])
-
- datacenter.disk_path = datacenter.spec["disk_path"]
- datacenter.datastore_pattern = Regexp.new(datacenter.spec["datastore_pattern"])
- raise "Missing persistent_datastore_pattern in director config" if datacenter.spec["persistent_datastore_pattern"].nil?
- datacenter.persistent_datastore_pattern = Regexp.new(datacenter.spec["persistent_datastore_pattern"])
-
- datacenter.allow_mixed_datastores = !!datacenter.spec["allow_mixed_datastores"]
-
- datacenter.clusters = fetch_clusters(datacenter)
- datacenters[datacenter.name] = datacenter
+ datacenter.clusters.each_value do |cluster|
+ return true unless cluster.persistent(datastore_name).nil?
  end
- datacenters
+ false
  end

- # Allow clusters to be specified as
+ # Place the persistent datastore in the given datacenter and cluster with
+ # the requested disk space.
  #
- # clusters: clusters:
- # - CLUSTER1 - CLUSTER1:
- # - CLUSTER2 OR resource_pool: SOME_RP
- # - CLUSTER3 - CLUSTER2
- # - CLUSTER3
- def get_cluster_spec(clusters)
- cluster_spec = {}
- clusters.each do |cluster|
- case cluster
- when String
- cluster_spec[cluster] = {}
- when Hash
- cluster_spec[cluster.keys.first] = {"resource_pool" => cluster[cluster.keys.first]["resource_pool"]}
- else
- raise "Bad cluster information in datacenter spec #{clusters.pretty_inspect}"
- end
- end
- cluster_spec
- end
-
- def fetch_clusters(datacenter)
- datacenter_spec = datacenter.spec
- cluster_mobs = @client.get_managed_objects(Vim::ClusterComputeResource, :root => datacenter.mob)
- properties = @client.get_properties(cluster_mobs, Vim::ClusterComputeResource,
- ["name", "datastore", "resourcePool", "host"], :ensure_all => true)
-
- cluster_spec = get_cluster_spec(datacenter_spec["clusters"])
- cluster_names = Set.new(cluster_spec.keys)
- properties.delete_if { |_, cluster_properties| !cluster_names.include?(cluster_properties["name"]) }
-
- clusters = []
- properties.each_value do |cluster_properties|
- requested_resource_pool = cluster_spec[cluster_properties["name"]]["resource_pool"]
- cluster_resource_pool = fetch_resource_pool(requested_resource_pool, cluster_properties)
- next if cluster_resource_pool.nil?
-
- cluster = Cluster.new
- cluster.mem_over_commit = @mem_over_commit
- cluster.mob = cluster_properties[:obj]
- cluster.name = cluster_properties["name"]
-
- @logger.debug("Found cluster: #{cluster.name} @ #{cluster.mob}")
-
- cluster.resource_pool = cluster_resource_pool
- cluster.datacenter = datacenter
- cluster.datastores = fetch_datastores(datacenter, cluster_properties["datastore"],
- datacenter.datastore_pattern)
- cluster.persistent_datastores = fetch_datastores(datacenter, cluster_properties["datastore"],
- datacenter.persistent_datastore_pattern)
-
- # make sure datastores and persistent_datastores are mutually exclusive
- datastore_names = cluster.datastores.map { |ds|
- ds.name
- }
- persistent_datastore_names = cluster.persistent_datastores.map { |ds|
- ds.name
- }
- if (datastore_names & persistent_datastore_names).length != 0 && !datacenter.allow_mixed_datastores
- raise("datastore patterns are not mutually exclusive non-persistent are " +
- "#{datastore_names.pretty_inspect}\n persistent are #{persistent_datastore_names.pretty_inspect}, " +
- "please use allow_mixed_datastores director configuration parameter to allow this")
- end
- @logger.debug("non-persistent datastores are " + "#{datastore_names.pretty_inspect}\n " +
- "persistent datastores are #{persistent_datastore_names.pretty_inspect}")
-
- if requested_resource_pool.nil?
- # Ideally we would just get the utilization for the root resource pool, but
- # VC does not really have "real time" updates for utilization so for
- # now we work around that by querying the cluster hosts directly.
- fetch_cluster_utilization(cluster, cluster_properties["host"])
- else
- fetch_resource_pool_utilization(requested_resource_pool, cluster)
- end
-
- clusters << cluster
+ # @param [String] dc_name datacenter name.
+ # @param [String] cluster_name cluster name.
+ # @param [Integer] disk_space disk space.
+ # @return [Datastore?] datastore if it was placed succesfuly.
+ def place_persistent_datastore(dc_name, cluster_name, disk_space)
+ @lock.synchronize do
+ datacenter = datacenters[dc_name]
+ return nil if datacenter.nil?
+ cluster = datacenter.clusters[cluster_name]
+ return nil if cluster.nil?
+ datastore = cluster.pick_persistent(disk_space)
+ return nil if datastore.nil?
+ datastore.allocate(disk_space)
+ return datastore
  end
- clusters
- end
-
- def fetch_resource_pool(requested_resource_pool, cluster_properties)
- root_resource_pool = cluster_properties["resourcePool"]
-
- return root_resource_pool if requested_resource_pool.nil?
-
- return traverse_resource_pool(requested_resource_pool, root_resource_pool)
  end

- def traverse_resource_pool(requested_resource_pool, resource_pool)
- # Get list of resource pools under this resource pool
- properties = @client.get_properties(resource_pool, Vim::ResourcePool, ["resourcePool"])
-
- if properties && properties["resourcePool"] && properties["resourcePool"].size != 0
+ # Find a place for the requested resources.
+ #
+ # @param [Integer] memory requested memory.
+ # @param [Integer] ephemeral requested ephemeral storage.
+ # @param [Array<Hash>] persistent requested persistent storage.
+ # @return [Array] an array/tuple of Cluster and Datastore if the resources
+ # were placed successfully, otherwise exception.
+ def place(memory, ephemeral, persistent)
+ populate_resources(persistent)
+
+ # calculate locality to prioritizing clusters that contain the most
+ # persistent data.
+ locality = cluster_locality(persistent)
+ locality.sort! { |a, b| b[1] <=> a[1] }

- # Get the name of each resource pool under this resource pool
- child_properties = @client.get_properties(properties["resourcePool"], Vim::ResourcePool, ["name"])
- if child_properties
- child_properties.each_value do | resource_pool |
- if resource_pool["name"] == requested_resource_pool
- @logger.info("Found requested resource pool #{requested_resource_pool}")
- return resource_pool[:obj]
+ @lock.synchronize do
+ locality.each do |cluster, _|
+ persistent_sizes = persistent_sizes_for_cluster(cluster, persistent)
+
+ scorer = Scorer.new(cluster, memory, ephemeral, persistent_sizes)
+ if scorer.score > 0
+ datastore = cluster.pick_ephemeral(ephemeral)
+ if datastore
+ cluster.allocate(memory)
+ datastore.allocate(ephemeral)
+ return [cluster, datastore]
  end
  end
-
- child_properties.each_value do | resource_pool |
- pool = traverse_resource_pool(requested_resource_pool, resource_pool[:obj])
- return pool if pool != nil
- end
  end

- return nil
- end
-
- return nil
- end
-
- def fetch_datastores(datacenter, datastore_mobs, match_pattern)
- properties = @client.get_properties(datastore_mobs, Vim::Datastore,
- ["summary.freeSpace", "summary.capacity", "name"])
- properties.delete_if { |_, datastore_properties| datastore_properties["name"] !~ match_pattern }
-
- datastores = []
- properties.each_value do |datastore_properties|
- datastore = Datastore.new
- datastore.mob = datastore_properties[:obj]
- datastore.name = datastore_properties["name"]
-
- @logger.debug("Found datastore: #{datastore.name} @ #{datastore.mob}")
-
- datastore.free_space = datastore_properties["summary.freeSpace"].to_i / (1024 * 1024)
- datastore.total_space = datastore_properties["summary.capacity"].to_i / (1024 * 1024)
- datastore.unaccounted_space = 0
- datastores << datastore
- end
- datastores
- end
-
- def fetch_cluster_utilization(cluster, host_mobs)
- properties = @client.get_properties(host_mobs, Vim::HostSystem,
- ["hardware.memorySize", "runtime.inMaintenanceMode"], :ensure_all => true)
- properties.delete_if { |_, host_properties| host_properties["runtime.inMaintenanceMode"] == "true" }
-
- samples = 0
- total_memory = 0
- free_memory = 0
- cpu_usage = 0
-
- perf_counters = @client.get_perf_counters(host_mobs, ["cpu.usage.average", "mem.usage.average"], :max_sample => 5)
- perf_counters.each do |host_mob, perf_counter|
- host_properties = properties[host_mob]
- next if host_properties.nil?
- host_total_memory = host_properties["hardware.memorySize"].to_i
- host_percent_memory_used = average_csv(perf_counter["mem.usage.average"]) / 10000
- host_free_memory = (1.0 - host_percent_memory_used) * host_total_memory
-
- samples += 1
- total_memory += host_total_memory
- free_memory += host_free_memory.to_i
- cpu_usage += average_csv(perf_counter["cpu.usage.average"]) / 100
- end
-
- cluster.idle_cpu = (100 - cpu_usage / samples) / 100
- cluster.total_memory = total_memory/(1024 * 1024)
- cluster.free_memory = free_memory/(1024 * 1024)
- cluster.unaccounted_memory = 0
- end
-
- def fetch_resource_pool_utilization(resource_pool, cluster)
- properties = @client.get_properties(cluster.resource_pool, Vim::ResourcePool, ["summary"])
- raise "Failed to get utilization for resource pool #{resource_pool}" if properties.nil?
-
- if properties["summary"].runtime.overall_status == "green"
- runtime_info = properties["summary"].runtime
- cluster.idle_cpu = ((runtime_info.cpu.max_usage - runtime_info.cpu.overall_usage) * 1.0)/runtime_info.cpu.max_usage
- cluster.total_memory = (runtime_info.memory.reservation_used + runtime_info.memory.unreserved_for_vm)/(1024 * 1024)
- cluster.free_memory = [runtime_info.memory.unreserved_for_vm, runtime_info.memory.max_usage - runtime_info.memory.overall_usage].min/(1024 * 1024)
- cluster.unaccounted_memory = 0
- else
- @logger.warn("Ignoring cluster: #{cluster.name} resource_pool: #{resource_pool} as its state" +
- "is unreliable #{properties["summary"].runtime.overall_status}")
- # resource pool is in an unreliable state
- cluster.idle_cpu = 0
- cluster.total_memory = 0
- cluster.free_memory = 0
- cluster.unaccounted_memory = 0
- end
- end
-
- def average_csv(csv)
- values = csv.split(",")
- result = 0
- values.each { |v| result += v.to_f }
- result / values.size
- end
-
- def datacenters
- @lock.synchronize do
- if Time.now.to_i - @timestamp > 60
- @datacenters = fetch_datacenters
- @timestamp = Time.now.to_i
+ unless locality.empty?
+ @logger.debug("Ignoring datastore locality as we could not find " +
+ "any resources near disks: #{persistent.inspect}")
  end
- end
- @datacenters
- end
-
- def filter_used_resources(memory, vm_disk_size, persistent_disks_size, cluster_affinity,
- report = nil)
- resources = []
- datacenters.each_value do |datacenter|
- datacenter.clusters.each do |cluster|
-
- unless cluster_affinity.nil? || cluster.mob == cluster_affinity.mob
- report << "Skipping cluster #{cluster.name} because of affinity mismatch" if report
- next
- end

- unless cluster.real_free_memory - memory > MEMORY_THRESHOLD
- report << "Skipping cluster #{cluster.name} because of memory constraint. " +
- "Free #{cluster.real_free_memory}, requested #{memory}, " +
- "threshold #{MEMORY_THRESHOLD}" if report
- next
+ weighted_clusters = []
+ datacenters.each_value do |datacenter|
+ datacenter.clusters.each_value do |cluster|
+ persistent_sizes = persistent_sizes_for_cluster(cluster, persistent)
+ scorer = Scorer.new(cluster, memory, ephemeral, persistent_sizes)
+ score = scorer.score
+ @logger.debug("Score: #{cluster.name}: #{score}")
+ weighted_clusters << [cluster, score] if score > 0
  end
-
- if pick_datastore(cluster.persistent_datastores, persistent_disks_size, report).nil?
- report << "Skipping cluster #{cluster.name} because of above persistent " +
- "disk constraint failure." if report
- next
- end
-
- if (datastore = pick_datastore(cluster.datastores, vm_disk_size, report)).nil?
- report << "Skipping cluster #{cluster.name} because of above " +
- "disk constraint failure." if report
- next
- end
-
- resources << [cluster, datastore]
  end
- end
- resources
- end
-
- def get_cluster(dc_name, cluster_name)
- datacenter = datacenters[dc_name]
- return nil if datacenter.nil?

- cluster = nil
- datacenter.clusters.each do |c|
- if c.name == cluster_name
- cluster = c
- break
- end
- end
- cluster
- end
+ raise "No available resources" if weighted_clusters.empty?

- def validate_persistent_datastore(dc_name, datastore_name)
- datacenter = datacenters[dc_name]
- raise "Invalid datacenter #{dc_name} #{datacenters.pretty_inspect}" if datacenter.nil?
+ cluster = Util.weighted_random(weighted_clusters)
+ datastore = cluster.pick_ephemeral(ephemeral)

- return datastore_name =~ datacenter.persistent_datastore_pattern
- end
-
- def get_persistent_datastore(dc_name, cluster_name, persistent_datastore_name)
- cluster = get_cluster(dc_name, cluster_name)
- return nil if cluster.nil?
-
- datastore = nil
- cluster.persistent_datastores.each { |ds|
- if ds.name == persistent_datastore_name
- datastore = ds
- break
+ if datastore
+ cluster.allocate(memory)
+ datastore.allocate(ephemeral)
+ return [cluster, datastore]
  end
- }
- datastore
- end

- def pick_datastore(datastores, disk_space, report = nil)
- selected_datastores = {}
- datastores.each { |ds|
- if ds.real_free_space - disk_space > DISK_THRESHOLD
- selected_datastores[ds] = score_datastore(ds, disk_space)
- else
- report << "Skipping datastore #{ds.name}. Free space #{ds.real_free_space}, " +
- "requested #{disk_space}, threshold #{DISK_THRESHOLD}" if report
- end
- }
- return nil if selected_datastores.empty?
- pick_random_with_score(selected_datastores)
- end
-
- def get_datastore_cluster(datacenter, datastore)
- datacenter = datacenters[datacenter]
- if !datacenter.nil?
- datacenter.clusters.each do |c|
- c.persistent_datastores.select do |ds|
- yield c if ds.name == datastore
- end
- end
+ raise "No available resources"
  end
  end

- def find_persistent_datastore(dc_name, cluster_name, disk_space)
- cluster = get_cluster(dc_name, cluster_name)
- return nil if cluster.nil?
+ private

- chosen_datastore = nil
- @lock.synchronize do
- chosen_datastore = pick_datastore(cluster.persistent_datastores, disk_space)
- break if chosen_datastore.nil?
-
- chosen_datastore.unaccounted_space += disk_space
+ # Updates the resource models from vSphere.
+ # @return [void]
+ def update
+ @datacenters = {}
+ Config.vcenter.datacenters.each_value do |config|
+ @datacenters[config.name] = Datacenter.new(config)
  end
- chosen_datastore
+ @last_update = Time.now.to_i
  end

- def find_resources(memory, disk_size, persistent_disks_size, cluster_affinity, report = nil)
- cluster = nil
- datastore = nil
-
- # account for swap
- disk_size += memory
-
- @lock.synchronize do
- resources = filter_used_resources(memory, disk_size, persistent_disks_size,
- cluster_affinity, report)
- break if resources.empty?
-
- scored_resources = {}
- resources.each do |resource|
- cluster, datastore = resource
- scored_resources[resource] = score_resource(cluster, datastore, memory, disk_size)
- end
-
- scored_resources = scored_resources.sort_by { |resource| 1 - resource.last }
- scored_resources = scored_resources[0..2]
-
- scored_resources.each do |resource, score|
- cluster, datastore = resource
- @logger.debug("Cluster: #{cluster.inspect} Datastore: #{datastore.inspect} score: #{score}")
+ # Calculates the cluster locality for the provided persistent disks.
+ #
+ # @param [Array<Hash>] disks persistent disk specs.
+ # @return [Hash<String, Integer>] hash of cluster names to amount of
+ # persistent disk space is currently allocated on them.
+ def cluster_locality(disks)
+ locality = {}
+ disks.each do |disk|
+ cluster = disk[:cluster]
+ unless cluster.nil?
+ locality[cluster] ||= 0
+ locality[cluster] += disk[:size]
  end
-
- cluster, datastore = pick_random_with_score(scored_resources)
-
- @logger.debug("Picked: #{cluster.inspect} / #{datastore.inspect}")
-
- cluster.unaccounted_memory += memory
- datastore.unaccounted_space += disk_size
  end
-
- return [] if cluster.nil?
- [cluster, datastore]
+ locality.to_a
  end

- def get_resources(memory_size=1, disks=[])
- # Sort out the persistent and non persistent disks
- non_persistent_disks_size = 0
- persistent_disks = {}
- persistent_disks_size = 0
+ # Fill in the resource models on the provided persistent disk specs.
+ # @param [Array<Hash>] disks persistent disk specs.
+ # @return [void]
+ def populate_resources(disks)
  disks.each do |disk|
- if disk["persistent"]
- if !disk["datastore"].nil?
- # sanity check the persistent disks
- raise "Invalid persistent disk #{disk.pretty_inspect}" unless validate_persistent_datastore(disk["datacenter"], disk["datastore"])
-
- # sort the persistent disks into clusters they belong to
- get_datastore_cluster(disk["datacenter"], disk["datastore"]) { |cluster|
- persistent_disks[cluster] ||= 0
- persistent_disks[cluster] += disk["size"]
- }
+ unless disk[:ds_name].nil?
+ resources = persistent_datastore_resources(disk[:dc_name],
+ disk[:ds_name])
+ if resources
+ disk[:datacenter], disk[:cluster], disk[:datastore] = resources
  end
- persistent_disks_size += disk["size"]
- else
- non_persistent_disks_size += disk["size"]
- end
- end
- non_persistent_disks_size = 1 if non_persistent_disks_size == 0
- persistent_disks_size = 1 if persistent_disks_size == 0
-
- if !persistent_disks.empty?
- # Sort clusters by largest persistent disk footprint
- persistent_disks_by_size = persistent_disks.sort { |a, b| b[1] <=> a [1] }
-
- # Search for resources near the desired cluster
- persistent_disks_by_size.each do |cluster, size|
- resources = find_resources(memory_size, non_persistent_disks_size, persistent_disks_size - size, cluster)
- return resources unless resources.empty?
  end
- @logger.info("Ignoring datastore locality as we could not find any resources near persistent disks" +
- "#{persistent_disks.pretty_inspect}")
  end
-
- report = []
- resources = find_resources(memory_size, non_persistent_disks_size,
- persistent_disks_size, nil, report)
- raise "No available resources as #{report.join("\n")}" if resources.empty?
- resources
- end
-
- def score_datastore(datastore, disk)
- percent_of_free_disk = 1 - (disk.to_f / datastore.real_free_space)
- percent_of_total_disk = 1 - (disk.to_f / datastore.total_space)
- percent_of_free_disk * 0.67 + percent_of_total_disk * 0.33
  end

- def score_resource(cluster, datastore, memory, disk)
- percent_of_free_mem = 1 - (memory.to_f / cluster.real_free_memory)
- percent_of_total_mem = 1 - (memory.to_f / cluster.total_memory)
- percent_free_mem_left = (cluster.real_free_memory.to_f - memory) / cluster.total_memory
- memory_score = percent_of_free_mem * 0.5 + percent_of_total_mem * 0.25 + percent_free_mem_left * 0.25
-
- cpu_score = cluster.idle_cpu
- disk_score = score_datastore(datastore, disk)
- memory_score * 0.5 + cpu_score * 0.25 + disk_score * 0.25
- end
-
- def pick_random_with_score(elements)
- score_sum = 0
- elements.each { |element| score_sum += element[1] }
-
- random_score = rand * score_sum
- base_score = 0
-
- elements.each do |element|
- score = element[1]
- return element[0] if base_score + score > random_score
- base_score += score
+ # Find the resource models for a given datacenter and datastore name.
+ #
+ # Has to traverse the resource hierarchy to find the cluster, then returns
+ # all of the resources.
+ #
+ # @param [String] dc_name datacenter name.
+ # @param [String] ds_name datastore name.
+ # @return [Array] array/tuple of Datacenter, Cluster, and Datastore.
+ def persistent_datastore_resources(dc_name, ds_name)
+ datacenter = datacenters[dc_name]
+ return nil if datacenter.nil?
+ datacenter.clusters.each_value do |cluster|
+ datastore = cluster.persistent(ds_name)
+ return [datacenter, cluster, datastore] unless datastore.nil?
  end
-
- # fall through
- elements.last[0]
+ nil
  end

+ # Filters out all of the persistent disk specs that were already allocated
+ # in the cluster.
+ #
+ # @param [Resources::Cluster] cluster specified cluster.
+ # @param [Array<Hash>] disks disk specs.
+ # @return [Array<Hash>] filtered out disk specs.
+ def persistent_sizes_for_cluster(cluster, disks)
+ disks.select { |disk| disk[:cluster] != cluster }.
+ collect { |disk| disk[:size] }
+ end
  end
-
  end
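
Note (not part of the diff): the 0.5.0 rewrite drops the old filter/score helpers (filter_used_resources, score_resource, pick_random_with_score) in favor of a Scorer-based, weighted-random placement over per-cluster resource models. Below is a minimal sketch of how a caller inside the CPI might drive the new methods. It assumes VSphereCloud::Config has already been initialized by the CPI, that sizes are expressed in MB (consistent with BYTES_IN_MB and the *_THRESHOLD constants), and the require path and literal names ("dc1", "persistent_ds1") are purely illustrative.

    # Hypothetical caller of the reworked placement API; not shipped with the gem.
    require "cloud/vsphere/resources"   # assumed require path

    # Resources#initialize reads Config.client and Config.logger, so the CPI
    # must have configured VSphereCloud::Config before this point.
    resources = VSphereCloud::Resources.new

    # Persistent disk specs are plain hashes keyed by symbols, mirroring the
    # keys read by populate_resources and cluster_locality.
    persistent = [
      { :size => 4096, :dc_name => "dc1", :ds_name => "persistent_ds1" }
    ]

    # Ask for 1024 MB of RAM and 8192 MB of ephemeral disk; place returns a
    # [cluster, datastore] pair or raises "No available resources".
    cluster, datastore = resources.place(1024, 8192, persistent)
    puts "VM placed on #{cluster.name}, ephemeral disk on #{datastore.name}"

    # A new persistent disk is reserved separately, against a specific cluster.
    disk_ds = resources.place_persistent_datastore("dc1", cluster.name, 4096)
    puts "persistent disk on #{disk_ds.name}" if disk_ds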