bosh_vsphere_cpi 1.2865.0 → 1.2881.0
- checksums.yaml +4 -4
- data/bin/vsphere_cpi +0 -16
- data/bin/vsphere_cpi_console +2 -9
- data/lib/cloud/vsphere/client.rb +38 -41
- data/lib/cloud/vsphere/cloud.rb +120 -273
- data/lib/cloud/vsphere/config.rb +0 -9
- data/lib/cloud/vsphere/disk_provider.rb +91 -0
- data/lib/cloud/vsphere/file_provider.rb +3 -1
- data/lib/cloud/vsphere/fixed_cluster_placer.rb +18 -7
- data/lib/cloud/vsphere/resources/cluster.rb +40 -81
- data/lib/cloud/vsphere/resources/datacenter.rb +37 -10
- data/lib/cloud/vsphere/resources/datastore.rb +21 -10
- data/lib/cloud/vsphere/resources/disk/disk_config.rb +5 -5
- data/lib/cloud/vsphere/resources/disk/ephemeral_disk.rb +3 -3
- data/lib/cloud/vsphere/resources/disk.rb +19 -0
- data/lib/cloud/vsphere/resources/resource_pool.rb +18 -13
- data/lib/cloud/vsphere/resources/scorer.rb +40 -97
- data/lib/cloud/vsphere/resources/vm.rb +185 -0
- data/lib/cloud/vsphere/resources.rb +106 -161
- data/lib/cloud/vsphere/version.rb +1 -1
- data/lib/cloud/vsphere/vm_creator.rb +45 -46
- data/lib/cloud/vsphere/vm_creator_builder.rb +2 -1
- data/lib/cloud/vsphere/vm_provider.rb +16 -0
- data/lib/cloud/vsphere.rb +1 -1
- metadata +94 -38
- data/db/migrations/20120123235022_initial.rb +0 -24
- data/db/migrations/20121204174707_add_uuid_to_disks.rb +0 -14
- data/lib/cloud/vsphere/models/disk.rb +0 -11
- data/lib/cloud/vsphere/resources/disk/persistent_disk.rb +0 -110
data/lib/cloud/vsphere/resources/scorer.rb:
@@ -2,128 +2,71 @@
 
 module VSphereCloud
   class Resources
-
-    # Resource Scorer.
     class Scorer
 
-      # Creates a new Scorer given a cluster and requested memory and storage.
-      #
-      # @param [Cluster] cluster requested cluster.
-      # @param [Integer] memory required memory.
-      # @param [Array<Integer>] ephemeral list of required ephemeral disk sizes.
-      # @param [Array<Integer>] persistent list of required persistent disk
-      #   sizes.
-      def initialize(config, cluster, memory, ephemeral, persistent)
-        @logger = config.logger
-        @cluster = cluster
-        @memory = memory
-        @ephemeral = ephemeral
-        @persistent = persistent
-
-        @free_memory = cluster.free_memory
-
-        @free_ephemeral = []
-        cluster.ephemeral_datastores.each_value do |datastore|
-          @free_ephemeral << datastore.free_space
-        end
-
-        @free_persistent = []
-        cluster.persistent_datastores.each_value do |datastore|
-          @free_persistent << datastore.free_space
-        end
-
-        @free_shared = []
-        cluster.shared_datastores.each_value do |datastore|
-          @free_shared << datastore.free_space
-        end
-      end
-
       # Run the scoring function and return the placement score for the required
       # resources.
       #
+      # @param [Logging::Logger] logger logger to which to log.
+      # @param [Integer] requested_memory required memory.
+      # @param [Cluster] cluster requested cluster.
+      # @param [Integer] requested_ephemeral_size disk size in mb.
+      # @param [Array<Integer>] requested_persistent_sizes list of requested persistent sizes in mb.
       # @return [Integer] score.
-      def score
-
-
-
-        if !min_persistent.nil? && min_persistent < min_shared
-          min_shared = min_persistent
-        end
-
-        # Filter out any datastores that are below the min threshold
-        filter(@free_ephemeral, min_ephemeral + DISK_THRESHOLD)
-        filter(@free_shared, min_shared + DISK_THRESHOLD)
-        unless @persistent.empty?
-          filter(@free_persistent, min_persistent + DISK_THRESHOLD)
-        end
+      def self.score(logger, cluster, requested_memory, requested_ephemeral_size, requested_persistent_sizes)
+        free_memory = cluster.free_memory
+        ephemeral_pool = DiskPool.new(cluster.ephemeral_datastores.values.map(&:free_space))
+        persistent_pool = DiskPool.new(cluster.persistent_datastores.values.map(&:free_space))
 
-
+        successful_allocations = 0
         loop do
-
-          if
-
+          free_memory -= requested_memory
+          if free_memory < MEMORY_HEADROOM
+            logger.debug("#{cluster.name} memory bound")
             break
           end
 
-
-
-
-            @logger.debug("#{@cluster.name} ephemeral disk bound")
-            break
-          end
+          unless ephemeral_pool.consume_disk(requested_ephemeral_size)
+            logger.debug("#{cluster.name} ephemeral disk bound")
+            break
           end
 
-          unless
-
-
-
-
-              unless consume_disk(@free_shared, size, min_shared)
-                consumed_all = false
-                @logger.debug("#{@cluster.name} persistent disk bound")
-                break
-              end
-            end
+          unless requested_persistent_sizes.empty?
+            placed = requested_persistent_sizes.select { |size| persistent_pool.consume_disk(size) }
+            unless requested_persistent_sizes == placed
+              logger.debug("#{cluster.name} persistent disk bound")
+              break
             end
-            break unless consumed_all
           end
 
-
+          successful_allocations += 1
         end
 
-
+        successful_allocations
       end
 
       private
 
-
-
-
-
-      # @param [Integer] threshold free space threshold
-      # @return [Array<Integer>] filtered pool.
-      def filter(pool, threshold)
-        pool.delete_if { |size| size < threshold }
-      end
+      class DiskPool
+        def initialize(sizes)
+          @sizes = sizes
+        end
 
-
-
-
-
-
-
-
-
-
-
-
-          if pool[0] >= size + DISK_THRESHOLD
-            pool[0] -= size
-            pool.delete_at(0) if pool[0] < min + DISK_THRESHOLD
-            return true
+        # Consumes disk space from a datastore pool.
+        #
+        # @param [Integer] requested_size requested disk size.
+        # @return [true, false] boolean indicating that the disk space was consumed.
+        def consume_disk(requested_size)
+          unless @sizes.empty?
+            @sizes.sort! { |a, b| b <=> a }
+            if @sizes[0] >= requested_size + DISK_HEADROOM
+              @sizes[0] -= requested_size
+              return true
+            end
           end
+
+          false
         end
-        false
       end
     end
   end
data/lib/cloud/vsphere/resources/vm.rb (new file):
@@ -0,0 +1,185 @@
+module VSphereCloud
+  class Resources
+    class VM
+      include VimSdk
+      include RetryBlock
+
+      attr_reader :mob, :cid
+
+      def initialize(cid, mob, client, logger)
+        @client = client
+        @mob = mob
+        @cid = cid
+        @logger = logger
+      end
+
+      def inspect
+        "<VM: #{@mob} / #{@cid}>"
+      end
+
+      def cluster
+        cluster = cloud_searcher.get_properties(host_properties['parent'], Vim::ClusterComputeResource, 'name', ensure_all: true)
+        cluster['name']
+      end
+
+      def accessible_datastores
+        host_properties['datastore'].map do |store|
+          ds = cloud_searcher.get_properties(store, Vim::Datastore, 'info', ensure_all: true)
+          ds['info'].name
+        end
+      end
+
+      def datacenter
+        @client.find_parent(@mob, Vim::Datacenter)
+      end
+
+      def powered_on?
+        power_state == Vim::VirtualMachine::PowerState::POWERED_ON
+      end
+
+      def devices
+        properties['config.hardware.device']
+      end
+
+      def nics
+        devices.select { |device| device.kind_of?(Vim::Vm::Device::VirtualEthernetCard) }
+      end
+
+      def cdrom
+        devices.find { |device| device.kind_of?(Vim::Vm::Device::VirtualCdrom) }
+      end
+
+      def system_disk
+        devices.find { |device| device.kind_of?(Vim::Vm::Device::VirtualDisk) }
+      end
+
+      def persistent_disks
+        devices.select do |device|
+          device.kind_of?(Vim::Vm::Device::VirtualDisk) &&
+            device.backing.disk_mode == Vim::Vm::Device::VirtualDiskOption::DiskMode::INDEPENDENT_PERSISTENT
+        end
+      end
+
+      def pci_controller
+        devices.find { |device| device.kind_of?(Vim::Vm::Device::VirtualPCIController) }
+      end
+
+      def fix_device_unit_numbers(device_changes)
+        controllers_available_unit_numbers = Hash.new { |h,k| h[k] = (0..15).to_a }
+        devices.each do |device|
+          if device.controller_key
+            available_unit_numbers = controllers_available_unit_numbers[device.controller_key]
+            available_unit_numbers.delete(device.unit_number)
+          end
+        end
+
+        device_changes.each do |device_change|
+          device = device_change.device
+          if device.controller_key && device.unit_number.nil?
+            available_unit_numbers = controllers_available_unit_numbers[device.controller_key]
+            raise "No available unit numbers for device: #{device.inspect}" if available_unit_numbers.empty?
+            device.unit_number = available_unit_numbers.shift
+          end
+        end
+      end
+
+      def shutdown
+        @logger.debug('Waiting for the VM to shutdown')
+        begin
+          begin
+            @mob.shutdown_guest
+          rescue => e
+            @logger.debug("Ignoring possible race condition when a VM has powered off by the time we ask it to shutdown: #{e.inspect}")
+          end
+
+          wait_until_off(60)
+        rescue VSphereCloud::Cloud::TimeoutException
+          @logger.debug('The guest did not shutdown in time, requesting it to power off')
+          @client.power_off_vm(@mob)
+        end
+      end
+
+      def power_off
+        retry_block do
+          question = properties['runtime.question']
+          if question
+            choices = question.choice
+            @logger.info("VM is blocked on a question: #{question.text}, " +
+              "providing default answer: #{choices.choice_info[choices.default_index].label}")
+            @client.answer_vm(@mob, question.id, choices.choice_info[choices.default_index].key)
+            power_state = cloud_searcher.get_property(@mob, Vim::VirtualMachine, 'runtime.powerState')
+          else
+            power_state = properties['runtime.powerState']
+          end
+
+          if power_state != Vim::VirtualMachine::PowerState::POWERED_OFF
+            @logger.info("Powering off vm: #{@cid}")
+            @client.power_off_vm(@mob)
+          end
+        end
+      end
+
+      def disk_by_cid(disk_cid)
+        devices.find do |d|
+          d.kind_of?(Vim::Vm::Device::VirtualDisk) &&
+            d.backing.file_name.end_with?("/#{disk_cid}.vmdk")
+        end
+      end
+
+      def reboot
+        @mob.reboot_guest
+      end
+
+      def power_on
+        @client.power_on_vm(datacenter, @mob)
+      end
+
+      def delete
+        retry_block { @client.delete_vm(@mob) }
+      end
+
+      def reload
+        @properties = nil
+        @host_properties = nil
+      end
+
+      def wait_until_off(timeout)
+        started = Time.now
+        loop do
+          power_state = cloud_searcher.get_property(@mob, Vim::VirtualMachine, 'runtime.powerState')
+          break if power_state == Vim::VirtualMachine::PowerState::POWERED_OFF
+          raise VSphereCloud::Cloud::TimeoutException if Time.now - started > timeout
+          sleep(1.0)
+        end
+      end
+
+      private
+
+      def power_state
+        properties['runtime.powerState']
+      end
+
+      def properties
+        @properties ||= cloud_searcher.get_properties(
+          @mob,
+          Vim::VirtualMachine,
+          ['runtime.powerState', 'runtime.question', 'config.hardware.device', 'name', 'runtime'],
+          ensure: ['config.hardware.device', 'runtime']
+        )
+      end
+
+      def host_properties
+        @host_properties ||= cloud_searcher.get_properties(
+          properties['runtime'].host,
+          Vim::HostSystem,
+          ['datastore', 'parent'],
+          ensure_all: true
+        )
+      end
+
+      def cloud_searcher
+        @client.cloud_searcher
+      end
+    end
+  end
+end
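Resources::VM above is a thin wrapper around a vSphere VirtualMachine managed object plus the CPI client. A hypothetical sketch of wiring it up, with OpenStruct doubles standing in for the real managed object and VSphereCloud::Client (only the wrapper's pass-through behaviour is exercised, and the CPI code is assumed to be loaded):

require 'logger'
require 'ostruct'

stub_mob    = OpenStruct.new(reboot_guest: nil)   # real code passes a Vim::VirtualMachine reference
stub_client = OpenStruct.new                      # real code passes a VSphereCloud::Client

vm = VSphereCloud::Resources::VM.new('vm-42', stub_mob, stub_client, Logger.new($stdout))
vm.cid      # => "vm-42"
vm.reboot   # delegates straight to the managed object's reboot_guest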
data/lib/cloud/vsphere/resources.rb:
@@ -2,14 +2,15 @@ require 'cloud/vsphere/resources/datacenter'
 
 module VSphereCloud
   class Resources
-
-
+    MEMORY_HEADROOM = 128
+    DISK_HEADROOM = 1024
     STALE_TIMEOUT = 60
     BYTES_IN_MB = 1024 * 1024
 
     attr_reader :drs_rules
 
-    def initialize(config)
+    def initialize(datacenter, config)
+      @datacenter = datacenter
       @config = config
       @logger = config.logger
       @last_update = 0
@@ -17,128 +18,111 @@ module VSphereCloud
       @drs_rules = []
     end
 
-    # Returns the list of datacenters available for placement.
-    #
-    # Will lazily load them and reload the data when it's stale.
-    #
-    # @return [List<Resources::Datacenter>] datacenters.
-    def datacenters
-      @lock.synchronize do
-        update if Time.now.to_i - @last_update > STALE_TIMEOUT
-      end
-      @datacenters
-    end
-
-    # Returns the persistent datastore for the requested context.
-    #
-    # @param [String] dc_name datacenter name.
-    # @param [String] cluster_name cluster name.
-    # @param [String] datastore_name datastore name.
-    # @return [Resources::Datastore] persistent datastore.
-    def persistent_datastore(dc_name, cluster_name, datastore_name)
-      datacenter = datacenters[dc_name]
-      return nil if datacenter.nil?
-      cluster = datacenter.clusters[cluster_name]
-      return nil if cluster.nil?
-      cluster.persistent(datastore_name)
-    end
-
-    # Validate that the persistent datastore is still valid so we don't have to
-    # move the disk.
-    #
-    # @param [String] dc_name datacenter name.
-    # @param [String] datastore_name datastore name.
-    # @return [true, false] true iff the datastore still exists and is in the
-    #   persistent pool.
-    def validate_persistent_datastore(dc_name, datastore_name)
-      datacenter = datacenters[dc_name]
-      if datacenter.nil?
-        raise "Invalid datacenter #{dc_name} #{datacenters.inspect}"
-      end
-      datacenter.clusters.each_value do |cluster|
-        return true unless cluster.persistent(datastore_name).nil?
-      end
-      false
-    end
-
     # Place the persistent datastore in the given datacenter and cluster with
     # the requested disk space.
     #
-    # @param [String] dc_name datacenter name.
     # @param [String] cluster_name cluster name.
-    # @param [Integer]
+    # @param [Integer] disk_size_in_mb disk size in mb.
     # @return [Datastore?] datastore if it was placed succesfuly.
-    def
+    def pick_persistent_datastore_in_cluster(cluster_name, disk_size_in_mb)
       @lock.synchronize do
-
-        return nil if datacenter.nil?
-        cluster = datacenter.clusters[cluster_name]
+        cluster = @datacenter.clusters[cluster_name]
         return nil if cluster.nil?
-
-
-        datastore.allocate(disk_space)
-        return datastore
+
+        pick_datastore(cluster, disk_size_in_mb)
       end
     end
 
-    # Find a
+    # Find a cluster for a vm with the requested memory and ephemeral storage, attempting
+    # to allocate it near existing persistent disks.
     #
-    # @param [Integer]
-    # @param [Integer]
-    # @param [Array<
-    # @return [
-
-    def place(memory, ephemeral, persistent)
-      populate_resources(persistent)
-
-      # calculate locality to prioritizing clusters that contain the most
-      # persistent data.
-      locality = cluster_locality(persistent)
-      locality.sort! { |a, b| b[1] <=> a[1] }
-
+    # @param [Integer] requested_memory_in_mb requested memory.
+    # @param [Integer] requested_ephemeral_disk_size_in_mb requested ephemeral storage.
+    # @param [Array<Resources::Disk>] existing_persistent_disks existing persistent disks, if any.
+    # @return [Cluster] selected cluster if the resources were placed successfully, otherwise raises.
+    def pick_cluster_for_vm(requested_memory_in_mb, requested_ephemeral_disk_size_in_mb, existing_persistent_disks)
       @lock.synchronize do
-        locality
-
-
-
-
-
-
-          cluster.allocate(memory)
-          datastore.allocate(ephemeral)
-          return [cluster, datastore]
-        end
+        # calculate locality to prioritizing clusters that contain the most persistent data.
+        clusters = @datacenter.clusters.values
+        persistent_disk_index = PersistentDiskIndex.new(clusters, existing_persistent_disks)
+
+        scored_clusters = clusters.map do |cluster|
+          persistent_disk_not_in_this_cluster = existing_persistent_disks.reject do |disk|
+            persistent_disk_index.clusters_connected_to_disk(disk).include?(cluster)
           end
+
+          score = Scorer.score(
+            @config.logger,
+            cluster,
+            requested_memory_in_mb,
+            requested_ephemeral_disk_size_in_mb,
+            persistent_disk_not_in_this_cluster.map(&:size_in_mb)
+          )
+
+          [cluster, score]
         end
 
-
-
-
+        acceptable_clusters = scored_clusters.select { |_, score| score > 0 }
+
+        @logger.debug("Acceptable clusters: #{acceptable_clusters.inspect}")
+
+        if acceptable_clusters.empty?
+          total_persistent_size = existing_persistent_disks.map(&:size_in_mb).inject(0, :+)
+          cluster_infos = clusters.map { |cluster| describe_cluster(cluster) }
+
+          raise "Unable to allocate vm with #{requested_memory_in_mb}mb RAM, " +
+            "#{requested_ephemeral_disk_size_in_mb / 1024}gb ephemeral disk, " +
+            "and #{total_persistent_size / 1024}gb persistent disk from any cluster.\n#{cluster_infos.join(", ")}."
         end
 
-
-
-
-
-
-
-
-
+        acceptable_clusters = acceptable_clusters.sort_by do |cluster, _score|
+          persistent_disk_index.disks_connected_to_cluster(cluster).map(&:size_in_mb).inject(0, :+)
+        end.reverse
+
+        if acceptable_clusters.any? { |cluster, _| persistent_disk_index.disks_connected_to_cluster(cluster).any? }
+          @logger.debug('Choosing cluster with the greatest available disk')
+          selected_cluster, _ = acceptable_clusters.first
+        else
+          @logger.debug('Choosing cluster by weighted random')
+          selected_cluster = Util.weighted_random(acceptable_clusters)
         end
 
-
+        @logger.debug("Selected cluster '#{selected_cluster.name}'")
+
+        selected_cluster.allocate(requested_memory_in_mb)
+        selected_cluster
+      end
+    end
+
+    def describe_cluster(cluster)
+      "#{cluster.name} has #{cluster.free_memory}mb/" +
+        "#{cluster.total_free_ephemeral_disk_in_mb / 1024}gb/" +
+        "#{cluster.total_free_persistent_disk_in_mb / 1024}gb"
+    end
 
-
+    def pick_ephemeral_datastore(cluster, disk_size_in_mb)
+      @lock.synchronize do
+        datastore = cluster.pick_ephemeral(disk_size_in_mb)
+        if datastore.nil?
+          raise Bosh::Clouds::NoDiskSpace.new(
+            "Not enough ephemeral disk space (#{disk_size_in_mb}MB) in cluster #{cluster.name}")
+        end
 
-      datastore
+        datastore.allocate(disk_size_in_mb)
+        datastore
+      end
+    end
 
-
-
-
-
+    def pick_persistent_datastore(cluster, disk_size_in_mb)
+      @lock.synchronize do
+        datastore = cluster.pick_persistent(disk_size_in_mb)
+        if datastore.nil?
+          raise Bosh::Clouds::NoDiskSpace.new(
+            "Not enough persistent disk space (#{disk_size_in_mb}MB) in cluster #{cluster.name}")
         end
 
-
+        datastore.allocate(disk_size_in_mb)
+        datastore
       end
     end
 
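Taken together, the new pick_cluster_for_vm scores every cluster, drops the ones that cannot fit the VM, prefers clusters that already hold the VM's persistent disks, and otherwise falls back to a score-weighted random pick. A standalone sketch of that selection logic on made-up data (plain Ruby, not the gem's API; the weighted pick stands in for Util.weighted_random):

scored   = { 'cluster-a' => 3, 'cluster-b' => 0, 'cluster-c' => 1 }   # Scorer results per cluster
locality = { 'cluster-c' => 20_480 }                                  # mb of existing persistent disk per cluster

acceptable = scored.select { |_, score| score > 0 }
raise 'no cluster can fit the vm' if acceptable.empty?

with_local_disks = acceptable.keys.select { |name| locality.fetch(name, 0) > 0 }
selected =
  if with_local_disks.any?
    with_local_disks.max_by { |name| locality[name] }   # greatest locally attached disk wins
  else
    total  = acceptable.values.sum                      # otherwise pick at random, weighted by score
    target = rand(total)
    acceptable.find { |_, score| (target -= score) < 0 }.first
  end

puts "placing vm on #{selected}"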
@@ -146,74 +130,35 @@ module VSphereCloud
 
     attr_reader :config
 
-
-
-
-
-
-      @datacenters = { datacenter.name => datacenter }
-      @last_update = Time.now.to_i
+    def pick_datastore(cluster, disk_space)
+      datastore = cluster.pick_persistent(disk_space)
+      return nil if datastore.nil?
+      datastore.allocate(disk_space)
+      datastore
     end
 
-
-
-
-
-
-
-
-
-
-        unless cluster.nil?
-          locality[cluster] ||= 0
-          locality[cluster] += disk[:size]
-        end
+    class PersistentDiskIndex
+      def initialize(clusters, existing_persistent_disks)
+        @clusters_to_disks = Hash[*clusters.map do |cluster|
+          [cluster, existing_persistent_disks.select { |disk| cluster_includes_datastore?(cluster, disk.datastore) }]
+        end.flatten(1)]
+
+        @disks_to_clusters = Hash[*existing_persistent_disks.map do |disk|
+          [disk, clusters.select { |cluster| cluster_includes_datastore?(cluster, disk.datastore) }]
+        end.flatten(1)]
       end
-      locality.to_a
-    end
 
-
-
-    # @return [void]
-    def populate_resources(disks)
-      disks.each do |disk|
-        unless disk[:ds_name].nil?
-          resources = persistent_datastore_resources(disk[:dc_name],
-                                                      disk[:ds_name])
-          if resources
-            disk[:datacenter], disk[:cluster], disk[:datastore] = resources
-          end
-        end
+      def cluster_includes_datastore?(cluster, datastore)
+        cluster.persistent(datastore.name) != nil
       end
-    end
 
-
-
-    # Has to traverse the resource hierarchy to find the cluster, then returns
-    # all of the resources.
-    #
-    # @param [String] dc_name datacenter name.
-    # @param [String] ds_name datastore name.
-    # @return [Array] array/tuple of Datacenter, Cluster, and Datastore.
-    def persistent_datastore_resources(dc_name, ds_name)
-      datacenter = datacenters[dc_name]
-      return nil if datacenter.nil?
-      datacenter.clusters.each_value do |cluster|
-        datastore = cluster.persistent(ds_name)
-        return [datacenter, cluster, datastore] unless datastore.nil?
+      def disks_connected_to_cluster(cluster)
+        @clusters_to_disks[cluster]
      end
-      nil
-    end
 
-
-
-
-    # @param [Resources::Cluster] cluster specified cluster.
-    # @param [Array<Hash>] disks disk specs.
-    # @return [Array<Hash>] filtered out disk specs.
-    def persistent_sizes_for_cluster(cluster, disks)
-      disks.select { |disk| disk[:cluster] != cluster }.
-        collect { |disk| disk[:size] }
+      def clusters_connected_to_disk(disk)
+        @disks_to_clusters[disk]
+      end
     end
   end
 end