bosh_vsphere_cpi 0.4.9 → 0.5.0
This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
- data/lib/cloud/vsphere/client.rb +3 -2
- data/lib/cloud/vsphere/cloud.rb +84 -95
- data/lib/cloud/vsphere/config.rb +254 -0
- data/lib/cloud/vsphere/resources.rb +164 -514
- data/lib/cloud/vsphere/resources/cluster.rb +294 -0
- data/lib/cloud/vsphere/resources/datacenter.rb +86 -0
- data/lib/cloud/vsphere/resources/datastore.rb +61 -0
- data/lib/cloud/vsphere/resources/folder.rb +54 -0
- data/lib/cloud/vsphere/resources/resource_pool.rb +39 -0
- data/lib/cloud/vsphere/resources/scorer.rb +130 -0
- data/lib/cloud/vsphere/resources/util.rb +44 -0
- data/lib/cloud/vsphere/version.rb +1 -1
- data/spec/spec_helper.rb +2 -0
- data/spec/unit/cloud/vsphere/resources/cluster_spec.rb +383 -0
- data/spec/unit/cloud/vsphere/resources/datacenter_spec.rb +72 -0
- data/spec/unit/cloud/vsphere/resources/datastore_spec.rb +43 -0
- data/spec/unit/cloud/vsphere/resources/folder_spec.rb +63 -0
- data/spec/unit/cloud/vsphere/resources/resource_pool_spec.rb +42 -0
- data/spec/unit/cloud/vsphere/resources/scorer_spec.rb +73 -0
- data/spec/unit/cloud/vsphere/resources/util_spec.rb +35 -0
- data/spec/unit/cloud/vsphere/resources_spec.rb +216 -0
- metadata +48 -15
- data/spec/unit/vsphere_resource_spec.rb +0 -274
data/lib/cloud/vsphere/resources/resource_pool.rb
ADDED
@@ -0,0 +1,39 @@
+# Copyright (c) 2009-2012 VMware, Inc.
+
+module VSphereCloud
+  class Resources
+
+    # ResourcePool resource.
+    class ResourcePool
+      include VimSdk
+
+      # @!attribute mob
+      #   @return [Vim::ResourcePool] resource pool vSphere MOB.
+      attr_accessor :mob
+
+      # Creates a new ResourcePool resource.
+      #
+      # @param [Cluster] cluster parent cluster.
+      # @param [Vim::ResourcePool] root_resource_pool cluster's root resource
+      #   pool.
+      def initialize(cluster, root_resource_pool)
+        if cluster.config.resource_pool.nil?
+          @mob = root_resource_pool
+        else
+          client = Config.client
+          logger = Config.logger
+          @mob = client.get_managed_object(
+              Vim::ResourcePool,
+              :root => root_resource_pool,
+              :name => cluster.config.resource_pool)
+          logger.debug("Found requested resource pool: #@mob")
+        end
+      end
+
+      # @return [String] debug resource pool information.
+      def inspect
+        "<Resource Pool: #@mob>"
+      end
+    end
+  end
+end
data/lib/cloud/vsphere/resources/scorer.rb
ADDED
@@ -0,0 +1,130 @@
+# Copyright (c) 2009-2012 VMware, Inc.
+
+module VSphereCloud
+  class Resources
+
+    # Resource Scorer.
+    class Scorer
+
+      # Creates a new Scorer given a cluster and requested memory and storage.
+      #
+      # @param [Cluster] cluster requested cluster.
+      # @param [Integer] memory required memory.
+      # @param [Array<Integer>] ephemeral list of required ephemeral disk sizes.
+      # @param [Array<Integer>] persistent list of required persistent disk
+      #   sizes.
+      def initialize(cluster, memory, ephemeral, persistent)
+        @logger = Config.logger
+        @cluster = cluster
+        @memory = memory
+        @ephemeral = ephemeral
+        @persistent = persistent
+
+        @free_memory = cluster.free_memory
+
+        @free_ephemeral = []
+        cluster.ephemeral_datastores.each_value do |datastore|
+          @free_ephemeral << datastore.free_space
+        end
+
+        @free_persistent = []
+        cluster.persistent_datastores.each_value do |datastore|
+          @free_persistent << datastore.free_space
+        end
+
+        @free_shared = []
+        cluster.shared_datastores.each_value do |datastore|
+          @free_shared << datastore.free_space
+        end
+      end
+
+      # Run the scoring function and return the placement score for the required
+      # resources.
+      #
+      # @return [Integer] score.
+      def score
+        min_ephemeral = @ephemeral
+        min_persistent = @persistent.min
+        min_shared = min_ephemeral
+        if !min_persistent.nil? && min_persistent < min_shared
+          min_shared = min_persistent
+        end
+
+        # Filter out any datastores that are below the min threshold
+        filter(@free_ephemeral, min_ephemeral + DISK_THRESHOLD)
+        filter(@free_shared, min_shared + DISK_THRESHOLD)
+        unless @persistent.empty?
+          filter(@free_persistent, min_persistent + DISK_THRESHOLD)
+        end
+
+        count = 0
+        loop do
+          @free_memory -= @memory
+          if @free_memory < MEMORY_THRESHOLD
+            @logger.debug("#{@cluster.name} memory bound")
+            break
+          end
+
+          consumed = consume_disk(@free_ephemeral, @ephemeral, min_ephemeral)
+          unless consumed
+            unless consume_disk(@free_shared, @ephemeral, min_shared)
+              @logger.debug("#{@cluster.name} ephemeral disk bound")
+              break
+            end
+          end
+
+          unless @persistent.empty?
+            consumed_all = true
+            @persistent.each do |size|
+              consumed = consume_disk(@free_persistent, size, min_persistent)
+              unless consumed
+                unless consume_disk(@free_shared, size, min_shared)
+                  consumed_all = false
+                  @logger.debug("#{@cluster.name} persistent disk bound")
+                  break
+                end
+              end
+            end
+            break unless consumed_all
+          end
+
+          count += 1
+        end
+
+        count
+      end
+
+      private
+
+      # Filter out datastores from the pool that are below the free space
+      # threshold.
+      #
+      # @param [Array<Integer>] pool datastore pool.
+      # @param [Integer] threshold free space threshold
+      # @return [Array<Integer>] filtered pool.
+      def filter(pool, threshold)
+        pool.delete_if { |size| size < threshold }
+      end
+
+      # Consumes disk space from a datastore pool.
+      #
+      # @param [Array<Integer>] pool datastore pool.
+      # @param [Integer] size requested disk size.
+      # @param [Integer] min requested disk size, so the datastore can be
+      #   removed from the pool if it falls below this threshold.
+      # @return [true, false] boolean indicating that the disk space was
+      #   consumed.
+      def consume_disk(pool, size, min)
+        unless pool.empty?
+          pool.sort! { |a, b| b <=> a }
+          if pool[0] >= size + DISK_THRESHOLD
+            pool[0] -= size
+            pool.delete_at(0) if pool[0] < min + DISK_THRESHOLD
+            return true
+          end
+        end
+        false
+      end
+    end
+  end
+end
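For orientation, here is a self-contained sketch of the greedy counting idea that Scorer#score implements: repeatedly deduct one deployment unit's memory and disk from the free pools and count how many units fit before a threshold is hit. The headroom values and the single ephemeral-only datastore pool below are assumptions made for the example; they are not the gem's actual constants or pool handling.

# Simplified stand-in for the scorer's loop (illustrative values only).
MEMORY_HEADROOM = 128    # MB of memory that must stay free (assumed)
DISK_HEADROOM   = 1024   # MB of free space to leave on a datastore (assumed)

def placement_score(free_memory, memory, free_disks, disk)
  count = 0
  loop do
    free_memory -= memory
    break if free_memory < MEMORY_HEADROOM

    # Like Scorer#consume_disk: take space from the datastore with the
    # most free room, as long as the headroom is preserved.
    free_disks.sort! { |a, b| b <=> a }
    break if free_disks.empty? || free_disks[0] < disk + DISK_HEADROOM
    free_disks[0] -= disk

    count += 1
  end
  count
end

# 16 GB free memory, 2 GB per VM, two datastores, 4 GB ephemeral disk per VM.
puts placement_score(16_384, 2_048, [20_480, 10_240], 4_096)  # => 6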
data/lib/cloud/vsphere/resources/util.rb
ADDED
@@ -0,0 +1,44 @@
+# Copyright (c) 2009-2012 VMware, Inc.
+
+module VSphereCloud
+  class Resources
+
+    # Resources common utility class.
+    class Util
+      class << self
+
+        # Returns the average value from a given CSV string.
+        #
+        # @param [String] csv CSV string of integers/floats.
+        # @return [Numeric] average value
+        def average_csv(csv)
+          values = csv.split(",")
+          result = 0
+          return result if values.empty?
+          values.each { |v| result += v.to_f }
+          result / values.size
+        end
+
+        # Returns a random item from the given list distributed based on the
+        # provided weight.
+        #
+        # @param [Array] list array of tuples containing the item and weight.
+        # @return [Object] random item based on provided weight.
+        def weighted_random(list)
+          return nil if list.empty?
+
+          weight_sum = list.inject(0) { |sum, x| sum + x[1] }
+          index = rand(weight_sum)
+          offset = 0
+          list.each do |el, weight|
+            offset += weight
+            return el if index < offset
+          end
+
+          # Should never happen
+          raise ArgumentError, "index: #{index} sum: #{weight_sum}"
+        end
+      end
+    end
+  end
+end
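Util.weighted_random is what the cluster's pick_persistent and pick_ephemeral use (see the specs below) to bias datastore selection toward the ones with more free space. A standalone sketch of that contract, reimplemented here with made-up weights so it runs on its own:

# Items are drawn with probability proportional to their weight.
def weighted_random(list)
  return nil if list.empty?

  weight_sum = list.inject(0) { |sum, (_, weight)| sum + weight }
  index = rand(weight_sum)          # integer in 0...weight_sum
  offset = 0
  list.each do |item, weight|
    offset += weight
    return item if index < offset
  end
end

# Datastores weighted by free space: "big" should win roughly 4x as often.
datastores = [["big", 4096], ["small", 1024]]
tally = Hash.new(0)
10_000.times { tally[weighted_random(datastores)] += 1 }
p tally   # e.g. {"big"=>8011, "small"=>1989}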
data/spec/unit/cloud/vsphere/resources/cluster_spec.rb
ADDED
@@ -0,0 +1,383 @@
+# Copyright (c) 2009-2012 VMware, Inc.
+
+require File.expand_path("../../../../../spec_helper", __FILE__)
+
+describe VSphereCloud::Resources::Cluster do
+  before(:each) do
+    @client = mock(:client)
+    VSphereCloud::Config.client = @client
+    VSphereCloud::Config.mem_overcommit = 1.0
+    @dc = mock(:datacenter)
+    @dc_config = mock(:datacenter_config)
+    @dc.stub!(:config).and_return(@dc_config)
+  end
+
+  describe :initialize do
+    it "should create a cluster" do
+      cluster_mob = mock(:cluster_mob)
+
+      @client.should_receive(:get_properties).with(
+          [], VimSdk::Vim::Datastore,
+          %w(summary.freeSpace summary.capacity name)).and_return(
+          {})
+
+      VSphereCloud::Resources::Cluster.any_instance.stub(
+          :fetch_cluster_utilization)
+
+      cluster_config = mock(:cluster_config)
+      cluster_config.stub!(:name).and_return("foo")
+      cluster_config.stub!(:resource_pool).and_return(nil)
+      cluster = VSphereCloud::Resources::Cluster.new(@dc, cluster_config, {
+          :obj => cluster_mob,
+          "datastore" => []
+      })
+
+      cluster.name.should == "foo"
+      cluster.mob.should == cluster_mob
+      cluster.ephemeral_datastores.should be_empty
+      cluster.persistent_datastores.should be_empty
+      cluster.shared_datastores.should be_empty
+    end
+
+    it "should create a cluster with dedicated datastores" do
+      datastore_config = mock(:datastore_config)
+      datastore_config.stub!(:ephemeral_pattern).and_return(/a/)
+      datastore_config.stub!(:persistent_pattern).and_return(/b/)
+      datastore_config.stub!(:allow_mixed).and_return(false)
+      @dc_config.stub!(:datastores).and_return(datastore_config)
+
+      datastore_a = mock(:datastore_a)
+      datastore_a.stub(:name).and_return("a")
+      datastore_a_mob = mock(:datastore_a_mob)
+      datastore_a_properties = {
+          "name" => "a",
+          "summary.capacity" => 128 * 1024 * 1024 * 1024,
+          "summary.freeSpace" => 32 * 1024 * 1024 * 1024
+      }
+      VSphereCloud::Resources::Datastore.stub!(:new).
+          with(datastore_a_properties).and_return(datastore_a)
+
+      datastore_b = mock(:datastore_b)
+      datastore_b.stub(:name).and_return("b")
+      datastore_b_mob = mock(:datastore_b_mob)
+      datastore_b_properties = {
+          "name" => "b",
+          "summary.capacity" => 64 * 1024 * 1024 * 1024,
+          "summary.freeSpace" => 8 * 1024 * 1024 * 1024
+      }
+      VSphereCloud::Resources::Datastore.stub!(:new).
+          with(datastore_b_properties).and_return(datastore_b)
+
+      @client.should_receive(:get_properties).with(
+          [datastore_a_mob, datastore_b_mob], VimSdk::Vim::Datastore,
+          %w(summary.freeSpace summary.capacity name)).and_return(
+          {"a" => datastore_a_properties, "b" => datastore_b_properties})
+
+      VSphereCloud::Resources::Cluster.any_instance.stub(
+          :fetch_cluster_utilization)
+
+      cluster_config = mock(:cluster_config)
+      cluster_config.stub!(:name).and_return("foo")
+      cluster_config.stub!(:resource_pool).and_return(nil)
+      cluster = VSphereCloud::Resources::Cluster.new(@dc, cluster_config, {
+          "datastore" => [datastore_a_mob, datastore_b_mob]
+      })
+
+      cluster.ephemeral_datastores.should == {"a" => datastore_a}
+      cluster.persistent_datastores.should == {"b" => datastore_b}
+      cluster.shared_datastores.should be_empty
+    end
+
+    it "should fail to create a cluster with overlapped dedicated datastores" do
+      datastore_config = mock(:datastore_config)
+      datastore_config.stub!(:ephemeral_pattern).and_return(/[ab]/)
+      datastore_config.stub!(:persistent_pattern).and_return(/b/)
+      datastore_config.stub!(:allow_mixed).and_return(false)
+      @dc_config.stub!(:datastores).and_return(datastore_config)
+
+      datastore_a = mock(:datastore_a)
+      datastore_a.stub(:name).and_return("a")
+      datastore_a_mob = mock(:datastore_a_mob)
+      datastore_a_properties = {
+          "name" => "a"
+      }
+      VSphereCloud::Resources::Datastore.stub!(:new).
+          with(datastore_a_properties).and_return(datastore_a)
+
+      datastore_b = mock(:datastore_b)
+      datastore_b.stub(:name).and_return("b")
+      datastore_b_mob = mock(:datastore_b_mob)
+      datastore_b_properties = {
+          "name" => "b"
+      }
+      VSphereCloud::Resources::Datastore.stub!(:new).
+          with(datastore_b_properties).and_return(datastore_b)
+
+      @client.should_receive(:get_properties).with(
+          [datastore_a_mob, datastore_b_mob], VimSdk::Vim::Datastore,
+          %w(summary.freeSpace summary.capacity name)).and_return(
+          {"a" => datastore_a_properties, "b" => datastore_b_properties})
+
+      VSphereCloud::Resources::Cluster.any_instance.stub(
+          :fetch_cluster_utilization)
+
+      cluster_config = mock(:cluster_config)
+      cluster_config.stub!(:name).and_return("foo")
+      cluster_config.stub!(:resource_pool).and_return(nil)
+      expect {
+        VSphereCloud::Resources::Cluster.new(@dc, cluster_config, {
+            "datastore" => [datastore_a_mob, datastore_b_mob]
+        })
+      }.to raise_error /Datastore patterns are not mutually exclusive/
+    end
+
+    it "should create a cluster with shared datastores" do
+      datastore_config = mock(:datastore_config)
+      datastore_config.stub!(:ephemeral_pattern).and_return(/a/)
+      datastore_config.stub!(:persistent_pattern).and_return(/a/)
+      datastore_config.stub!(:allow_mixed).and_return(true)
+      @dc_config.stub!(:datastores).and_return(datastore_config)
+
+      datastore_a = mock(:datastore_a)
+      datastore_a.stub(:name).and_return("a")
+      datastore_a_mob = mock(:datastore_a_mob)
+      datastore_a_properties = {
+          "name" => "a",
+          "summary.capacity" => 128 * 1024 * 1024 * 1024,
+          "summary.freeSpace" => 32 * 1024 * 1024 * 1024
+      }
+      VSphereCloud::Resources::Datastore.stub!(:new).
+          with(datastore_a_properties).and_return(datastore_a)
+
+      @client.should_receive(:get_properties).with(
+          [datastore_a_mob], VimSdk::Vim::Datastore,
+          %w(summary.freeSpace summary.capacity name)).and_return(
+          {"a" => datastore_a_properties})
+
+      VSphereCloud::Resources::Cluster.any_instance.stub(
+          :fetch_cluster_utilization)
+
+      cluster_config = mock(:cluster_config)
+      cluster_config.stub!(:name).and_return("foo")
+      cluster_config.stub!(:resource_pool).and_return(nil)
+      cluster = VSphereCloud::Resources::Cluster.new(@dc, cluster_config, {
+          "datastore" => [datastore_a_mob]
+      })
+
+      cluster.ephemeral_datastores.should be_empty
+      cluster.persistent_datastores.should be_empty
+      cluster.shared_datastores.should == {"a" => datastore_a}
+    end
+
+    it "should create a cluster without a resource pool" do
+      host = mock(:host)
+      host_properties = {
+          :obj => host,
+          "runtime.inMaintenanceMode" => "false",
+          "hardware.memorySize" => 64 * 1024 * 1024 * 1024
+      }
+      host_counters = {
+          "cpu.usage.average" => "1000",
+          "mem.usage.average" => "5000"
+      }
+      @client.should_receive(:get_properties).with(
+          [], VimSdk::Vim::Datastore,
+          %w(summary.freeSpace summary.capacity name)).and_return(
+          {})
+      @client.should_receive(:get_properties).with(
+          [host], VimSdk::Vim::HostSystem,
+          %w(hardware.memorySize runtime.inMaintenanceMode),
+          {:ensure_all => true}).and_return(
+          {"foo" => host_properties})
+      @client.should_receive(:get_perf_counters).with(
+          [host], %w(cpu.usage.average mem.usage.average), {:max_sample => 5}).
+          and_return({"foo" => host_counters})
+      cluster_config = mock(:cluster_config)
+      cluster_config.stub!(:name).and_return("foo")
+      cluster_config.stub!(:resource_pool).and_return(nil)
+      cluster = VSphereCloud::Resources::Cluster.new(@dc, cluster_config, {
+          "datastore" => [],
+          "host" => [host]
+      })
+
+      cluster.free_memory.should == 32768
+      cluster.total_memory.should == 65536
+      cluster.idle_cpu.should == 0.9
+    end
+
+    it "should create a cluster with a resource pool" do
+      resource_pool = mock(:resource_pool)
+      resource_pool_mob = mock(:resource_pool_mob)
+      resource_pool.stub(:mob).and_return(resource_pool_mob)
+      VSphereCloud::Resources::ResourcePool.stub!(:new).
+          with(an_instance_of(VSphereCloud::Resources::Cluster), nil).
+          and_return(resource_pool)
+
+      summary = mock(:summary)
+      runtime = mock(:runtime)
+      runtime.stub(:overall_status).and_return("green")
+      cpu = mock(:cpu)
+      cpu.stub(:overall_usage).and_return(5)
+      cpu.stub(:max_usage).and_return(10)
+      runtime.stub(:cpu).and_return(cpu)
+      memory = mock(:memory)
+      memory.stub(:overall_usage).and_return(32 * 1024 * 1024 * 1024)
+      memory.stub(:max_usage).and_return(64 * 1024 * 1024 * 1024)
+      runtime.stub(:memory).and_return(memory)
+      summary.stub(:runtime).and_return(runtime)
+
+      @client.should_receive(:get_properties).with(
+          [], VimSdk::Vim::Datastore,
+          %w(summary.freeSpace summary.capacity name)).and_return(
+          {})
+      @client.should_receive(:get_properties).with(
+          resource_pool_mob, VimSdk::Vim::ResourcePool, %w(summary)).
+          and_return({"summary" => summary})
+      cluster_config = mock(:cluster_config)
+      cluster_config.stub!(:name).and_return("foo")
+      cluster_config.stub!(:resource_pool).and_return("baz")
+      cluster = VSphereCloud::Resources::Cluster.new(@dc, cluster_config, {
+          "datastore" => []
+      })
+
+      cluster.free_memory.should == 32768
+      cluster.total_memory.should == 65536
+      cluster.idle_cpu.should == 0.5
+    end
+
+    it "should create a cluster with an unhealthy resource pool" do
+      resource_pool = mock(:resource_pool)
+      resource_pool_mob = mock(:resource_pool_mob)
+      resource_pool.stub(:mob).and_return(resource_pool_mob)
+      VSphereCloud::Resources::ResourcePool.stub!(:new).
+          with(an_instance_of(VSphereCloud::Resources::Cluster), nil).
+          and_return(resource_pool)
+
+      summary = mock(:summary)
+      runtime = mock(:runtime)
+      runtime.stub(:overall_status).and_return("gray")
+      summary.stub(:runtime).and_return(runtime)
+
+      @client.should_receive(:get_properties).with(
+          [], VimSdk::Vim::Datastore,
+          %w(summary.freeSpace summary.capacity name)).and_return(
+          {})
+      @client.should_receive(:get_properties).with(
+          resource_pool_mob, VimSdk::Vim::ResourcePool, %w(summary)).
+          and_return({"summary" => summary})
+      cluster_config = mock(:cluster_config)
+      cluster_config.stub!(:name).and_return("foo")
+      cluster_config.stub!(:resource_pool).and_return("baz")
+      cluster = VSphereCloud::Resources::Cluster.new(@dc, cluster_config, {
+          "datastore" => []
+      })
+
+      cluster.free_memory.should == 0
+      cluster.total_memory.should == 0
+      cluster.idle_cpu.should == 0.0
+    end
+  end
+
+  describe :allocate do
+    it "should record the allocation against the cached utilization" do
+      @client.should_receive(:get_properties).with(
+          [], VimSdk::Vim::Datastore,
+          %w(summary.freeSpace summary.capacity name)).and_return(
+          {})
+
+      VSphereCloud::Resources::Cluster.any_instance.stub(
+          :fetch_cluster_utilization)
+
+      cluster_config = mock(:cluster_config)
+      cluster_config.stub!(:name).and_return("foo")
+      cluster_config.stub!(:resource_pool).and_return(nil)
+      cluster = VSphereCloud::Resources::Cluster.new(@dc, cluster_config, {
+          "datastore" => []
+      })
+      cluster.instance_eval { @synced_free_memory = 2048 }
+      cluster.allocate(1024)
+      cluster.free_memory.should == 1024
+    end
+  end
+
+  describe :pick_persistent do
+    before(:each) do
+      @client.should_receive(:get_properties).with(
+          [], VimSdk::Vim::Datastore,
+          %w(summary.freeSpace summary.capacity name)).and_return(
+          {})
+
+      VSphereCloud::Resources::Cluster.any_instance.stub(
+          :fetch_cluster_utilization)
+
+      cluster_config = mock(:cluster_config)
+      cluster_config.stub!(:name).and_return("foo")
+      cluster_config.stub!(:resource_pool).and_return(nil)
+      @cluster = VSphereCloud::Resources::Cluster.new(@dc, cluster_config, {
+          "datastore" => []
+      })
+    end
+
+    it "should only use persistent datastores if possible" do
+      datastore_a = mock(:datastore_a)
+      datastore_a.stub(:free_space).and_return(4096)
+      datastore_b = mock(:datastore_b)
+
+      @cluster.persistent_datastores["foo"] = datastore_a
+      @cluster.shared_datastores["bar"] = datastore_b
+
+      VSphereCloud::Resources::Util.should_receive(:weighted_random).
+          with([[datastore_a, 4096]]).and_return(datastore_a)
+
+      datastore = @cluster.pick_persistent(1024)
+      datastore.should == datastore_a
+    end
+
+    it "should filter out datastores that are low on free space" do
+      datastore_a = mock(:datastore_a)
+      datastore_a.stub(:free_space).and_return(2000)
+      datastore_b = mock(:datastore_b)
+      datastore_b.stub(:free_space).and_return(4096)
+
+      @cluster.persistent_datastores["foo"] = datastore_a
+      @cluster.shared_datastores["bar"] = datastore_b
+
+      VSphereCloud::Resources::Util.should_receive(:weighted_random).
+          with([[datastore_b, 4096]]).and_return(datastore_b)
+
+      datastore = @cluster.pick_persistent(1024)
+      datastore.should == datastore_b
+    end
+  end
+
+  describe :pick_ephemeral do
+    it "should only use ephemeral datastores if possible" do
+      @client.should_receive(:get_properties).with(
+          [], VimSdk::Vim::Datastore,
+          %w(summary.freeSpace summary.capacity name)).and_return(
+          {})
+
+      VSphereCloud::Resources::Cluster.any_instance.stub(
+          :fetch_cluster_utilization)
+
+      cluster_config = mock(:cluster_config)
+      cluster_config.stub!(:name).and_return("foo")
+      cluster_config.stub!(:resource_pool).and_return(nil)
+      @cluster = VSphereCloud::Resources::Cluster.new(@dc, cluster_config, {
+          "datastore" => []
+      })
+      datastore_a = mock(:datastore_a)
+      datastore_a.stub(:free_space).and_return(4096)
+      datastore_b = mock(:datastore_b)
+
+      @cluster.ephemeral_datastores["foo"] = datastore_a
+      @cluster.shared_datastores["bar"] = datastore_b
+
+      VSphereCloud::Resources::Util.should_receive(:weighted_random).
+          with([[datastore_a, 4096]]).and_return(datastore_a)
+
+      datastore = @cluster.pick_ephemeral(1024)
+      datastore.should == datastore_a
+    end
+  end
+end