vmpooler 0.16.3 → 1.0.0
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/lib/vmpooler.rb +9 -14
- data/lib/vmpooler/api/dashboard.rb +18 -20
- data/lib/vmpooler/api/helpers.rb +15 -15
- data/lib/vmpooler/api/v1.rb +51 -52
- data/lib/vmpooler/metrics/dummy_statsd.rb +0 -3
- data/lib/vmpooler/metrics/graphite.rb +2 -0
- data/lib/vmpooler/metrics/promstats.rb +2 -0
- data/lib/vmpooler/metrics/promstats/collector_middleware.rb +1 -1
- data/lib/vmpooler/metrics/statsd.rb +5 -3
- data/lib/vmpooler/pool_manager.rb +48 -55
- data/lib/vmpooler/providers/dummy.rb +20 -38
- data/lib/vmpooler/providers/vsphere.rb +48 -38
- data/lib/vmpooler/version.rb +1 -1
- metadata +38 -46
data/lib/vmpooler/metrics/graphite.rb:

@@ -7,6 +7,7 @@ module Vmpooler
   class Graphite < Metrics
     attr_reader :server, :port, :prefix

+    # rubocop:disable Lint/MissingSuper
     def initialize(logger, params = {})
       raise ArgumentError, "Graphite server is required. Config: #{params.inspect}" if params['server'].nil? || params['server'].empty?

@@ -15,6 +16,7 @@ module Vmpooler
       @prefix = params['prefix'] || 'vmpooler'
       @logger = logger
     end
+    # rubocop:enable Lint/MissingSuper

     def increment(label)
       log label, 1
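The new `rubocop:disable`/`rubocop:enable Lint/MissingSuper` pairs (here and in the promstats and statsd hunks below) acknowledge that these initializers override `Metrics#initialize` without calling `super`. A minimal sketch of the pattern the cop flags, with illustrative class bodies rather than vmpooler's:

```ruby
class Metrics
  def initialize
    @registered = true # parent setup that is skipped when a subclass omits `super`
  end
end

class Graphite < Metrics
  # rubocop:disable Lint/MissingSuper
  def initialize(server)
    @server = server # deliberate: no `super`, so @registered stays unset
  end
  # rubocop:enable Lint/MissingSuper
end

p Graphite.new('graphite.example.com') # @server set, @registered absent
```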
data/lib/vmpooler/metrics/promstats.rb:

@@ -23,6 +23,7 @@ module Vmpooler
     @p_metrics = {}
     @torun = []

+    # rubocop:disable Lint/MissingSuper
     def initialize(logger, params = {})
       @prefix = params['prefix'] || 'vmpooler'
       @prometheus_prefix = params['prometheus_prefix'] || 'vmpooler'
@@ -32,6 +33,7 @@ module Vmpooler
       # Setup up prometheus registry and data structures
       @prometheus = Prometheus::Client.registry
     end
+    # rubocop:enable Lint/MissingSuper

 =begin # rubocop:disable Style/BlockComments
 The Metrics table is used to register metrics and translate/interpret the incoming metrics.
data/lib/vmpooler/metrics/promstats/collector_middleware.rb:

@@ -113,7 +113,7 @@ module Vmpooler
       # Similarly, request IDs are also stripped from the /ondemand path.
       path
         .gsub(%r{/vm/.+$}, '/vm')
-        .gsub(%r{/
+        .gsub(%r{/ondemandvm/.+$}, '/ondemandvm')
         .gsub(%r{/token/.+$}, '/token')
         .gsub(%r{/lib/.+$}, '/lib')
         .gsub(%r{/img/.+$}, '/img')
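The removed line is truncated in this rendering, but the added line shows the intent: request IDs under `/ondemandvm/` are collapsed the same way VM hostnames under `/vm/` are, keeping Prometheus label cardinality low. A sketch of the normalisation with hypothetical paths (the method name here is illustrative):

```ruby
def strip_ids_from_path(path)
  # Mirrors the gsub chain in the hunk above.
  path
    .gsub(%r{/vm/.+$}, '/vm')
    .gsub(%r{/ondemandvm/.+$}, '/ondemandvm')
    .gsub(%r{/token/.+$}, '/token')
end

puts strip_ids_from_path('/api/v1/vm/fuzzy-weasel')     # => /api/v1/vm
puts strip_ids_from_path('/api/v1/ondemandvm/req-1234') # => /api/v1/ondemandvm
```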
data/lib/vmpooler/metrics/statsd.rb:

@@ -8,6 +8,7 @@ module Vmpooler
   class Statsd < Metrics
     attr_reader :server, :port, :prefix

+    # rubocop:disable Lint/MissingSuper
     def initialize(logger, params = {})
       raise ArgumentError, "Statsd server is required. Config: #{params.inspect}" if params['server'].nil? || params['server'].empty?

@@ -17,21 +18,22 @@ module Vmpooler
       @server = ::Statsd.new(host, @port)
       @logger = logger
     end
+    # rubocop:enable Lint/MissingSuper

     def increment(label)
-      server.increment(prefix
+      server.increment("#{prefix}.#{label}")
     rescue StandardError => e
       @logger.log('s', "[!] Failure incrementing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
     end

     def gauge(label, value)
-      server.gauge(prefix
+      server.gauge("#{prefix}.#{label}", value)
     rescue StandardError => e
       @logger.log('s', "[!] Failure updating gauge #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
     end

     def timing(label, duration)
-      server.timing(prefix
+      server.timing("#{prefix}.#{label}", duration)
     rescue StandardError => e
       @logger.log('s', "[!] Failure updating timing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
     end
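The removed calls are truncated above, but each replacement builds the metric name with string interpolation, `"#{prefix}.#{label}"`. Interpolation calls `to_s` on its parts, so it tolerates non-String labels where `String#+` would raise; a quick illustration:

```ruby
prefix = 'vmpooler'
label  = :checkout # labels are not always plain Strings

puts "#{prefix}.#{label}" # => vmpooler.checkout (interpolation calls #to_s)

begin
  prefix + '.' + label # concatenation demands a String
rescue TypeError => e
  puts e.message # => no implicit conversion of Symbol into String
end
```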
data/lib/vmpooler/pool_manager.rb:

@@ -108,7 +108,7 @@ module Vmpooler
         $logger.log('d', "[!] [#{pool}] '#{vm}' no longer exists. Removing from pending.")
       end

-    def fail_pending_vm(vm, pool, timeout, redis, exists
+    def fail_pending_vm(vm, pool, timeout, redis, exists: true)
       clone_stamp = redis.hget("vmpooler__vm__#{vm}", 'clone')

       time_since_clone = (Time.now - Time.parse(clone_stamp)) / 60
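`fail_pending_vm`'s trailing flag becomes a keyword argument (the removed signature is truncated above, but the call-site hunk near the end of this file's diff changes `redis, false` to `redis, exists: false` to match). The gain is that a bare boolean at the call site acquires a name; a sketch with an illustrative body:

```ruby
# Only the signature mirrors the hunk above; the body is illustrative.
def fail_pending_vm(vm, pool, timeout, redis, exists: true)
  "#{vm}/#{pool}: timeout=#{timeout}m exists=#{exists}"
end

puts fail_pending_vm('fuzzy-weasel', 'debian-11', 10, nil, exists: false)
```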
@@ -117,7 +117,7 @@ module Vmpooler
       request_id = redis.hget("vmpooler__vm__#{vm}", 'request_id')
       pool_alias = redis.hget("vmpooler__vm__#{vm}", 'pool_alias') if request_id
       redis.multi
-      redis.smove(
+      redis.smove("vmpooler__pending__#{pool}", "vmpooler__completed__#{pool}", vm)
       redis.zadd('vmpooler__odcreate__task', 1, "#{pool_alias}:#{pool}:1:#{request_id}") if request_id
       redis.exec
       $metrics.increment("errors.markedasfailed.#{pool}")
@@ -133,15 +133,16 @@ module Vmpooler
     end

     def move_pending_vm_to_ready(vm, pool, redis, request_id = nil)
-      clone_time = redis.hget(
+      clone_time = redis.hget("vmpooler__vm__#{vm}", 'clone')
       finish = format('%<time>.2f', time: Time.now - Time.parse(clone_time))

       if request_id
         ondemandrequest_hash = redis.hgetall("vmpooler__odrequest__#{request_id}")
-
+        case ondemandrequest_hash['status']
+        when 'failed'
           move_vm_queue(pool, vm, 'pending', 'completed', redis, "moved to completed queue. '#{request_id}' could not be filled in time")
           return nil
-
+        when 'deleted'
           move_vm_queue(pool, vm, 'pending', 'completed', redis, "moved to completed queue. '#{request_id}' has been deleted")
           return nil
         end
@@ -160,11 +161,11 @@ module Vmpooler
         move_vm_queue(pool, vm, 'pending', 'running', redis)
         check_ondemand_request_ready(request_id, redis)
       else
-        redis.smove(
+        redis.smove("vmpooler__pending__#{pool}", "vmpooler__ready__#{pool}", vm)
       end

       redis.pipelined do
-        redis.hset(
+        redis.hset("vmpooler__boot__#{Date.today}", "#{pool}:#{vm}", finish) # maybe remove as this is never used by vmpooler itself?
         redis.hset("vmpooler__vm__#{vm}", 'ready', Time.now)

         # last boot time is displayed in API, and used by alarming script
@@ -203,11 +204,11 @@ module Vmpooler

       mutex.synchronize do
         @redis.with_metrics do |redis|
-          check_stamp = redis.hget(
+          check_stamp = redis.hget("vmpooler__vm__#{vm}", 'check')
           last_checked_too_soon = ((Time.now - Time.parse(check_stamp)).to_i < $config[:config]['vm_checktime'] * 60) if check_stamp
           break if check_stamp && last_checked_too_soon

-          redis.hset(
+          redis.hset("vmpooler__vm__#{vm}", 'check', Time.now)
           # Check if the hosts TTL has expired
           # if 'boottime' is nil, set bootime to beginning of unix epoch, forces TTL to be assumed expired
           boottime = redis.hget("vmpooler__vm__#{vm}", 'ready')
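The re-check throttle reconstructed in the hunk above skips a VM whose `check` stamp is younger than `vm_checktime` minutes. A worked sketch with hypothetical values:

```ruby
require 'time'

vm_checktime = 15                         # minutes, per $config[:config]['vm_checktime']
check_stamp  = (Time.now - (5 * 60)).to_s # stamp written 5 minutes ago

last_checked_too_soon = (Time.now - Time.parse(check_stamp)).to_i < vm_checktime * 60
puts last_checked_too_soon # => true, so the caller breaks before re-checking
```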
@@ -217,7 +218,7 @@ module Vmpooler
             boottime = Time.at(0)
           end
           if (Time.now - boottime).to_i > ttl * 60
-            redis.smove(
+            redis.smove("vmpooler__ready__#{pool_name}", "vmpooler__completed__#{pool_name}", vm)

             $logger.log('d', "[!] [#{pool_name}] '#{vm}' reached end of TTL after #{ttl} minutes, removed from 'ready' queue")
             return nil
@@ -256,7 +257,7 @@ module Vmpooler
       return if hostname.empty?
       return if hostname == vm

-      redis.smove(
+      redis.smove("vmpooler__ready__#{pool_name}", "vmpooler__completed__#{pool_name}", vm)
       $logger.log('d', "[!] [#{pool_name}] '#{vm}' has mismatched hostname #{hostname}, removed from 'ready' queue")
       true
     end
@@ -280,7 +281,7 @@ module Vmpooler
       catch :stop_checking do
         @redis.with_metrics do |redis|
           # Check that VM is within defined lifetime
-          checkouttime = redis.hget(
+          checkouttime = redis.hget("vmpooler__active__#{pool}", vm)
           if checkouttime
             time_since_checkout = Time.now - Time.parse(checkouttime)
             running = time_since_checkout / 60 / 60
@@ -341,7 +342,7 @@ module Vmpooler
         adjective = @name_generator.adjective(max: 14 - noun.length)
         random_name = [adjective, noun].join('-')
         hostname = $config[:config]['prefix'] + random_name
-        available = redis.hlen(
+        available = redis.hlen("vmpooler__vm__#{hostname}") == 0

         [hostname, available]
       end
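`redis.hlen(...) == 0` works as an availability probe because `HLEN` returns 0 for a key that does not exist, so a hostname with no `vmpooler__vm__<name>` hash has never been registered. A sketch (assumes a reachable Redis and the `redis` gem; the hostname is illustrative):

```ruby
require 'redis'

redis    = Redis.new # assumes a local Redis
hostname = 'pooler-fuzzy-weasel'

available = redis.hlen("vmpooler__vm__#{hostname}") == 0
puts "#{hostname} available? #{available}" # => true when the hash is absent
```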
@@ -395,12 +396,12 @@ module Vmpooler
       @redis.with_metrics do |redis|
         # Add VM to Redis inventory ('pending' pool)
         redis.multi
-        redis.sadd(
-        redis.hset(
-        redis.hset(
-        redis.hset(
-        redis.hset(
-        redis.hset(
+        redis.sadd("vmpooler__pending__#{pool_name}", new_vmname)
+        redis.hset("vmpooler__vm__#{new_vmname}", 'clone', Time.now)
+        redis.hset("vmpooler__vm__#{new_vmname}", 'template', pool_name) # This value is used to represent the pool.
+        redis.hset("vmpooler__vm__#{new_vmname}", 'pool', pool_name)
+        redis.hset("vmpooler__vm__#{new_vmname}", 'request_id', request_id) if request_id
+        redis.hset("vmpooler__vm__#{new_vmname}", 'pool_alias', pool_alias) if pool_alias
         redis.exec
       end

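The registration writes are wrapped in `redis.multi` … `redis.exec`, so the new VM either appears in the pending set with all of its hash fields or not at all. A sketch of the same grouping using redis-rb's block form, which issues `EXEC` automatically (names and values are illustrative):

```ruby
require 'redis'

redis = Redis.new # assumes a local Redis
pool_name  = 'debian-11'
new_vmname = 'fuzzy-weasel'

redis.multi do |tx|
  tx.sadd("vmpooler__pending__#{pool_name}", new_vmname)
  tx.hset("vmpooler__vm__#{new_vmname}", 'clone', Time.now.to_s)
  tx.hset("vmpooler__vm__#{new_vmname}", 'pool', pool_name)
end
```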
@@ -412,8 +413,8 @@ module Vmpooler

       @redis.with_metrics do |redis|
         redis.pipelined do
-          redis.hset(
-          redis.hset(
+          redis.hset("vmpooler__clone__#{Date.today}", "#{pool_name}:#{new_vmname}", finish)
+          redis.hset("vmpooler__vm__#{new_vmname}", 'clone_time', finish)
         end
       end
       $logger.log('s', "[+] [#{pool_name}] '#{new_vmname}' cloned in #{finish} seconds")
@@ -456,18 +457,18 @@ module Vmpooler
       mutex.synchronize do
         @redis.with_metrics do |redis|
           redis.pipelined do
-            redis.hdel(
-            redis.hset(
+            redis.hdel("vmpooler__active__#{pool}", vm)
+            redis.hset("vmpooler__vm__#{vm}", 'destroy', Time.now)

             # Auto-expire metadata key
-            redis.expire(
+            redis.expire("vmpooler__vm__#{vm}", ($config[:redis]['data_ttl'].to_i * 60 * 60))
           end

           start = Time.now

           provider.destroy_vm(pool, vm)

-          redis.srem(
+          redis.srem("vmpooler__completed__#{pool}", vm)

           finish = format('%<time>.2f', time: Time.now - start)
           $logger.log('s', "[-] [#{pool}] '#{vm}' destroyed in #{finish} seconds")
@@ -603,10 +604,10 @@ module Vmpooler

       if result
         @redis.with_metrics do |redis|
-          rdisks = redis.hget(
+          rdisks = redis.hget("vmpooler__vm__#{vm_name}", 'disk')
           disks = rdisks ? rdisks.split(':') : []
           disks.push("+#{disk_size}gb")
-          redis.hset(
+          redis.hset("vmpooler__vm__#{vm_name}", 'disk', disks.join(':'))
         end

         $logger.log('s', "[+] [disk_manager] '#{vm_name}' attached #{disk_size}gb disk in #{finish} seconds")
@@ -638,7 +639,7 @@ module Vmpooler

       if result
         @redis.with_metrics do |redis|
-          redis.hset(
+          redis.hset("vmpooler__vm__#{vm_name}", "snapshot:#{snapshot_name}", Time.now.to_s)
         end
         $logger.log('s', "[+] [snapshot_manager] '#{vm_name}' snapshot created in #{finish} seconds")
       else
@@ -697,7 +698,7 @@ module Vmpooler

     def get_pool_name_for_vm(vm_name, redis)
       # the 'template' is a bad name. Should really be 'poolname'
-      redis.hget(
+      redis.hget("vmpooler__vm__#{vm_name}", 'template')
     end

     # @param pool_name [String] - the name of the pool
@@ -867,16 +868,12 @@ module Vmpooler

         if options[:clone_target_change]
           clone_target = redis.hget('vmpooler__config__clone_target}', options[:poolname])
-          if clone_target
-            break unless clone_target == initial_clone_target
-          end
+          break if clone_target && clone_target != initial_clone_target
         end

         if options[:pool_template_change]
           configured_template = redis.hget('vmpooler__config__template', options[:poolname])
-          if configured_template
-            break unless initial_template == configured_template
-          end
+          break if configured_template && initial_template != configured_template
         end

         if options[:pool_reset]
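Both rewrites in the hunk above fold a nested `if`/`break unless` into a single guard; the two forms break under exactly the same condition, namely when a value is configured and differs from the initial one:

```ruby
def should_break?(configured, initial)
  # equivalent to: if configured then break unless configured == initial end
  !configured.nil? && configured != initial
end

puts should_break?(nil, 'cluster-a')         # => false (nothing configured)
puts should_break?('cluster-a', 'cluster-a') # => false (unchanged)
puts should_break?('cluster-b', 'cluster-a') # => true  (break)
```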
@@ -970,21 +967,17 @@ module Vmpooler
     def sync_pool_template(pool)
       @redis.with_metrics do |redis|
         pool_template = redis.hget('vmpooler__config__template', pool['name'])
-        if pool_template
-          pool['template'] = pool_template unless pool['template'] == pool_template
-        end
+        pool['template'] = pool_template if pool_template && pool['template'] != pool_template
       end
     end

     def prepare_template(pool, provider, redis)
-      if $config[:config]['create_template_delta_disks']
-
-
-
-
-
-          $logger.log('s', "[!] [#{pool['name']}] failed while preparing a template with an error. As a result vmpooler could not create the template delta disks. Either a template delta disk already exists, or the template delta disk creation failed. The error is: #{e}")
-      end
+      if $config[:config]['create_template_delta_disks'] && !redis.sismember('vmpooler__template__deltas', pool['template'])
+        begin
+          provider.create_template_delta_disks(pool)
+          redis.sadd('vmpooler__template__deltas', pool['template'])
+        rescue StandardError => e
+          $logger.log('s', "[!] [#{pool['name']}] failed while preparing a template with an error. As a result vmpooler could not create the template delta disks. Either a template delta disk already exists, or the template delta disk creation failed. The error is: #{e}")
+        end
       end
       redis.hset('vmpooler__template__prepared', pool['name'], pool['template'])
@@ -1135,15 +1128,15 @@ module Vmpooler
       mutex.synchronize do
         @redis.with_metrics do |redis|
           provider.vms_in_pool(pool['name']).each do |vm|
-            if !redis.sismember(
-               !redis.sismember(
-               !redis.sismember(
-               !redis.sismember(
-               !redis.sismember(
-               !redis.sismember(
+            if !redis.sismember("vmpooler__running__#{pool['name']}", vm['name']) &&
+               !redis.sismember("vmpooler__ready__#{pool['name']}", vm['name']) &&
+               !redis.sismember("vmpooler__pending__#{pool['name']}", vm['name']) &&
+               !redis.sismember("vmpooler__completed__#{pool['name']}", vm['name']) &&
+               !redis.sismember("vmpooler__discovered__#{pool['name']}", vm['name']) &&
+               !redis.sismember("vmpooler__migrating__#{pool['name']}", vm['name'])

               pool_check_response[:discovered_vms] += 1
-              redis.sadd(
+              redis.sadd("vmpooler__discovered__#{pool['name']}", vm['name'])

               $logger.log('s', "[?] [#{pool['name']}] '#{vm['name']}' added to 'discovered' queue")
             end
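The discovery check consults six queue sets for the same pool/VM pair. An equivalent spelling that iterates over the queue names, shown only as a sketch of the shape of the test, not as code vmpooler ships:

```ruby
QUEUES = %w[running ready pending completed discovered migrating].freeze

# True when the VM is already tracked in any queue for this pool.
def in_any_queue?(redis, pool_name, vm_name)
  QUEUES.any? { |q| redis.sismember("vmpooler__#{q}__#{pool_name}", vm_name) }
end
```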
@@ -1164,7 +1157,7 @@ module Vmpooler
         redis.smembers("vmpooler__running__#{pool_name}").each do |vm|
           if inventory[vm]
             begin
-              vm_lifetime = redis.hget(
+              vm_lifetime = redis.hget("vmpooler__vm__#{vm}", 'lifetime') || $config[:config]['vm_lifetime'] || 12
               pool_check_response[:checked_running_vms] += 1
               check_running_vm(vm, pool_name, vm_lifetime, provider)
             rescue StandardError => e
@@ -1206,7 +1199,7 @@ module Vmpooler
             $logger.log('d', "[!] [#{pool_name}] _check_pool failed with an error while evaluating pending VMs: #{e}")
           end
         else
-          fail_pending_vm(vm, pool_name, pool_timeout, redis, false)
+          fail_pending_vm(vm, pool_name, pool_timeout, redis, exists: false)
         end
       end
     end
data/lib/vmpooler/providers/dummy.rb:

@@ -79,7 +79,7 @@ module Vmpooler
       return current_vm['vm_host'] if rand(1..100) > provider_config['migratevm_couldmove_percent']

       # Simulate a 10 node cluster and randomly pick a different one
-      new_host =
+      new_host = "HOST#{rand(1..10)}" while new_host == current_vm['vm_host']

       new_host
     end
@@ -95,9 +95,7 @@ module Vmpooler
       end

       # Inject clone failure
-
-        raise('Dummy Failure for migratevm_fail_percent') if rand(1..100) <= provider_config['migratevm_fail_percent']
-      end
+      raise('Dummy Failure for migratevm_fail_percent') if !provider_config['migratevm_fail_percent'].nil? && rand(1..100) <= provider_config['migratevm_fail_percent']

       @write_lock.synchronize do
         current_vm = get_dummy_vm(pool_name, vm_name)
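This hunk establishes the shape repeated through the rest of the dummy provider's diff: the dice roll is guarded by a `nil?` check so an unconfigured `*_fail_percent` never raises. (Note that the destroy hunk further down still raises 'Dummy Failure for migratevm_fail_percent' while testing `destroyvm_fail_percent`, a copy-paste leftover carried over from the old code.) The pattern, extracted as a sketch with an illustrative helper name:

```ruby
def maybe_inject_failure(provider_config, key)
  pct = provider_config[key]
  raise("Dummy Failure for #{key}") if !pct.nil? && rand(1..100) <= pct
end

maybe_inject_failure({}, 'createvm_fail_percent') # no-op: percentage unconfigured

begin
  maybe_inject_failure({ 'createvm_fail_percent' => 100 }, 'createvm_fail_percent')
rescue RuntimeError => e
  puts e.message # => Dummy Failure for createvm_fail_percent
end
```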
@@ -116,27 +114,23 @@ module Vmpooler
       return nil if dummy.nil?

       # Randomly power off the VM
-
-
-
-
-
-          write_backing_file
-        end
-        logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy Powered Off")
+      if !(dummy['powerstate'] != 'PoweredOn' || provider_config['getvm_poweroff_percent'].nil?) && rand(1..100) <= provider_config['getvm_poweroff_percent']
+        @write_lock.synchronize do
+          dummy = get_dummy_vm(pool_name, vm_name)
+          dummy['powerstate'] = 'PoweredOff'
+          write_backing_file
         end
+        logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy Powered Off")
       end

       # Randomly rename the host
-
-
-
-
-
-          write_backing_file
-        end
-        logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy renamed")
+      if !(dummy['hostname'] != dummy['name'] || provider_config['getvm_rename_percent'].nil?) && rand(1..100) <= provider_config['getvm_rename_percent']
+        @write_lock.synchronize do
+          dummy = get_dummy_vm(pool_name, vm_name)
+          dummy['hostname'] = "DUMMY#{dummy['name']}"
+          write_backing_file
         end
+        logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy renamed")
       end

       obj['name'] = dummy['name']
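The new guards read as double negatives; by De Morgan's laws, `!(a || b)` is `!a && !b`, so the power-off condition is simply "the VM is powered on and a percentage is configured". A check of the equivalence:

```ruby
def guard(powerstate, pct)
  !(powerstate != 'PoweredOn' || pct.nil?)
end

def unpacked(powerstate, pct)
  powerstate == 'PoweredOn' && !pct.nil?
end

[['PoweredOn', 50], ['PoweredOn', nil], ['PoweredOff', 50]].each do |state, pct|
  puts guard(state, pct) == unpacked(state, pct) # => true in every case
end
```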
@@ -196,9 +190,7 @@ module Vmpooler

       begin
         # Inject clone failure
-
-          raise('Dummy Failure for createvm_fail_percent') if rand(1..100) <= provider_config['createvm_fail_percent']
-        end
+        raise('Dummy Failure for createvm_fail_percent') if !provider_config['createvm_fail_percent'].nil? && rand(1..100) <= provider_config['createvm_fail_percent']

         # Assert the VM is ready for use
         @write_lock.synchronize do
@@ -229,9 +221,7 @@ module Vmpooler
       end

       # Inject create failure
-
-        raise('Dummy Failure for createdisk_fail_percent') if rand(1..100) <= provider_config['createdisk_fail_percent']
-      end
+      raise('Dummy Failure for createdisk_fail_percent') if !provider_config['createdisk_fail_percent'].nil? && rand(1..100) <= provider_config['createdisk_fail_percent']

       @write_lock.synchronize do
         vm_object = get_dummy_vm(pool_name, vm_name)
@@ -255,9 +245,7 @@ module Vmpooler
       end

       # Inject create failure
-
-        raise('Dummy Failure for createsnapshot_fail_percent') if rand(1..100) <= provider_config['createsnapshot_fail_percent']
-      end
+      raise('Dummy Failure for createsnapshot_fail_percent') if !provider_config['createsnapshot_fail_percent'].nil? && rand(1..100) <= provider_config['createsnapshot_fail_percent']

       @write_lock.synchronize do
         vm_object = get_dummy_vm(pool_name, vm_name)
@@ -282,9 +270,7 @@ module Vmpooler
       end

       # Inject create failure
-
-        raise('Dummy Failure for revertsnapshot_fail_percent') if rand(1..100) <= provider_config['revertsnapshot_fail_percent']
-      end
+      raise('Dummy Failure for revertsnapshot_fail_percent') if !provider_config['revertsnapshot_fail_percent'].nil? && rand(1..100) <= provider_config['revertsnapshot_fail_percent']
     end

     vm_object['snapshots'].include?(snapshot_name)
@@ -320,9 +306,7 @@ module Vmpooler
       end

       # Inject destroy VM failure
-
-        raise('Dummy Failure for migratevm_fail_percent') if rand(1..100) <= provider_config['destroyvm_fail_percent']
-      end
+      raise('Dummy Failure for migratevm_fail_percent') if !provider_config['destroyvm_fail_percent'].nil? && rand(1..100) <= provider_config['destroyvm_fail_percent']

       # 'Destroy' the VM
       @write_lock.synchronize do
@@ -354,9 +338,7 @@ module Vmpooler
       # it's ready to receive a connection
       sleep(2)

-
-        raise('Dummy Failure for vmready_fail_percent') if rand(1..100) <= provider_config['vmready_fail_percent']
-      end
+      raise('Dummy Failure for vmready_fail_percent') if !provider_config['vmready_fail_percent'].nil? && rand(1..100) <= provider_config['vmready_fail_percent']

       @write_lock.synchronize do
         vm_object['ready'] = true