opennebula 5.12.11 → 5.12.13
- checksums.yaml +5 -5
- data/lib/ActionManager.rb +1 -1
- data/lib/CommandManager.rb +1 -1
- data/lib/DriverExecHelper.rb +1 -1
- data/lib/OpenNebulaDriver.rb +1 -1
- data/lib/VirtualMachineDriver.rb +1 -1
- data/lib/cloud/CloudClient.rb +3 -3
- data/lib/models/role.rb +1095 -0
- data/lib/models/service.rb +648 -0
- data/lib/models/service_pool.rb +166 -0
- data/lib/models/service_template.rb +503 -0
- data/lib/models/service_template_pool.rb +32 -0
- data/lib/models.rb +32 -0
- data/lib/opennebula/acl.rb +1 -1
- data/lib/opennebula/acl_pool.rb +1 -1
- data/lib/opennebula/client.rb +1 -1
- data/lib/opennebula/cluster.rb +1 -1
- data/lib/opennebula/cluster_pool.rb +1 -1
- data/lib/opennebula/datastore.rb +1 -1
- data/lib/opennebula/datastore_pool.rb +1 -1
- data/lib/opennebula/document.rb +1 -1
- data/lib/opennebula/document_json.rb +1 -1
- data/lib/opennebula/document_pool.rb +1 -1
- data/lib/opennebula/document_pool_json.rb +1 -1
- data/lib/opennebula/error.rb +1 -1
- data/lib/opennebula/group.rb +1 -1
- data/lib/opennebula/group_pool.rb +1 -1
- data/lib/opennebula/hook.rb +1 -1
- data/lib/opennebula/hook_log.rb +1 -1
- data/lib/opennebula/hook_pool.rb +1 -1
- data/lib/opennebula/host.rb +1 -1
- data/lib/opennebula/host_pool.rb +1 -1
- data/lib/opennebula/image.rb +1 -1
- data/lib/opennebula/image_pool.rb +1 -1
- data/lib/opennebula/ldap_auth.rb +1 -1
- data/lib/opennebula/ldap_auth_spec.rb +1 -1
- data/lib/opennebula/marketplace.rb +1 -1
- data/lib/opennebula/marketplace_pool.rb +1 -1
- data/lib/opennebula/marketplaceapp.rb +1 -1
- data/lib/opennebula/marketplaceapp_pool.rb +1 -1
- data/lib/opennebula/oneflow_client.rb +1 -1
- data/lib/opennebula/pool.rb +1 -1
- data/lib/opennebula/pool_element.rb +1 -1
- data/lib/opennebula/security_group.rb +1 -1
- data/lib/opennebula/security_group_pool.rb +1 -1
- data/lib/opennebula/server_cipher_auth.rb +1 -1
- data/lib/opennebula/server_x509_auth.rb +1 -1
- data/lib/opennebula/ssh_auth.rb +1 -1
- data/lib/opennebula/system.rb +1 -1
- data/lib/opennebula/template.rb +1 -1
- data/lib/opennebula/template_pool.rb +1 -1
- data/lib/opennebula/user.rb +1 -1
- data/lib/opennebula/user_pool.rb +1 -1
- data/lib/opennebula/utils.rb +1 -1
- data/lib/opennebula/vdc.rb +1 -1
- data/lib/opennebula/vdc_pool.rb +1 -1
- data/lib/opennebula/virtual_machine.rb +1 -1
- data/lib/opennebula/virtual_machine_pool.rb +1 -1
- data/lib/opennebula/virtual_network.rb +1 -1
- data/lib/opennebula/virtual_network_pool.rb +1 -1
- data/lib/opennebula/virtual_router.rb +1 -1
- data/lib/opennebula/virtual_router_pool.rb +1 -1
- data/lib/opennebula/vm_group.rb +1 -1
- data/lib/opennebula/vm_group_pool.rb +1 -1
- data/lib/opennebula/vntemplate.rb +1 -1
- data/lib/opennebula/vntemplate_pool.rb +1 -1
- data/lib/opennebula/x509_auth.rb +1 -1
- data/lib/opennebula/xml_element.rb +1 -1
- data/lib/opennebula/xml_pool.rb +1 -1
- data/lib/opennebula/xml_utils.rb +1 -1
- data/lib/opennebula/zone.rb +1 -1
- data/lib/opennebula/zone_pool.rb +1 -1
- data/lib/opennebula.rb +2 -2
- data/lib/vcenter_driver.rb +1 -1
- metadata +93 -46
data/lib/models/role.rb
ADDED
@@ -0,0 +1,1095 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2023, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

require 'treetop'
require 'treetop/version'
require 'grammar'
require 'parse-cron'

if Gem::Version.create('1.6.3') >= Gem.loaded_specs['treetop'].version
    raise 'treetop gem version must be >= 1.6.3.' \
          "Current version is #{Treetop::VERSION::STRING}"
end

module OpenNebula

    # Service Role class
    class Role

        attr_reader :service

        # Actions that can be performed on the VMs of a given Role
        SCHEDULE_ACTIONS = %w[
            terminate
            terminate-hard
            undeploy
            undeploy-hard
            hold
            release
            stop
            suspend
            resume
            reboot
            reboot-hard
            poweroff
            poweroff-hard
            snapshot-create
            snapshot-revert
            snapshot-delete
            disk-snapshot-create
            disk-snapshot-revert
            disk-snapshot-delete
        ]

        STATE = {
            'PENDING'            => 0,
            'DEPLOYING'          => 1,
            'RUNNING'            => 2,
            'UNDEPLOYING'        => 3,
            'WARNING'            => 4,
            'DONE'               => 5,
            'FAILED_UNDEPLOYING' => 6,
            'FAILED_DEPLOYING'   => 7,
            'SCALING'            => 8,
            'FAILED_SCALING'     => 9,
            'COOLDOWN'           => 10
        }

        STATE_STR = %w[
            PENDING
            DEPLOYING
            RUNNING
            UNDEPLOYING
            WARNING
            DONE
            FAILED_UNDEPLOYING
            FAILED_DEPLOYING
            SCALING
            FAILED_SCALING
            COOLDOWN
        ]
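
        # Note: STATE_STR is the positional inverse of STATE, so
        # STATE_STR[STATE['RUNNING']] == 'RUNNING'; state_str below relies
        # on this ordering.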

        RECOVER_DEPLOY_STATES = %w[
            FAILED_DEPLOYING
            DEPLOYING
            PENDING
        ]

        RECOVER_UNDEPLOY_STATES = %w[
            FAILED_UNDEPLOYING
            UNDEPLOYING
        ]

        RECOVER_SCALE_STATES = %w[
            FAILED_SCALING
            SCALING
        ]

        VM_FAILURE_STATES = %w[
            BOOT_FAILURE
            BOOT_MIGRATE_FAILURE
            PROLOG_MIGRATE_FAILURE
            PROLOG_FAILURE
            EPILOG_FAILURE
            EPILOG_STOP_FAILURE
            EPILOG_UNDEPLOY_FAILURE
            PROLOG_MIGRATE_POWEROFF_FAILURE
            PROLOG_MIGRATE_SUSPEND_FAILURE
            PROLOG_MIGRATE_UNKNOWN_FAILURE
            BOOT_UNDEPLOY_FAILURE
            BOOT_STOPPED_FAILURE
            PROLOG_RESUME_FAILURE
            PROLOG_UNDEPLOY_FAILURE
        ]

        SCALE_WAYS = {
            'UP'   => 0,
            'DOWN' => 1
        }

        # VM information to save in document
        VM_INFO = %w[ID UID GID UNAME GNAME NAME]

        LOG_COMP = 'ROL'

        def initialize(body, service)
            @body = body
            @service = service

            @body['cooldown'] = @@default_cooldown if @body['cooldown'].nil?
            @body['nodes'] ||= []
        end

        def name
            @body['name']
        end

        # Returns the role state
        # @return [Integer] the role state
        def state
            @body['state'].to_i
        end

        def can_recover_deploy?
            if state != STATE['PENDING']
                return RECOVER_DEPLOY_STATES.include? STATE_STR[state]
            end

            parents.each do |parent|
                next unless @service.roles[parent]

                return false if @service.roles[parent].state != STATE['RUNNING']
            end

            true
        end

        def can_recover_undeploy?
            if !RECOVER_UNDEPLOY_STATES.include? STATE_STR[state]
                # TODO: if there are children, check whether they can be
                # undeployed
                @service.roles.each do |role_name, role|
                    next if role_name == name

                    if role.parents.include? name
                        return false if role.state != STATE['DONE']
                    end
                end
            end

            true
        end

        def can_recover_scale?
            return false unless RECOVER_SCALE_STATES.include? STATE_STR[state]

            true
        end

        # Returns the role parents
        # @return [Array] the role parents
        def parents
            @body['parents'] || []
        end

        # Returns the role cardinality
        # @return [Integer] the role cardinality
        def cardinality
            @body['cardinality'].to_i
        end

        # Sets a new cardinality for this role
        # @param [Integer] the new cardinality
        # rubocop:disable Naming/AccessorMethodName
        def set_cardinality(target_cardinality)
            # rubocop:enable Naming/AccessorMethodName
            if target_cardinality > cardinality
                dir = 'up'
            else
                dir = 'down'
            end

            msg = "Role #{name} scaling #{dir} from #{cardinality} to " \
                  "#{target_cardinality} nodes"

            Log.info LOG_COMP, msg, @service.id

            @service.log_info(msg)

            @body['cardinality'] = target_cardinality.to_i
        end

        # Returns the role max cardinality
        # @return [Integer,nil] the max cardinality or nil if it isn't defined
        def max_cardinality
            max = @body['max_vms']

            return if max.nil?

            max.to_i
        end

        # Returns the role min cardinality
        # @return [Integer,nil] the min cardinality or nil if it isn't defined
        def min_cardinality
            min = @body['min_vms']

            return if min.nil?

            min.to_i
        end

        # Returns the string representation of the role state
        # @return [String] the state string
        def state_str
            STATE_STR[state]
        end

        # Returns the nodes of the role
        # @return [Array] the nodes
        def nodes
            @body['nodes']
        end

        def info_nodes(vm_pool)
            ret = []

            monitoring = vm_pool[:monitoring]
            vm_pool = vm_pool[:vm_pool]

            @body['nodes'].each do |node|
                id = node['deploy_id']
                vm = vm_pool.retrieve_xmlelements("/VM_POOL/VM[ID=#{id}]")[0]

                if vm.nil?
                    Log.error LOG_COMP,
                              "Error getting VM #{id}",
                              @service.id
                else
                    obj = {}
                    obj['deploy_id'] = node['deploy_id']

                    hash = vm.to_hash
                    vm_monit = monitoring.select {|v| v['ID'].to_i == id }[0]

                    hash['VM']['MONITORING'] = vm_monit if vm_monit
                    obj['vm_info'] = hash

                    ret << obj
                end
            end

            ret
        end

        def nodes_ids
            @body['nodes'].map {|node| node['deploy_id'] }
        end

        def elasticity_policies
            @body['elasticity_policies']
        end

        def update_elasticity_policies(new_policies)
            @body['elasticity_policies'] = new_policies
        end

        def scheduled_policies
            @body['scheduled_policies']
        end

        def update_scheduled_policies(new_policies)
            @body['scheduled_policies'] = new_policies
        end

        def cooldown
            @body['cooldown']
        end

        def update_cooldown(new_cooldown)
            @body['cooldown'] = new_cooldown unless new_cooldown.nil?
        end

        # Sets a new state
        # @param [Integer] the new state
        # @return [true, false] true if the value was changed
        # rubocop:disable Naming/AccessorMethodName
        def set_state(state)
            # rubocop:enable Naming/AccessorMethodName
            if state < 0 || state > STATE_STR.size
                return false
            end

            @body['state'] = state.to_i

            if state == STATE['SCALING']

                elasticity_pol = @body['elasticity_policies']

                if !elasticity_pol.nil?
                    elasticity_pol.each do |policy|
                        policy.delete('true_evals')
                    end
                end
            end

            Log.info LOG_COMP,
                     "Role #{name} new state: #{STATE_STR[state]}",
                     @service.id

            true
        end

        def scale_way(way)
            @body['scale_way'] = SCALE_WAYS[way]
        end

        def clean_scale_way
            @body.delete('scale_way')
        end

        # Retrieves the VM information for each Node in this Role. If a Node
        # is to be disposed and it is found in DONE, it will be cleaned
        #
        # @return [nil, OpenNebula::Error] nil in case of success, Error
        #   otherwise
        def info
            raise 'role.info is not defined'
        end

        # Deploys all the nodes in this role
        # @return [Array<true, nil>, Array<false, String>] true if all the VMs
        #   were created, false and the error reason if there was a problem
        #   creating the VMs
        def deploy
            deployed_nodes = []
            n_nodes = cardinality - nodes.size

            return [deployed_nodes, nil] if n_nodes == 0

            @body['last_vmname'] ||= 0

            template_id = @body['vm_template']
            template = OpenNebula::Template.new_with_id(template_id,
                                                        @service.client)

            if @body['vm_template_contents']
                extra_template = @body['vm_template_contents'].dup

                # If the extra_template contains APPEND="<attr1>,<attr2>", it
                # will add the attributes that already exist in the template,
                # instead of replacing them.
                append = extra_template
                         .match(/^\s*APPEND=\"?(.*?)\"?\s*$/)[1]
                         .split(',') rescue nil

                if append && !append.empty?
                    rc = template.info

                    if OpenNebula.is_error?(rc)
                        msg = "Role #{name} : Info template #{template_id};" \
                              " #{rc.message}"

                        Log.error LOG_COMP, msg, @service.id
                        @service.log_error(msg)

                        return [false, 'Error fetching Info to instantiate' \
                                       " VM Template #{template_id} in Role " \
                                       "#{name}: #{rc.message}"]
                    end

                    et = template.template_like_str('TEMPLATE',
                                                    true,
                                                    append.join('|'))

                    et = et << "\n" << extra_template

                    extra_template = et
                end
            else
                extra_template = ''
            end

            extra_template << "\nSERVICE_ID = #{@service.id}"
            extra_template << "\nROLE_NAME = \"#{@body['name']}\""

            n_nodes.times do
                vm_name = @@vm_name_template
                          .gsub('$SERVICE_ID', @service.id.to_s)
                          .gsub('$SERVICE_NAME', @service.name.to_s)
                          .gsub('$ROLE_NAME', name.to_s)
                          .gsub('$VM_NUMBER', @body['last_vmname'].to_s)

                @body['last_vmname'] += 1

                Log.debug LOG_COMP,
                          "Role #{name} : Trying to instantiate " \
                          "template #{template_id}, with name #{vm_name}",
                          @service.id

                vm_id = template.instantiate(vm_name, false, extra_template)

                deployed_nodes << vm_id

                if OpenNebula.is_error?(vm_id)
                    msg = "Role #{name} : Instantiate failed for template " \
                          "#{template_id}; #{vm_id.message}"

                    Log.error LOG_COMP, msg, @service.id
                    @service.log_error(msg)

                    return [false, 'Error trying to instantiate the VM ' \
                                   "Template #{template_id} in Role " \
                                   "#{name}: #{vm_id.message}"]
                end

                Log.debug LOG_COMP, "Role #{name} : Instantiate success," \
                                    " VM ID #{vm_id}", @service.id
                node = {
                    'deploy_id' => vm_id
                }

                vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
                                                            @service.client)
                rc = vm.info

                if OpenNebula.is_error?(rc)
                    node['vm_info'] = nil
                else
                    hash_vm = vm.to_hash['VM']
                    vm_info = {}
                    vm_info['VM'] = hash_vm.select {|v| VM_INFO.include?(v) }

                    node['vm_info'] = vm_info
                end

                @body['nodes'] << node
            end

            [deployed_nodes, nil]
        end
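
        # Illustration with assumed values (not taken from this file): a name
        # template of '$ROLE_NAME_$VM_NUMBER_(service_$SERVICE_ID)' names the
        # third VM of role 'frontend' in service 21 'frontend_2_(service_21)',
        # since last_vmname starts at 0. Likewise, a vm_template_contents of
        #   APPEND="SCHED_REQUIREMENTS"
        #   SCHED_REQUIREMENTS="CLUSTER_ID=\"101\""
        # carries the named attribute over from the VM template, so the extra
        # contents add to it instead of replacing it.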

        # Terminate all the nodes in this role
        #
        # @param scale_down [true, false] true to terminate and dispose the
        #   number of VMs needed to get down to cardinality nodes
        # @return [Array<true, nil>, Array<false, String>] true if all the VMs
        #   were terminated, false and the error reason if there was a problem
        #   shutting down the VMs
        def shutdown(recover)
            if nodes.size != cardinality
                n_nodes = nodes.size - cardinality
            else
                n_nodes = nodes.size
            end

            rc = shutdown_nodes(nodes, n_nodes, recover)

            unless rc[0]
                return [false, "Error undeploying nodes for role `#{name}`"]
            end

            [rc[1], nil]
        end

        # Delete all the nodes in this role
        # @return [Array<true, nil>] All the VMs are deleted, and the return
        #   ignored
        def delete
            raise 'role.delete is not defined'
        end

        # Changes the owner/group of all the nodes in this role
        #
        # @param [Integer] uid the new owner id. Set to -1 to leave the current
        # @param [Integer] gid the new group id. Set to -1 to leave the current
        #
        # @return [Array<true, nil>, Array<false, String>] true if all the VMs
        #   were updated, false and the error reason if there was a problem
        #   updating the VMs
        def chown(uid, gid)
            nodes.each do |node|
                vm_id = node['deploy_id']

                Log.debug LOG_COMP,
                          "Role #{name} : Chown for VM #{vm_id}",
                          @service.id

                vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
                                                            @service.client)
                rc = vm.chown(uid, gid)

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : Chown failed for VM #{vm_id}; " \
                          "#{rc.message}"

                    Log.error LOG_COMP, msg, @service.id
                    @service.log_error(msg)

                    return [false, rc.message]
                else
                    Log.debug LOG_COMP,
                              "Role #{name} : Chown success for VM #{vm_id}",
                              @service.id
                end
            end

            [true, nil]
        end

        # Schedule the given action on all the VMs that belong to the Role
        # @param [String] action one of the available SCHEDULE_ACTIONS
        # @param [Integer] period
        # @param [Integer] vms_per_period
        # @param [String] args action arguments
        def batch_action(action, period, vms_per_period, args)
            vms_id = []
            error_msgs = []
            nodes = @body['nodes']
            now = Time.now.to_i
            time_offset = 0

            # If the role is in DONE, return an error
            if state == 5
                return OpenNebula::Error.new("Role #{name} is in DONE state")
            end

            do_offset = (!period.nil? && period.to_i > 0 &&
                !vms_per_period.nil? && vms_per_period.to_i > 0)

            nodes.each_with_index do |node, index|
                vm_id = node['deploy_id']
                vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
                                                            @service.client)

                rc = vm.info

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : VM #{vm_id} monitoring failed;"\
                          " #{rc.message}"

                    error_msgs << msg

                    Log.error LOG_COMP, msg, @service.id

                    @service.log_error(msg)
                else
                    ids = vm.retrieve_elements('USER_TEMPLATE/SCHED_ACTION/ID')

                    id = 0
                    if !ids.nil? && !ids.empty?
                        ids.map! {|e| e.to_i }
                        id = ids.max + 1
                    end

                    tmp_str = vm.user_template_str

                    if do_offset
                        offset = (index / vms_per_period.to_i).floor
                        time_offset = offset * period.to_i
                    end

                    tmp_str << "\nSCHED_ACTION = ["
                    tmp_str << "ID = #{id},"
                    tmp_str << "ACTION = #{action},"
                    tmp_str << "ARGS = \"#{args}\"," if args
                    tmp_str << "TIME = #{now + time_offset}]"

                    rc = vm.update(tmp_str)
                    if OpenNebula.is_error?(rc)
                        msg = "Role #{name} : VM #{vm_id} error scheduling "\
                              "action; #{rc.message}"

                        error_msgs << msg

                        Log.error LOG_COMP, msg, @service.id

                        @service.log_error(msg)
                    else
                        vms_id << vm.id
                    end
                end
            end

            log_msg = "Action:#{action} scheduled on Role:#{name} "\
                      "VMs:#{vms_id.join(',')}"

            Log.info LOG_COMP, log_msg, @service.id

            return [true, log_msg] if error_msgs.empty?

            error_msgs << log_msg

            [false, error_msgs.join('\n')]
        end
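
        # Illustration with assumed values: scheduling 'poweroff' on a role
        # of 6 VMs with period = 60 and vms_per_period = 2 appends to each
        # VM's user template a vector like
        #   SCHED_ACTION = [ID = 0, ACTION = poweroff, TIME = <now + offset>]
        # where the offset grows by 60 seconds for every group of 2 VMs.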

        # Returns true if the VM state is failure
        # @param [Integer] vm_state VM state
        # @param [Integer] lcm_state VM LCM state
        # @return [true,false] True if the lcm state is one of *_FAILURE
        def self.vm_failure?(vm_state, lcm_state)
            vm_state_str = VirtualMachine::VM_STATE[vm_state.to_i]
            lcm_state_str = VirtualMachine::LCM_STATE[lcm_state.to_i]

            if vm_state_str == 'ACTIVE' &&
               VM_FAILURE_STATES.include?(lcm_state_str)
                return true
            end

            false
        end

        # rubocop:disable Style/ClassVars
        def self.init_default_cooldown(default_cooldown)
            @@default_cooldown = default_cooldown
        end

        def self.init_default_shutdown(shutdown_action)
            @@default_shutdown = shutdown_action
        end

        def self.init_force_deletion(force_deletion)
            @@force_deletion = force_deletion
        end

        def self.init_default_vm_name_template(vm_name_template)
            @@vm_name_template = vm_name_template
        end
        # rubocop:enable Style/ClassVars

        ########################################################################
        # Scalability
        ########################################################################

        # Updates the role
        # @param [Hash] template
        # @return [nil, OpenNebula::Error] nil in case of success, Error
        #   otherwise
        def update(template)
            force = template['force'] == true
            new_cardinality = template['cardinality']

            return if new_cardinality.nil?

            new_cardinality = new_cardinality.to_i

            if !force
                if new_cardinality < min_cardinality.to_i
                    return OpenNebula::Error.new(
                        "Minimum cardinality is #{min_cardinality}"
                    )

                elsif !max_cardinality.nil? &&
                      new_cardinality > max_cardinality.to_i
                    return OpenNebula::Error.new(
                        "Maximum cardinality is #{max_cardinality}"
                    )

                end
            end

            set_cardinality(new_cardinality)

            nil
        end

        ########################################################################
        # Recover
        ########################################################################

        def recover_deploy(report)
            nodes = @body['nodes']
            deployed_nodes = []

            nodes.each do |node|
                vm_id = node['deploy_id']

                vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
                                                            @service.client)

                rc = vm.info

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : Retry failed for VM "\
                          "#{vm_id}; #{rc.message}"
                    Log.error LOG_COMP, msg, @service.id

                    next true
                end

                vm_state = vm.state
                lcm_state = vm.lcm_state

                # ACTIVE/RUNNING
                next false if vm_state == 3 && lcm_state == 3 && !report

                next true if vm_state == '6' # Delete DONE nodes

                if Role.vm_failure?(vm_state, lcm_state)
                    rc = vm.recover(2)

                    if OpenNebula.is_error?(rc)
                        msg = "Role #{name} : Retry failed for VM "\
                              "#{vm_id}; #{rc.message}"

                        Log.error LOG_COMP, msg, @service.id
                        @service.log_error(msg)
                    else
                        deployed_nodes << vm_id
                    end
                else
                    vm.resume

                    deployed_nodes << vm_id
                end
            end

            rc = deploy

            deployed_nodes.concat(rc[0]) if rc[1].nil?

            deployed_nodes
        end

        def recover_undeploy
            undeployed_nodes = []

            rc = shutdown(true)

            undeployed_nodes.concat(rc[0]) if rc[1].nil?

            undeployed_nodes
        end

        # def recover_warning
        # end

        def recover_scale(report)
            rc = nil

            if @body['scale_way'] == SCALE_WAYS['UP']
                rc = [recover_deploy(report), true]
            elsif @body['scale_way'] == SCALE_WAYS['DOWN']
                rc = [recover_undeploy, false]
            end

            rc
        end

        # Returns a positive, 0, or negative number of nodes to adjust,
        # according to the elasticity and scheduled policies
        # @return [Array<Integer>] positive, 0, or negative number of nodes to
        #   adjust, plus the cooldown period duration
        def scale?(vm_pool)
            elasticity_pol = @body['elasticity_policies']
            scheduled_pol = @body['scheduled_policies']

            elasticity_pol ||= []
            scheduled_pol ||= []

            scheduled_pol.each do |policy|
                diff, cooldown_duration = scale_time?(policy)

                return [diff, cooldown_duration] if diff != 0
            end

            elasticity_pol.each do |policy|
                diff, cooldown_duration = scale_attributes?(policy, vm_pool)

                next if diff == 0

                cooldown_duration = @body['cooldown'] if cooldown_duration.nil?
                cooldown_duration = @@default_cooldown if cooldown_duration.nil?

                return [diff, cooldown_duration]
            end

            # Implicit rule that scales up to maintain the min_cardinality,
            # with no cooldown period
            if cardinality < min_cardinality.to_i
                return [min_cardinality.to_i - cardinality, 0]
            end

            [0, 0]
        end
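
        # Note on the evaluation order above: scheduled policies take
        # precedence over elasticity policies, and the implicit
        # min_cardinality rule only applies when no policy requested a change.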

        private

        # Shuts down all the given nodes
        # @param scale_down [true,false] True to set the 'disposed' node flag
        def shutdown_nodes(nodes, n_nodes, recover)
            success = true
            undeployed_nodes = []

            action = @body['shutdown_action']

            if action.nil?
                action = @service.shutdown_action
            end

            if action.nil?
                action = @@default_shutdown
            end

            nodes[0..n_nodes - 1].each do |node|
                vm_id = node['deploy_id']

                Log.debug(LOG_COMP,
                          "Role #{name} : Terminating VM #{vm_id}",
                          @service.id)

                vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
                                                            @service.client)

                vm_state = nil
                lcm_state = nil

                if recover
                    vm.info

                    vm_state = vm.state
                    lcm_state = vm.lcm_state
                end

                if recover && Role.vm_failure?(vm_state, lcm_state)
                    rc = vm.recover(2)
                elsif action == 'terminate-hard'
                    rc = vm.terminate(true)
                else
                    rc = vm.terminate
                end

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : Terminate failed for VM #{vm_id}, " \
                          "will perform a Delete; #{rc.message}"

                    Log.error LOG_COMP, msg, @service.id
                    @service.log_error(msg)

                    if action != 'terminate-hard'
                        rc = vm.terminate(true)
                    end

                    if OpenNebula.is_error?(rc)
                        rc = vm.delete
                    end

                    if OpenNebula.is_error?(rc)
                        msg = "Role #{name} : Delete failed for VM #{vm_id}; " \
                              "#{rc.message}"

                        Log.error LOG_COMP, msg, @service.id
                        @service.log_error(msg)

                        success = false
                    else
                        Log.debug(LOG_COMP,
                                  "Role #{name} : Delete success for VM " \
                                  "#{vm_id}",
                                  @service.id)

                        undeployed_nodes << vm_id
                    end
                else
                    Log.debug(LOG_COMP,
                              "Role #{name}: Terminate success for VM #{vm_id}",
                              @service.id)
                    undeployed_nodes << vm_id
                end
            end

            [success, undeployed_nodes]
        end

        def vm_failure?(node)
            if node && node['vm_info']
                return Role.vm_failure?(node['vm_info']['VM']['STATE'],
                                        node['vm_info']['VM']['LCM_STATE'])
            end

            false
        end

        # Returns a positive, 0, or negative number of nodes to adjust,
        # according to a SCHEDULED type policy
        # @param [Hash] A SCHEDULED type policy
        # @return [Integer] positive, 0, or negative number of nodes to adjust
        def scale_time?(elasticity_pol)
            now = Time.now.to_i
            last_eval = elasticity_pol['last_eval'].to_i

            elasticity_pol['last_eval'] = now

            # If this is the first time this is evaluated, ignore it.
            # We don't want to execute actions planned in the past when the
            # server starts.

            return 0 if last_eval == 0

            start_time = elasticity_pol['start_time']
            target_vms = elasticity_pol['adjust']

            # TODO: error msg
            return 0 if target_vms.nil?

            if !(start_time.nil? || start_time.empty?)
                begin
                    if !start_time.match(/^\d+$/)
                        start_time = Time.parse(start_time).to_i
                    else
                        start_time = start_time.to_i
                    end
                rescue ArgumentError
                    # TODO: error msg
                    return 0
                end
            else
                recurrence = elasticity_pol['recurrence']

                # TODO: error msg
                return 0 if recurrence.nil? || recurrence.empty?

                begin
                    cron_parser = CronParser.new(recurrence)

                    # This returns the next planned time, starting from the
                    # last step
                    start_time = cron_parser.next(Time.at(last_eval)).to_i
                rescue StandardError
                    # TODO: error msg, bad format
                    return 0
                end
            end

            # Only actions planned between last step and this one are triggered
            if start_time > last_eval && start_time <= now
                Log.debug LOG_COMP,
                          "Role #{name} : scheduled scalability for " \
                          "#{Time.at(start_time)} triggered", @service.id

                new_cardinality = calculate_new_cardinality(elasticity_pol)

                return [new_cardinality - cardinality,
                        elasticity_pol['cooldown']]
            end

            [0, elasticity_pol['cooldown']]
        end
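
        # Illustration with assumed values: a scheduled policy evaluated
        # above could be
        #   { 'type' => 'CHANGE', 'adjust' => 2,
        #     'recurrence' => '0 9 * * mon-fri' }
        # CronParser#next(Time.at(last_eval)) returns the first planned time
        # after the previous step, and it only triggers when that time falls
        # within (last_eval, now].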

        # Returns a positive, 0, or negative number of nodes to adjust,
        # according to a policy based on attributes
        # @param [Hash] A policy based on attributes
        # @return [Array<Integer>] positive, 0, or negative number of nodes to
        #   adjust, plus the cooldown period duration
        def scale_attributes?(elasticity_pol, vm_pool)
            now = Time.now.to_i

            # TODO: enforce true_up_evals type in ServiceTemplate::ROLE_SCHEMA ?

            period_duration = elasticity_pol['period'].to_i
            period_number = elasticity_pol['period_number'].to_i
            last_eval = elasticity_pol['last_eval'].to_i
            true_evals = elasticity_pol['true_evals'].to_i
            expression = elasticity_pol['expression']

            if !last_eval.nil?
                if now < (last_eval + period_duration)
                    return [0, 0]
                end
            end

            elasticity_pol['last_eval'] = now

            new_cardinality = cardinality
            new_evals = 0

            exp_value, exp_st = scale_rule(expression, vm_pool)

            if exp_value
                new_evals = true_evals + 1
                new_evals = period_number if new_evals > period_number

                if new_evals >= period_number
                    Log.debug LOG_COMP,
                              "Role #{name} : elasticity policy #{exp_st} "\
                              'triggered', @service.id

                    new_cardinality = calculate_new_cardinality(elasticity_pol)
                end
            end

            elasticity_pol['true_evals'] = new_evals
            elasticity_pol['expression_evaluated'] = exp_st

            [new_cardinality - cardinality, elasticity_pol['cooldown']]
        end

        # Returns true if the scalability rule is triggered
        # @return true if the scalability rule is triggered
        def scale_rule(elas_expr, vm_pool)
            parser = ElasticityGrammarParser.new

            if elas_expr.nil? || elas_expr.empty?
                return false
            end

            treetop = parser.parse(elas_expr)

            if treetop.nil?
                return [false,
                        "Parse error. '#{elas_expr}': #{parser.failure_reason}"]
            end

            val, st = treetop.result(self, vm_pool)

            [val, st]
        end

        def calculate_new_cardinality(elasticity_pol)
            type = elasticity_pol['type']
            adjust = elasticity_pol['adjust'].to_i

            # Min is a hard limit; if the current cardinality + adjustment
            # does not reach it, the difference is added

            max = [cardinality, max_cardinality.to_i].max()
            # min = [cardinality(), min_cardinality.to_i].min()
            min = min_cardinality.to_i

            case type.upcase
            when 'CHANGE'
                new_cardinality = cardinality + adjust
            when 'PERCENTAGE_CHANGE'
                min_adjust_step = elasticity_pol['min_adjust_step'].to_i

                change = cardinality * adjust / 100.0

                change > 0 ? sign = 1 : sign = -1
                change = change.abs

                if change < 1
                    change = 1
                else
                    change = change.to_i
                end

                change = sign * [change, min_adjust_step].max

                new_cardinality = cardinality + change

            when 'CARDINALITY'
                new_cardinality = adjust
            else
                # TODO: error message
                return cardinality
            end

            # The cardinality can be forced to be outside the min,max
            # range. If that is the case, the scale up/down will not
            # move further outside the range. It will move towards the
            # range with the adjustment set, instead of jumping the
            # difference
            if adjust > 0
                new_cardinality = max if new_cardinality > max
            elsif adjust < 0
                new_cardinality = min if new_cardinality < min
            end

            new_cardinality
        end
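
        # Worked example (illustrative values): with cardinality 10,
        # PERCENTAGE_CHANGE, adjust -30 and min_adjust_step 4:
        # change = 10 * -30 / 100.0 = -3.0, |change| = 3, max(3, 4) = 4,
        # so the role scales from 10 down to 6 nodes, never dropping below
        # min_cardinality.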

    end

end
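
For reference, a minimal sketch of how a recurrence string is evaluated with the parse-cron gem this file requires, mirroring Role#scale_time? above; the cron expression and times are assumptions, not values from the release:

    require 'parse-cron'

    last_eval = Time.now.to_i - 3600 # previous evaluation step
    now = Time.now.to_i

    # Next planned trigger after the last evaluation (as in scale_time?)
    start_time = CronParser.new('0 * * * *').next(Time.at(last_eval)).to_i

    # Only actions planned between the last step and this one fire
    puts "triggered at #{Time.at(start_time)}" if start_time > last_eval &&
                                                  start_time <= now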