opennebula 6.10.3 → 6.99.85.pre
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/cloud/CloudClient.rb +3 -3
- data/lib/models/role.rb +349 -823
- data/lib/models/service.rb +156 -80
- data/lib/models/vmrole.rb +703 -0
- data/lib/models/vrrole.rb +284 -0
- data/lib/models.rb +3 -1
- data/lib/opennebula/acl.rb +1 -1
- data/lib/opennebula/acl_pool.rb +1 -1
- data/lib/opennebula/backupjob.rb +1 -1
- data/lib/opennebula/backupjob_pool.rb +1 -1
- data/lib/opennebula/client.rb +1 -1
- data/lib/opennebula/cluster.rb +45 -2
- data/lib/opennebula/cluster_pool.rb +1 -1
- data/lib/opennebula/datastore.rb +1 -1
- data/lib/opennebula/datastore_pool.rb +1 -1
- data/lib/opennebula/document.rb +1 -1
- data/lib/opennebula/document_json.rb +1 -1
- data/lib/opennebula/document_pool.rb +1 -1
- data/lib/opennebula/document_pool_json.rb +1 -1
- data/lib/opennebula/error.rb +1 -1
- data/lib/opennebula/flow/grammar.rb +1 -1
- data/lib/opennebula/flow/service_pool.rb +1 -1
- data/lib/opennebula/flow/service_template.rb +353 -97
- data/lib/opennebula/flow/service_template_ext.rb +3 -3
- data/lib/opennebula/flow/service_template_pool.rb +1 -1
- data/lib/opennebula/flow/validator.rb +458 -410
- data/lib/opennebula/flow.rb +1 -1
- data/lib/opennebula/group.rb +1 -1
- data/lib/opennebula/group_pool.rb +1 -1
- data/lib/opennebula/hook.rb +1 -1
- data/lib/opennebula/hook_log.rb +1 -1
- data/lib/opennebula/hook_pool.rb +1 -1
- data/lib/opennebula/host.rb +1 -60
- data/lib/opennebula/host_pool.rb +1 -1
- data/lib/opennebula/image.rb +1 -1
- data/lib/opennebula/image_pool.rb +1 -1
- data/lib/opennebula/ldap_auth.rb +1 -1
- data/lib/opennebula/ldap_auth_spec.rb +1 -1
- data/lib/opennebula/lockable_ext.rb +1 -1
- data/lib/opennebula/marketplace.rb +1 -1
- data/lib/opennebula/marketplace_pool.rb +1 -1
- data/lib/opennebula/marketplaceapp.rb +1 -1
- data/lib/opennebula/marketplaceapp_ext.rb +14 -211
- data/lib/opennebula/marketplaceapp_pool.rb +1 -1
- data/lib/opennebula/oneflow_client.rb +11 -9
- data/lib/opennebula/pool.rb +1 -1
- data/lib/opennebula/pool_element.rb +1 -1
- data/lib/opennebula/security_group.rb +1 -1
- data/lib/opennebula/security_group_pool.rb +1 -1
- data/lib/opennebula/server_cipher_auth.rb +1 -1
- data/lib/opennebula/server_x509_auth.rb +1 -1
- data/lib/opennebula/ssh_auth.rb +1 -1
- data/lib/opennebula/system.rb +1 -1
- data/lib/opennebula/template.rb +1 -1
- data/lib/opennebula/template_ext.rb +1 -1
- data/lib/opennebula/template_pool.rb +1 -1
- data/lib/opennebula/user.rb +1 -1
- data/lib/opennebula/user_pool.rb +1 -1
- data/lib/opennebula/utils.rb +2 -2
- data/lib/opennebula/vdc.rb +1 -1
- data/lib/opennebula/vdc_pool.rb +1 -1
- data/lib/opennebula/virtual_machine.rb +3 -12
- data/lib/opennebula/virtual_machine_ext.rb +2 -31
- data/lib/opennebula/virtual_machine_pool.rb +1 -1
- data/lib/opennebula/virtual_network.rb +1 -1
- data/lib/opennebula/virtual_network_pool.rb +1 -1
- data/lib/opennebula/virtual_router.rb +1 -1
- data/lib/opennebula/virtual_router_pool.rb +1 -1
- data/lib/opennebula/vm_group.rb +1 -1
- data/lib/opennebula/vm_group_pool.rb +1 -1
- data/lib/opennebula/vntemplate.rb +1 -1
- data/lib/opennebula/vntemplate_pool.rb +1 -1
- data/lib/opennebula/wait_ext.rb +1 -1
- data/lib/opennebula/x509_auth.rb +1 -1
- data/lib/opennebula/xml_element.rb +2 -2
- data/lib/opennebula/xml_pool.rb +1 -1
- data/lib/opennebula/xml_utils.rb +1 -1
- data/lib/opennebula/zone.rb +1 -1
- data/lib/opennebula/zone_pool.rb +1 -1
- data/lib/opennebula.rb +2 -2
- metadata +6 -67
- data/lib/ActionManager.rb +0 -280
- data/lib/CommandManager.rb +0 -328
- data/lib/DriverExecHelper.rb +0 -213
- data/lib/HostSyncManager.rb +0 -111
- data/lib/OpenNebulaDriver.rb +0 -223
- data/lib/VirtualMachineDriver.rb +0 -404
- data/lib/datacenter.rb +0 -1319
- data/lib/datastore.rb +0 -1049
- data/lib/distributed_firewall.rb +0 -293
- data/lib/file_helper.rb +0 -374
- data/lib/host.rb +0 -1518
- data/lib/logical_port.rb +0 -50
- data/lib/logical_switch.rb +0 -77
- data/lib/memoize.rb +0 -74
- data/lib/network.rb +0 -705
- data/lib/nsx_client.rb +0 -157
- data/lib/nsx_component.rb +0 -28
- data/lib/nsx_constants.rb +0 -162
- data/lib/nsx_driver.rb +0 -91
- data/lib/nsx_error.rb +0 -77
- data/lib/nsx_rule.rb +0 -206
- data/lib/nsxt_client.rb +0 -189
- data/lib/nsxt_dfw.rb +0 -196
- data/lib/nsxt_logical_port.rb +0 -94
- data/lib/nsxt_rule.rb +0 -188
- data/lib/nsxt_tz.rb +0 -38
- data/lib/nsxv_client.rb +0 -189
- data/lib/nsxv_dfw.rb +0 -202
- data/lib/nsxv_logical_port.rb +0 -107
- data/lib/nsxv_rule.rb +0 -172
- data/lib/nsxv_tz.rb +0 -41
- data/lib/opaque_network.rb +0 -134
- data/lib/rest_client.rb +0 -191
- data/lib/scripts_common.rb +0 -176
- data/lib/transport_zone.rb +0 -43
- data/lib/vcenter_driver.rb +0 -152
- data/lib/vcenter_importer.rb +0 -626
- data/lib/vi_client.rb +0 -273
- data/lib/vi_helper.rb +0 -328
- data/lib/virtual_machine.rb +0 -3574
- data/lib/virtual_wire.rb +0 -158
- data/lib/vm_device.rb +0 -80
- data/lib/vm_disk.rb +0 -202
- data/lib/vm_folder.rb +0 -69
- data/lib/vm_helper.rb +0 -30
- data/lib/vm_monitor.rb +0 -305
- data/lib/vm_nic.rb +0 -70
- data/lib/vm_template.rb +0 -2112
- data/lib/vmm_importer.rb +0 -165
data/lib/models/vmrole.rb (new file, @@ -0,0 +1,703 @@):

# -------------------------------------------------------------------------- #
# Copyright 2002-2025, OpenNebula Project, OpenNebula Systems                 #
#                                                                             #
# Licensed under the Apache License, Version 2.0 (the "License"); you may     #
# not use this file except in compliance with the License. You may obtain     #
# a copy of the License at                                                    #
#                                                                             #
# http://www.apache.org/licenses/LICENSE-2.0                                  #
#                                                                             #
# Unless required by applicable law or agreed to in writing, software         #
# distributed under the License is distributed on an "AS IS" BASIS,           #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.    #
# See the License for the specific language governing permissions and         #
# limitations under the License.                                              #
#--------------------------------------------------------------------------- #

module OpenNebula

    # Service Role class
    class VMRole < Role

        attr_reader :service

        def initialize(body, service)
            super(body, service)

            @body['cooldown'] = @@default_cooldown if @body['cooldown'].nil?
        end

        # Sets a new state
        # @param [Integer] the new state
        def state=(state)
            super(state)
            return unless state == STATE['SCALING']

            elasticity_pol = @body['elasticity_policies']

            return if elasticity_pol.nil?

            elasticity_pol.each do |policy|
                policy.delete('true_evals')
            end
        end

        ########################################################################
        # Operations
        ########################################################################

        # Changes the owner/group of all the nodes in this role
        #
        # @param [Integer] uid the new owner id. Set to -1 to leave the current
        # @param [Integer] gid the new group id. Set to -1 to leave the current
        #
        # @return [Array<true, nil>, Array<false, String>] true if all the VMs
        # were updated, false and the error reason if there was a problem
        # updating the VMs
        def chown(uid, gid)
            nodes.each do |node|
                vm_id = node['deploy_id']

                Log.debug LOG_COMP,
                          "Role #{name} : Chown for VM #{vm_id}",
                          @service.id

                vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
                                                            @service.client)
                rc = vm.chown(uid, gid)

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : Chown failed for VM #{vm_id}; " \
                          "#{rc.message}"

                    Log.error LOG_COMP, msg, @service.id
                    @service.log_error(msg)

                    return [false, rc.message]
                else
                    Log.debug LOG_COMP,
                              "Role #{name} : Chown success for VM #{vm_id}",
                              @service.id
                end
            end

            [true, nil]
        end

        # Updates the role
        # @param [Hash] template
        # @return [nil, OpenNebula::Error] nil in case of success, Error
        # otherwise
        def update(template)
            force = template['force'] == true
            new_cardinality = template['cardinality']

            return if new_cardinality.nil?

            new_cardinality = new_cardinality.to_i

            if !force
                if new_cardinality < min_cardinality.to_i
                    return OpenNebula::Error.new(
                        "Minimum cardinality is #{min_cardinality}"
                    )

                elsif !max_cardinality.nil? &&
                      new_cardinality > max_cardinality.to_i
                    return OpenNebula::Error.new(
                        "Maximum cardinality is #{max_cardinality}"
                    )

                end
            end

            self.cardinality = new_cardinality

            nil
        end

        ########################################################################
        # Scheduler
        ########################################################################

        # Schedule the given action on all the VMs that belong to the Role
        # @param [String] action one of the available SCHEDULE_ACTIONS
        # @param [Integer] period
        # @param [Integer] vm_per_period
        # @param [String] action arguments
        def batch_action(action, period, vms_per_period, args)
            vms_id = []
            error_msgs = []
            nodes = @body['nodes']
            now = Time.now.to_i
            time_offset = 0

            # if role is done, return error
            if state == 5
                return OpenNebula::Error.new("Role #{name} is in DONE state")
            end

            do_offset = !period.nil? && period.to_i > 0 &&
                        !vms_per_period.nil? && vms_per_period.to_i > 0

            nodes.each_with_index do |node, index|
                vm_id = node['deploy_id']
                vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
                                                            @service.client)

                if do_offset
                    offset = (index / vms_per_period.to_i).floor
                    time_offset = offset * period.to_i
                end

                tmp_str = 'SCHED_ACTION = ['
                tmp_str << "ACTION = #{action},"
                tmp_str << "ARGS = \"#{args}\"," if args
                tmp_str << "TIME = #{now + time_offset}]"

                rc = vm.sched_action_add(tmp_str)
                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : VM #{vm_id} error scheduling "\
                          "action; #{rc.message}"

                    error_msgs << msg

                    Log.error LOG_COMP, msg, @service.id

                    @service.log_error(msg)
                else
                    vms_id << vm.id
                end
            end

            log_msg = "Action:#{action} scheduled on Role:#{name}"\
                      "VMs:#{vms_id.join(',')}"

            Log.info LOG_COMP, log_msg, @service.id

            return [true, log_msg] if error_msgs.empty?

            error_msgs << log_msg

            [false, error_msgs.join('\n')]
        end
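The SCHED_ACTION vector that batch_action attaches to each VM, and the way period and vms_per_period stagger the TIME attribute, are easier to see with concrete values. A minimal standalone sketch; the action name, batch size and node count are illustrative, not values taken from this diff:

    # Reproduces only the string assembly and the batch offset logic above.
    now            = Time.now.to_i
    action         = 'poweroff'    # illustrative
    args           = nil
    period         = 60            # seconds between batches
    vms_per_period = 2             # VMs per batch

    3.times do |index|
        offset      = (index / vms_per_period).floor
        time_offset = offset * period

        tmp_str = 'SCHED_ACTION = ['
        tmp_str << "ACTION = #{action},"
        tmp_str << "ARGS = \"#{args}\"," if args
        tmp_str << "TIME = #{now + time_offset}]"

        puts tmp_str
        # indexes 0 and 1 share TIME = now, index 2 gets TIME = now + 60
    end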

        ########################################################################
        # Scalability
        ########################################################################

        # Returns the role max cardinality
        # @return [Integer,nil] the role cardinality or nil if it isn't defined
        def max_cardinality
            max = @body['max_vms']

            return if max.nil?

            max.to_i
        end

        # Returns the role min cardinality
        # @return [Integer,nil] the role cardinality or nil if it isn't defined
        def min_cardinality
            min = @body['min_vms']

            return if min.nil?

            min.to_i
        end

        # Returns a positive, 0, or negative number of nodes to adjust,
        # according to the elasticity and scheduled policies
        # @return [Array<Integer>] positive, 0, or negative number of nodes to
        # adjust, plus the cooldown period duration
        def scale?(vm_pool)
            elasticity_pol = @body['elasticity_policies']
            scheduled_pol = @body['scheduled_policies']

            elasticity_pol ||= []
            scheduled_pol ||= []

            scheduled_pol.each do |policy|
                diff, cooldown_duration = scale_time?(policy)

                return [diff, cooldown_duration] if diff != 0
            end

            elasticity_pol.each do |policy|
                diff, cooldown_duration = scale_attributes?(policy, vm_pool)

                next if diff == 0

                cooldown_duration = @body['cooldown'] if cooldown_duration.nil?
                cooldown_duration = @@default_cooldown if cooldown_duration.nil?

                return [diff, cooldown_duration]
            end

            # Implicit rule that scales up to maintain the min_cardinality, with
            # no cooldown period
            if cardinality < min_cardinality.to_i
                return [min_cardinality.to_i - cardinality, 0]
            end

            [0, 0]
        end
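scale?, scale_time? and scale_attributes? all read policy hashes stored in the role body. The authoritative schema lives in ServiceTemplate::ROLE_SCHEMA, not in this file; the fragment below only collects the keys referenced by the code above, with made-up values:

    # Illustrative role body fragment (values are invented for the example).
    role_body = {
        'min_vms'  => 1,
        'max_vms'  => 10,
        'cooldown' => 300,
        'elasticity_policies' => [
            {
                'type'          => 'CHANGE',
                'adjust'        => 2,
                'expression'    => 'ATT > 50',  # evaluated by scale_rule
                'period'        => 60,          # seconds between evaluations
                'period_number' => 3,           # consecutive true evaluations needed
                'cooldown'      => 600
            }
        ],
        'scheduled_policies' => [
            {
                'type'       => 'CARDINALITY',
                'adjust'     => 5,
                'recurrence' => '0 8 * * mon-fri' # cron expression, see scale_time?
            }
        ]
    }

    role_body['scheduled_policies'].first['recurrence'] # => "0 8 * * mon-fri"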

        def elasticity_policies
            @body['elasticity_policies']
        end

        def update_elasticity_policies(new_policies)
            @body['elasticity_policies'] = new_policies
        end

        def cooldown
            @body['cooldown']
        end

        def update_cooldown(new_cooldown)
            @body['cooldown'] = new_cooldown unless new_cooldown.nil?
        end

        def scale_way(way)
            @body['scale_way'] = SCALE_WAYS[way]
        end

        def clean_scale_way
            @body.delete('scale_way')
        end

        ########################################################################
        # Deployment
        ########################################################################

        # Deploys all the nodes in this role
        #
        # @return [Array<true, nil>, Array<false, String>] true if all the VMs
        # were created, false and the error reason if there was a problem
        # creating the VMs
        def deploy
            deployed_nodes = []
            n_nodes = cardinality - nodes.size

            return [deployed_nodes, nil] if n_nodes == 0

            template_id, template, extra_template = init_template_attributes

            n_nodes.times do
                vm_name = @@vm_name_template
                          .gsub('$SERVICE_ID', @service.id.to_s)
                          .gsub('$SERVICE_NAME', @service.name.to_s)
                          .gsub('$ROLE_NAME', name.to_s)
                          .gsub('$VM_NUMBER', @body['last_vmname'].to_s)

                @body['last_vmname'] += 1

                Log.debug(
                    LOG_COMP,
                    "Role #{name} : Instantiate template #{template_id}, name #{vm_name}",
                    @service.id
                )

                # Instantiate VM
                vm_id = template.instantiate(vm_name, on_hold?, extra_template)

                if OpenNebula.is_error?(vm_id)
                    msg = "Role #{name} : Instantiate failed for template " \
                          "#{template_id}; #{vm_id.message}"

                    Log.error(LOG_COMP, msg, @service.id)

                    @service.log_error(msg)

                    return [false, "Error instantiating VM Template #{template_id} in Role " \
                                   "#{name}: #{vm_id.message}"]
                end

                Log.debug(
                    LOG_COMP,
                    "Role #{name} : Instantiate success, VM ID #{vm_id}",
                    @service.id
                )

                # Once deployed, save VM info in role node body
                deployed_nodes << vm_id
                fill_node_info(vm_id)
            end

            [deployed_nodes, nil]
        end
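Each VM name in deploy comes from @@vm_name_template with the $SERVICE_ID, $SERVICE_NAME, $ROLE_NAME and $VM_NUMBER tokens substituted. A standalone sketch of that substitution; the template string below is an assumption modelled on the oneflow server configuration, and the IDs are illustrative:

    # Assumed template string; the real value is the @@vm_name_template
    # class variable configured for the oneflow server.
    vm_name_template = '$ROLE_NAME_$VM_NUMBER_(service_$SERVICE_ID)'

    service_id   = 42
    service_name = 'web_service'
    role_name    = 'frontend'
    last_vmname  = 0

    vm_name = vm_name_template
              .gsub('$SERVICE_ID', service_id.to_s)
              .gsub('$SERVICE_NAME', service_name.to_s)
              .gsub('$ROLE_NAME', role_name.to_s)
              .gsub('$VM_NUMBER', last_vmname.to_s)

    puts vm_name # => "frontend_0_(service_42)"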

        ########################################################################
        # Recover
        ########################################################################

        def recover_deploy(report)
            nodes = @body['nodes']
            deployed_nodes = []

            nodes.each do |node|
                vm_id = node['deploy_id']

                vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
                                                            @service.client)

                rc = vm.info

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : Retry failed for VM "\
                          "#{vm_id}; #{rc.message}"
                    Log.error LOG_COMP, msg, @service.id

                    next true
                end

                vm_state = vm.state
                lcm_state = vm.lcm_state

                # ACTIVE/RUNNING
                next false if vm_state == 3 && lcm_state == 3 && !report

                next true if vm_state == '6' # Delete DONE nodes

                if Role.vm_failure?(vm_state, lcm_state)
                    rc = vm.recover(2)

                    if OpenNebula.is_error?(rc)
                        msg = "Role #{name} : Retry failed for VM "\
                              "#{vm_id}; #{rc.message}"

                        Log.error LOG_COMP, msg, @service.id
                        @service.log_error(msg)
                    else
                        deployed_nodes << vm_id
                    end
                else
                    vm.resume

                    deployed_nodes << vm_id
                end
            end

            rc = deploy

            unless rc[0]
                return [false, "Error deploying nodes for role `#{name}`"]
            end

            deployed_nodes.concat(rc[0])

            deployed_nodes
        end

        def recover_undeploy
            undeployed_nodes = []

            rc = shutdown(true)

            return false unless rc[0]

            undeployed_nodes.concat(rc[0]) if rc[1].nil?

            undeployed_nodes
        end

        def recover_scale(report)
            rc = nil

            if @body['scale_way'] == SCALE_WAYS['UP']
                rc = [recover_deploy(report), true]
            elsif @body['scale_way'] == SCALE_WAYS['DOWN']
                rc = [recover_undeploy, false]
            end

            rc
        end

        ########################################################################
        # Helpers
        ########################################################################

        private

        # Shuts down all the given nodes
        # @param scale_down [true,false] True to set the 'disposed' node flag
        def shutdown_nodes(nodes, n_nodes, recover)
            undeployed_nodes = []

            action = @body['shutdown_action']

            if action.nil?
                action = @service.shutdown_action
            end

            if action.nil?
                action = @@default_shutdown
            end

            nodes[0..n_nodes - 1].each do |node|
                vm_id = node['deploy_id']

                Log.debug(LOG_COMP,
                          "Role #{name} : Terminating VM #{vm_id}",
                          @service.id)

                vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
                                                            @service.client)

                vm_state = nil
                lcm_state = nil

                if recover
                    vm.info

                    vm_state = vm.state
                    lcm_state = vm.lcm_state
                end

                if recover && Role.vm_failure?(vm_state, lcm_state)
                    rc = vm.recover(2)
                elsif action == 'terminate-hard'
                    rc = vm.terminate(true)
                else
                    rc = vm.terminate
                end

                if OpenNebula.is_error?(rc)
                    msg = "Role #{name} : Terminate failed for VM #{vm_id}, " \
                          "will perform a Delete; #{rc.message}"

                    Log.error LOG_COMP, msg, @service.id
                    @service.log_error(msg)

                    if action != 'terminate-hard'
                        rc = vm.terminate(true)
                    end

                    if OpenNebula.is_error?(rc)
                        rc = vm.delete
                    end

                    if OpenNebula.is_error?(rc)
                        msg = "Role #{name} : Delete failed for VM #{vm_id}; " \
                              "#{rc.message}"

                        Log.error LOG_COMP, msg, @service.id
                        @service.log_error(msg)
                    else
                        Log.debug(LOG_COMP,
                                  "Role #{name} : Delete success for VM " \
                                  "#{vm_id}",
                                  @service.id)

                        undeployed_nodes << vm_id
                    end
                else
                    Log.debug(LOG_COMP,
                              "Role #{name}: Terminate success for VM #{vm_id}",
                              @service.id)
                    undeployed_nodes << vm_id
                end
            end

            # Only considering success if all the nodes were undeployed or
            # there are no nodes to undeploy
            success = (undeployed_nodes.size == n_nodes) || (n_nodes < 0)

            [success, undeployed_nodes]
        end
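shutdown_nodes degrades step by step: the configured action is tried first, a failed soft terminate is retried as terminate-hard, and a VM that still returns an error is deleted (VMs in a failure state under recover are handled with recover(2), delete-recreate, instead). A pure-Ruby sketch that models only the order of attempts, not the actual XML-RPC calls:

    # Hypothetical helper, just to make the fallback order explicit.
    def shutdown_attempts(action)
        attempts = []
        attempts << (action == 'terminate-hard' ? :terminate_hard : :terminate)
        attempts << :terminate_hard unless action == 'terminate-hard'
        attempts << :delete          # last resort when the terminate calls fail
        attempts
    end

    shutdown_attempts('terminate')      # => [:terminate, :terminate_hard, :delete]
    shutdown_attempts('terminate-hard') # => [:terminate_hard, :delete]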

        # Returns a positive, 0, or negative number of nodes to adjust,
        # according to a SCHEDULED type policy
        # @param [Hash] A SCHEDULED type policy
        # @return [Integer] positive, 0, or negative number of nodes to adjust
        def scale_time?(elasticity_pol)
            now = Time.now.to_i
            last_eval = elasticity_pol['last_eval'].to_i

            elasticity_pol['last_eval'] = now

            # If this is the first time this is evaluated, ignore it.
            # We don't want to execute actions planned in the past when the
            # server starts.

            return 0 if last_eval == 0

            start_time = elasticity_pol['start_time']
            target_vms = elasticity_pol['adjust']

            # TODO: error msg
            return 0 if target_vms.nil?

            if !(start_time.nil? || start_time.empty?)
                begin
                    if !start_time.match(/^\d+$/)
                        start_time = Time.parse(start_time).to_i
                    else
                        start_time = start_time.to_i
                    end
                rescue ArgumentError
                    # TODO: error msg
                    return 0
                end
            else
                recurrence = elasticity_pol['recurrence']

                # TODO: error msg
                return 0 if recurrence.nil? || recurrence.empty?

                begin
                    cron_parser = CronParser.new(recurrence)

                    # This returns the next planned time, starting from the last
                    # step
                    start_time = cron_parser.next(Time.at(last_eval)).to_i
                rescue StandardError
                    # TODO: error msg bad format
                    return 0
                end
            end

            # Only actions planned between last step and this one are triggered
            if start_time > last_eval && start_time <= now
                Log.debug LOG_COMP,
                          "Role #{name} : scheduled scalability for " \
                          "#{Time.at(start_time)} triggered", @service.id

                new_cardinality = calculate_new_cardinality(elasticity_pol)

                return [new_cardinality - cardinality,
                        elasticity_pol['cooldown']]
            end

            [0, elasticity_pol['cooldown']]
        end
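A scheduled policy carries either a one-shot start_time or a cron-style recurrence that is expanded with CronParser. The sketch below assumes CronParser is the class shipped by the parse-cron gem (an assumption; the gem is not named in this diff) and shows the trigger window check used above:

    # gem install parse-cron   (assumed dependency)
    require 'parse-cron'

    last_eval = Time.now.to_i - 3600            # pretend the last check ran an hour ago
    now       = Time.now.to_i
    parser    = CronParser.new('0 * * * *')     # top of every hour

    start_time = parser.next(Time.at(last_eval)).to_i

    # The policy only fires if the planned time falls inside the window
    # (last_eval, now]; earlier or future occurrences are ignored.
    triggered = start_time > last_eval && start_time <= now
    puts triggered # true here: an hourly tick falls inside the elapsed hour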

        # Returns a positive, 0, or negative number of nodes to adjust,
        # according to a policy based on attributes
        # @param [Hash] A policy based on attributes
        # @return [Array<Integer>] positive, 0, or negative number of nodes to
        # adjust, plus the cooldown period duration
        def scale_attributes?(elasticity_pol, vm_pool)
            now = Time.now.to_i

            # TODO: enforce true_up_evals type in ServiceTemplate::ROLE_SCHEMA ?

            period_duration = elasticity_pol['period'].to_i
            period_number = elasticity_pol['period_number'].to_i
            last_eval = elasticity_pol['last_eval'].to_i
            true_evals = elasticity_pol['true_evals'].to_i
            expression = elasticity_pol['expression']

            if !last_eval.nil? && now < (last_eval + period_duration)
                return [0, 0]
            end

            elasticity_pol['last_eval'] = now

            new_cardinality = cardinality
            new_evals = 0

            exp_value, exp_st = scale_rule(expression, vm_pool)

            if exp_value
                new_evals = true_evals + 1
                new_evals = period_number if new_evals > period_number

                if new_evals >= period_number
                    Log.debug LOG_COMP,
                              "Role #{name} : elasticy policy #{exp_st} "\
                              'triggered', @service.id

                    new_cardinality = calculate_new_cardinality(elasticity_pol)
                end
            end

            elasticity_pol['true_evals'] = new_evals
            elasticity_pol['expression_evaluated'] = exp_st

            [new_cardinality - cardinality, elasticity_pol['cooldown']]
        end
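An attribute-based policy only fires after its expression has been true for period_number consecutive evaluation periods, and a false evaluation resets the streak because new_evals starts from 0 on every pass. A counter-only sketch with illustrative values:

    # Models just the true_evals / period_number bookkeeping above.
    period_number = 3
    true_evals    = 0
    evaluations   = [true, true, false, true, true, true]  # illustrative results

    evaluations.each do |exp_value|
        if exp_value
            true_evals += 1
            true_evals = period_number if true_evals > period_number
            puts 'policy triggered' if true_evals >= period_number
        else
            true_evals = 0   # a false evaluation restarts the streak
        end
    end
    # Prints 'policy triggered' once, on the third consecutive true.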

        # Returns true if the scalability rule is triggered
        # @return true if the scalability rule is triggered
        def scale_rule(elas_expr, vm_pool)
            parser = ElasticityGrammarParser.new

            if elas_expr.nil? || elas_expr.empty?
                return false
            end

            treetop = parser.parse(elas_expr)

            if treetop.nil?
                return [false,
                        "Parse error. '#{elas_expr}': #{parser.failure_reason}"]
            end

            val, st = treetop.result(self, vm_pool)

            [val, st]
        end

        def calculate_new_cardinality(elasticity_pol)
            type = elasticity_pol['type']
            adjust = elasticity_pol['adjust'].to_i

            # Min is a hard limit, if the current cardinality + adjustment does
            # not reach it, the difference is added

            max = [cardinality, max_cardinality.to_i].max
            # min = [cardinality(), min_cardinality.to_i].min()
            min = min_cardinality.to_i

            case type.upcase
            when 'CHANGE'
                new_cardinality = cardinality + adjust
            when 'PERCENTAGE_CHANGE'
                min_adjust_step = elasticity_pol['min_adjust_step'].to_i

                change = cardinality * adjust / 100.0

                change > 0 ? sign = 1 : sign = -1
                change = change.abs

                if change < 1
                    change = 1
                else
                    change = change.to_i
                end

                change = sign * [change, min_adjust_step].max

                new_cardinality = cardinality + change

            when 'CARDINALITY'
                new_cardinality = adjust
            else
                Log.error(
                    LOG_COMP,
                    "Error calculating new cardinality for type #{type}",
                    service.id
                )

                return cardinality
            end

            # The cardinality can be forced to be outside the min,max
            # range. If that is the case, the scale up/down will not
            # move further outside the range. It will move towards the
            # range with the adjustement set, instead of jumping the
            # difference
            if adjust > 0
                new_cardinality = max if new_cardinality > max
            elsif adjust < 0
                new_cardinality = min if new_cardinality < min
            end

            new_cardinality
        end
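The PERCENTAGE_CHANGE branch is the least obvious arithmetic, so here is a worked example with illustrative numbers (5 running VMs, a 20% adjustment and a min_adjust_step of 2):

    cardinality     = 5
    adjust          = 20   # percent
    min_adjust_step = 2

    change = cardinality * adjust / 100.0   # => 1.0
    sign   = change > 0 ? 1 : -1
    change = change.abs
    change = change < 1 ? 1 : change.to_i   # truncate, but never below 1 VM
    change = sign * [change, min_adjust_step].max

    new_cardinality = cardinality + change
    puts new_cardinality # => 7, the 1-VM change is raised to the 2-VM minimum step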

    end

end