opennebula 6.10.3 → 6.99.85.pre

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. checksums.yaml +4 -4
  2. data/lib/cloud/CloudClient.rb +3 -3
  3. data/lib/models/role.rb +349 -823
  4. data/lib/models/service.rb +156 -80
  5. data/lib/models/vmrole.rb +703 -0
  6. data/lib/models/vrrole.rb +284 -0
  7. data/lib/models.rb +3 -1
  8. data/lib/opennebula/acl.rb +1 -1
  9. data/lib/opennebula/acl_pool.rb +1 -1
  10. data/lib/opennebula/backupjob.rb +1 -1
  11. data/lib/opennebula/backupjob_pool.rb +1 -1
  12. data/lib/opennebula/client.rb +1 -1
  13. data/lib/opennebula/cluster.rb +45 -2
  14. data/lib/opennebula/cluster_pool.rb +1 -1
  15. data/lib/opennebula/datastore.rb +1 -1
  16. data/lib/opennebula/datastore_pool.rb +1 -1
  17. data/lib/opennebula/document.rb +1 -1
  18. data/lib/opennebula/document_json.rb +1 -1
  19. data/lib/opennebula/document_pool.rb +1 -1
  20. data/lib/opennebula/document_pool_json.rb +1 -1
  21. data/lib/opennebula/error.rb +1 -1
  22. data/lib/opennebula/flow/grammar.rb +1 -1
  23. data/lib/opennebula/flow/service_pool.rb +1 -1
  24. data/lib/opennebula/flow/service_template.rb +353 -97
  25. data/lib/opennebula/flow/service_template_ext.rb +3 -3
  26. data/lib/opennebula/flow/service_template_pool.rb +1 -1
  27. data/lib/opennebula/flow/validator.rb +458 -410
  28. data/lib/opennebula/flow.rb +1 -1
  29. data/lib/opennebula/group.rb +1 -1
  30. data/lib/opennebula/group_pool.rb +1 -1
  31. data/lib/opennebula/hook.rb +1 -1
  32. data/lib/opennebula/hook_log.rb +1 -1
  33. data/lib/opennebula/hook_pool.rb +1 -1
  34. data/lib/opennebula/host.rb +1 -60
  35. data/lib/opennebula/host_pool.rb +1 -1
  36. data/lib/opennebula/image.rb +1 -1
  37. data/lib/opennebula/image_pool.rb +1 -1
  38. data/lib/opennebula/ldap_auth.rb +1 -1
  39. data/lib/opennebula/ldap_auth_spec.rb +1 -1
  40. data/lib/opennebula/lockable_ext.rb +1 -1
  41. data/lib/opennebula/marketplace.rb +1 -1
  42. data/lib/opennebula/marketplace_pool.rb +1 -1
  43. data/lib/opennebula/marketplaceapp.rb +1 -1
  44. data/lib/opennebula/marketplaceapp_ext.rb +14 -211
  45. data/lib/opennebula/marketplaceapp_pool.rb +1 -1
  46. data/lib/opennebula/oneflow_client.rb +11 -9
  47. data/lib/opennebula/pool.rb +1 -1
  48. data/lib/opennebula/pool_element.rb +1 -1
  49. data/lib/opennebula/security_group.rb +1 -1
  50. data/lib/opennebula/security_group_pool.rb +1 -1
  51. data/lib/opennebula/server_cipher_auth.rb +1 -1
  52. data/lib/opennebula/server_x509_auth.rb +1 -1
  53. data/lib/opennebula/ssh_auth.rb +1 -1
  54. data/lib/opennebula/system.rb +1 -1
  55. data/lib/opennebula/template.rb +1 -1
  56. data/lib/opennebula/template_ext.rb +1 -1
  57. data/lib/opennebula/template_pool.rb +1 -1
  58. data/lib/opennebula/user.rb +1 -1
  59. data/lib/opennebula/user_pool.rb +1 -1
  60. data/lib/opennebula/utils.rb +2 -2
  61. data/lib/opennebula/vdc.rb +1 -1
  62. data/lib/opennebula/vdc_pool.rb +1 -1
  63. data/lib/opennebula/virtual_machine.rb +3 -12
  64. data/lib/opennebula/virtual_machine_ext.rb +2 -31
  65. data/lib/opennebula/virtual_machine_pool.rb +1 -1
  66. data/lib/opennebula/virtual_network.rb +1 -1
  67. data/lib/opennebula/virtual_network_pool.rb +1 -1
  68. data/lib/opennebula/virtual_router.rb +1 -1
  69. data/lib/opennebula/virtual_router_pool.rb +1 -1
  70. data/lib/opennebula/vm_group.rb +1 -1
  71. data/lib/opennebula/vm_group_pool.rb +1 -1
  72. data/lib/opennebula/vntemplate.rb +1 -1
  73. data/lib/opennebula/vntemplate_pool.rb +1 -1
  74. data/lib/opennebula/wait_ext.rb +1 -1
  75. data/lib/opennebula/x509_auth.rb +1 -1
  76. data/lib/opennebula/xml_element.rb +2 -2
  77. data/lib/opennebula/xml_pool.rb +1 -1
  78. data/lib/opennebula/xml_utils.rb +1 -1
  79. data/lib/opennebula/zone.rb +1 -1
  80. data/lib/opennebula/zone_pool.rb +1 -1
  81. data/lib/opennebula.rb +2 -2
  82. metadata +6 -67
  83. data/lib/ActionManager.rb +0 -280
  84. data/lib/CommandManager.rb +0 -328
  85. data/lib/DriverExecHelper.rb +0 -213
  86. data/lib/HostSyncManager.rb +0 -111
  87. data/lib/OpenNebulaDriver.rb +0 -223
  88. data/lib/VirtualMachineDriver.rb +0 -404
  89. data/lib/datacenter.rb +0 -1319
  90. data/lib/datastore.rb +0 -1049
  91. data/lib/distributed_firewall.rb +0 -293
  92. data/lib/file_helper.rb +0 -374
  93. data/lib/host.rb +0 -1518
  94. data/lib/logical_port.rb +0 -50
  95. data/lib/logical_switch.rb +0 -77
  96. data/lib/memoize.rb +0 -74
  97. data/lib/network.rb +0 -705
  98. data/lib/nsx_client.rb +0 -157
  99. data/lib/nsx_component.rb +0 -28
  100. data/lib/nsx_constants.rb +0 -162
  101. data/lib/nsx_driver.rb +0 -91
  102. data/lib/nsx_error.rb +0 -77
  103. data/lib/nsx_rule.rb +0 -206
  104. data/lib/nsxt_client.rb +0 -189
  105. data/lib/nsxt_dfw.rb +0 -196
  106. data/lib/nsxt_logical_port.rb +0 -94
  107. data/lib/nsxt_rule.rb +0 -188
  108. data/lib/nsxt_tz.rb +0 -38
  109. data/lib/nsxv_client.rb +0 -189
  110. data/lib/nsxv_dfw.rb +0 -202
  111. data/lib/nsxv_logical_port.rb +0 -107
  112. data/lib/nsxv_rule.rb +0 -172
  113. data/lib/nsxv_tz.rb +0 -41
  114. data/lib/opaque_network.rb +0 -134
  115. data/lib/rest_client.rb +0 -191
  116. data/lib/scripts_common.rb +0 -176
  117. data/lib/transport_zone.rb +0 -43
  118. data/lib/vcenter_driver.rb +0 -152
  119. data/lib/vcenter_importer.rb +0 -626
  120. data/lib/vi_client.rb +0 -273
  121. data/lib/vi_helper.rb +0 -328
  122. data/lib/virtual_machine.rb +0 -3574
  123. data/lib/virtual_wire.rb +0 -158
  124. data/lib/vm_device.rb +0 -80
  125. data/lib/vm_disk.rb +0 -202
  126. data/lib/vm_folder.rb +0 -69
  127. data/lib/vm_helper.rb +0 -30
  128. data/lib/vm_monitor.rb +0 -305
  129. data/lib/vm_nic.rb +0 -70
  130. data/lib/vm_template.rb +0 -2112
  131. data/lib/vmm_importer.rb +0 -165
data/lib/models/role.rb CHANGED
@@ -1,5 +1,5 @@
  # -------------------------------------------------------------------------- #
- # Copyright 2002-2024, OpenNebula Project, OpenNebula Systems #
+ # Copyright 2002-2025, OpenNebula Project, OpenNebula Systems #
  # #
  # Licensed under the Apache License, Version 2.0 (the "License"); you may #
  # not use this file except in compliance with the License. You may obtain #
@@ -26,34 +26,12 @@ end

  module OpenNebula

- # Service Role class
+ # Service Role Class (Generic Role type)
  class Role

+ LOG_COMP = 'ROL'
  attr_reader :service

- # Actions that can be performed on the VMs of a given Role
- SCHEDULE_ACTIONS = [
- 'terminate',
- 'terminate-hard',
- 'undeploy',
- 'undeploy-hard',
- 'hold',
- 'release',
- 'stop',
- 'suspend',
- 'resume',
- 'reboot',
- 'reboot-hard',
- 'poweroff',
- 'poweroff-hard',
- 'snapshot-create',
- 'snapshot-revert',
- 'snapshot-delete',
- 'disk-snapshot-create',
- 'disk-snapshot-revert',
- 'disk-snapshot-delete'
- ]
-
  STATE = {
  'PENDING' => 0,
  'DEPLOYING' => 1,
@@ -84,23 +62,7 @@ module OpenNebula
  'HOLD'
  ]

- RECOVER_DEPLOY_STATES = [
- 'FAILED_DEPLOYING',
- 'DEPLOYING',
- 'PENDING'
- ]
-
- RECOVER_UNDEPLOY_STATES = [
- 'FAILED_UNDEPLOYING',
- 'UNDEPLOYING'
- ]
-
- RECOVER_SCALE_STATES = [
- 'FAILED_SCALING',
- 'SCALING'
- ]
-
- VM_FAILURE_STATES = [
+ FAILURE_STATES = [
  'BOOT_FAILURE',
  'BOOT_MIGRATE_FAILURE',
  'PROLOG_MIGRATE_FAILURE',
@@ -117,151 +79,155 @@ module OpenNebula
  'PROLOG_UNDEPLOY_FAILURE'
  ]

+ RECOVER_DEPLOY_STATES = [
+ 'FAILED_DEPLOYING',
+ 'DEPLOYING',
+ 'PENDING'
+ ]
+
+ RECOVER_UNDEPLOY_STATES = [
+ 'FAILED_UNDEPLOYING',
+ 'UNDEPLOYING'
+ ]
+
+ RECOVER_SCALE_STATES = [
+ 'FAILED_SCALING',
+ 'SCALING'
+ ]
+
  SCALE_WAYS = {
  'UP' => 0,
  'DOWN' => 1
  }

+ # Actions that can be performed on the VMs of a given Role
+ SCHEDULE_ACTIONS = [
+ 'terminate',
+ 'terminate-hard',
+ 'undeploy',
+ 'undeploy-hard',
+ 'hold',
+ 'release',
+ 'stop',
+ 'suspend',
+ 'resume',
+ 'reboot',
+ 'reboot-hard',
+ 'poweroff',
+ 'poweroff-hard',
+ 'snapshot-create',
+ 'snapshot-revert',
+ 'snapshot-delete',
+ 'disk-snapshot-create',
+ 'disk-snapshot-revert',
+ 'disk-snapshot-delete'
+ ]
+
+ # Information to save in document
+ VM_INFO = ['ID', 'UID', 'GID', 'UNAME', 'GNAME', 'NAME']
+
  # List of attributes that can't be changed in update operation
- # cardinality: this is internal information managed by OneFlow server
  # last_vmname: this is internal information managed by OneFlow server
  # nodes: this is internal information managed by OneFlow server
  # parents: this has only sense in deploy operation
  # state: this is internal information managed by OneFlow server
- # vm_template: this will affect scale operation
+ # template_id: this will affect scale operation
+ # cardinality: this is internal information managed by OneFlow server
  IMMUTABLE_ATTRS = [
  'cardinality',
  'last_vmname',
  'nodes',
  'parents',
  'state',
- 'vm_template'
+ 'template_id'
  ]

- # VM information to save in document
- VM_INFO = ['ID', 'UID', 'GID', 'UNAME', 'GNAME', 'NAME']
-
- LOG_COMP = 'ROL'
+ class << self

- def initialize(body, service)
- @body = body
- @service = service
+ # Return a role object based on type attribute of the role template
+ # @param [Hash] Role template in Hash format
+ # @return [Role] Role object type
+ def for(body, service)
+ role_type = body.fetch('type', 'vm')

- @body['cooldown'] = @@default_cooldown if @body['cooldown'].nil?
- @body['nodes'] ||= []
- @body['on_hold'] = false if @body['on_hold'].nil?
- end
+ case role_type.downcase
+ when 'vm'
+ VMRole.new(body, service)
+ when 'vr'
+ VRRole.new(body, service)
+ else
+ raise "Unsupported role type: #{role_type}"
+ end
+ end

- def name
- @body['name']
- end
+ # Returns true if the VM state is failure
+ # @param [Integer] vm_state VM state
+ # @param [Integer] lcm_state VM LCM state
+ # @return [true,false] True if the lcm state is one of *_FAILURE
+ def vm_failure?(vm_state, lcm_state)
+ vm_state_str = VirtualMachine::VM_STATE[vm_state.to_i]
+ lcm_state_str = VirtualMachine::LCM_STATE[lcm_state.to_i]

- # Returns the role state
- # @return [Integer] the role state
- def state
- @body['state'].to_i
- end
+ if vm_state_str == 'ACTIVE' &&
+ FAILURE_STATES.include?(lcm_state_str)
+ return true
+ end

- def can_recover_deploy?
- if state != STATE['PENDING']
- return RECOVER_DEPLOY_STATES.include? STATE_STR[state]
+ false
  end

- parents.each do |parent|
- next unless @service.roles[parent]
-
- return false if @service.roles[parent].state != STATE['RUNNING']
+ # rubocop:disable Style/ClassVars
+ def init_default_cooldown(default_cooldown)
+ @@default_cooldown = default_cooldown
  end

- true
- end
-
- def can_recover_undeploy?
- if !RECOVER_UNDEPLOY_STATES.include? STATE_STR[state]
- # TODO, check childs if !empty? check if can be undeployed
- @service.roles.each do |role_name, role|
- next if role_name == name
-
- if role.parents.include?(name) &&
- role.state != STATE['DONE']
- return false
- end
- end
+ def init_default_shutdown(shutdown_action)
+ @@default_shutdown = shutdown_action
  end

- true
- end
-
- def can_recover_scale?
- return false unless RECOVER_SCALE_STATES.include? STATE_STR[state]
-
- true
- end
-
- def can_release?
- state == STATE['HOLD']
- end
-
- # Returns the role parents
- # @return [Array] the role parents
- def parents
- @body['parents'] || []
- end
-
- def any_parent_on_hold?
- parents.each do |parent|
- next unless @service.roles[parent]
-
- return true if @service.roles[parent].on_hold?
+ def init_force_deletion(force_deletion)
+ @@force_deletion = force_deletion
  end
- false
- end
-
- # Returns the role cardinality
- # @return [Integer] the role cardinality
- def cardinality
- @body['cardinality'].to_i
- end

- # Sets a new cardinality for this role
- # @param [Integer] the new cardinality
- # rubocop:disable Naming/AccessorMethodName
- def set_cardinality(target_cardinality)
- # rubocop:enable Naming/AccessorMethodName
- if target_cardinality > cardinality
- dir = 'up'
- else
- dir = 'down'
+ def init_default_vm_name_template(vm_name_template)
+ @@vm_name_template = vm_name_template
  end

- msg = "Role #{name} scaling #{dir} from #{cardinality} to " \
- "#{target_cardinality} nodes"
+ def init_default_vr_name_template(vr_name_template)
+ @@vr_name_template = vr_name_template
+ end
+ # rubocop:enable Style/ClassVars

- Log.info LOG_COMP, msg, @service.id
+ end

- @service.log_info(msg)
+ def initialize(body, service)
+ @body = body
+ @service = service

- @body['cardinality'] = target_cardinality.to_i
+ @body['nodes'] ||= []
+ @body['on_hold'] = false if @body['on_hold'].nil?
  end

- # Returns the role max cardinality
- # @return [Integer,nil] the role cardinality or nil if it isn't defined
- def max_cardinality
- max = @body['max_vms']
-
- return if max.nil?
+ def name
+ @body['name']
+ end

- max.to_i
+ def state
+ @body['state']
  end

- # Returns the role min cardinality
- # @return [Integer,nil] the role cardinality or nil if it isn't defined
- def min_cardinality
- min = @body['min_vms']
+ # Sets a new state
+ # @param [Integer] the new state
+ def state=(state)
+ return if state < 0 || state > STATE_STR.size

- return if min.nil?
+ @body['state'] = state.to_i

- min.to_i
+ Log.info(
+ LOG_COMP,
+ "Role #{name} new state: #{STATE_STR[state]}",
+ @service.id
+ )
  end

  # Returns the string representation of the service state
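
The hunk above replaces direct Role construction with a Role.for factory that picks the concrete class from the role's type attribute ('vm' by default, 'vr' for roles handled by the new VRRole class). A minimal usage sketch, not part of the gem: `service` is a placeholder for an already-initialized service object, and the require assumes the OneFlow model files are on the load path (the exact require may differ in your setup).

    require 'models' # assumption: loads OpenNebula::Role, VMRole and VRRole

    role_bodies = [
      { 'name' => 'frontend', 'type' => 'vm', 'cardinality' => 2, 'template_id' => 7 },
      { 'name' => 'router',   'type' => 'vr' }
    ]

    roles = role_bodies.map do |body|
      # 'vm' bodies become VMRole, 'vr' bodies become VRRole; any other
      # type raises "Unsupported role type: ..."
      OpenNebula::Role.for(body, service)
    end
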
@@ -276,6 +242,10 @@ module OpenNebula
  @body['nodes']
  end

+ def nodes_ids
+ @body['nodes'].map {|node| node['deploy_id'] }
+ end
+
  def info_nodes(vm_pool)
  ret = []

@@ -307,305 +277,75 @@ module OpenNebula
  ret
  end

- def nodes_ids
- @body['nodes'].map {|node| node['deploy_id'] }
- end
-
- def elasticity_policies
- @body['elasticity_policies']
- end
-
- def update_elasticity_policies(new_policies)
- @body['elasticity_policies'] = new_policies
- end
-
- def scheduled_policies
- @body['scheduled_policies']
- end
-
- def update_scheduled_policies(new_policies)
- @body['scheduled_policies'] = new_policies
- end
-
- def cooldown
- @body['cooldown']
+ # Returns the role parents
+ # @return [Array] the role parents
+ def parents
+ @body['parents'] || []
  end

- def update_cooldown(new_cooldown)
- @body['cooldown'] = new_cooldown unless new_cooldown.nil?
+ # Returns the role cardinality
+ # @return [Integer] the role cardinality
+ def cardinality
+ @body['cardinality'].to_i
  end

- # Sets a new state
- # @param [Integer] the new state
- # @return [true, false] true if the value was changed
- # rubocop:disable Naming/AccessorMethodName
- def set_state(state)
- # rubocop:enable Naming/AccessorMethodName
- if state < 0 || state > STATE_STR.size
- return false
- end
-
- @body['state'] = state.to_i
-
- if state == STATE['SCALING']
-
- elasticity_pol = @body['elasticity_policies']
-
- if !elasticity_pol.nil?
- elasticity_pol.each do |policy|
- policy.delete('true_evals')
- end
- end
+ # Sets a new cardinality for this role
+ # @param [Integer] the new cardinality
+ def cardinality=(target_cardinality)
+ if target_cardinality > cardinality
+ dir = 'up'
+ else
+ dir = 'down'
  end

- Log.info LOG_COMP,
- "Role #{name} new state: #{STATE_STR[state]}",
- @service.id
+ msg = "Role #{name} scaling #{dir} from #{cardinality} to " \
+ "#{target_cardinality} nodes"

- true
- end
+ Log.info(LOG_COMP, msg, @service.id)
+ @service.log_info(msg)

- def scale_way(way)
- @body['scale_way'] = SCALE_WAYS[way]
+ @body['cardinality'] = target_cardinality.to_i
  end

- def clean_scale_way
- @body.delete('scale_way')
+ # Change the `on_hold` option value
+ def on_hold=(on_hold)
+ @body['on_hold'] = on_hold
  end

- # Returns the on_hold role option
- # @return [true, false] true if the on_hold option is enabled
+ # Returns the `on_hold` role option
+ # @return [true, false] `true` if the `on_hold` option is enabled
  def on_hold?
  @body['on_hold']
  end

- # Returns the on_hold service option
- # @return [true, false] true if the on_hold option is enabled
+ # Returns the `on_hold` service option
+ # @return [true, false] `true` if the `on_hold` option is enabled
  def service_on_hold?
  @service.on_hold?
  end

- # Set the on_hold vm option to true
- def hold(hold)
- @body['on_hold'] = hold
- end
+ # Checks if any parent role is currently on hold.
+ # @return [Boolean] Returns `true` if any parent role is in an
+ # `on_hold` state, `false` otherwise.
+ def any_parent_on_hold?
+ parents.each do |parent|
+ next unless @service.roles[parent]

- # Retrieves the VM information for each Node in this Role. If a Node
- # is to be disposed and it is found in DONE, it will be cleaned
- #
- # @return [nil, OpenNebula::Error] nil in case of success, Error
- # otherwise
- def info
- raise 'role.info is not defined'
+ return true if @service.roles[parent].on_hold?
+ end
+ false
  end

- # Deploys all the nodes in this role
- # @return [Array<true, nil>, Array<false, String>] true if all the VMs
- # were created, false and the error reason if there was a problem
- # creating the VMs
- def deploy
- deployed_nodes = []
- n_nodes = cardinality - nodes.size
+ ########################################################################
+ # Operations
+ ########################################################################

- return [deployed_nodes, nil] if n_nodes == 0
-
- @body['last_vmname'] ||= 0
-
- template_id = @body['vm_template']
- template = OpenNebula::Template.new_with_id(template_id,
- @service.client)
-
- if @body['vm_template_contents']
- extra_template = @body['vm_template_contents'].dup
-
- # If the extra_template contains APPEND="<attr1>,<attr2>", it
- # will add the attributes that already exist in the template,
- # instead of replacing them.
- append = extra_template
- .match(/^\s*APPEND=\"?(.*?)\"?\s*$/)[1]
- .split(',') rescue nil
-
- if append && !append.empty?
- rc = template.info
-
- if OpenNebula.is_error?(rc)
- msg = "Role #{name} : Info template #{template_id};" \
- " #{rc.message}"
-
- Log.error LOG_COMP, msg, @service.id
- @service.log_error(msg)
-
- return [false, 'Error fetching Info to instantiate' \
- " VM Template #{template_id} in Role " \
- "#{name}: #{rc.message}"]
- end
-
- et = template.template_like_str('TEMPLATE',
- true,
- append.join('|'))
-
- et = et << "\n" << extra_template
-
- extra_template = et
- end
- else
- extra_template = ''
- end
-
- extra_template << "\nSERVICE_ID = #{@service.id}"
- extra_template << "\nROLE_NAME = \"#{@body['name']}\""
-
- # Evaluate attributes with parent roles
- evaluate(extra_template)
-
- n_nodes.times do
- vm_name = @@vm_name_template
- .gsub('$SERVICE_ID', @service.id.to_s)
- .gsub('$SERVICE_NAME', @service.name.to_s)
- .gsub('$ROLE_NAME', name.to_s)
- .gsub('$VM_NUMBER', @body['last_vmname'].to_s)
-
- @body['last_vmname'] += 1
-
- Log.debug LOG_COMP,
- "Role #{name} : Instantiate template #{template_id}, name #{vm_name}",
- @service.id
-
- vm_id = template.instantiate(vm_name, on_hold?, extra_template)
-
- deployed_nodes << vm_id
-
- if OpenNebula.is_error?(vm_id)
- msg = "Role #{name} : Instantiate failed for template " \
- "#{template_id}; #{vm_id.message}"
-
- Log.error LOG_COMP, msg, @service.id
-
- @service.log_error(msg)
-
- return [false, "Error instantiating VM Template #{template_id} in Role " \
- "#{name}: #{vm_id.message}"]
- end
-
- Log.debug LOG_COMP,
- "Role #{name} : Instantiate success, VM ID #{vm_id}",
- @service.id
-
- node = { 'deploy_id' => vm_id }
- vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)
-
- tries = 0
- loop do
- break if tries == 3
-
- tries += 1
-
- rc = vm.info
-
- break unless OpenNebula.is_error?(rc)
-
- sleep(tries)
- end
-
- if tries == 3
- node['vm_info'] = nil
-
- msg = "Role #{name} : Cannot get info for VM #{vm_id}"
-
- Log.error LOG_COMP, msg, @service.id
-
- @service.log_error(msg)
-
- return [false,
- "Error getting VM #{vm_id} info in Role #{name}: #{vm_id.message}"]
- end
-
- hash_vm = vm.to_hash['VM']
- vm_info = {}
- vm_info['VM'] = hash_vm.select {|v| VM_INFO.include?(v) }
-
- node['vm_info'] = vm_info
-
- @body['nodes'] << node
- end
-
- [deployed_nodes, nil]
- end
-
- # Terminate all the nodes in this role
- #
- # @param scale_down [true, false] true to terminate and dispose the
- # number of VMs needed to get down to cardinality nodes
- # @return [Array<true, nil>, Array<false, String>] true if all the VMs
- # were terminated, false and the error reason if there was a problem
- # shutting down the VMs
- def shutdown(recover)
- if nodes.size != cardinality
- n_nodes = nodes.size - cardinality
- else
- n_nodes = nodes.size
- end
-
- rc = shutdown_nodes(nodes, n_nodes, recover)
-
- unless rc[0]
- return [false, "Error undeploying nodes for role `#{name}`"]
- end
-
- [rc[1], nil]
- end
-
- # Delete all the nodes in this role
- # @return [Array<true, nil>] All the VMs are deleted, and the return
- # ignored
- def delete
- raise 'role.delete is not defined'
- end
-
- # Changes the owner/group of all the nodes in this role
- #
- # @param [Integer] uid the new owner id. Set to -1 to leave the current
- # @param [Integer] gid the new group id. Set to -1 to leave the current
- #
- # @return [Array<true, nil>, Array<false, String>] true if all the VMs
- # were updated, false and the error reason if there was a problem
- # updating the VMs
- def chown(uid, gid)
- nodes.each do |node|
- vm_id = node['deploy_id']
-
- Log.debug LOG_COMP,
- "Role #{name} : Chown for VM #{vm_id}",
- @service.id
-
- vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
- @service.client)
- rc = vm.chown(uid, gid)
-
- if OpenNebula.is_error?(rc)
- msg = "Role #{name} : Chown failed for VM #{vm_id}; " \
- "#{rc.message}"
-
- Log.error LOG_COMP, msg, @service.id
- @service.log_error(msg)
-
- return [false, rc.message]
- else
- Log.debug LOG_COMP,
- "Role #{name} : Chown success for VM #{vm_id}",
- @service.id
- end
- end
-
- [true, nil]
- end
-
- # Release all the nodes in this role
- # @return [Array, Bool] true if all the VMs
- # were released, false otherwise and Array with VMs released
- def release
- release_nodes = []
- success = true
+ # Release all the nodes in this role
+ # @return [Array, Bool] true if all the VMs
+ # were released, false otherwise and Array with VMs released
+ def release
+ release_nodes = []
+ success = true

  # Release all vms in the role
  nodes.each do |node|
@@ -638,157 +378,183 @@ module OpenNebula
  [release_nodes, success]
  end

- # Schedule the given action on all the VMs that belong to the Role
- # @param [String] action one of the available SCHEDULE_ACTIONS
- # @param [Integer] period
- # @param [Integer] vm_per_period
- # @param [String] action arguments
- def batch_action(action, period, vms_per_period, args)
- vms_id = []
- error_msgs = []
- nodes = @body['nodes']
- now = Time.now.to_i
- time_offset = 0
-
- # if role is done, return error
- if state == 5
- return OpenNebula::Error.new("Role #{name} is in DONE state")
- end
+ # Checks if the current role is in a state where it can be released.
+ # @return [Boolean] Returns `true` if the current state is `HOLD`,
+ # `false` otherwise.
+ def can_release?
+ state == STATE['HOLD']
+ end

- do_offset = !period.nil? && period.to_i > 0 &&
- !vms_per_period.nil? && vms_per_period.to_i > 0
+ def chown(uid, gid)
+ raise NotImplementedError
+ end

- nodes.each_with_index do |node, index|
- vm_id = node['deploy_id']
- vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
- @service.client)
+ # Updates the role
+ # @param [Hash] template
+ # @return [nil, OpenNebula::Error] nil in case of success, Error
+ # otherwise
+ def update(template)
+ raise NotImplementedError
+ end

- if do_offset
- offset = (index / vms_per_period.to_i).floor
- time_offset = offset * period.to_i
- end
+ # Check that changes values are correct
+ #
+ # @param template_json [String] New template
+ #
+ # @return [Boolean, String] True, nil if everything is correct
+ # False, attr if attr was changed
+ def check_new_template(template)
+ IMMUTABLE_ATTRS.each do |attr|
+ next if template[attr] == @body[attr]

- tmp_str = 'SCHED_ACTION = ['
- tmp_str << "ACTION = #{action},"
- tmp_str << "ARGS = \"#{args}\"," if args
- tmp_str << "TIME = #{now + time_offset}]"
+ return [false, "role/#{attr}"]
+ end

- rc = vm.sched_action_add(tmp_str)
- if OpenNebula.is_error?(rc)
- msg = "Role #{name} : VM #{vm_id} error scheduling "\
- "action; #{rc.message}"
+ [true, nil]
+ end

- error_msgs << msg
+ ########################################################################
+ # Scheduler
+ ########################################################################

- Log.error LOG_COMP, msg, @service.id
+ def scheduled_policies
+ @body['scheduled_policies']
+ end

- @service.log_error(msg)
- else
- vms_id << vm.id
- end
- end
+ def update_scheduled_policies(new_policies)
+ @body['scheduled_policies'] = new_policies
+ end

- log_msg = "Action:#{action} scheduled on Role:#{name}"\
- "VMs:#{vms_id.join(',')}"
+ def batch_action(action, period, vms_per_period, args)
+ raise NotImplementedError
+ end

- Log.info LOG_COMP, log_msg, @service.id
+ ########################################################################
+ # Scalability
+ ########################################################################

- return [true, log_msg] if error_msgs.empty?
+ # Returns the role max cardinality
+ # @return [Integer,nil] the role cardinality or nil if it isn't defined
+ def max_cardinality
+ raise NotImplementedError
+ end

- error_msgs << log_msg
+ # Returns the role min cardinality
+ # @return [Integer,nil] the role cardinality or nil if it isn't defined
+ def min_cardinality
+ raise NotImplementedError
+ end

- [false, error_msgs.join('\n')]
+ # Returns a positive, 0, or negative number of nodes to adjust,
+ # according to the elasticity and scheduled policies
+ # @return [Array<Integer>] positive, 0, or negative number of nodes to
+ # adjust, plus the cooldown period duration
+ def scale?(vm_pool)
+ raise NotImplementedError
  end

- # Returns true if the VM state is failure
- # @param [Integer] vm_state VM state
- # @param [Integer] lcm_state VM LCM state
- # @return [true,false] True if the lcm state is one of *_FAILURE
- def self.vm_failure?(vm_state, lcm_state)
- vm_state_str = VirtualMachine::VM_STATE[vm_state.to_i]
- lcm_state_str = VirtualMachine::LCM_STATE[lcm_state.to_i]
-
- if vm_state_str == 'ACTIVE' &&
- VM_FAILURE_STATES.include?(lcm_state_str)
- return true
- end
+ def elasticity_policies
+ raise NotImplementedError
+ end

- false
+ def update_elasticity_policies(new_policies)
+ raise NotImplementedError
  end

- # rubocop:disable Style/ClassVars
- def self.init_default_cooldown(default_cooldown)
- @@default_cooldown = default_cooldown
+ def cooldown
+ raise NotImplementedError
  end

- def self.init_default_shutdown(shutdown_action)
- @@default_shutdown = shutdown_action
+ def update_cooldown(new_cooldown)
+ raise NotImplementedError
  end

- def self.init_force_deletion(force_deletion)
- @@force_deletion = force_deletion
+ def scale_way(_)
+ return NotImplementedError
  end

- def self.init_default_vm_name_template(vm_name_template)
- @@vm_name_template = vm_name_template
+ def clean_scale_way
+ return NotImplementedError
  end
- # rubocop:enable Style/ClassVars

  ########################################################################
- # Scalability
+ # Deployment
  ########################################################################

- # Updates the role
- # @param [Hash] template
- # @return [nil, OpenNebula::Error] nil in case of success, Error
- # otherwise
- def update(template)
- force = template['force'] == true
- new_cardinality = template['cardinality']
+ def deploy
+ raise NotImplementedError
+ end

- return if new_cardinality.nil?
+ # Terminate all the nodes in this role
+ #
+ # @param scale_down [true, false] true to terminate and dispose the
+ # number of VMs needed to get down to cardinality nodes
+ # @return [Array<true, nil>, Array<false, String>] true if all the VMs
+ # were terminated, false and the error reason if there was a problem
+ # shutting down the VMs
+ def shutdown(recover)
+ if nodes.size != cardinality
+ n_nodes = nodes.size - cardinality
+ else
+ n_nodes = nodes.size
+ end
+
+ rc = shutdown_nodes(nodes, n_nodes, recover)

- new_cardinality = new_cardinality.to_i
+ unless rc[0]
+ return [false, "Error undeploying nodes for role `#{name}`"]
+ end

- if !force
- if new_cardinality < min_cardinality.to_i
- return OpenNebula::Error.new(
- "Minimum cardinality is #{min_cardinality}"
- )
+ [rc[1], nil]
+ end

- elsif !max_cardinality.nil? &&
- new_cardinality > max_cardinality.to_i
- return OpenNebula::Error.new(
- "Maximum cardinality is #{max_cardinality}"
- )
+ ########################################################################
+ # Recover
+ ########################################################################

- end
+ # Determines whether the current deployment can be recovered
+ # based on its state and the states of its parent roles.
+ # @return [Boolean] Returns `true` if the deployment
+ # can be recovered, `false` otherwise.
+ def can_recover_deploy?
+ if state != STATE['PENDING']
+ return RECOVER_DEPLOY_STATES.include? STATE_STR[state]
  end

- set_cardinality(new_cardinality)
+ parents.each do |parent|
+ next unless @service.roles[parent]
+
+ return false if @service.roles[parent].state != STATE['RUNNING']
+ end

- nil
+ true
  end

- # Check that changes values are correct
- #
- # @param template_json [String] New template
- #
- # @return [Boolean, String] True, nil if everything is correct
- # False, attr if attr was changed
- def check_new_template(template)
- IMMUTABLE_ATTRS.each do |attr|
- next if template[attr] == @body[attr]
+ # Determines if the current deployment can be recovered and
+ # undeployed based on its state and the states of its child roles.
+ # @return [Boolean] Returns `true` if the deployment can be
+ # recovered and undeployed, `false` otherwise.
+ def can_recover_undeploy?
+ if !RECOVER_UNDEPLOY_STATES.include? STATE_STR[state]
+ # TODO, check childs if !empty? check if can be undeployed
+ @service.roles.each do |role_name, role|
+ next if role_name == name

- return [false, "role/#{attr}"]
+ if role.parents.include?(name) &&
+ role.state != STATE['DONE']
+ return false
+ end
+ end
  end

- [true, nil]
+ true
  end

- ########################################################################
- # Recover
- ########################################################################
+ def can_recover_scale?
+ return false unless RECOVER_SCALE_STATES.include? STATE_STR[state]
+
+ true
+ end

  def recover_deploy(report)
  nodes = @body['nodes']
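
With the hunk above, the generic Role becomes an abstract base class: deploy, update, batch_action, chown and the scaling accessors now raise NotImplementedError, and the concrete behaviour lives in the new VMRole and VRRole subclasses (data/lib/models/vmrole.rb and vrrole.rb, added in this release). A sketch of the pattern only; HypotheticalRole below is an illustration, not the real VMRole or VRRole implementation.

    module OpenNebula
        # Made-up subclass showing how a concrete role type overrides the
        # NotImplementedError stubs defined in the base Role class
        class HypotheticalRole < Role
            def deploy
                [[], nil] # pretend no new VMs were needed
            end

            def max_cardinality
                @body['max_vms'] && @body['max_vms'].to_i
            end
        end
    end
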
@@ -858,339 +624,99 @@ module OpenNebula
  undeployed_nodes
  end

- # def recover_warning
- # end
-
- def recover_scale(report)
- rc = nil
-
- if @body['scale_way'] == SCALE_WAYS['UP']
- rc = [recover_deploy(report), true]
- elsif @body['scale_way'] == SCALE_WAYS['DOWN']
- rc = [recover_undeploy, false]
- end
-
- rc
+ def recover_scale
+ raise NotImplementedError
  end

- # Returns a positive, 0, or negative number of nodes to adjust,
- # according to the elasticity and scheduled policies
- # @return [Array<Integer>] positive, 0, or negative number of nodes to
- # adjust, plus the cooldown period duration
- def scale?(vm_pool)
- elasticity_pol = @body['elasticity_policies']
- scheduled_pol = @body['scheduled_policies']
-
- elasticity_pol ||= []
- scheduled_pol ||= []
-
- scheduled_pol.each do |policy|
- diff, cooldown_duration = scale_time?(policy)
-
- return [diff, cooldown_duration] if diff != 0
- end
-
- elasticity_pol.each do |policy|
- diff, cooldown_duration = scale_attributes?(policy, vm_pool)
-
- next if diff == 0
-
- cooldown_duration = @body['cooldown'] if cooldown_duration.nil?
- cooldown_duration = @@default_cooldown if cooldown_duration.nil?
-
- return [diff, cooldown_duration]
- end
-
- # Implicit rule that scales up to maintain the min_cardinality, with
- # no cooldown period
- if cardinality < min_cardinality.to_i
- return [min_cardinality.to_i - cardinality, 0]
- end
-
- [0, 0]
- end
-
- private
-
- # Shuts down all the given nodes
- # @param scale_down [true,false] True to set the 'disposed' node flag
- def shutdown_nodes(nodes, n_nodes, recover)
- success = true
- undeployed_nodes = []
-
- action = @body['shutdown_action']
-
- if action.nil?
- action = @service.shutdown_action
- end
-
- if action.nil?
- action = @@default_shutdown
- end
-
- nodes[0..n_nodes - 1].each do |node|
- vm_id = node['deploy_id']
-
- Log.debug(LOG_COMP,
- "Role #{name} : Terminating VM #{vm_id}",
- @service.id)
-
- vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
- @service.client)
+ ########################################################################
+ # Helpers
+ ########################################################################

- vm_state = nil
- lcm_state = nil
+ protected

- if recover
- vm.info
+ def init_template_attributes
+ @body['last_vmname'] ||= 0

- vm_state = vm.state
- lcm_state = vm.lcm_state
- end
+ template_id = @body['template_id']
+ template = OpenNebula::Template.new_with_id(template_id, @service.client)
+ extra_template = @body.fetch('template_contents', {}).dup

- if recover && Role.vm_failure?(vm_state, lcm_state)
- rc = vm.recover(2)
- elsif action == 'terminate-hard'
- rc = vm.terminate(true)
- else
- rc = vm.terminate
- end
+ # Since the OpenNebula core does not apply a deep merge, we replace
+ # here the values to avoid the entire CONTEXT replacement.
+ if !extra_template.empty?
+ rc = template.info

  if OpenNebula.is_error?(rc)
- msg = "Role #{name} : Terminate failed for VM #{vm_id}, " \
- "will perform a Delete; #{rc.message}"
+ msg = "Role #{name} : Info template #{template_id}; #{rc.message}"

- Log.error LOG_COMP, msg, @service.id
+ Log.error(LOG_COMP, msg, @service.id)
  @service.log_error(msg)

- if action != 'terminate-hard'
- rc = vm.terminate(true)
- end
-
- if OpenNebula.is_error?(rc)
- rc = vm.delete
- end
-
- if OpenNebula.is_error?(rc)
- msg = "Role #{name} : Delete failed for VM #{vm_id}; " \
- "#{rc.message}"
-
- Log.error LOG_COMP, msg, @service.id
- @service.log_error(msg)
-
- success = false
- else
- Log.debug(LOG_COMP,
- "Role #{name} : Delete success for VM " \
- "#{vm_id}",
- @service.id)
-
- undeployed_nodes << vm_id
- end
- else
- Log.debug(LOG_COMP,
- "Role #{name}: Terminate success for VM #{vm_id}",
- @service.id)
- undeployed_nodes << vm_id
+ return [
+ false,
+ "Error fetching Info to instantiate template #{template_id} " \
+ "in Role #{name}: #{rc.message}"
+ ]
  end
- end

- [success, undeployed_nodes]
- end
-
- def vm_failure?(node)
- if node && node['vm_info']
- return Role.vm_failure?(node['vm_info']['VM']['STATE'],
- node['vm_info']['VM']['LCM_STATE'])
+ vm_template = template.to_hash['VMTEMPLATE']['TEMPLATE'].dup
+ extra_template = vm_template.deep_merge(extra_template, false)
  end

- false
- end
-
- # Returns a positive, 0, or negative number of nodes to adjust,
- # according to a SCHEDULED type policy
- # @param [Hash] A SCHEDULED type policy
- # @return [Integer] positive, 0, or negative number of nodes to adjust
- def scale_time?(elasticity_pol)
- now = Time.now.to_i
- last_eval = elasticity_pol['last_eval'].to_i
-
- elasticity_pol['last_eval'] = now
-
- # If this is the first time this is evaluated, ignore it.
- # We don't want to execute actions planned in the past when the
- # server starts.
+ extra_template['SERVICE_ID'] = @service.id
+ extra_template['ROLE_NAME'] = @body['name']

- return 0 if last_eval == 0
+ extra_template = Hash.to_raw(extra_template)

- start_time = elasticity_pol['start_time']
- target_vms = elasticity_pol['adjust']
-
- # TODO: error msg
- return 0 if target_vms.nil?
-
- if !(start_time.nil? || start_time.empty?)
- begin
- if !start_time.match(/^\d+$/)
- start_time = Time.parse(start_time).to_i
- else
- start_time = start_time.to_i
- end
- rescue ArgumentError
- # TODO: error msg
- return 0
- end
- else
- recurrence = elasticity_pol['recurrence']
-
- # TODO: error msg
- return 0 if recurrence.nil? || recurrence.empty?
-
- begin
- cron_parser = CronParser.new(recurrence)
-
- # This returns the next planned time, starting from the last
- # step
- start_time = cron_parser.next(Time.at(last_eval)).to_i
- rescue StandardError
- # TODO: error msg bad format
- return 0
- end
- end
-
- # Only actions planned between last step and this one are triggered
- if start_time > last_eval && start_time <= now
- Log.debug LOG_COMP,
- "Role #{name} : scheduled scalability for " \
- "#{Time.at(start_time)} triggered", @service.id
-
- new_cardinality = calculate_new_cardinality(elasticity_pol)
-
- return [new_cardinality - cardinality,
- elasticity_pol['cooldown']]
- end
+ # Evaluate attributes with parent roles
+ evaluate(extra_template)

- [0, elasticity_pol['cooldown']]
+ [template_id, template, extra_template]
  end

- # Returns a positive, 0, or negative number of nodes to adjust,
- # according to a policy based on attributes
- # @param [Hash] A policy based on attributes
- # @return [Array<Integer>] positive, 0, or negative number of nodes to
- # adjust, plus the cooldown period duration
- def scale_attributes?(elasticity_pol, vm_pool)
- now = Time.now.to_i
-
- # TODO: enforce true_up_evals type in ServiceTemplate::ROLE_SCHEMA ?
-
- period_duration = elasticity_pol['period'].to_i
- period_number = elasticity_pol['period_number'].to_i
- last_eval = elasticity_pol['last_eval'].to_i
- true_evals = elasticity_pol['true_evals'].to_i
- expression = elasticity_pol['expression']
-
- if !last_eval.nil? && now < (last_eval + period_duration)
- return [0, 0]
- end
-
- elasticity_pol['last_eval'] = now
-
- new_cardinality = cardinality
- new_evals = 0
-
- exp_value, exp_st = scale_rule(expression, vm_pool)
+ def fill_node_info(vm_id)
+ node = { 'deploy_id' => vm_id }
+ vm = OpenNebula::VirtualMachine.new_with_id(vm_id, @service.client)

- if exp_value
- new_evals = true_evals + 1
- new_evals = period_number if new_evals > period_number
+ max_retries = 3
+ attemps = 0

- if new_evals >= period_number
- Log.debug LOG_COMP,
- "Role #{name} : elasticy policy #{exp_st} "\
- 'triggered', @service.id
+ begin
+ attemps += 1
+ rc = vm.info

- new_cardinality = calculate_new_cardinality(elasticity_pol)
+ if OpenNebula.is_error?(rc)
+ sleep(attemps)
+ raise "Error retrieving info for VM #{vm_id}"
  end
- end

- elasticity_pol['true_evals'] = new_evals
- elasticity_pol['expression_evaluated'] = exp_st
+ hash_vm = vm.to_hash['VM']
+ node['vm_info'] = { 'VM' => hash_vm.select {|k, _| VM_INFO.include?(k) } }

- [new_cardinality - cardinality, elasticity_pol['cooldown']]
- end
+ @body['nodes'] << node
+ rescue StandardError => e
+ retry if attemps < max_retries

- # Returns true if the scalability rule is triggered
- # @return true if the scalability rule is triggered
- def scale_rule(elas_expr, vm_pool)
- parser = ElasticityGrammarParser.new
+ node['vm_info'] = nil

- if elas_expr.nil? || elas_expr.empty?
- return false
- end
+ msg = "Role #{name} : Cannot get info for VM #{vm_id}: #{e.message}"
+ Log.error LOG_COMP, msg, @service.id

- treetop = parser.parse(elas_expr)
+ @service.log_error(msg)

- if treetop.nil?
  return [false,
- "Parse error. '#{elas_expr}': #{parser.failure_reason}"]
+ "Error getting VM #{vm_id} info in " \
+ "Role #{name}: #{e.message}"]
  end
-
- val, st = treetop.result(self, vm_pool)
-
- [val, st]
  end

- def calculate_new_cardinality(elasticity_pol)
- type = elasticity_pol['type']
- adjust = elasticity_pol['adjust'].to_i
-
- # Min is a hard limit, if the current cardinality + adjustment does
- # not reach it, the difference is added
-
- max = [cardinality, max_cardinality.to_i].max
- # min = [cardinality(), min_cardinality.to_i].min()
- min = min_cardinality.to_i
-
- case type.upcase
- when 'CHANGE'
- new_cardinality = cardinality + adjust
- when 'PERCENTAGE_CHANGE'
- min_adjust_step = elasticity_pol['min_adjust_step'].to_i
-
- change = cardinality * adjust / 100.0
-
- change > 0 ? sign = 1 : sign = -1
- change = change.abs
-
- if change < 1
- change = 1
- else
- change = change.to_i
- end
-
- change = sign * [change, min_adjust_step].max
-
- new_cardinality = cardinality + change
-
- when 'CARDINALITY'
- new_cardinality = adjust
- else
- # TODO: error message
- return cardinality
- end
-
- # The cardinality can be forced to be outside the min,max
- # range. If that is the case, the scale up/down will not
- # move further outside the range. It will move towards the
- # range with the adjustement set, instead of jumping the
- # difference
- if adjust > 0
- new_cardinality = max if new_cardinality > max
- elsif adjust < 0
- new_cardinality = min if new_cardinality < min
+ def vm_failure?(node)
+ if node && node['vm_info']
+ return Role.vm_failure?(node['vm_info']['VM']['STATE'],
+ node['vm_info']['VM']['LCM_STATE'])
  end

- new_cardinality
+ false
  end

  # Evaluate rules that references to parent roles
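
The new fill_node_info helper in the hunk above wraps vm.info in a bounded retry: it sleeps a little longer after each failed attempt and gives up after three tries. A standalone sketch of that pattern, with assumed names (not code shipped in the gem):

    def with_retries(max_retries = 3)
      attempts = 0
      begin
        attempts += 1
        yield
      rescue StandardError
        if attempts < max_retries
          sleep(attempts) # back off: 1s, then 2s
          retry
        end
        raise
      end
    end

    # with_retries { vm.info } # re-raises after the third failed attempt
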