opennebula 5.12.4 → 5.13.80.pre

Files changed (126)
  1. checksums.yaml +4 -4
  2. data/lib/ActionManager.rb +1 -1
  3. data/lib/CommandManager.rb +1 -1
  4. data/lib/DriverExecHelper.rb +44 -28
  5. data/lib/OpenNebulaDriver.rb +8 -4
  6. data/lib/VirtualMachineDriver.rb +9 -2
  7. data/lib/cloud/CloudClient.rb +3 -3
  8. data/lib/datacenter.rb +1258 -0
  9. data/lib/datastore.rb +1025 -0
  10. data/lib/distributed_firewall.rb +280 -0
  11. data/lib/file_helper.rb +370 -0
  12. data/lib/host.rb +1517 -0
  13. data/lib/logical_port.rb +50 -0
  14. data/lib/logical_switch.rb +77 -0
  15. data/lib/memoize.rb +74 -0
  16. data/lib/models.rb +32 -0
  17. data/lib/models/role.rb +1126 -0
  18. data/lib/models/service.rb +709 -0
  19. data/lib/network.rb +635 -0
  20. data/lib/nsx_client.rb +144 -0
  21. data/lib/nsx_component.rb +28 -0
  22. data/lib/nsx_constants.rb +149 -0
  23. data/lib/nsx_driver.rb +78 -0
  24. data/lib/nsx_error.rb +77 -0
  25. data/lib/nsx_rule.rb +193 -0
  26. data/lib/nsxt_client.rb +176 -0
  27. data/lib/nsxt_dfw.rb +196 -0
  28. data/lib/nsxt_logical_port.rb +94 -0
  29. data/lib/nsxt_rule.rb +188 -0
  30. data/lib/nsxt_tz.rb +38 -0
  31. data/lib/nsxv_client.rb +176 -0
  32. data/lib/nsxv_dfw.rb +202 -0
  33. data/lib/nsxv_logical_port.rb +107 -0
  34. data/lib/nsxv_rule.rb +172 -0
  35. data/lib/nsxv_tz.rb +41 -0
  36. data/lib/opaque_network.rb +134 -0
  37. data/lib/opennebula.rb +5 -2
  38. data/lib/opennebula/acl.rb +1 -1
  39. data/lib/opennebula/acl_pool.rb +1 -1
  40. data/lib/opennebula/client.rb +1 -1
  41. data/lib/opennebula/cluster.rb +1 -1
  42. data/lib/opennebula/cluster_pool.rb +1 -1
  43. data/lib/opennebula/datastore.rb +1 -1
  44. data/lib/opennebula/datastore_pool.rb +1 -1
  45. data/lib/opennebula/document.rb +8 -29
  46. data/lib/opennebula/document_json.rb +42 -12
  47. data/lib/opennebula/document_pool.rb +1 -1
  48. data/lib/opennebula/document_pool_json.rb +1 -1
  49. data/lib/opennebula/error.rb +4 -1
  50. data/lib/opennebula/flow.rb +23 -0
  51. data/lib/opennebula/flow/grammar.rb +1195 -0
  52. data/lib/opennebula/flow/service_pool.rb +190 -0
  53. data/lib/opennebula/flow/service_template.rb +572 -0
  54. data/lib/opennebula/flow/service_template_ext.rb +84 -0
  55. data/lib/opennebula/flow/service_template_pool.rb +32 -0
  56. data/lib/opennebula/flow/validator.rb +499 -0
  57. data/lib/opennebula/group.rb +1 -1
  58. data/lib/opennebula/group_pool.rb +1 -1
  59. data/lib/opennebula/hook.rb +5 -12
  60. data/lib/opennebula/hook_log.rb +1 -1
  61. data/lib/opennebula/hook_pool.rb +1 -1
  62. data/lib/opennebula/host.rb +1 -1
  63. data/lib/opennebula/host_pool.rb +1 -1
  64. data/lib/opennebula/image.rb +17 -14
  65. data/lib/opennebula/image_pool.rb +1 -1
  66. data/lib/opennebula/ldap_auth.rb +1 -1
  67. data/lib/opennebula/ldap_auth_spec.rb +1 -1
  68. data/lib/opennebula/lockable_ext.rb +163 -0
  69. data/lib/opennebula/marketplace.rb +1 -1
  70. data/lib/opennebula/marketplace_pool.rb +1 -1
  71. data/lib/opennebula/marketplaceapp.rb +9 -119
  72. data/lib/opennebula/marketplaceapp_ext.rb +522 -0
  73. data/lib/opennebula/marketplaceapp_pool.rb +1 -1
  74. data/lib/opennebula/oneflow_client.rb +4 -3
  75. data/lib/opennebula/pool.rb +17 -3
  76. data/lib/opennebula/pool_element.rb +1 -1
  77. data/lib/opennebula/security_group.rb +1 -1
  78. data/lib/opennebula/security_group_pool.rb +1 -1
  79. data/lib/opennebula/server_cipher_auth.rb +1 -1
  80. data/lib/opennebula/server_x509_auth.rb +1 -1
  81. data/lib/opennebula/ssh_auth.rb +1 -1
  82. data/lib/opennebula/system.rb +1 -1
  83. data/lib/opennebula/template.rb +4 -13
  84. data/lib/opennebula/template_ext.rb +325 -0
  85. data/lib/opennebula/template_pool.rb +1 -1
  86. data/lib/opennebula/user.rb +26 -2
  87. data/lib/opennebula/user_pool.rb +1 -1
  88. data/lib/opennebula/utils.rb +1 -1
  89. data/lib/opennebula/vdc.rb +1 -1
  90. data/lib/opennebula/vdc_pool.rb +1 -1
  91. data/lib/opennebula/virtual_machine.rb +25 -207
  92. data/lib/opennebula/virtual_machine_ext.rb +469 -0
  93. data/lib/opennebula/virtual_machine_pool.rb +28 -17
  94. data/lib/opennebula/virtual_network.rb +4 -10
  95. data/lib/opennebula/virtual_network_pool.rb +1 -1
  96. data/lib/opennebula/virtual_router.rb +4 -12
  97. data/lib/opennebula/virtual_router_pool.rb +1 -1
  98. data/lib/opennebula/vm_group.rb +4 -11
  99. data/lib/opennebula/vm_group_pool.rb +1 -1
  100. data/lib/opennebula/vntemplate.rb +4 -13
  101. data/lib/opennebula/vntemplate_pool.rb +1 -1
  102. data/lib/opennebula/wait_ext.rb +222 -0
  103. data/lib/opennebula/x509_auth.rb +1 -1
  104. data/lib/opennebula/xml_element.rb +1 -1
  105. data/lib/opennebula/xml_pool.rb +1 -1
  106. data/lib/opennebula/xml_utils.rb +1 -1
  107. data/lib/opennebula/zone.rb +1 -1
  108. data/lib/opennebula/zone_pool.rb +1 -1
  109. data/lib/rest_client.rb +201 -0
  110. data/lib/scripts_common.rb +180 -0
  111. data/lib/transport_zone.rb +43 -0
  112. data/lib/vcenter_driver.rb +13 -11
  113. data/lib/vcenter_importer.rb +616 -0
  114. data/lib/vi_client.rb +281 -0
  115. data/lib/vi_helper.rb +312 -0
  116. data/lib/virtual_machine.rb +3477 -0
  117. data/lib/virtual_wire.rb +158 -0
  118. data/lib/vm_device.rb +80 -0
  119. data/lib/vm_disk.rb +202 -0
  120. data/lib/vm_folder.rb +69 -0
  121. data/lib/vm_helper.rb +30 -0
  122. data/lib/vm_monitor.rb +303 -0
  123. data/lib/vm_nic.rb +70 -0
  124. data/lib/vm_template.rb +1961 -0
  125. data/lib/vmm_importer.rb +121 -0
  126. metadata +118 -5
data/lib/logical_port.rb ADDED
@@ -0,0 +1,50 @@
+ # -------------------------------------------------------------------------- #
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
+ # #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
+ # not use this file except in compliance with the License. You may obtain #
+ # a copy of the License at #
+ # #
+ # http://www.apache.org/licenses/LICENSE-2.0 #
+ # #
+ # Unless required by applicable law or agreed to in writing, software #
+ # distributed under the License is distributed on an "AS IS" BASIS, #
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+ # See the License for the specific language governing permissions and #
+ # limitations under the License. #
+ #--------------------------------------------------------------------------- #
+ module NSXDriver
+
+     # Class Logical Port
+     class LogicalPort < NSXComponent
+
+         # ATTRIBUTES
+
+         def self.new_child(nsx_client, id = nil)
+             case nsx_client.nsx_type.upcase
+             when NSXConstants::NSXT
+                 NSXTLogicalPort.new(nsx_client, id)
+             when NSXConstants::NSXV
+                 NSXVLogicalPort.new(nsx_client, id)
+             else
+                 error_msg = "Unknown NSX type: #{nsx_client.nsx_type}"
+                 error = NSXError::UnknownObject.new(error_msg)
+                 raise error
+             end
+         end
+
+         # Check if logical port exists
+         def lp?; end
+
+         # Get logical port id
+         def lp_id; end
+
+         # Get logical port display name
+         def lp_name; end
+
+         # Get resource type
+         def lp_type; end
+
+     end
+
+ end
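The new_child factory above dispatches on the client's nsx_type and returns the matching NSX-T or NSX-V subclass, raising NSXError::UnknownObject for anything else. A minimal sketch of the call shape, assuming only what the method itself requires (the client below is a hypothetical stand-in; the real driver passes an NSX client wired to an NSX manager, and the subclass constructors talk to that manager):

    # Hypothetical stand-in: new_child only needs an object answering #nsx_type.
    client = Struct.new(:nsx_type).new(NSXDriver::NSXConstants::NSXT)

    # Returns an NSXTLogicalPort (or an NSXVLogicalPort for NSX-V clients).
    lp = NSXDriver::LogicalPort.new_child(client, 'lp-uuid')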
data/lib/logical_switch.rb ADDED
@@ -0,0 +1,77 @@
+ # -------------------------------------------------------------------------- #
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
+ # #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
+ # not use this file except in compliance with the License. You may obtain #
+ # a copy of the License at #
+ # #
+ # http://www.apache.org/licenses/LICENSE-2.0 #
+ # #
+ # Unless required by applicable law or agreed to in writing, software #
+ # distributed under the License is distributed on an "AS IS" BASIS, #
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+ # See the License for the specific language governing permissions and #
+ # limitations under the License. #
+ #--------------------------------------------------------------------------- #
+ module NSXDriver
+
+     # Class Logical Switch
+     class LogicalSwitch < NSXComponent
+
+         # ATTRIBUTES
+         attr_reader :ls_id
+         attr_reader :tz_id
+         attr_reader :replication_mode
+         attr_reader :display_name
+         attr_reader :description
+
+         def ls?; end
+
+         # Get logical switch's name
+         def ls_name; end
+
+         # Get logical switch's vni
+         def ls_vni; end
+
+         # Get the Transport Zone of the logical switch
+         def ls_tz; end
+
+         # Create a new logical switch
+         def new_logical_switch(ls_data); end
+
+         # Delete a logical switch
+         def delete_logical_switch; end
+
+         # Update a logical switch
+         def update_logical_switch; end
+
+         # Return nsx nics of type NSX-V and NSX-T
+         # If only_new_attached = true --> Only returns new attached nsx nics
+         def self.nsx_nics(template_xml, only_new_attached = true)
+             if only_new_attached == true
+                 nics = template_xml.xpath('//TEMPLATE/NIC[ATTACH="YES"]')
+             else
+                 nics = template_xml.xpath('//TEMPLATE/NIC')
+             end
+             nics_array = []
+             nics.each do |nic|
+                 network_id = nic.xpath('NETWORK_ID').text
+                 # Check the network exists
+                 one_vnet = VCenterDriver::VIHelper
+                            .one_item(OpenNebula::VirtualNetwork, network_id)
+                 rc = one_vnet.info
+                 if OpenNebula.is_error?(rc)
+                     err_msg = rc.message
+                     raise err_msg
+                 end
+                 pg_type = one_vnet['TEMPLATE/VCENTER_PORTGROUP_TYPE']
+                 nics_array << nic if [NSXConstants::NSXV_LS_TYPE,
+                                       NSXConstants::NSXT_LS_TYPE]
+                                      .include?(pg_type)
+             end
+             nics_array
+         end
+
+     end
+
+ end
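nsx_nics selects NIC elements from the VM template with a plain XPath query, keeping only those backed by NSX port groups. A sketch of just the selection step, with an illustrative template fragment (the per-NIC OpenNebula vnet lookup is omitted):

    require 'nokogiri'

    # Illustrative VM template; only NICs with ATTACH="YES" match the XPath
    # used when only_new_attached is true.
    template_xml = Nokogiri::XML(<<~XML)
      <VM><TEMPLATE>
        <NIC><NETWORK_ID>3</NETWORK_ID><ATTACH>YES</ATTACH></NIC>
        <NIC><NETWORK_ID>4</NETWORK_ID></NIC>
      </TEMPLATE></VM>
    XML

    nics = template_xml.xpath('//TEMPLATE/NIC[ATTACH="YES"]')
    puts nics.map {|nic| nic.xpath('NETWORK_ID').text }.inspect # => ["3"]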
data/lib/memoize.rb ADDED
@@ -0,0 +1,74 @@
+ # -------------------------------------------------------------------------- #
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
+ # #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
+ # not use this file except in compliance with the License. You may obtain #
+ # a copy of the License at #
+ # #
+ # http://www.apache.org/licenses/LICENSE-2.0 #
+ # #
+ # Unless required by applicable law or agreed to in writing, software #
+ # distributed under the License is distributed on an "AS IS" BASIS, #
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+ # See the License for the specific language governing permissions and #
+ # limitations under the License. #
+ #--------------------------------------------------------------------------- #
+
+ ##############################################################################
+ # Module Memoize
+ ##############################################################################
+ module Memoize
+
+     def [](property)
+         @memoize = {} unless defined?(@memoize)
+
+         if (value = @memoize[property])
+             return value
+         end
+
+         current_item = @item
+
+         property_path = ''
+
+         property.split('.').each do |elem|
+             if property_path.empty?
+                 property_path << elem
+             else
+                 property_path << '.' << elem
+             end
+
+             if (val = @memoize[property_path])
+                 current_item = val
+             else
+                 begin
+                     current_item = current_item.send(elem)
+                 rescue StandardError
+                     current_item = nil
+                 end
+             end
+
+             break if current_item.nil?
+
+             @memoize[property_path] = current_item
+         end
+
+         @memoize[property] = current_item
+     end
+
+     def clear(property)
+         @memoize = {} unless defined?(@memoize)
+         @memoize.delete(property) if @memoize[property]
+     end
+
+     def clear_all
+         @memoize = {}
+     end
+
+     def []=(property, value)
+         @memoize = {} unless defined?(@memoize)
+
+         @memoize[property] = value
+     end
+
+ end
+ # module Memoize
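Memoize#[] walks a dot-separated property path with send, caching every intermediate result under its partial path, and #[]= lets callers seed or override a cached value. A self-contained sketch (the Struct stand-ins are hypothetical; the driver mixes this module into its vCenter wrapper classes, which expose the wrapped object as @item):

    # Hypothetical stand-ins for the vCenter-backed objects the driver wraps.
    Datastore = Struct.new(:name)
    Item      = Struct.new(:datastore)

    class Wrapper
        include Memoize

        def initialize(item)
            @item = item # Memoize#[] starts its send() chain at @item
        end
    end

    w = Wrapper.new(Item.new(Datastore.new('ds0')))
    puts w['datastore.name'] # walks @item.datastore.name => "ds0"

    w['datastore.name'] = 'cached'
    puts w['datastore.name'] # now served from the cache => "cached"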
data/lib/models.rb ADDED
@@ -0,0 +1,32 @@
+ # -------------------------------------------------------------------------- #
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
+ # #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
+ # not use this file except in compliance with the License. You may obtain #
+ # a copy of the License at #
+ # #
+ # http://www.apache.org/licenses/LICENSE-2.0 #
+ # #
+ # Unless required by applicable law or agreed to in writing, software #
+ # distributed under the License is distributed on an "AS IS" BASIS, #
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+ # See the License for the specific language governing permissions and #
+ # limitations under the License. #
+ #--------------------------------------------------------------------------- #
+
+ require 'opennebula'
+
+ # rubocop:disable Style/MixinUsage
+ include OpenNebula
+ # rubocop:enable Style/MixinUsage
+
+ require 'opennebula/document_json'
+ require 'opennebula/document_pool_json'
+ require 'opennebula/flow/service_pool'
+ require 'opennebula/flow/service_template_pool'
+ require 'opennebula/flow/service_template'
+
+ require 'opennebula/flow/validator'
+
+ require 'models/role'
+ require 'models/service'
data/lib/models/role.rb ADDED
@@ -0,0 +1,1126 @@
+ # -------------------------------------------------------------------------- #
+ # Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
+ # #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may #
+ # not use this file except in compliance with the License. You may obtain #
+ # a copy of the License at #
+ # #
+ # http://www.apache.org/licenses/LICENSE-2.0 #
+ # #
+ # Unless required by applicable law or agreed to in writing, software #
+ # distributed under the License is distributed on an "AS IS" BASIS, #
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+ # See the License for the specific language governing permissions and #
+ # limitations under the License. #
+ #--------------------------------------------------------------------------- #
+
+ require 'treetop'
+ require 'treetop/version'
+ require 'opennebula/flow/grammar'
+ require 'parse-cron'
+
+ if Gem::Version.create('1.6.3') > Gem.loaded_specs['treetop'].version
+     raise 'treetop gem version must be >= 1.6.3. ' \
+           "Current version is #{Treetop::VERSION::STRING}"
+ end
+
+ module OpenNebula
+
+     # Service Role class
+     class Role
+
+         attr_reader :service
+
+         # Actions that can be performed on the VMs of a given Role
+         SCHEDULE_ACTIONS = %w[
+             terminate
+             terminate-hard
+             undeploy
+             undeploy-hard
+             hold
+             release
+             stop
+             suspend
+             resume
+             reboot
+             reboot-hard
+             poweroff
+             poweroff-hard
+             snapshot-create
+             snapshot-revert
+             snapshot-delete
+             disk-snapshot-create
+             disk-snapshot-revert
+             disk-snapshot-delete
+         ]
+
+         STATE = {
+             'PENDING'            => 0,
+             'DEPLOYING'          => 1,
+             'RUNNING'            => 2,
+             'UNDEPLOYING'        => 3,
+             'WARNING'            => 4,
+             'DONE'               => 5,
+             'FAILED_UNDEPLOYING' => 6,
+             'FAILED_DEPLOYING'   => 7,
+             'SCALING'            => 8,
+             'FAILED_SCALING'     => 9,
+             'COOLDOWN'           => 10
+         }
+
+         STATE_STR = %w[
+             PENDING
+             DEPLOYING
+             RUNNING
+             UNDEPLOYING
+             WARNING
+             DONE
+             FAILED_UNDEPLOYING
+             FAILED_DEPLOYING
+             SCALING
+             FAILED_SCALING
+             COOLDOWN
+         ]
+
+         RECOVER_DEPLOY_STATES = %w[
+             FAILED_DEPLOYING
+             DEPLOYING
+             PENDING
+         ]
+
+         RECOVER_UNDEPLOY_STATES = %w[
+             FAILED_UNDEPLOYING
+             UNDEPLOYING
+         ]
+
+         RECOVER_SCALE_STATES = %w[
+             FAILED_SCALING
+             SCALING
+         ]
+
+         VM_FAILURE_STATES = %w[
+             BOOT_FAILURE
+             BOOT_MIGRATE_FAILURE
+             PROLOG_MIGRATE_FAILURE
+             PROLOG_FAILURE
+             EPILOG_FAILURE
+             EPILOG_STOP_FAILURE
+             EPILOG_UNDEPLOY_FAILURE
+             PROLOG_MIGRATE_POWEROFF_FAILURE
+             PROLOG_MIGRATE_SUSPEND_FAILURE
+             PROLOG_MIGRATE_UNKNOWN_FAILURE
+             BOOT_UNDEPLOY_FAILURE
+             BOOT_STOPPED_FAILURE
+             PROLOG_RESUME_FAILURE
+             PROLOG_UNDEPLOY_FAILURE
+         ]
+
+         SCALE_WAYS = {
+             'UP'   => 0,
+             'DOWN' => 1
+         }
+
+         # List of attributes that can't be changed in update operation
+         # cardinality: this is internal information managed by OneFlow server
+         # last_vmname: this is internal information managed by OneFlow server
+         # nodes: this is internal information managed by OneFlow server
+         # parents: this only makes sense in the deploy operation
+         # state: this is internal information managed by OneFlow server
+         # vm_template: this will affect the scale operation
+         IMMUTABLE_ATTRS = %w[
+             cardinality
+             last_vmname
+             nodes
+             parents
+             state
+             vm_template
+         ]
+
+         # VM information to save in document
+         VM_INFO = %w[ID UID GID UNAME GNAME NAME]
+
+         LOG_COMP = 'ROL'
+
+         def initialize(body, service)
+             @body = body
+             @service = service
+
+             @body['cooldown'] = @@default_cooldown if @body['cooldown'].nil?
+             @body['nodes'] ||= []
+         end
+
+         def name
+             @body['name']
+         end
+
+         # Returns the role state
+         # @return [Integer] the role state
+         def state
+             @body['state'].to_i
+         end
+
+         def can_recover_deploy?
+             if state != STATE['PENDING']
+                 return RECOVER_DEPLOY_STATES.include? STATE_STR[state]
+             end
+
+             parents.each do |parent|
+                 next unless @service.roles[parent]
+
+                 return false if @service.roles[parent].state != STATE['RUNNING']
+             end
+
+             true
+         end
+
+         def can_recover_undeploy?
+             if !RECOVER_UNDEPLOY_STATES.include? STATE_STR[state]
+                 # TODO: if children are not empty, check if they can be undeployed
+                 @service.roles.each do |role_name, role|
+                     next if role_name == name
+
+                     if role.parents.include?(name) &&
+                        role.state != STATE['DONE']
+                         return false
+                     end
+                 end
+             end
+
+             true
+         end
+
+         def can_recover_scale?
+             return false unless RECOVER_SCALE_STATES.include? STATE_STR[state]
+
+             true
+         end
+
+         # Returns the role parents
+         # @return [Array] the role parents
+         def parents
+             @body['parents'] || []
+         end
+
+         # Returns the role cardinality
+         # @return [Integer] the role cardinality
+         def cardinality
+             @body['cardinality'].to_i
+         end
+
+         # Sets a new cardinality for this role
+         # @param [Integer] the new cardinality
+         # rubocop:disable Naming/AccessorMethodName
+         def set_cardinality(target_cardinality)
+             # rubocop:enable Naming/AccessorMethodName
+             if target_cardinality > cardinality
+                 dir = 'up'
+             else
+                 dir = 'down'
+             end
+
+             msg = "Role #{name} scaling #{dir} from #{cardinality} to " \
+                   "#{target_cardinality} nodes"
+
+             Log.info LOG_COMP, msg, @service.id
+
+             @service.log_info(msg)
+
+             @body['cardinality'] = target_cardinality.to_i
+         end
+
+         # Returns the role max cardinality
+         # @return [Integer,nil] the role cardinality or nil if it isn't defined
+         def max_cardinality
+             max = @body['max_vms']
+
+             return if max.nil?
+
+             max.to_i
+         end
+
+         # Returns the role min cardinality
+         # @return [Integer,nil] the role cardinality or nil if it isn't defined
+         def min_cardinality
+             min = @body['min_vms']
+
+             return if min.nil?
+
+             min.to_i
+         end
+
+         # Returns the string representation of the service state
+         # @return [String] the state string
+         def state_str
+             STATE_STR[state]
+         end
+
+         # Returns the nodes of the role
+         # @return [Array] the nodes
+         def nodes
+             @body['nodes']
+         end
+
+         def info_nodes(vm_pool)
+             ret = []
+
+             monitoring = vm_pool[:monitoring]
+             vm_pool = vm_pool[:vm_pool]
+
+             @body['nodes'].each do |node|
+                 id = node['deploy_id']
+                 vm = vm_pool.retrieve_xmlelements("/VM_POOL/VM[ID=#{id}]")[0]
+
+                 if vm.nil?
+                     Log.error LOG_COMP,
+                               "Error getting VM #{id}",
+                               @service.id
+                 else
+                     obj = {}
+                     obj['deploy_id'] = node['deploy_id']
+
+                     hash = vm.to_hash
+                     vm_monit = monitoring.select {|v| v['ID'].to_i == id }[0]
+
+                     hash['VM']['MONITORING'] = vm_monit if vm_monit
+                     obj['vm_info'] = hash
+
+                     ret << obj
+                 end
+             end
+
+             ret
+         end
+
+         def nodes_ids
+             @body['nodes'].map {|node| node['deploy_id'] }
+         end
+
+         def elasticity_policies
+             @body['elasticity_policies']
+         end
+
+         def update_elasticity_policies(new_policies)
+             @body['elasticity_policies'] = new_policies
+         end
+
+         def scheduled_policies
+             @body['scheduled_policies']
+         end
+
+         def update_scheduled_policies(new_policies)
+             @body['scheduled_policies'] = new_policies
+         end
+
+         def cooldown
+             @body['cooldown']
+         end
+
+         def update_cooldown(new_cooldown)
+             @body['cooldown'] = new_cooldown unless new_cooldown.nil?
+         end
+
+         # Sets a new state
+         # @param [Integer] the new state
+         # @return [true, false] true if the value was changed
+         # rubocop:disable Naming/AccessorMethodName
+         def set_state(state)
+             # rubocop:enable Naming/AccessorMethodName
+             if state < 0 || state >= STATE_STR.size
+                 return false
+             end
+
+             @body['state'] = state.to_i
+
+             if state == STATE['SCALING']
+
+                 elasticity_pol = @body['elasticity_policies']
+
+                 if !elasticity_pol.nil?
+                     elasticity_pol.each do |policy|
+                         policy.delete('true_evals')
+                     end
+                 end
+             end
+
+             Log.info LOG_COMP,
+                      "Role #{name} new state: #{STATE_STR[state]}",
+                      @service.id
+
+             true
+         end
+
+         def scale_way(way)
+             @body['scale_way'] = SCALE_WAYS[way]
+         end
+
+         def clean_scale_way
+             @body.delete('scale_way')
+         end
+
+         # Retrieves the VM information for each Node in this Role. If a Node
+         # is to be disposed and it is found in DONE, it will be cleaned
+         #
+         # @return [nil, OpenNebula::Error] nil in case of success, Error
+         # otherwise
+         def info
+             raise 'role.info is not defined'
+         end
+
+         # Deploys all the nodes in this role
+         # @return [Array<true, nil>, Array<false, String>] true if all the VMs
+         # were created, false and the error reason if there was a problem
+         # creating the VMs
+         def deploy
+             deployed_nodes = []
+             n_nodes = cardinality - nodes.size
+
+             return [deployed_nodes, nil] if n_nodes == 0
+
+             @body['last_vmname'] ||= 0
+
+             template_id = @body['vm_template']
+             template = OpenNebula::Template.new_with_id(template_id,
+                                                         @service.client)
+
+             if @body['vm_template_contents']
+                 extra_template = @body['vm_template_contents'].dup
+
+                 # If the extra_template contains APPEND="<attr1>,<attr2>", it
+                 # will add the attributes that already exist in the template,
+                 # instead of replacing them.
+                 append = extra_template
+                          .match(/^\s*APPEND=\"?(.*?)\"?\s*$/)[1]
+                          .split(',') rescue nil
+
+                 if append && !append.empty?
+                     rc = template.info
+
+                     if OpenNebula.is_error?(rc)
+                         msg = "Role #{name} : Info template #{template_id};" \
+                               " #{rc.message}"
+
+                         Log.error LOG_COMP, msg, @service.id
+                         @service.log_error(msg)
+
+                         return [false, 'Error fetching Info to instantiate' \
+                                        " VM Template #{template_id} in Role " \
+                                        "#{name}: #{rc.message}"]
+                     end
+
+                     et = template.template_like_str('TEMPLATE',
+                                                     true,
+                                                     append.join('|'))
+
+                     et = et << "\n" << extra_template
+
+                     extra_template = et
+                 end
+             else
+                 extra_template = ''
+             end
+
+             extra_template << "\nSERVICE_ID = #{@service.id}"
+             extra_template << "\nROLE_NAME = \"#{@body['name']}\""
+
+             n_nodes.times do
+                 vm_name = @@vm_name_template
+                           .gsub('$SERVICE_ID', @service.id.to_s)
+                           .gsub('$SERVICE_NAME', @service.name.to_s)
+                           .gsub('$ROLE_NAME', name.to_s)
+                           .gsub('$VM_NUMBER', @body['last_vmname'].to_s)
+
+                 @body['last_vmname'] += 1
+
+                 Log.debug LOG_COMP,
+                           "Role #{name} : Trying to instantiate " \
+                           "template #{template_id}, with name #{vm_name}",
+                           @service.id
+
+                 vm_id = template.instantiate(vm_name, false, extra_template)
+
+                 deployed_nodes << vm_id
+
+                 if OpenNebula.is_error?(vm_id)
+                     msg = "Role #{name} : Instantiate failed for template " \
+                           "#{template_id}; #{vm_id.message}"
+
+                     Log.error LOG_COMP, msg, @service.id
+                     @service.log_error(msg)
+
+                     return [false, 'Error trying to instantiate the VM ' \
+                                    "Template #{template_id} in Role " \
+                                    "#{name}: #{vm_id.message}"]
+                 end
+
+                 Log.debug LOG_COMP, "Role #{name} : Instantiate success," \
+                                     " VM ID #{vm_id}", @service.id
+                 node = {
+                     'deploy_id' => vm_id
+                 }
+
+                 vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
+                                                             @service.client)
+                 rc = vm.info
+
+                 if OpenNebula.is_error?(rc)
+                     node['vm_info'] = nil
+                 else
+                     hash_vm = vm.to_hash['VM']
+                     vm_info = {}
+                     vm_info['VM'] = hash_vm.select {|v| VM_INFO.include?(v) }
+
+                     node['vm_info'] = vm_info
+                 end
+
+                 @body['nodes'] << node
+             end
+
+             [deployed_nodes, nil]
+         end
+
+         # Terminate all the nodes in this role
+         #
+         # @param recover [true, false] true to recover failed VMs (retrying
+         # their last action) instead of terminating them
+         # @return [Array<true, nil>, Array<false, String>] true if all the VMs
+         # were terminated, false and the error reason if there was a problem
+         # shutting down the VMs
+         def shutdown(recover)
+             if nodes.size != cardinality
+                 n_nodes = nodes.size - cardinality
+             else
+                 n_nodes = nodes.size
+             end
+
+             rc = shutdown_nodes(nodes, n_nodes, recover)
+
+             unless rc[0]
+                 return [false, "Error undeploying nodes for role `#{name}`"]
+             end
+
+             [rc[1], nil]
+         end
+
+         # Delete all the nodes in this role
+         # @return [Array<true, nil>] All the VMs are deleted, and the return
+         # ignored
+         def delete
+             raise 'role.delete is not defined'
+         end
+
+         # Changes the owner/group of all the nodes in this role
+         #
+         # @param [Integer] uid the new owner id. Set to -1 to leave the current
+         # @param [Integer] gid the new group id. Set to -1 to leave the current
+         #
+         # @return [Array<true, nil>, Array<false, String>] true if all the VMs
+         # were updated, false and the error reason if there was a problem
+         # updating the VMs
+         def chown(uid, gid)
+             nodes.each do |node|
+                 vm_id = node['deploy_id']
+
+                 Log.debug LOG_COMP,
+                           "Role #{name} : Chown for VM #{vm_id}",
+                           @service.id
+
+                 vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
+                                                             @service.client)
+                 rc = vm.chown(uid, gid)
+
+                 if OpenNebula.is_error?(rc)
+                     msg = "Role #{name} : Chown failed for VM #{vm_id}; " \
+                           "#{rc.message}"
+
+                     Log.error LOG_COMP, msg, @service.id
+                     @service.log_error(msg)
+
+                     return [false, rc.message]
+                 else
+                     Log.debug LOG_COMP,
+                               "Role #{name} : Chown success for VM #{vm_id}",
+                               @service.id
+                 end
+             end
+
+             [true, nil]
+         end
+
+         # Schedule the given action on all the VMs that belong to the Role
+         # @param [String] action one of the available SCHEDULE_ACTIONS
+         # @param [Integer] period
+         # @param [Integer] vms_per_period
+         # @param [String] args action arguments
+         def batch_action(action, period, vms_per_period, args)
+             vms_id = []
+             error_msgs = []
+             nodes = @body['nodes']
+             now = Time.now.to_i
+             time_offset = 0
+
+             # if role is done, return error
+             if state == 5
+                 return OpenNebula::Error.new("Role #{name} is in DONE state")
+             end
+
+             do_offset = (!period.nil? && period.to_i > 0 &&
+                          !vms_per_period.nil? && vms_per_period.to_i > 0)
+
+             nodes.each_with_index do |node, index|
+                 vm_id = node['deploy_id']
+                 vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
+                                                             @service.client)
+
+                 rc = vm.info
+
+                 if OpenNebula.is_error?(rc)
+                     msg = "Role #{name} : VM #{vm_id} monitoring failed;"\
+                           " #{rc.message}"
+
+                     error_msgs << msg
+
+                     Log.error LOG_COMP, msg, @service.id
+
+                     @service.log_error(msg)
+                 else
+                     ids = vm.retrieve_elements('USER_TEMPLATE/SCHED_ACTION/ID')
+
+                     id = 0
+                     if !ids.nil? && !ids.empty?
+                         ids.map! {|e| e.to_i }
+                         id = ids.max + 1
+                     end
+
+                     tmp_str = vm.user_template_str
+
+                     if do_offset
+                         offset = (index / vms_per_period.to_i).floor
+                         time_offset = offset * period.to_i
+                     end
+
+                     tmp_str << "\nSCHED_ACTION = ["
+                     tmp_str << "ID = #{id},"
+                     tmp_str << "ACTION = #{action},"
+                     tmp_str << "ARGS = \"#{args}\"," if args
+                     tmp_str << "TIME = #{now + time_offset}]"
+
+                     rc = vm.update(tmp_str)
+                     if OpenNebula.is_error?(rc)
+                         msg = "Role #{name} : VM #{vm_id} error scheduling "\
+                               "action; #{rc.message}"
+
+                         error_msgs << msg
+
+                         Log.error LOG_COMP, msg, @service.id
+
+                         @service.log_error(msg)
+                     else
+                         vms_id << vm.id
+                     end
+                 end
+             end
+
+             log_msg = "Action:#{action} scheduled on Role:#{name} "\
+                       "VMs:#{vms_id.join(',')}"
+
+             Log.info LOG_COMP, log_msg, @service.id
+
+             return [true, log_msg] if error_msgs.empty?
+
+             error_msgs << log_msg
+
+             [false, error_msgs.join("\n")]
+         end
+
+         # Returns true if the VM state is failure
+         # @param [Integer] vm_state VM state
+         # @param [Integer] lcm_state VM LCM state
+         # @return [true,false] True if the lcm state is one of *_FAILURE
+         def self.vm_failure?(vm_state, lcm_state)
+             vm_state_str = VirtualMachine::VM_STATE[vm_state.to_i]
+             lcm_state_str = VirtualMachine::LCM_STATE[lcm_state.to_i]
+
+             if vm_state_str == 'ACTIVE' &&
+                VM_FAILURE_STATES.include?(lcm_state_str)
+                 return true
+             end
+
+             false
+         end
+
+         # rubocop:disable Style/ClassVars
+         def self.init_default_cooldown(default_cooldown)
+             @@default_cooldown = default_cooldown
+         end
+
+         def self.init_default_shutdown(shutdown_action)
+             @@default_shutdown = shutdown_action
+         end
+
+         def self.init_force_deletion(force_deletion)
+             @@force_deletion = force_deletion
+         end
+
+         def self.init_default_vm_name_template(vm_name_template)
+             @@vm_name_template = vm_name_template
+         end
+         # rubocop:enable Style/ClassVars
+
+         ########################################################################
+         # Scalability
+         ########################################################################
+
+         # Updates the role
+         # @param [Hash] template
+         # @return [nil, OpenNebula::Error] nil in case of success, Error
+         # otherwise
+         def update(template)
+             force = template['force'] == true
+             new_cardinality = template['cardinality']
+
+             return if new_cardinality.nil?
+
+             new_cardinality = new_cardinality.to_i
+
+             if !force
+                 if new_cardinality < min_cardinality.to_i
+                     return OpenNebula::Error.new(
+                         "Minimum cardinality is #{min_cardinality}"
+                     )
+
+                 elsif !max_cardinality.nil? &&
+                       new_cardinality > max_cardinality.to_i
+                     return OpenNebula::Error.new(
+                         "Maximum cardinality is #{max_cardinality}"
+                     )
+
+                 end
+             end
+
+             set_cardinality(new_cardinality)
+
+             nil
+         end
+
+         # Check that the changed values are correct
+         #
+         # @param template [Hash] New template
+         #
+         # @return [Boolean, String] True, nil if everything is correct
+         # False, attr if attr was changed
+         def check_new_template(template)
+             IMMUTABLE_ATTRS.each do |attr|
+                 next if template[attr] == @body[attr]
+
+                 return [false, "role/#{attr}"]
+             end
+
+             [true, nil]
+         end
+
+         ########################################################################
+         # Recover
+         ########################################################################
+
+         def recover_deploy(report)
+             nodes = @body['nodes']
+             deployed_nodes = []
+
+             nodes.each do |node|
+                 vm_id = node['deploy_id']
+
+                 vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
+                                                             @service.client)
+
+                 rc = vm.info
+
+                 if OpenNebula.is_error?(rc)
+                     msg = "Role #{name} : Retry failed for VM "\
+                           "#{vm_id}; #{rc.message}"
+                     Log.error LOG_COMP, msg, @service.id
+
+                     next true
+                 end
+
+                 vm_state = vm.state
+                 lcm_state = vm.lcm_state
+
+                 # ACTIVE/RUNNING
+                 next false if vm_state == 3 && lcm_state == 3 && !report
+
+                 next true if vm_state == 6 # Delete DONE nodes
+
+                 if Role.vm_failure?(vm_state, lcm_state)
+                     rc = vm.recover(2)
+
+                     if OpenNebula.is_error?(rc)
+                         msg = "Role #{name} : Retry failed for VM "\
+                               "#{vm_id}; #{rc.message}"
+
+                         Log.error LOG_COMP, msg, @service.id
+                         @service.log_error(msg)
+                     else
+                         deployed_nodes << vm_id
+                     end
+                 else
+                     vm.resume
+
+                     deployed_nodes << vm_id
+                 end
+             end
+
+             rc = deploy
+
+             deployed_nodes.concat(rc[0]) if rc[1].nil?
+
+             deployed_nodes
+         end
+
+         def recover_undeploy
+             undeployed_nodes = []
+
+             rc = shutdown(true)
+
+             undeployed_nodes.concat(rc[0]) if rc[1].nil?
+
+             undeployed_nodes
+         end
+
+         # def recover_warning
+         # end
+
+         def recover_scale(report)
+             rc = nil
+
+             if @body['scale_way'] == SCALE_WAYS['UP']
+                 rc = [recover_deploy(report), true]
+             elsif @body['scale_way'] == SCALE_WAYS['DOWN']
+                 rc = [recover_undeploy, false]
+             end
+
+             rc
+         end
+
+         # Returns a positive, 0, or negative number of nodes to adjust,
+         # according to the elasticity and scheduled policies
+         # @return [Array<Integer>] positive, 0, or negative number of nodes to
+         # adjust, plus the cooldown period duration
+         def scale?(vm_pool)
+             elasticity_pol = @body['elasticity_policies']
+             scheduled_pol = @body['scheduled_policies']
+
+             elasticity_pol ||= []
+             scheduled_pol ||= []
+
+             scheduled_pol.each do |policy|
+                 diff, cooldown_duration = scale_time?(policy)
+
+                 return [diff, cooldown_duration] if diff != 0
+             end
+
+             elasticity_pol.each do |policy|
+                 diff, cooldown_duration = scale_attributes?(policy, vm_pool)
+
+                 next if diff == 0
+
+                 cooldown_duration = @body['cooldown'] if cooldown_duration.nil?
+                 cooldown_duration = @@default_cooldown if cooldown_duration.nil?
+
+                 return [diff, cooldown_duration]
+             end
+
+             # Implicit rule that scales up to maintain the min_cardinality, with
+             # no cooldown period
+             if cardinality < min_cardinality.to_i
+                 return [min_cardinality.to_i - cardinality, 0]
+             end
+
+             [0, 0]
+         end
+
+         private
+
+         # Shuts down all the given nodes
+         # @param recover [true, false] true to recover failed VMs instead of terminating them
+         def shutdown_nodes(nodes, n_nodes, recover)
+             success = true
+             undeployed_nodes = []
+
+             action = @body['shutdown_action']
+
+             if action.nil?
+                 action = @service.shutdown_action
+             end
+
+             if action.nil?
+                 action = @@default_shutdown
+             end
+
+             nodes[0..n_nodes - 1].each do |node|
+                 vm_id = node['deploy_id']
+
+                 Log.debug(LOG_COMP,
+                           "Role #{name} : Terminating VM #{vm_id}",
+                           @service.id)
+
+                 vm = OpenNebula::VirtualMachine.new_with_id(vm_id,
+                                                             @service.client)
+
+                 vm_state = nil
+                 lcm_state = nil
+
+                 if recover
+                     vm.info
+
+                     vm_state = vm.state
+                     lcm_state = vm.lcm_state
+                 end
+
+                 if recover && Role.vm_failure?(vm_state, lcm_state)
+                     rc = vm.recover(2)
+                 elsif action == 'terminate-hard'
+                     rc = vm.terminate(true)
+                 else
+                     rc = vm.terminate
+                 end
+
+                 if OpenNebula.is_error?(rc)
+                     msg = "Role #{name} : Terminate failed for VM #{vm_id}, " \
+                           "will perform a Delete; #{rc.message}"
+
+                     Log.error LOG_COMP, msg, @service.id
+                     @service.log_error(msg)
+
+                     if action != 'terminate-hard'
+                         rc = vm.terminate(true)
+                     end
+
+                     if OpenNebula.is_error?(rc)
+                         rc = vm.delete
+                     end
+
+                     if OpenNebula.is_error?(rc)
+                         msg = "Role #{name} : Delete failed for VM #{vm_id}; " \
+                               "#{rc.message}"
+
+                         Log.error LOG_COMP, msg, @service.id
+                         @service.log_error(msg)
+
+                         success = false
+                     else
+                         Log.debug(LOG_COMP,
+                                   "Role #{name} : Delete success for VM " \
+                                   "#{vm_id}",
+                                   @service.id)
+
+                         undeployed_nodes << vm_id
+                     end
+                 else
+                     Log.debug(LOG_COMP,
+                               "Role #{name}: Terminate success for VM #{vm_id}",
+                               @service.id)
+                     undeployed_nodes << vm_id
+                 end
+             end
+
+             [success, undeployed_nodes]
+         end
+
+         def vm_failure?(node)
+             if node && node['vm_info']
+                 return Role.vm_failure?(node['vm_info']['VM']['STATE'],
+                                         node['vm_info']['VM']['LCM_STATE'])
+             end
+
+             false
+         end
+
+         # Returns a positive, 0, or negative number of nodes to adjust,
+         # according to a SCHEDULED type policy
+         # @param [Hash] A SCHEDULED type policy
+         # @return [Integer] positive, 0, or negative number of nodes to adjust
+         def scale_time?(elasticity_pol)
+             now = Time.now.to_i
+             last_eval = elasticity_pol['last_eval'].to_i
+
+             elasticity_pol['last_eval'] = now
+
+             # If this is the first time this is evaluated, ignore it.
+             # We don't want to execute actions planned in the past when the
+             # server starts.
+
+             return 0 if last_eval == 0
+
+             start_time = elasticity_pol['start_time']
+             target_vms = elasticity_pol['adjust']
+
+             # TODO: error msg
+             return 0 if target_vms.nil?
+
+             if !(start_time.nil? || start_time.empty?)
+                 begin
+                     if !start_time.match(/^\d+$/)
+                         start_time = Time.parse(start_time).to_i
+                     else
+                         start_time = start_time.to_i
+                     end
+                 rescue ArgumentError
+                     # TODO: error msg
+                     return 0
+                 end
+             else
+                 recurrence = elasticity_pol['recurrence']
+
+                 # TODO: error msg
+                 return 0 if recurrence.nil? || recurrence.empty?
+
+                 begin
+                     cron_parser = CronParser.new(recurrence)
+
+                     # This returns the next planned time, starting from the last
+                     # step
+                     start_time = cron_parser.next(Time.at(last_eval)).to_i
+                 rescue StandardError
+                     # TODO: error msg bad format
+                     return 0
+                 end
+             end
+
+             # Only actions planned between last step and this one are triggered
+             if start_time > last_eval && start_time <= now
+                 Log.debug LOG_COMP,
+                           "Role #{name} : scheduled scalability for " \
+                           "#{Time.at(start_time)} triggered", @service.id
+
+                 new_cardinality = calculate_new_cardinality(elasticity_pol)
+
+                 return [new_cardinality - cardinality,
+                         elasticity_pol['cooldown']]
+             end
+
+             [0, elasticity_pol['cooldown']]
+         end
+
+         # Returns a positive, 0, or negative number of nodes to adjust,
+         # according to a policy based on attributes
+         # @param [Hash] A policy based on attributes
+         # @return [Array<Integer>] positive, 0, or negative number of nodes to
+         # adjust, plus the cooldown period duration
+         def scale_attributes?(elasticity_pol, vm_pool)
+             now = Time.now.to_i
+
+             # TODO: enforce true_up_evals type in ServiceTemplate::ROLE_SCHEMA ?
+
+             period_duration = elasticity_pol['period'].to_i
+             period_number = elasticity_pol['period_number'].to_i
+             last_eval = elasticity_pol['last_eval'].to_i
+             true_evals = elasticity_pol['true_evals'].to_i
+             expression = elasticity_pol['expression']
+
+             if !last_eval.nil? && now < (last_eval + period_duration)
+                 return [0, 0]
+             end
+
+             elasticity_pol['last_eval'] = now
+
+             new_cardinality = cardinality
+             new_evals = 0
+
+             exp_value, exp_st = scale_rule(expression, vm_pool)
+
+             if exp_value
+                 new_evals = true_evals + 1
+                 new_evals = period_number if new_evals > period_number
+
+                 if new_evals >= period_number
+                     Log.debug LOG_COMP,
+                               "Role #{name} : elasticity policy #{exp_st} "\
+                               'triggered', @service.id
+
+                     new_cardinality = calculate_new_cardinality(elasticity_pol)
+                 end
+             end
+
+             elasticity_pol['true_evals'] = new_evals
+             elasticity_pol['expression_evaluated'] = exp_st
+
+             [new_cardinality - cardinality, elasticity_pol['cooldown']]
+         end
+
+         # Returns true if the scalability rule is triggered
+         # @return true if the scalability rule is triggered
+         def scale_rule(elas_expr, vm_pool)
+             parser = ElasticityGrammarParser.new
+
+             if elas_expr.nil? || elas_expr.empty?
+                 return false
+             end
+
+             treetop = parser.parse(elas_expr)
+
+             if treetop.nil?
+                 return [false,
+                         "Parse error. '#{elas_expr}': #{parser.failure_reason}"]
+             end
+
+             val, st = treetop.result(self, vm_pool)
+
+             [val, st]
+         end
+
+         def calculate_new_cardinality(elasticity_pol)
+             type = elasticity_pol['type']
+             adjust = elasticity_pol['adjust'].to_i
+
+             # Min is a hard limit, if the current cardinality + adjustment does
+             # not reach it, the difference is added
+
+             max = [cardinality, max_cardinality.to_i].max
+             # min = [cardinality(), min_cardinality.to_i].min()
+             min = min_cardinality.to_i
+
+             case type.upcase
+             when 'CHANGE'
+                 new_cardinality = cardinality + adjust
+             when 'PERCENTAGE_CHANGE'
+                 min_adjust_step = elasticity_pol['min_adjust_step'].to_i
+
+                 change = cardinality * adjust / 100.0
+
+                 sign = change > 0 ? 1 : -1
+                 change = change.abs
+
+                 if change < 1
+                     change = 1
+                 else
+                     change = change.to_i
+                 end
+
+                 change = sign * [change, min_adjust_step].max
+
+                 new_cardinality = cardinality + change
+
+             when 'CARDINALITY'
+                 new_cardinality = adjust
+             else
+                 # TODO: error message
+                 return cardinality
+             end
+
+             # The cardinality can be forced to be outside the min,max
+             # range. If that is the case, the scale up/down will not
+             # move further outside the range. It will move towards the
+             # range with the adjustment set, instead of jumping the
+             # difference
+             if adjust > 0
+                 new_cardinality = max if new_cardinality > max
+             elsif adjust < 0
+                 new_cardinality = min if new_cardinality < min
+             end
+
+             new_cardinality
+         end
+
+     end
+
+ end
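The PERCENTAGE_CHANGE branch of calculate_new_cardinality guarantees an adjustment of at least one VM, truncates larger fractional changes, and then enforces min_adjust_step. A distilled sketch of just that arithmetic (extracted from the method above into a free-standing function):

    # Distilled from Role#calculate_new_cardinality, PERCENTAGE_CHANGE branch.
    def percentage_change(cardinality, adjust, min_adjust_step)
        change = cardinality * adjust / 100.0
        sign = change > 0 ? 1 : -1
        change = change.abs
        change = change < 1 ? 1 : change.to_i         # adjust by at least one VM
        change = sign * [change, min_adjust_step].max # honour min_adjust_step
        cardinality + change
    end

    puts percentage_change(5, 20, 2)   # 20% of 5 is 1.0; raised to step 2 => 7
    puts percentage_change(10, -25, 0) # -25% of 10 is -2.5 => change -2 => 8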