knife-stackbuilder 0.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,424 @@
+ # Copyright (c) 2014 Mevan Samaratunga
+
+ include StackBuilder::Common::Helpers
+
+ module StackBuilder::Stack
+
+     class NodeTask
+
+         attr_reader :name
+
+         attr_accessor :scale
+         attr_accessor :prev_scale
+         attr_accessor :sync
+
+         attr_accessor :deleted
+
+         attr_reader :counter
+         attr_reader :parent_nodes
+         attr_reader :child_nodes
+
+         attr_reader :resource_sync
+         attr_reader :manager
+
+         SYNC_NONE = 0 # All node instances processed asynchronously
+         SYNC_FIRST = 1 # First node instance is processed synchronously and the rest asynchronously
+         SYNC_ALL = 2 # All node instances are processed synchronously
+
+         def initialize(manager, nodes, node_config, id)
+
+             @logger = StackBuilder::Common::Config.logger
+
+             @manager = manager
+
+             @id = id
+             @nodes = nodes
+             @parent_nodes = [ ]
+             @child_nodes = [ ]
+             @counter = 0
+
+             @name = node_config['node']
+             @attributes = (node_config.has_key?('attributes') ? node_config['attributes'] : { })
+
+             case node_config['sync']
+             when "first"
+                 @sync = SYNC_FIRST
+             when "all"
+                 @sync = SYNC_ALL
+             else
+                 @sync = SYNC_NONE
+             end
+
+             if node_config.has_key?('targets')
+
+                 @logger.warn("Ignoring 'scale' attribute for '#{@name}' as that node has targets.") \
+                     if node_config.has_key?("scale")
+
+                 @scale = 0
+             else
+                 current_scale = manager.get_scale
+                 if current_scale==0
+                     @scale = (node_config.has_key?("scale") ? node_config["scale"] : 1)
+                 else
+                     @scale = current_scale
+                 end
+
+                 raise ArgumentError, "The scale for node \"#{@name}\" must be greater than 0." if @scale < 1
+             end
+             @prev_scale = @scale
+
+             @targets = [ ]
+
+             @node_mutex = Mutex.new
+             @resource_sync = [ ]
+
+             @deleted = false
+         end
+
+         def add_dependency(node_name, is_target = false)
+
+             node = @nodes[node_name]
+
+             @targets << node if is_target
+
+             node.parent_nodes << self unless node.parent_nodes.include?(self)
+             self.child_nodes << node unless self.child_nodes.include?(node)
+         end
+
+         def process_attribute_dependencies
+
+             @attributes.each_value do |v|
+
+                 if v =~ /^nodes\[.*\]$/
+
+                     lookup_keys = v.split(/[\[\]]/).reject { |l| l == "nodes" || l.empty? }
+                     add_dependency(lookup_keys.shift)
+                 end
+             end
+         end
+
+         def init_dependency_count(count = nil)
+
+             if count.nil?
+                 @counter = child_nodes.size
+             else
+                 @counter += count
+             end
+
+             @counter
+         end
+
+         def dec_dependency_count
+             @node_mutex.synchronize {
+                 @counter -= 1
+                 return @counter
+             }
+         end
+
+         def prepare
+
+             threads = [ ]
+
+             if @targets.empty?
+
+                 # You need to prepare nodes only if this node task
+                 # is the target. i.e. no referenced targets
+
+                 current_scale = @manager.get_scale
+
+                 @resource_sync.size.step(current_scale - 1) do |i|
+                     @resource_sync[i] ||= StackBuilder::Common::Semaphore.new
+                     @resource_sync[i].signal
+                 end
+
+                 if current_scale>@scale
+
+                     @logger.debug("Scaling #{self} from #{current_scale} down to #{@scale}")
+
+                     # Scale Down
+
+                     delete_events = Set.new([ "stop", "uninstall" ])
+                     @scale.step(current_scale - 1).to_a.each do |i|
+
+                         resource_sync = @resource_sync[i]
+                         resource_sync.wait
+
+                         threads << Thread.new {
+
+                             begin
+                                 @logger.debug("Deleting #{self} #{i}.")
+                                 $stdout.printf("Deleting node resource '%s[%d]'.\n",
+                                     @name, i) unless @logger.debug?
+
+                                 @manager.process(i, delete_events, parse_attributes(@attributes, i))
+                                 @manager.delete(i)
+
+                             rescue Exception => msg
+
+                                 puts("Fatal Error: #{msg}")
+                                 @logger.debug(msg.backtrace.join("\n\t"))
+
+                                 raise StackBuilder::Common::StackDeleteError,
+                                     "Deleting node resource '#{name}[#{i}]' " +
+                                     "terminated with an error: #{msg}"
+                             ensure
+                                 resource_sync.signal
+                             end
+                         }
+                     end
+
+                     (current_scale - 1).downto(@scale) do |i|
+                         @resource_sync.delete_at(i)
+                     end
+                 end
+
+                 if @scale>current_scale && !@deleted
+
+                     @logger.debug("Scaling #{self} from #{current_scale} up to #{@scale}")
+
+                     # Scale up
+
+                     current_scale.step(@scale - 1) do |i|
+
+                         sync = StackBuilder::Common::Semaphore.new
+                         @resource_sync[i] = sync
+
+                         threads << Thread.new {
+
+                             begin
+                                 @logger.debug("Creating #{self} #{i}.")
+                                 $stdout.printf( "Creating node resource '%s[%d]'.\n",
+                                     @name, i) unless @logger.debug?
+
+                                 @manager.create(i)
+
+                             rescue Exception => msg
+
+                                 puts("Fatal Error: #{msg}")
+                                 @logger.debug(msg.backtrace.join("\n\t"))
+
+                                 raise StackBuilder::Common::StackCreateError,
+                                     "Creating node resource '#{name}[#{i}]' " +
+                                     "terminated with an error: #{msg}"
+                             ensure
+                                 @resource_sync[i].signal
+                             end
+                         }
+                     end
+                 end
+
+                 @prev_scale = current_scale
+                 @manager.set_scale(@scale)
+             end
+
+             threads
+         end
+
+         def orchestrate(events)
+
+             threads = [ ]
+
+             scale = (@deleted ? @manager.get_scale : @scale)
+             if scale > 0
+
+                 if @sync == "first"
+                     @manager.process(scale, events, self.parse_attributes(@attributes, 0))
+                     scale -= 1
+                 end
+
+                 if @sync == "all"
+                     scale.times do |i|
+                         @manager.process(i, events, self.parse_attributes(@attributes, i))
+                     end
+                 else
+                     scale.times do |i|
+                         spawn_processing(i, events, threads)
+                     end
+                 end
+
+             elsif !@targets.empty?
+
+                 @targets.each do |t|
+                     t.scale.times do |i|
+                         spawn_processing(i, events, threads, t)
+                     end
+                 end
+             end
+
+             threads.each { |t| t.join }
+
+             executable_parents = [ ]
+             parent_nodes.each do |p|
+                 executable_parents << p if p.dec_dependency_count == 0
+             end
+             executable_parents
+         end
+
+         def to_s
+             p = "Parent_Nodes[#{@parent_nodes.collect { |n| "#{n.name}:#{n.counter}" }.join(", ")}]"
+             c = "Child_Nodes[#{@child_nodes.collect { |n| n.name }.join(", ")}]"
+
+             "(#{@name}, #{p}, #{c}, " +
+                 "Sync[#{@sync==SYNC_NONE ? "async" : @sync==SYNC_FIRST ? "first" : "all"}], " +
+                 "Scale[#{manager.get_scale}])"
+         end
+
+         private
+
+         def spawn_processing(i, events, threads, target = nil)
+
+             if target.nil?
+                 target_manager = nil
+                 prev_scale = @prev_scale
+             else
+                 target_manager = target.manager
+                 prev_scale = target.prev_scale
+             end
+
+             if target_manager.nil?
+                 $stdout.printf( "Processing node '%s[%d]'.\n", @name, i)
+             else
+                 $stdout.printf( "Processing target node '%s[%d]' from %s.\n", target_manager.name, i, @name)
+             end
+
+             # If no scale up occurs then run only the given events.
+             orchestrate_events = events.clone
+             # If new VMs have been added to the cluster to scale up then add default events for the new VM.
+             orchestrate_events = orchestrate_events.merge([ "create", "install", "configure" ]) if i >= prev_scale
+
+             @logger.debug("Events for node '#{name}' instance #{i} build: " +
+                 "#{orchestrate_events.collect { |e| e } .join(", ")}") if @logger.debug?
+
+             if @sync==SYNC_ALL || (i==0 && @sync==SYNC_FIRST)
+                 @manager.process(i, orchestrate_events, parse_attributes(@attributes, i), target_manager)
+             else
+                 @resource_sync[i].wait if target.nil?
+                 threads << Thread.new {
+
+                     begin
+                         @manager.process(i, orchestrate_events, parse_attributes(@attributes, i), target_manager)
+
+                     rescue Exception => msg
+
+                         puts("Fatal Error: #{msg}")
+                         @logger.debug(msg.backtrace.join("\n\t"))
+
+                         raise StackBuilder::Common::StackOrchestrateError,
+                             "Processing node resource '#{name}[#{i}]' " +
+                             "terminated with an error: #{msg}"
+                     ensure
+                         @resource_sync[i].signal if target.nil?
+                     end
+                 }
+             end
+         end
+
+         def parse_attributes(attributes, index)
+
+             results = { }
+             attributes.each_pair do |k, v|
+
+                 @logger.debug("Evaluating #{k} = #{v}")
+
+                 if v.is_a?(Hash)
+                     results[k] = parse_attributes(v, index)
+
+                 elsif v.is_a?(Array)
+
+                     results[k] = [ ]
+                     v.each do |aval|
+                         results[k] << parse_attributes( { "#" => aval }, index)["#"]
+                     end
+
+                 elsif v =~ /^nodes\[.*\](.size)?$/
+
+                     lookup_keys = v.split(/[\[\]]/).reject { |l| l == "nodes" || l.empty? }
+
+                     l = lookup_keys.shift
+                     node = @nodes[l]
+                     unless node.nil?
+
+                         node_attributes = node.manager.node_attributes
+                         unless node_attributes.nil? || node_attributes.empty?
+
+                             indexes = [ ]
+                             values = [ ]
+
+                             l = lookup_keys.shift
+                             case l
+                             when ".size"
+                                 values << node.scale
+                             when "*"
+                                 indexes = (0..node.scale-1).to_a
+                             when /\d+/
+                                 indexes << l.to_i
+                             else
+                                 indexes << 0
+                             end
+
+                             indexes.each do |i|
+                                 v = node_attributes[i]
+                                 lookup_keys.each do |j|
+                                     v = v[j]
+                                     break if v.nil?
+                                 end
+                                 values << v
+                             end
+
+                             results[k] = (l == "*" ? values : values[0])
+                         end
+                     end
+
+                 elsif v.is_a?(String)
+                     v = v.split(/(\#){|}/)
+
+                     if v.size == 1
+                         results[k] = v[0]
+                     else
+                         result = ""
+
+                         is_var = false
+                         v.each do |s|
+
+                             if is_var
+                                 is_var = false
+                                 sstr = (s == "index" ? index.to_s : parse_attributes( { "#" => s }, index)["#"])
+                                 result += sstr unless sstr.nil?
+                                 next
+                             end
+
+                             if s == "#"
+                                 is_var = true
+                                 next
+                             end
+
+                             result += s
+                         end
+
+                         if result.start_with?('<<!')
+
+                             s = result[3, result.length-3]
+                             @logger.debug("Evaluating the result of: #{s}")
+                             results[k] = eval(s)
+                         else
+                             results[k] = result
+                         end
+                     end
+                 else
+                     results[k] = v
+                 end
+
+                 @logger.debug("Evaluated #{k} = #{results[k]}")
+             end
+
+             results
+
+         rescue Exception => msg
+
+             @logger.debug("Fatal Error: #{msg} : #{msg.backtrace.join("\n\t")}")
+             raise msg
+         end
+
+     end
+
+ end
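
The parse_attributes method above resolves two kinds of references inside a node's attribute map: "nodes[...]" lookups against another node's published attributes (with ".size" returning that node's scale), and "#{...}" placeholders, where "#{index}" expands to the current instance index and a result starting with "<<!" is passed to eval. Below is a minimal sketch of attributes that would exercise those paths; the node name 'database' and the attribute keys are illustrative only and do not come from this diff.

    # Hypothetical attribute map, shown as the Ruby hash the stack YAML would load into.
    # 'nodes[database][0][mysql][bind_address]' is looked up in the 'database' node's
    # registered attributes, 'nodes[database].size' resolves to that node's scale, and
    # 'app-#{index}' has the instance index substituted by parse_attributes.
    {
        "db_host"   => 'nodes[database][0][mysql][bind_address]',
        "db_count"  => 'nodes[database].size',
        "node_name" => 'app-#{index}'
    }

Because the first value matches the /^nodes\[.*\]$/ pattern, process_attribute_dependencies would also register 'database' as a dependency of the node that declares it.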
@@ -0,0 +1,224 @@
+ # Copyright (c) 2014 Mevan Samaratunga
+
+ include StackBuilder::Common::Helpers
+
+ module StackBuilder::Stack
+
+     class Stack
+
+         attr_reader :id
+         attr_reader :name
+         attr_reader :nodes
+
+         def initialize(provider, stack_file, id = nil, overrides = nil)
+
+             StackBuilder::Common::Config.set_silent
+             @logger = StackBuilder::Common::Config.logger
+
+             raise InvalidArgs, "Node provider is not derived from " +
+                 "StackBuilder::Stack::NodeProvider." unless provider.is_a?(NodeProvider)
+
+             @provider = provider
+             env_vars = provider.get_env_vars
+
+             stack = StackBuilder::Common.load_yaml(stack_file, env_vars)
+             @logger.debug("Initializing stack definition:\n #{stack.to_yaml}")
+
+             overrides = JSON.load(File.new(overrides, 'r')) unless overrides.nil? || !overrides.end_with?('.json')
+             merge_maps(stack, overrides) unless overrides.nil?
+
+             if id.nil?
+                 @id = SecureRandom.uuid.gsub(/-/, '')
+                 @provider.set_stack(stack, @id)
+             else
+                 @id = id
+                 @provider.set_stack(stack, @id)
+             end
+
+             @name = stack["name"]
+             @nodes = { }
+
+             if stack.has_key?("stack") && stack["stack"].is_a?(Array)
+
+                 stack["stack"].each do |n|
+
+                     raise ArgumentError, "Node does not have a 'node' attribute " +
+                         "that identifies it: #{n}" unless n.has_key?("node")
+
+                     node_id = n["node"]
+                     raise ArgumentError, "Node identified by \"#{node_id}\" " +
+                         "already exists." if @nodes.has_key? (node_id)
+
+                     n["attributes"] = { } if n["attributes"].nil?
+                     merge_maps(n["attributes"], stack["attributes"]) unless stack["attributes"].nil?
+
+                     node_manager = @provider.get_node_manager(n)
+                     raise InvalidArgs, "Node task is of an invalid type. It is not derived " +
+                         "from StackBuilder::Stack::Node." unless node_manager.is_a?(NodeManager)
+
+                     @nodes[node_id] = NodeTask.new(node_manager, @nodes, n, id)
+                 end
+
+                 # Associate dependencies
+                 stack["stack"].each do |n|
+
+                     node_task = @nodes[n["node"]]
+
+                     if n.has_key?("depends_on") && n["depends_on"].is_a?(Array)
+
+                         n["depends_on"].each do |d|
+
+                             raise ArgumentError, "Dependency node \"#{d}\" " +
+                                 "is not defined." unless @nodes.has_key?(d)
+
+                             node_task.add_dependency(d)
+                         end
+                     end
+
+                     if n.has_key?("targets") && n["targets"].is_a?(Array)
+
+                         n["targets"].each do |d|
+
+                             raise ArgumentError, "Target node \"#{d}\" " +
+                                 "is not defined." unless @nodes.has_key?(d)
+
+                             node_task.add_dependency(d, true)
+                         end
+                     end
+
+                     node_task.process_attribute_dependencies
+                 end
+
+             else
+                 raise ArgumentError, "System needs to have at least one node defined."
+             end
+         end
+
+         def orchestrate(events = nil, name = nil, scale = nil)
+
+             events = Set.new([ "configure" ]) if events.nil?
+
+             unless name.nil?
+                 node = @nodes[name]
+                 raise StackBuilder::Common::StackBuilderError, "Invalid node name \"#{name}\"." if node.nil?
+
+                 unless scale.nil?
+                     raise ArgumentError, "The scale for node \"#{name}\" must be greater than 0." if scale < 1
+                     node.scale = scale
+                 end
+             end
+
+             prep_threads = [ ]
+             execution_list = [ ]
+
+             if name.nil?
+
+                 @nodes.each_value do |n|
+                     execution_list << n if n.init_dependency_count == 0
+                     prep_threads += n.prepare
+                 end
+
+                 task_count = @nodes.size
+             else
+                 # Only process nodes that depend on 'name' and their dependencies
+
+                 def add_parent_task(node, prep_threads, nodes_visited)
+
+                     prep_threads.concat(node.prepare)
+                     nodes_visited << node.name
+
+                     node.init_dependency_count(1)
+
+                     node.parent_nodes.each do |n|
+                         add_parent_task(n, prep_threads, nodes_visited)
+                     end
+                 end
+
+                 node = @nodes[name]
+                 nodes_visited = Set.new([ node.name ])
+
+                 execution_list << node
+                 prep_threads += node.prepare
+
+                 node.parent_nodes.each do |n|
+                     add_parent_task(n, prep_threads, nodes_visited)
+                 end
+
+                 task_count = nodes_visited.size
+             end
+
+             execution_count = 0
+             terminate = false
+
+             while !terminate && !execution_list.empty? do
+
+                 mutex = Mutex.new
+                 new_execution_list = [ ]
+
+                 exec_threads = [ ]
+                 execution_list.each do |n|
+                     exec_threads << Thread.new {
+                         begin
+                             executable_parents = n.orchestrate(events)
+
+                             mutex.synchronize {
+                                 new_execution_list |= executable_parents
+                                 execution_count += 1
+                             }
+                         rescue Exception => msg
+                             @logger.error("Processing node '#{n}' terminated with an exception: #{msg}")
+                             @logger.info(msg.backtrace.join("\n\t"))
+                             terminate = true
+                         end
+                     }
+                 end
+                 exec_threads.each { |t| t.join }
+
+                 execution_list = new_execution_list
+             end
+
+             prep_threads.each { |t| t.join }
+
+             @nodes.each_value do |n|
+                 n.prev_scale = n.scale
+             end
+
+             raise StackBuilder::Common::StackBuilderError, "Processing of stack nodes " +
+                 "did not complete because of errors." if execution_count < task_count
+         end
+
+         def scale(name, scale)
+             self.orchestrate(nil, name, scale)
+         end
+
+         def destroy
+
+             @nodes.values.each { |n| n.deleted = true }
+
+             begin
+                 destroy_events = Set.new([ "stop", "uninstall" ])
+                 self.orchestrate(destroy_events)
+             rescue Exception => msg
+                 @logger.warn("An error was encountered attempting to do an orderly tear down of the system: #{msg}")
+                 @logger.info("All remaining nodes will be destroyed forcefully.")
+             end
+
+             threads = [ ]
+
+             @nodes.values.each do |n|
+                 (n.manager.get_scale - 1).downto(0) do |i|
+                     threads << Thread.new {
+
+                         @logger.debug("Deleting #{n} #{i}.")
+                         $stdout.printf("Deleting node \"%s\" #%d.\n", n.name, i) unless @logger.debug?
+
+                         n.manager.delete(i)
+                     }
+                 end
+             end
+
+             threads.each { |t| t.join }
+         end
+     end
+
+ end
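
Taken together, Stack builds the NodeTask dependency graph from a YAML stack definition and walks it: orchestrate prepares each node (creating or deleting instances to match the requested scale) and then processes nodes whose dependency counters reach zero, scale re-runs orchestration for one node at a new scale, and destroy tears the whole system down. The following rough usage sketch assumes a concrete NodeProvider implementation (the placeholder chef_provider), a mystack.yml file, and an "update" event; none of these names come from this diff.

    # Hypothetical driver code; 'chef_provider' stands in for any object implementing
    # the NodeProvider interface used above (get_env_vars, set_stack, get_node_manager).
    stack = StackBuilder::Stack::Stack.new(chef_provider, 'mystack.yml')

    stack.orchestrate                                 # run the default "configure" event on every node
    stack.orchestrate(Set.new([ "update" ]), 'app')   # run "update" on 'app' and the nodes that depend on it
    stack.scale('app', 3)                             # grow or shrink the 'app' node to 3 instances
    stack.destroy                                     # stop, uninstall and delete all node resources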
@@ -0,0 +1,8 @@
+ # Copyright (c) 2014 Mevan Samaratunga
+
+ module Knife
+     module StackBuilder
+         VERSION = "0.5.2"
+         MAJOR, MINOR, TINY = VERSION.split('.')
+     end
+ end