sparkle_formation 0.2.0 → 0.2.2
- checksums.yaml +7 -0
- data/CHANGELOG.md +4 -0
- data/Gemfile.lock +2 -4
- data/docs/README.md +177 -0
- data/docs/anatomy.md +203 -0
- data/docs/building-blocks.md +275 -0
- data/docs/examples/cloudformation/components/base.rb +25 -0
- data/docs/examples/cloudformation/dynamics/elb.rb +23 -0
- data/docs/examples/cloudformation/templates/website.rb +30 -0
- data/docs/examples/template_json/website.json +88 -0
- data/docs/examples/website.rb +74 -0
- data/docs/functions.md +41 -0
- data/docs/properties.md +32 -0
- data/docs/provisioning.md +82 -0
- data/docs/resource-reference.md +49 -0
- data/lib/sparkle_formation/sparkle_formation.rb +1 -1
- data/lib/sparkle_formation/translation/heat.rb +84 -1
- data/lib/sparkle_formation/translation/rackspace.rb +355 -11
- data/lib/sparkle_formation/translation.rb +109 -12
- data/lib/sparkle_formation/version.rb +2 -3
- metadata +45 -39
data/lib/sparkle_formation/translation/rackspace.rb

@@ -3,23 +3,215 @@ class SparkleFormation
     # Translation for Rackspace
     class Rackspace < Heat
 
+      # Custom mapping for network interfaces
+      #
+      # @param value [Object] original property value
+      # @param args [Hash]
+      # @option args [Hash] :new_resource
+      # @option args [Hash] :new_properties
+      # @option args [Hash] :original_resource
+      # @return [Array<String, Object>] name and new value
+      def rackspace_server_network_interfaces_mapping(value, args={})
+        networks = [value].flatten.map do |item|
+          {:uuid => item['NetworkInterfaceId']}
+        end
+        ['networks', networks]
+      end
+
+      # Translate override to provide finalization of resources
+      #
+      # @return [TrueClass]
+      def translate!
+        super
+        complete_launch_config_lb_setups
+        true
+      end
+
+      # Update any launch configuration which define load balancers to
+      # ensure they are attached to the correct resources when
+      # multiple listeners (ports) have been defined resulting in
+      # multiple isolated LB resources
+      def complete_launch_config_lb_setups
+        translated['resources'].find_all do |resource_name, resource|
+          resource['type'] == 'Rackspace::AutoScale::Group'
+        end.each do |name, value|
+          if(lbs = value['properties'].delete('load_balancers'))
+            lbs.each do |lb_ref|
+              lb_name = resource_name(lb_ref)
+              lb_resource = translated['resources'][lb_name]
+              vip_resources = translated['resources'].find_all do |k, v|
+                k.match(/#{lb_name}Vip\d+/) && v['type'] == 'Rackspace::Cloud::LoadBalancer'
+              end
+              value['properties']['launchConfiguration']['args'].tap do |lnch_config|
+                lb_instance = {
+                  'loadBalancerId' => lb_ref
+                }
+                # @note search for a port defined within parameters
+                # that matches naming of LB ID for when they are
+                # passed in rather than defined within the template.
+                # Be sure to document this in user docs since it's
+                # weird but needed
+                if(lb_resource)
+                  lb_instance['port'] = lb_resource['cache_instance_port']
+                else
+                  key = parameters.keys.find_all do |k|
+                    if(k.end_with?('Port'))
+                      lb_ref.values.first.start_with?(k.sub('Instance', '').sub(/Port$/, ''))
+                    end
+                  end
+                  key = key.detect do |k|
+                    k.downcase.include?('instance')
+                  end || key.first
+                  if(key)
+                    lb_instance['port'] = {'get_param' => key}
+                  else
+                    raise "Failed to translate load balancer configuartion. No port found! (#{lb_ref})"
+                  end
+                end
+                lnch_config['loadBalancers'] = [lb_instance]
+                vip_resources.each do |vip_name, vip_resource|
+                  lnch_config['loadBalancers'].push(
+                    'loadBalancerId' => {
+                      'Ref' => vip_name
+                    },
+                    'port' => vip_resource['cache_instance_port']
+                  )
+                end
+              end
+            end
+          end
+        end
+        translated['resources'].find_all do |resource_name, resource|
+          resource['type'] == 'Rackspace::Cloud::LoadBalancer' &&
+            !resource['properties']['nodes'].empty?
+        end.each do |resource_name, resource|
+          resource['properties']['nodes'].map! do |node_ref|
+            {
+              'addresses' => [
+                {
+                  'get_attr' => [
+                    resource_name(node_ref),
+                    'accessIPv4'
+                  ]
+                }
+              ],
+              'port' => resource['cache_instance_port'],
+              'condition' => 'ENABLED'
+            }
+          end
+        end
+        translated['resources'].values.find_all do |resource|
+          resource['type'] == 'Rackspace::Cloud::LoadBalancer'
+        end.each do |resource|
+          resource.delete('cache_instance_port')
+        end
+        true
+      end
+
       # Rackspace translation mapping
       MAP = Heat::MAP
       MAP[:resources]['AWS::EC2::Instance'][:name] = 'Rackspace::Cloud::Server'
+      MAP[:resources]['AWS::EC2::Instance'][:properties]['NetworkInterfaces'] = :rackspace_server_network_interfaces_mapping
       MAP[:resources]['AWS::AutoScaling::AutoScalingGroup'].tap do |asg|
         asg[:name] = 'Rackspace::AutoScale::Group'
         asg[:finalizer] = :rackspace_asg_finalizer
         asg[:properties].tap do |props|
           props['MaxSize'] = 'maxEntities'
           props['MinSize'] = 'minEntities'
+          props['LoadBalancerNames'] = 'load_balancers'
           props['LaunchConfigurationName'] = :delete
         end
       end
+      MAP[:resources]['AWS::EC2::Subnet'] = {}.tap do |subnet|
+        subnet[:name] = 'Rackspace::Cloud::Network'
+        subnet[:finalizer] = :rackspace_subnet_finalizer
+        subnet[:properties] = {
+          'CidrBlock' => 'cidr'
+        }
+      end
+      MAP[:resources]['AWS::ElasticLoadBalancing::LoadBalancer'] = {
+        :name => 'Rackspace::Cloud::LoadBalancer',
+        :finalizer => :rackspace_lb_finalizer,
+        :properties => {
+          'LoadBalancerName' => 'name',
+          'Instances' => 'nodes',
+          'Listeners' => 'listeners',
+          'HealthCheck' => 'health_check'
+        }
+      }
 
+      # Attribute map for autoscaling group server properties
       RACKSPACE_ASG_SRV_MAP = {
         'imageRef' => 'image',
-        'flavorRef' => 'flavor'
+        'flavorRef' => 'flavor',
+        'networks' => 'networks'
       }
+
+      # Finalizer for the rackspace load balancer resource. This
+      # finalizer may generate new resources if the load balancer has
+      # multiple listeners defined (rackspace implementation defines
+      # multiple isolated resources sharing a common virtual IP)
+      #
+      #
+      # @param resource_name [String]
+      # @param new_resource [Hash]
+      # @param old_resource [Hash]
+      # @return [Object]
+      #
+      # @todo make virtualIp creation allow servnet/multiple?
+      def rackspace_lb_finalizer(resource_name, new_resource, old_resource)
+        listeners = new_resource['Properties'].delete('listeners') || []
+        source_listener = listeners.shift
+        if(source_listener)
+          new_resource['Properties']['port'] = source_listener['LoadBalancerPort']
+          new_resource['Properties']['protocol'] = source_listener['Protocol']
+          new_resource['cache_instance_port'] = source_listener['InstancePort']
+        end
+        new_resource['Properties']['virtualIps'] = ['type' => 'PUBLIC', 'ipVersion' => 'IPV4']
+        new_resource['Properties']['nodes'] = [] unless new_resource['Properties']['nodes']
+        health_check = new_resource['Properties'].delete('health_check')
+        health_check = nil
+        if(health_check)
+          new_resource['Properties']['healthCheck'] = {}.tap do |check|
+            check['timeout'] = health_check['Timeout']
+            check['attemptsBeforeDeactivation'] = health_check['UnhealthyThreshold']
+            check['delay'] = health_check['Interval']
+            check_target = dereference_processor(health_check['Target'])
+            check_args = check_target.split(':')
+            check_type = check_args.shift
+            if(check_type == 'HTTP' || check_type == 'HTTPS')
+              check['type'] = check_type
+              check['path'] = check_args.last
+            else
+              check['type'] = 'TCP_STREAM'
+            end
+          end
+        end
+        unless(listeners.empty?)
+          listeners.each_with_index do |listener, idx|
+            port = listener['LoadBalancerPort']
+            proto = listener['Protocol']
+            vip_name = "#{resource_name}Vip#{idx}"
+            vip_resource = MultiJson.load(MultiJson.dump(new_resource))
+            vip_resource['Properties']['name'] = vip_name
+            vip_resource['Properties']['protocol'] = proto
+            vip_resource['Properties']['port'] = port
+            vip_resource['Properties']['virtualIps'] = [
+              'id' => {
+                'get_attr' => [
+                  resource_name,
+                  'virtualIps',
+                  0,
+                  'id'
+                ]
+              }
+            ]
+            vip_resource['cache_instance_port'] = listener['InstancePort']
+            translated['Resources'][vip_name] = vip_resource
+          end
+        end
+      end
+
       # Finalizer for the rackspace autoscaling group resource.
       # Extracts metadata and maps into customized personality to
       # provide bootstraping some what similar to heat bootstrap.
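The new `rackspace_server_network_interfaces_mapping` hook, wired in through the `NetworkInterfaces` entry added to `MAP` above, rewrites a CloudFormation interface list into the `networks` array a `Rackspace::Cloud::Server` expects. A standalone sketch of that transformation, using a made-up interface value:

    # Hypothetical property value as it might appear in a CFN-style template
    value = [{'NetworkInterfaceId' => '11111111-2222-3333-4444-555555555555'}]

    # Same logic as the mapping method in the hunk above
    networks = [value].flatten.map do |item|
      {:uuid => item['NetworkInterfaceId']}
    end
    networks  # => [{:uuid => '11111111-2222-3333-4444-555555555555'}]
    # The translator itself returns ['networks', networks], renaming the property as well.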
@@ -30,10 +222,12 @@ class SparkleFormation
       # @return [Object]
       def rackspace_asg_finalizer(resource_name, new_resource, old_resource)
         new_resource['Properties'] = {}.tap do |properties|
+          if(lbs = new_resource['Properties'].delete('load_balancers'))
+            properties['load_balancers'] = lbs
+          end
           properties['groupConfiguration'] = new_resource['Properties'].merge('name' => resource_name)
-
           properties['launchConfiguration'] = {}.tap do |config|
-            launch_config_name =
+            launch_config_name = resource_name(old_resource['Properties']['LaunchConfigurationName'])
             config_resource = original['Resources'][launch_config_name]
             config_resource['Type'] = 'AWS::EC2::Instance'
             translated = resource_translation(launch_config_name, config_resource)
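Together with `rackspace_lb_finalizer` above, this keeps multi-listener load balancers attached to the scaling group: the first listener stays on the original LB resource, each additional listener becomes a `<name>Vip<n>` resource that reuses the first VIP, and `complete_launch_config_lb_setups` pushes one `loadBalancers` entry per resource into the launch configuration. A rough, abbreviated sketch of the outcome for a hypothetical `WebLb` with listeners on 80 and 443 (only the keys shown in this diff are included):

    # Illustration only; resource names, ports, and omitted keys are hypothetical
    translated_resources = {
      'WebLb'     => {'type' => 'Rackspace::Cloud::LoadBalancer', 'properties' => {'port' => 80,  'protocol' => 'HTTP'}},
      'WebLbVip0' => {'type' => 'Rackspace::Cloud::LoadBalancer', 'properties' => {'port' => 443, 'protocol' => 'HTTPS'}},
      'WebAsg'    => {
        'type' => 'Rackspace::AutoScale::Group',
        'properties' => {
          'launchConfiguration' => {
            'args' => {
              'loadBalancers' => [
                {'loadBalancerId' => {'Ref' => 'WebLb'},     'port' => 8080},  # instance ports cached
                {'loadBalancerId' => {'Ref' => 'WebLbVip0'}, 'port' => 8443}   # from the listeners
              ]
            }
          }
        }
      }
    }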
@@ -51,6 +245,17 @@ class SparkleFormation
         end
       end
 
+      # Finalizer for the rackspace network resource. Uses
+      # resource name as label identifier.
+      #
+      # @param resource_name [String]
+      # @param new_resource [Hash]
+      # @param old_resource [Hash]
+      # @return [Object]
+      def rackspace_subnet_finalizer(resource_name, new_resource, old_resource)
+        new_resource['Properties']['label'] = resource_name
+      end
+
       # Custom mapping for server user data. Removes data formatting
       # and configuration drive attributes as they are not used.
       #
@@ -68,7 +273,10 @@ class SparkleFormation
       end
 
       # Max chunk size for server personality files
-
+      DEFAULT_CHUNK_SIZE = 950
+      # Max number of files to create (by default this is n-1 since we
+      # require one of the files for injecting into cloud init)
+      DEFAULT_NUMBER_OF_CHUNKS = 4
 
       # Build server personality structure
       #
@@ -76,21 +284,157 @@ class SparkleFormation
       # @return [Hash] personality hash
       # @todo update chunking to use join!
       def build_personality(resource)
-
+        max_chunk_size = options.fetch(
+          :serialization_chunk_size,
+          DEFAULT_CHUNK_SIZE
+        ).to_i
+        num_personality_files = options.fetch(
+          :serialization_number_of_chunks,
+          DEFAULT_NUMBER_OF_CHUNKS
+        )
         init = resource['Metadata']['AWS::CloudFormation::Init']
-        init = dereference_processor(init)
         content = MultiJson.dump('AWS::CloudFormation::Init' => init)
+        # Break out our content to extract items required during stack
+        # execution (template functions, refs, and the like)
+        raw_result = content.scan(/(?=(\{\s*"(Ref|Fn::[A-Za-z]+)"((?:[^{}]++|\{\g<3>\})++)\}))/).map(&:first)
+        result = [].tap do |filtered|
+          until(raw_result.empty?)
+            item = raw_result.shift
+            filtered.push(item)
+            check_item = nil
+            until(raw_result.empty? || !item.include?(check_item = raw_result.shift))
+              check_item = nil
+            end
+            if(check_item && !item.include?(check_item))
+              raw_result.unshift(check_item)
+            end
+          end
+        end
+
+        # Cycle through the result and format entries where required
+        objects = result.map do |string|
+          # Format for load and make newlines happy
+          string = string.strip.split(
+            /\n(?=(?:[^"]*"[^"]*")*[^"]*\Z)/
+          ).join.gsub('\n', '\\\\\n')
+          # Check for nested join and fix quotes
+          if(string.match(/^[^A-Za-z]+Fn::Join/))
+            string.gsub!("\\\"", "\\\\\\\\\\\"") # HAHAHA ohai thar hairy yak!
+          end
+          MultiJson.load(string)
+        end
+
+        # Find and replace any found objects
+        new_content = content.dup
+        result_set = []
+        result.each_with_index do |str, i|
+          cut_index = new_content.index(str)
+          if(cut_index)
+            result_set << new_content.slice!(0, cut_index)
+            result_set << objects[i]
+            new_content.slice!(0, str.size)
+          else
+            logger.warn "Failed to match: #{str}"
+          end
+        end
+
+        # The result set is the final formatted content that
+        # now needs to be split and assigned to files
+        result_set << new_content unless new_content.empty?
+        leftovers = ''
+
+        # Determine optimal chuck sizing and check if viable
+        calculated_chunk_size = (content.size.to_f / num_personality_files).ceil
+        if(calculated_chunk_size > max_chunk_size)
+          logger.error 'ERROR: Unable to split personality files within defined bounds!'
+          logger.error " Maximum chunk size: #{max_chunk_size.inspect}"
+          logger.error " Maximum personality files: #{num_personality_files.inspect}"
+          logger.error " Calculated chunk size: #{calculated_chunk_size}"
+          logger.error "-> Content: #{content.inspect}"
+          raise ArgumentError.new 'Unable to split personality files within defined bounds'
+        end
+
+        # Do the split!
+        chunk_size = calculated_chunk_size
+        file_index = 0
         parts = {}.tap do |files|
-          (
-
-
-
+          until(leftovers.empty? && result_set.empty?)
+            file_content = []
+            unless(leftovers.empty?)
+              result_set.unshift leftovers
+              leftovers = ''
+            end
+            item = nil
+            # @todo need better way to determine length of objects since
+            # function structures can severely bloat actual length
+            until((cur_len = file_content.map(&:to_s).map(&:size).inject(&:+).to_i) >= chunk_size || result_set.empty?)
+              to_cut = chunk_size - cur_len
+              item = result_set.shift
+              case item
+              when String
+                file_content << item.slice!(0, to_cut)
+              else
+                file_content << item
+              end
+            end
+            leftovers = item if item.is_a?(String) && !item.empty?
+            unless(file_content.empty?)
+              if(file_content.all?{|o|o.is_a?(String)})
+                files["/etc/sprkl/#{file_index}.cfg"] = file_content.join
+              else
+                file_content.map! do |cont|
+                  if(cont.is_a?(Hash))
+                    ["\"", cont, "\""]
+                  else
+                    cont
+                  end
+                end
+                files["/etc/sprkl/#{file_index}.cfg"] = {
+                  "Fn::Join" => [
+                    "",
+                    file_content.flatten
+                  ]
+                }
+              end
+            end
+            file_index += 1
           end
         end
-        parts
+        if(parts.size > num_personality_files)
+          logger.warn "Failed to split files within defined range! (Max files: #{num_personality_files} Actual files: #{parts.size})"
+          logger.warn 'Appending to last file and hoping for the best!'
+          parts = parts.to_a
+          extras = parts.slice!(4, parts.length)
+          tail_name, tail_contents = parts.pop
+          parts = Hash[parts]
+          parts[tail_name] = {
+            "Fn::Join" => [
+              '',
+              extras.map(&:last).unshift(tail_contents)
+            ]
+          }
+        end
+        parts['/etc/cloud/cloud.cfg.d/99_s.cfg'] = RUNNER
         parts
       end
 
+      FN_MAPPING = {
+        'Fn::GetAtt' => 'get_attr',
+        # 'Fn::Join' => 'list_join' # @todo why is this not working?
+      }
+
+      FN_ATT_MAPPING = {
+        'AWS::EC2::Instance' => {
+          'PrivateDnsName' => 'accessIPv4', # @todo - need srv net name for access via nets
+          'PublicDnsName' => 'accessIPv4',
+          'PrivateIp' => 'accessIPv4', # @todo - need srv net name for access via nets
+          'PublicIp' => 'accessIPv4'
+        },
+        'AWS::ElasticLoadBalancing::LoadBalancer' => {
+          'DNSName' => 'PublicIp'
+        }
+      }
+
       # Metadata init runner
       RUNNER = <<-EOR
 #cloud-config
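The personality splitting above is bounded by `DEFAULT_CHUNK_SIZE` (bytes per file) and `DEFAULT_NUMBER_OF_CHUNKS` (number of `/etc/sprkl/N.cfg` files), both overridable through the translator's `:options` hash. The sizing check reduces to simple arithmetic; the payload size below is made up:

    # Same arithmetic as build_personality above
    content_size          = 3_200   # serialized AWS::CloudFormation::Init bytes (hypothetical)
    num_personality_files = 4       # DEFAULT_NUMBER_OF_CHUNKS
    max_chunk_size        = 950     # DEFAULT_CHUNK_SIZE

    calculated_chunk_size = (content_size.to_f / num_personality_files).ceil
    calculated_chunk_size           # => 800, under the 950 limit, so the content is
                                    #    spread across 0.cfg .. 3.cfg
    # A 4_000 byte payload would calculate to 1_000 > 950 and raise ArgumentError instead.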
data/lib/sparkle_formation/translation.rb

@@ -20,8 +20,8 @@ class SparkleFormation
     attr_reader :template
     # @return [Logger] current logger
     attr_reader :logger
-    # @return [Hash]
-    attr_reader :
+    # @return [Hash] extra options (generally used by translation implementations)
+    attr_reader :options
 
     # Create new instance
     #
@@ -29,12 +29,38 @@ class SparkleFormation
     # @param args [Hash]
     # @option args [Logger] :logger custom logger
     # @option args [Hash] :parameters parameters for stack creation
+    # @option args [Hash] :options options for translation
     def initialize(template_hash, args={})
       @original = template_hash.dup
       @template = MultiJson.load(MultiJson.dump(template_hash)) ## LOL: Lazy deep dup
       @translated = {}
       @logger = args.fetch(:logger, Logger.new($stdout))
-      @parameters = args
+      @parameters = args[:parameters] || {}
+      @options = args[:options] || {}
+    end
+
+    # @return [Hash] parameters for template
+    def parameters
+      Hash[
+        @original.fetch('Parameters', {}).map do |k,v|
+          [k, v.fetch('Default', '')]
+        end
+      ].merge(@parameters)
+    end
+
+    # @return [Hash] mappings for template
+    def mappings
+      @original.fetch('Mappings', {})
+    end
+
+    # @return [Hash] resources for template
+    def resources
+      @original.fetch('Resources', {})
+    end
+
+    # @return [Hash] outputs for template
+    def outputs
+      @original.fetch('Outputs', {})
     end
 
     # @return [Hash] resource mapping
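With the constructor now splitting `:parameters` from `:options`, a translation run looks roughly like the sketch below. The `:parameters`/`:options` keys and the serialization option names come from this diff; the fully qualified class name, the `translated` reader, and the template file are assumptions:

    require 'sparkle_formation'
    require 'multi_json'

    template = MultiJson.load(File.read('website.json'))   # hypothetical CFN template

    translator = SparkleFormation::Translation::Rackspace.new(
      template,
      :parameters => {'WebNodes' => 2},                     # merged over template Defaults
      :options => {
        :serialization_chunk_size => 900,                   # read by build_personality
        :serialization_number_of_chunks => 4
      }
    )
    translator.translate!
    translator.translated                                   # => translated template hash

Because `parameters` now merges supplied values over each parameter's `Default`, only overrides need to be passed in.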
@@ -160,7 +186,7 @@ class SparkleFormation
     def dereference(obj)
       result = obj
       if(obj.is_a?(Hash))
-        name = obj['Ref']
+        name = obj['Ref'] || obj['get_param']
         if(name)
           p_val = parameters[name.to_s]
           if(p_val)
@@ -171,22 +197,53 @@ class SparkleFormation
       result
     end
 
+    # Provide name of resource
+    #
+    # @param obj [Object]
+    # @return [String] name
+    def resource_name(obj)
+      case obj
+      when Hash
+        obj['Ref'] || obj['get_resource']
+      else
+        obj.to_s
+      end
+    end
+
     # Process object through dereferencer. This will dereference names
     # and apply functions if possible.
     #
     # @param obj [Object]
     # @return [Object]
-    def dereference_processor(obj)
-
+    def dereference_processor(obj, funcs=[])
+      case obj
+      when Array
+        obj = obj.map{|v| dereference_processor(v, funcs)}
+      when Hash
+        new_hash = {}
+        obj.each do |k,v|
+          new_hash[k] = dereference_processor(v, funcs)
+        end
+        obj = apply_function(new_hash, funcs)
+      end
+      obj
+    end
+
+    # Process object through name mapping
+    #
+    # @param obj [Object]
+    # @param names [Array<Symbol>] enable renaming (:ref, :fn)
+    # @return [Object]
+    def rename_processor(obj, names=[])
       case obj
       when Array
-        obj = obj.map{|v|
+        obj = obj.map{|v| rename_processor(v, names)}
       when Hash
         new_hash = {}
         obj.each do |k,v|
-          new_hash[k] =
+          new_hash[k] = rename_processor(v, names)
         end
-        obj =
+        obj = apply_rename(new_hash, names)
       end
       obj
     end
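The `resource_name` helper lets translation code accept CFN-style `Ref` hashes, already-translated `get_resource` hashes, or bare strings interchangeably, while `dereference_processor` and `rename_processor` thread their `funcs`/`names` arguments through the same recursive walk. A standalone illustration of the name handling (not the gem API itself):

    # Mirrors the resource_name logic above
    def resource_name(obj)
      obj.is_a?(Hash) ? (obj['Ref'] || obj['get_resource']) : obj.to_s
    end

    resource_name('WebLb')                    # => "WebLb"
    resource_name('Ref' => 'WebLb')           # => "WebLb"
    resource_name('get_resource' => 'WebLb')  # => "WebLb"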
@@ -194,13 +251,47 @@ class SparkleFormation
     # Apply function if possible
     #
     # @param hash [Hash]
+    # @param names [Array<Symbol>] enable renaming (:ref, :fn)
+    # @return [Hash]
+    # @note remapping references two constants:
+    #   REF_MAPPING for Ref maps
+    #   FN_MAPPING for Fn maps
+    def apply_rename(hash, names=[])
+      k,v = hash.first
+      if(hash.size == 1)
+        if(k.start_with?('Fn::'))
+          {self.class.const_get(:FN_MAPPING).fetch(k, k) => v}
+        elsif(k == 'Ref')
+          if(resources.has_key?(v))
+            {'get_resource' => v}
+          else
+            {'get_param' => self.class.const_get(:REF_MAPPING).fetch(v, v)}
+          end
+        else
+          hash
+        end
+      else
+        hash
+      end
+    end
+
+    # Apply function if possible
+    #
+    # @param hash [Hash]
+    # @param funcs [Array] allowed functions
     # @return [Hash]
-
-
-
+    # @note also allows 'Ref' within funcs to provide mapping
+    #   replacements using the REF_MAPPING constant
+    def apply_function(hash, funcs=[])
+      k,v = hash.first
+      if(hash.size == 1 && (k.start_with?('Fn') || k == 'Ref') && (funcs.empty? || funcs.include?(k)))
         case k
         when 'Fn::Join'
           v.last.join(v.first)
+        when 'Fn::FindInMap'
+          mappings[v[0]][dereference(v[1])][v[2]]
+        when 'Ref'
+          {'Ref' => self.class.const_get(:REF_MAPPING).fetch(v, v)}
         else
           hash
         end
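The two passes split the work: `apply_rename` maps CFN keys onto the concrete translator's `REF_MAPPING`/`FN_MAPPING` constants, while `apply_function` evaluates whatever it can at translation time. A small standalone sketch of the function side, with the rename behaviour summarised in comments (the `REF_MAPPING` entry shown is hypothetical; the base-class defaults below are empty):

    # Standalone sketch, not the gem API
    def apply_function(hash)
      key, value = hash.first
      case key
      when 'Fn::Join' then value.last.join(value.first)   # evaluated immediately
      else hash                                           # left untouched
      end
    end

    apply_function('Fn::Join' => ['-', ['web', 'asg']])   # => "web-asg"

    # apply_rename, given REF_MAPPING = {'AWS::StackName' => 'OS::stack_name'} (hypothetical):
    #   {'Ref' => 'WebServer'}      -> {'get_resource' => 'WebServer'}   # template resource
    #   {'Ref' => 'AWS::StackName'} -> {'get_param' => 'OS::stack_name'} # mapped parameter
    #   {'Fn::GetAtt' => [...]}     -> {'get_attr' => [...]}             # via FN_MAPPING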
@@ -209,5 +300,11 @@ class SparkleFormation
       end
     end
 
+    # @return [Hash] mapping for pseudo-parameters
+    REF_MAPPING = {}
+
+    # @return [Hash] mapping for intrinsic functions
+    FN_MAPPING = {}
+
   end
 end