sparkle_formation 3.0.30 → 3.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +3 -0
  3. data/lib/sparkle_formation.rb +23 -23
  4. data/lib/sparkle_formation/aws.rb +1 -1
  5. data/lib/sparkle_formation/composition.rb +7 -7
  6. data/lib/sparkle_formation/error.rb +2 -2
  7. data/lib/sparkle_formation/function_struct.rb +22 -22
  8. data/lib/sparkle_formation/provider.rb +7 -7
  9. data/lib/sparkle_formation/provider/aws.rb +28 -28
  10. data/lib/sparkle_formation/provider/azure.rb +7 -7
  11. data/lib/sparkle_formation/provider/google.rb +16 -16
  12. data/lib/sparkle_formation/provider/heat.rb +6 -6
  13. data/lib/sparkle_formation/provider/terraform.rb +7 -7
  14. data/lib/sparkle_formation/resources.rb +18 -18
  15. data/lib/sparkle_formation/resources/aws.rb +216 -126
  16. data/lib/sparkle_formation/resources/aws_resources.json +3463 -1601
  17. data/lib/sparkle_formation/resources/azure.rb +6 -6
  18. data/lib/sparkle_formation/resources/google.rb +7 -7
  19. data/lib/sparkle_formation/resources/heat.rb +2 -2
  20. data/lib/sparkle_formation/resources/rackspace.rb +2 -2
  21. data/lib/sparkle_formation/resources/terraform.rb +6 -6
  22. data/lib/sparkle_formation/sparkle.rb +32 -32
  23. data/lib/sparkle_formation/sparkle_attribute.rb +10 -10
  24. data/lib/sparkle_formation/sparkle_attribute/aws.rb +30 -30
  25. data/lib/sparkle_formation/sparkle_attribute/azure.rb +39 -39
  26. data/lib/sparkle_formation/sparkle_attribute/google.rb +19 -19
  27. data/lib/sparkle_formation/sparkle_attribute/heat.rb +16 -16
  28. data/lib/sparkle_formation/sparkle_attribute/rackspace.rb +1 -1
  29. data/lib/sparkle_formation/sparkle_attribute/terraform.rb +41 -41
  30. data/lib/sparkle_formation/sparkle_collection.rb +4 -4
  31. data/lib/sparkle_formation/sparkle_collection/rainbow.rb +3 -3
  32. data/lib/sparkle_formation/sparkle_formation.rb +31 -31
  33. data/lib/sparkle_formation/sparkle_struct.rb +5 -5
  34. data/lib/sparkle_formation/translation.rb +32 -32
  35. data/lib/sparkle_formation/translation/heat.rb +126 -126
  36. data/lib/sparkle_formation/translation/rackspace.rb +118 -118
  37. data/lib/sparkle_formation/utils.rb +5 -5
  38. data/lib/sparkle_formation/version.rb +1 -1
  39. data/sparkle_formation.gemspec +1 -1
  40. metadata +7 -7
@@ -1,4 +1,4 @@
1
- require 'sparkle_formation'
1
+ require "sparkle_formation"
2
2
 
3
3
  class SparkleFormation
4
4
  module Provider
@@ -7,7 +7,7 @@ class SparkleFormation
7
7
 
8
8
  # @return [String] Type string for Azure Resource Manager stack resource
9
9
  def stack_resource_type
10
- 'Microsoft.Resources/deployments'
10
+ "Microsoft.Resources/deployments"
11
11
  end
12
12
 
13
13
  # Generate policy for stack
@@ -24,7 +24,7 @@ class SparkleFormation
24
24
  # @return [AttributeStruct]
25
25
  def set_compiled_state(compiled)
26
26
  super
27
- compiled.outputs.compile_state.type 'String'
27
+ compiled.outputs.compile_state.type "String"
28
28
  compiled
29
29
  end
30
30
 
@@ -112,9 +112,9 @@ class SparkleFormation
112
112
  end
113
113
  end
114
114
  raise ArgumentError.new "Failed to detect available bubbling path for output `#{output_name}`. " <<
115
- 'This may be due to a circular dependency! ' <<
116
- "(Output Path: #{outputs[output_name].root_path.map(&:name).join(' > ')} " <<
117
- "Requester Path: #{root_path.map(&:name).join(' > ')})"
115
+ "This may be due to a circular dependency! " <<
116
+ "(Output Path: #{outputs[output_name].root_path.map(&:name).join(" > ")} " <<
117
+ "Requester Path: #{root_path.map(&:name).join(" > ")})"
118
118
  end
119
119
  result = compile._stack_output(bubble_path.first.name, output_name)
120
120
  if drip_path.size > 1
@@ -123,7 +123,7 @@ class SparkleFormation
123
123
  drip_path.each_slice(2) do |base_sparkle, ref_sparkle|
124
124
  next unless ref_sparkle
125
125
  base_sparkle.compile.resources[ref_sparkle.name].properties.parameters.value._set(output_name, result)
126
- ref_sparkle.compile.parameters._set(output_name).type 'string' # TODO: <<<<------ type check and prop
126
+ ref_sparkle.compile.parameters._set(output_name).type "string" # TODO: <<<<------ type check and prop
127
127
  result = compile._parameter(output_name)
128
128
  end
129
129
  end
@@ -1,4 +1,4 @@
1
- require 'sparkle_formation'
1
+ require "sparkle_formation"
2
2
 
3
3
  class SparkleFormation
4
4
  module Provider
@@ -17,16 +17,16 @@ class SparkleFormation
17
17
  # @param parent_names [Array<String>] name of parent resources
18
18
  # @return [Smash] dump_copy
19
19
  def google_template_extractor(template_hash, dump_copy, parent_names = [])
20
- template_hash.fetch('resources', []).each do |t_resource|
21
- if t_resource['type'] == stack_resource_type
22
- full_names = parent_names + [t_resource['name']]
23
- stack = t_resource['properties'].delete('stack')
24
- if t_resource['properties'].empty?
25
- t_resource.delete('properties')
20
+ template_hash.fetch("resources", []).each do |t_resource|
21
+ if t_resource["type"] == stack_resource_type
22
+ full_names = parent_names + [t_resource["name"]]
23
+ stack = t_resource["properties"].delete("stack")
24
+ if t_resource["properties"].empty?
25
+ t_resource.delete("properties")
26
26
  end
27
27
  google_template_extractor(stack, dump_copy, full_names)
28
- new_type = generate_template_files(full_names.join('-'), stack, dump_copy)
29
- t_resource['type'] = new_type
28
+ new_type = generate_template_files(full_names.join("-"), stack, dump_copy)
29
+ t_resource["type"] = new_type
30
30
  end
31
31
  end
32
32
  dump_copy
@@ -41,7 +41,7 @@ class SparkleFormation
41
41
  # @return [String] new type for stack
42
42
  def generate_template_files(r_name, r_stack, dump_copy)
43
43
  f_name = "#{r_name}.jinja"
44
- r_parameters = r_stack.delete('parameters')
44
+ r_parameters = r_stack.delete("parameters")
45
45
  dump_copy[:imports].push(
46
46
  Smash.new(
47
47
  :name => f_name,
@@ -101,7 +101,7 @@ class SparkleFormation
101
101
  # @note Nested templates aren't defined as a specific type thus no "real"
102
102
  # type exists. So we'll create a custom one!
103
103
  def stack_resource_type
104
- 'sparkleformation.stack'
104
+ "sparkleformation.stack"
105
105
  end
106
106
 
107
107
  # Generate policy for stack
@@ -147,7 +147,7 @@ class SparkleFormation
147
147
  # Forcibly disable shallow nesting as support for it with Google templates doesn't
148
148
  # really make much sense.
149
149
  def apply_shallow_nesting(*args, &block)
150
- raise NotImplementedError.new 'Shallow nesting is not supported for this provider!'
150
+ raise NotImplementedError.new "Shallow nesting is not supported for this provider!"
151
151
  end
152
152
 
153
153
  # Extract output to make available for stack parameter usage at the
@@ -176,9 +176,9 @@ class SparkleFormation
176
176
  end
177
177
  end
178
178
  raise ArgumentError.new "Failed to detect available bubbling path for output `#{output_name}`. " <<
179
- 'This may be due to a circular dependency! ' <<
180
- "(Output Path: #{outputs[output_name].root_path.map(&:name).join(' > ')} " <<
181
- "Requester Path: #{root_path.map(&:name).join(' > ')})"
179
+ "This may be due to a circular dependency! " <<
180
+ "(Output Path: #{outputs[output_name].root_path.map(&:name).join(" > ")} " <<
181
+ "Requester Path: #{root_path.map(&:name).join(" > ")})"
182
182
  end
183
183
  result = source_stack.compile._stack_output(bubble_path.first.name, output_name)
184
184
  if drip_path.size > 1
@@ -187,7 +187,7 @@ class SparkleFormation
187
187
  drip_path.each_slice(2) do |base_sparkle, ref_sparkle|
188
188
  next unless ref_sparkle
189
189
  base_sparkle.compile.resources[ref_sparkle.name].properties.parameters.value._set(output_name, result)
190
- ref_sparkle.compile.parameters._set(output_name).type 'string' # TODO: <<<<------ type check and prop
190
+ ref_sparkle.compile.parameters._set(output_name).type "string" # TODO: <<<<------ type check and prop
191
191
  result = compile._parameter(output_name)
192
192
  end
193
193
  end
@@ -1,4 +1,4 @@
1
- require 'sparkle_formation'
1
+ require "sparkle_formation"
2
2
 
3
3
  class SparkleFormation
4
4
  module Provider
@@ -7,7 +7,7 @@ class SparkleFormation
7
7
 
8
8
  # @return [String] Type string for OpenStack HEAT stack resource
9
9
  def stack_resource_type
10
- 'OS::Heat::Stack'
10
+ "OS::Heat::Stack"
11
11
  end
12
12
 
13
13
  # Generate policy for stack
@@ -96,9 +96,9 @@ class SparkleFormation
96
96
  end
97
97
  end
98
98
  raise ArgumentError.new "Failed to detect available bubbling path for output `#{output_name}`. " <<
99
- 'This may be due to a circular dependency! ' <<
100
- "(Output Path: #{outputs[output_name].root_path.map(&:name).join(' > ')} " <<
101
- "Requester Path: #{root_path.map(&:name).join(' > ')})"
99
+ "This may be due to a circular dependency! " <<
100
+ "(Output Path: #{outputs[output_name].root_path.map(&:name).join(" > ")} " <<
101
+ "Requester Path: #{root_path.map(&:name).join(" > ")})"
102
102
  end
103
103
  result = compile._stack_output(bubble_path.first.name, output_name)
104
104
  if drip_path.size > 1
@@ -107,7 +107,7 @@ class SparkleFormation
107
107
  drip_path.each_slice(2) do |base_sparkle, ref_sparkle|
108
108
  next unless ref_sparkle
109
109
  base_sparkle.compile.resources[ref_sparkle.name].properties.parameters._set(output_name, result)
110
- ref_sparkle.compile.parameters._set(output_name).type 'string' # TODO: <<<<------ type check and prop
110
+ ref_sparkle.compile.parameters._set(output_name).type "string" # TODO: <<<<------ type check and prop
111
111
  result = compile._parameter(output_name)
112
112
  end
113
113
  end
@@ -1,4 +1,4 @@
1
- require 'sparkle_formation'
1
+ require "sparkle_formation"
2
2
 
3
3
  class SparkleFormation
4
4
  module Provider
@@ -49,7 +49,7 @@ class SparkleFormation
49
49
  # @note Nested templates aren't defined as a specific type thus no "real"
50
50
  # type exists. So we'll create a custom one!
51
51
  def stack_resource_type
52
- 'module'
52
+ "module"
53
53
  end
54
54
 
55
55
  # Generate policy for stack
@@ -94,7 +94,7 @@ class SparkleFormation
94
94
 
95
95
  # Forcibly disable shallow nesting
96
96
  def apply_shallow_nesting(*args, &block)
97
- raise NotImplementedError.new 'Shallow nesting is not supported for this provider!'
97
+ raise NotImplementedError.new "Shallow nesting is not supported for this provider!"
98
98
  end
99
99
 
100
100
  # Extract output to make available for stack parameter usage at the
@@ -123,9 +123,9 @@ class SparkleFormation
123
123
  end
124
124
  end
125
125
  raise ArgumentError.new "Failed to detect available bubbling path for output `#{output_name}`. " <<
126
- 'This may be due to a circular dependency! ' <<
127
- "(Output Path: #{outputs[output_name].root_path.map(&:name).join(' > ')} " <<
128
- "Requester Path: #{root_path.map(&:name).join(' > ')})"
126
+ "This may be due to a circular dependency! " <<
127
+ "(Output Path: #{outputs[output_name].root_path.map(&:name).join(" > ")} " <<
128
+ "Requester Path: #{root_path.map(&:name).join(" > ")})"
129
129
  end
130
130
  result = source_stack.compile._stack_output(bubble_path.first.name, output_name)
131
131
  if drip_path.size > 1
@@ -134,7 +134,7 @@ class SparkleFormation
134
134
  drip_path.each_slice(2) do |base_sparkle, ref_sparkle|
135
135
  next unless ref_sparkle
136
136
  base_sparkle.compile.resources[ref_sparkle.name].properties.parameters.value._set(output_name, result)
137
- ref_sparkle.compile.parameters._set(output_name).type 'string' # TODO: <<<<------ type check and prop
137
+ ref_sparkle.compile.parameters._set(output_name).type "string" # TODO: <<<<------ type check and prop
138
138
  result = compile._parameter(output_name)
139
139
  end
140
140
  end
@@ -1,19 +1,19 @@
1
- require 'sparkle_formation'
1
+ require "sparkle_formation"
2
2
 
3
3
  class SparkleFormation
4
4
  # Resources helper
5
5
  class Resources
6
- autoload :Aws, 'sparkle_formation/resources/aws'
7
- autoload :Azure, 'sparkle_formation/resources/azure'
8
- autoload :Google, 'sparkle_formation/resources/google'
9
- autoload :Heat, 'sparkle_formation/resources/heat'
10
- autoload :Rackspace, 'sparkle_formation/resources/rackspace'
11
- autoload :Terraform, 'sparkle_formation/resources/terraform'
6
+ autoload :Aws, "sparkle_formation/resources/aws"
7
+ autoload :Azure, "sparkle_formation/resources/azure"
8
+ autoload :Google, "sparkle_formation/resources/google"
9
+ autoload :Heat, "sparkle_formation/resources/heat"
10
+ autoload :Rackspace, "sparkle_formation/resources/rackspace"
11
+ autoload :Terraform, "sparkle_formation/resources/terraform"
12
12
 
13
13
  # Characters to be removed from supplied key on matching
14
- RESOURCE_TYPE_TR = '_:'
14
+ RESOURCE_TYPE_TR = "_:"
15
15
  # String to split for resource namespacing
16
- RESOURCE_TYPE_NAMESPACE_SPLITTER = '::'
16
+ RESOURCE_TYPE_NAMESPACE_SPLITTER = "::"
17
17
  # Property update conditionals
18
18
  # Format: Smash.new(RESOURCE_TYPE => {PROPERTY_NAME => [PropertyConditional]})
19
19
  PROPERTY_UPDATE_CONDITIONALS = Smash.new
@@ -65,7 +65,7 @@ class SparkleFormation
65
65
  if result
66
66
  result.update_causes
67
67
  else
68
- 'unknown'
68
+ "unknown"
69
69
  end
70
70
  else
71
71
  self[:update_causes]
@@ -79,7 +79,7 @@ class SparkleFormation
79
79
 
80
80
  # @return [String] base registry key
81
81
  def base_key
82
- Bogo::Utility.snake(self.name.split('::').last) # rubocop:disable Style/RedundantSelf
82
+ Bogo::Utility.snake(self.name.split("::").last) # rubocop:disable Style/RedundantSelf
83
83
  end
84
84
 
85
85
  # Register resource
@@ -148,13 +148,13 @@ class SparkleFormation
148
148
  result = key
149
149
  else
150
150
  o_key = key
151
- key = key.to_s.downcase.tr(self.const_get(:RESOURCE_TYPE_TR), '') # rubocop:disable Style/RedundantSelf
151
+ key = key.to_s.downcase.tr(self.const_get(:RESOURCE_TYPE_TR), "") # rubocop:disable Style/RedundantSelf
152
152
  snake_parts = nil
153
153
  result = @@registry[base_key].keys.detect do |ref|
154
154
  ref = ref.downcase
155
155
  snake_parts = ref.split(resource_type_splitter)
156
156
  until snake_parts.empty?
157
- break if snake_parts.join('') == key
157
+ break if snake_parts.join("") == key
158
158
  snake_parts.shift
159
159
  end
160
160
  !snake_parts.empty?
@@ -162,12 +162,12 @@ class SparkleFormation
162
162
  if result
163
163
  collisions = @@registry[base_key].keys.find_all do |ref|
164
164
  split_ref = ref.downcase.split(resource_type_splitter)
165
- ref = Array(split_ref.slice(split_ref.size - snake_parts.size, split_ref.size)).join('')
165
+ ref = Array(split_ref.slice(split_ref.size - snake_parts.size, split_ref.size)).join("")
166
166
  key == ref
167
167
  end
168
168
  if collisions.size > 1
169
- raise ArgumentError.new 'Ambiguous dynamic name returned multiple matches! ' \
170
- "`#{o_key.inspect}` -> #{collisions.sort.join(', ')}"
169
+ raise ArgumentError.new "Ambiguous dynamic name returned multiple matches! " \
170
+ "`#{o_key.inspect}` -> #{collisions.sort.join(", ")}"
171
171
  end
172
172
  end
173
173
  end
@@ -180,7 +180,7 @@ class SparkleFormation
180
180
  Regexp.new(
181
181
  [self.const_get(:RESOURCE_TYPE_NAMESPACE_SPLITTER)].flatten.compact.map { |value|
182
182
  Regexp.escape(value)
183
- }.join('|')
183
+ }.join("|")
184
184
  )
185
185
  end
186
186
 
@@ -217,7 +217,7 @@ class SparkleFormation
217
217
  def resource_lookup(type)
218
218
  result = registry[type]
219
219
  if result
220
- properties = result.fetch('full_properties', {}).map do |p_name, p_info|
220
+ properties = result.fetch("full_properties", {}).map do |p_name, p_info|
221
221
  Property.new(p_name,
222
222
  p_info[:description],
223
223
  p_info[:type],
@@ -1,4 +1,4 @@
1
- require 'sparkle_formation'
1
+ require "sparkle_formation"
2
2
 
3
3
  class SparkleFormation
4
4
 
@@ -10,236 +10,326 @@ class SparkleFormation
10
10
 
11
11
  # Conditionals for property updates
12
12
  PROPERTY_UPDATE_CONDITIONALS = Smash.new(
13
- 'AWS::DynamoDB::Table' => {
14
- 'GlobalSecondaryIndexes' => [
13
+ "AWS::DynamoDB::Table" => {
14
+ "GlobalSecondaryIndexes" => [
15
15
  # Updates not really supported here. Set as unknown to
16
16
  # prompt user to investigate
17
- UpdateCausesConditional.new('unknown', true),
17
+ UpdateCausesConditional.new("unknown", true),
18
18
  ],
19
19
  },
20
- 'AWS::EC2::EIPAssociation' => {
21
- 'AllocationId' => [
22
- UpdateCausesConditional.new('replacement',
20
+ "AWS::EC2::EIPAssociation" => {
21
+ "AllocationId" => [
22
+ UpdateCausesConditional.new("replacement",
23
23
  lambda { |final, original|
24
- original.get('Properties', 'InstanceId') != final.get('Properties', 'InstanceId') ||
25
- original.get('Properties', 'NetworkInterfaceId') != final.get('Properties', 'NewtorkInterfaceId')
24
+ original.get("Properties", "InstanceId") != final.get("Properties", "InstanceId") ||
25
+ original.get("Properties", "NetworkInterfaceId") != final.get("Properties", "NewtorkInterfaceId")
26
26
  }),
27
- UpdateCausesConditional.new('none', true),
27
+ UpdateCausesConditional.new("none", true),
28
28
  ],
29
- 'EIP' => [
30
- UpdateCausesConditional.new('replacement',
29
+ "EIP" => [
30
+ UpdateCausesConditional.new("replacement",
31
31
  lambda { |final, original|
32
- original.get('Properties', 'InstanceId') != final.get('Properties', 'InstanceId') ||
33
- original.get('Properties', 'NetworkInterfaceId') != final.get('Properties', 'NewtorkInterfaceId')
32
+ original.get("Properties", "InstanceId") != final.get("Properties", "InstanceId") ||
33
+ original.get("Properties", "NetworkInterfaceId") != final.get("Properties", "NewtorkInterfaceId")
34
34
  }),
35
- UpdateCausesConditional.new('none', true),
35
+ UpdateCausesConditional.new("none", true),
36
36
  ],
37
- 'InstanceId' => [
38
- UpdateCausesConditional.new('replacement',
37
+ "InstanceId" => [
38
+ UpdateCausesConditional.new("replacement",
39
39
  lambda { |final, original|
40
- original.get('Properties', 'AllocationId') != final.get('Properties', 'AllocationId') ||
41
- original.get('Properties', 'EIP') != final.get('Properties', 'EIP')
40
+ original.get("Properties", "AllocationId") != final.get("Properties", "AllocationId") ||
41
+ original.get("Properties", "EIP") != final.get("Properties", "EIP")
42
42
  }),
43
- UpdateCausesConditional.new('none', true),
43
+ UpdateCausesConditional.new("none", true),
44
44
  ],
45
- 'NetworkInterfaceId' => [
46
- UpdateCausesConditional.new('replacement',
45
+ "NetworkInterfaceId" => [
46
+ UpdateCausesConditional.new("replacement",
47
47
  lambda { |final, original|
48
- original.get('Properties', 'AllocationId') != final.get('Properties', 'AllocationId') ||
49
- original.get('Properties', 'EIP') != final.get('Properties', 'EIP')
48
+ original.get("Properties", "AllocationId") != final.get("Properties", "AllocationId") ||
49
+ original.get("Properties", "EIP") != final.get("Properties", "EIP")
50
50
  }),
51
- UpdateCausesConditional.new('none', true),
51
+ UpdateCausesConditional.new("none", true),
52
52
  ],
53
53
  },
54
- 'AWS::EC2::Instance' => {
55
- 'AdditionalInfo' => [
56
- UpdateCausesConditional.new('unknown', true), # EBS AMI dependent
54
+ "AWS::EC2::Instance" => {
55
+ "AdditionalInfo" => [
56
+ UpdateCausesConditional.new("unknown", true), # EBS AMI dependent
57
57
  ],
58
- 'BlockDeviceMappings' => [
59
- UpdateCausesConditional.new('replacement',
58
+ "BlockDeviceMappings" => [
59
+ UpdateCausesConditional.new("replacement",
60
60
  lambda { |final, original|
61
- f_maps = final.fetch('Properties', 'BlockDeviceMappings', [])
62
- o_maps = original.fetch('Properties', 'BlockDeviceMappings', [])
61
+ f_maps = final.fetch("Properties", "BlockDeviceMappings", [])
62
+ o_maps = original.fetch("Properties", "BlockDeviceMappings", [])
63
63
  f_maps.map! do |m|
64
- m.delete('DeleteOnTermination')
64
+ m.delete("DeleteOnTermination")
65
65
  m.to_smash(:sorted)
66
66
  end
67
67
  o_maps.map! do |m|
68
- m.delete('DeleteOnTermination')
68
+ m.delete("DeleteOnTermination")
69
69
  m.to_smash(:sorted)
70
70
  end
71
71
  f_maps.size != o_maps.size ||
72
72
  !f_maps.all? { |m| o_maps.include?(m) }
73
73
  }),
74
- UpdateCausesConditional.new('none', true),
74
+ UpdateCausesConditional.new("none", true),
75
75
  ],
76
- 'EbsOptimized' => [
77
- UpdateCausesConditional.new('unknown', true), # EBS AMI dependent
76
+ "EbsOptimized" => [
77
+ UpdateCausesConditional.new("unknown", true), # EBS AMI dependent
78
78
  ],
79
- 'InstanceType' => [
80
- UpdateCausesConditional.new('unknown', true), # EBS AMI dependent
79
+ "InstanceType" => [
80
+ UpdateCausesConditional.new("unknown", true), # EBS AMI dependent
81
81
  ],
82
- 'KernelId' => [
83
- UpdateCausesConditional.new('unknown', true), # EBS AMI dependent
82
+ "KernelId" => [
83
+ UpdateCausesConditional.new("unknown", true), # EBS AMI dependent
84
84
  ],
85
- 'RamdiskId' => [
86
- UpdateCausesConditional.new('unknown', true), # EBS AMI dependent
85
+ "RamdiskId" => [
86
+ UpdateCausesConditional.new("unknown", true), # EBS AMI dependent
87
87
  ],
88
- 'SecurityGroupIds' => [
89
- UpdateCausesConditional.new('none',
88
+ "SecurityGroupIds" => [
89
+ UpdateCausesConditional.new("none",
90
90
  lambda { |final, _orig|
91
- final.get('Properties', 'SubnetId') ||
92
- final.fetch('Properties', 'NetworkInterface', {}).values.include?('SubnetId')
91
+ final.get("Properties", "SubnetId") ||
92
+ final.fetch("Properties", "NetworkInterface", {}).values.include?("SubnetId")
93
93
  }),
94
- UpdateCausesConditional.new('replacement', true),
94
+ UpdateCausesConditional.new("replacement", true),
95
95
  ],
96
- 'UserData' => [
97
- UpdateCausesConditional.new('unknown', true), # EBS AMI dependent
96
+ "Tenancy" => [
97
+ UpdateCausesConditional.new("none", lambda { |final, original|
98
+ ["host", "dedicated"].any?{ |val| final.get("Properties", "Tenancy") == val } &&
99
+ ["host", "dedicated"].any?{ |val| original.get("Properties", "Tenancy") == val }
100
+ }
101
+ ),
102
+ UpdateCausesConditional.new("replacement", true),
103
+ ],
104
+ "UserData" => [
105
+ UpdateCausesConditional.new("unknown", true), # EBS AMI dependent
98
106
  ],
99
107
  },
100
- 'AWS::EC2::NetworkInterface' => {
101
- 'PrivateIpAddresses' => [
102
- UpdateCausesConditional.new('replacement',
108
+ "AWS::EC2::NetworkInterface" => {
109
+ "PrivateIpAddresses" => [
110
+ UpdateCausesConditional.new("replacement",
103
111
  lambda { |final, original|
104
- f_primary = final.fetch('Properties', 'PrivateIpAddresses', []).detect do |addr|
105
- addr['Primary']
112
+ f_primary = final.fetch("Properties", "PrivateIpAddresses", []).detect do |addr|
113
+ addr["Primary"]
106
114
  end || Smash.new
107
- o_primary = original.fetch('Properties', 'PrivateIpAddresses', []).detect do |addr|
108
- addr['Primary']
115
+ o_primary = original.fetch("Properties", "PrivateIpAddresses", []).detect do |addr|
116
+ addr["Primary"]
109
117
  end || Smash.new
110
118
  f_primary.to_smash(:sorted) != o_primary.to_smash(:sorted)
111
119
  }),
112
- UpdateCausesConditional.new('none', true),
120
+ UpdateCausesConditional.new("none", true),
113
121
  ],
114
122
  },
115
- 'AWS::ElastiCache::CacheCluster' => {
116
- 'NumCacheNodes' => [
117
- UpdateCausesConditional.new('replacement',
123
+ "AWS::ElastiCache::CacheCluster" => {
124
+ "NumCacheNodes" => [
125
+ UpdateCausesConditional.new("replacement",
118
126
  lambda { |final, original|
119
127
  [
120
- final.get('Properties', 'PreferredAvailabilityZone'),
121
- final.get('Properties', 'PreferredAvailabilityZones'),
122
- original.get('Properties', 'PreferredAvailabilityZone'),
123
- original.get('Properties', 'PreferredAvailabilityZones'),
128
+ final.get("Properties", "PreferredAvailabilityZone"),
129
+ final.get("Properties", "PreferredAvailabilityZones"),
130
+ original.get("Properties", "PreferredAvailabilityZone"),
131
+ original.get("Properties", "PreferredAvailabilityZones"),
124
132
  ].all? { |i| i.nil? || i.empty? }
125
133
  }),
126
- UpdateCausesConditional.new('none', true),
134
+ UpdateCausesConditional.new("none", true),
127
135
  ],
128
- 'PreferredAvailabilityZones' => [
129
- UpdateCausesConditional.new('interrupt',
136
+ "PreferredAvailabilityZones" => [
137
+ UpdateCausesConditional.new("interrupt",
130
138
  lambda { |final, original|
131
- original.get('Properties', 'PreferredAvailabilityZones') ||
132
- final.fetch('Properties', 'PreferredAvailabilityZones', []).include?(
133
- original.get('Properties', 'PreferredAvailabilityZone')
139
+ original.get("Properties", "PreferredAvailabilityZones") ||
140
+ final.fetch("Properties", "PreferredAvailabilityZones", []).include?(
141
+ original.get("Properties", "PreferredAvailabilityZone")
134
142
  )
135
143
  }),
136
- UpdateCausesConditional.new('replacement', true),
144
+ UpdateCausesConditional.new("replacement", true),
137
145
  ],
138
146
  },
139
- 'AWS::ElasticLoadBalancing::LoadBalancer' => {
140
- 'AvailabilityZones' => [
141
- UpdateCausesConditional.new('replacement',
147
+ "AWS::ElasticLoadBalancing::LoadBalancer" => {
148
+ "AvailabilityZones" => [
149
+ UpdateCausesConditional.new("replacement",
142
150
  lambda { |final, original|
143
- original.fetch('Properties', 'AvailabilityZones', []).empty? ||
144
- final.fetch('Properties', 'AvailabilityZones', []).empty?
151
+ original.fetch("Properties", "AvailabilityZones", []).empty? ||
152
+ final.fetch("Properties", "AvailabilityZones", []).empty?
145
153
  }),
146
- UpdateCausesConditional.new('none', true),
154
+ UpdateCausesConditional.new("none", true),
147
155
  ],
148
- 'HealthCheck' => [
149
- UpdateCausesConditional.new('replacement',
156
+ "HealthCheck" => [
157
+ UpdateCausesConditional.new("replacement",
150
158
  lambda { |final, original|
151
- original.fetch('Properties', 'HealthCheck', {}).empty? ||
152
- final.fetch('Properties', 'HealthCheck', {}).empty?
159
+ original.fetch("Properties", "HealthCheck", {}).empty? ||
160
+ final.fetch("Properties", "HealthCheck", {}).empty?
153
161
  }),
154
- UpdateCausesConditional.new('none', true),
162
+ UpdateCausesConditional.new("none", true),
155
163
  ],
156
- 'Subnets' => [
157
- UpdateCausesConditional.new('replacement',
164
+ "Subnets" => [
165
+ UpdateCausesConditional.new("replacement",
158
166
  lambda { |final, original|
159
- original.fetch('Properties', 'Subnets', []).empty? ||
160
- final.fetch('Properties', 'Subnets', []).empty?
167
+ original.fetch("Properties", "Subnets", []).empty? ||
168
+ final.fetch("Properties", "Subnets", []).empty?
161
169
  }),
162
- UpdateCausesConditional.new('none', true),
170
+ UpdateCausesConditional.new("none", true),
171
+ ],
172
+ },
173
+ "AWS::KinesisFirehose::DeliveryStream" => {
174
+ "ElasticsearchDestinationConfiguration" => [
175
+ UpdateCausesConditional.new("interrupt", lambda { |final, original|
176
+ !!original.get("Properties", "ElasticsearchDestinationConfiguration")
177
+ }
178
+ ),
179
+ UpdateCausesConditional.new("none", true),
180
+ ],
181
+ "ExtendedS3DestinationConfiguration" => [
182
+ UpdateCausesConditional.new("interrupt", lambda { |final, original|
183
+ !!original.get("Properties", "ExtendedS3DestinationConfiguration")
184
+ }
185
+ ),
186
+ UpdateCausesConditional.new("none", true),
187
+ ],
188
+ "RedshiftDestinationConfiguration" => [
189
+ UpdateCausesConditional.new("interrupt", lambda { |final, original|
190
+ !!original.get("Properties", "RedshiftDestinationConfiguration")
191
+ }
192
+ ),
193
+ UpdateCausesConditional.new("none", true),
194
+ ],
195
+ "S3DestinationConfiguration" => [
196
+ UpdateCausesConditional.new("interrupt", lambda { |final, original|
197
+ !!original.get("Properties", "S3DestinationConfiguration")
198
+ }
199
+ ),
200
+ UpdateCausesConditional.new("none", true),
201
+ ],
202
+ },
203
+ "AWS::Neptune::DBCluster" => {
204
+ "BackupRetentionPeriod" => [
205
+ # Not clear within documentation
206
+ UpdateCausesConditional.new("unknown", true),
207
+ ],
208
+ "PreferredMaintenanceWindow" => [
209
+ # Not clear within documentation
210
+ UpdateCausesConditional.new("unknown", true),
211
+ ],
212
+ },
213
+ "AWS::Neptune::DBClusterParameterGroup" => {
214
+ "Parameters" => [
215
+ # dependent on what parameters have been changed. doesn't
216
+ # look like parameter modifications are applied immediately?
217
+ # set as unknown for safety
218
+ UpdateCausesConditional.new("unknown", true),
219
+ ],
220
+ },
221
+ "AWS::Neptune::DBInstance" => {
222
+ "AutoMinorVersionUpgrade" => [
223
+ # Documentation does not indicate condition when no
224
+ # interruption occurs, so set as interrupt for safety
225
+ UpdateCausesConditional.new("interrupt", true),
226
+ ],
227
+ "DBParameterGroupName" => [
228
+ UpdateCausesConditional.new("interrupt", true),
229
+ ],
230
+ "PreferredMaintenanceWindow" => [
231
+ UpdateCausesConditional.new("interrupt", true),
163
232
  ],
164
233
  },
165
- 'AWS::RDS::DBCluster' => {
166
- 'BackupRetentionPeriod' => [
167
- UpdateCausesConditional.new('interrupt',
234
+ "AWS::Neptune::DBParameterGroup" => {
235
+ # Interrupt is only caused when parameters are static
236
+ # values. Dynamic values are applied immediately
237
+ "Parameters" => [
238
+ UpdateCausesConditional.new("interrupt", lambda { |final, original|
239
+ o_params = original.fetch("Properties", "Parameters", {})
240
+ f_params = final.fetch("Properties", "Parameters", {})
241
+ f_params.any? { |key, value|
242
+ o_params[key].nil? ||
243
+ o_params[key].is_a?(Hash)
244
+ }
245
+ }
246
+ ),
247
+ UpdateCausesConditional.new("none", true),
248
+ ]
249
+ },
250
+ "AWS::RDS::DBCluster" => {
251
+ "BackupRetentionPeriod" => [
252
+ UpdateCausesConditional.new("interrupt",
168
253
  lambda { |final, original|
169
- fp = final.get('Properties', 'BackupRetentionPeriod').to_i
170
- op = original.get('Properties', 'BackupRetentionPeriod').to_i
254
+ fp = final.get("Properties", "BackupRetentionPeriod").to_i
255
+ op = original.get("Properties", "BackupRetentionPeriod").to_i
171
256
  (fp == 0 && op != 0) ||
172
257
  (op == 0 && fp != 0)
173
258
  }),
174
- UpdateCausesConditional.new('none', true),
259
+ UpdateCausesConditional.new("none", true),
175
260
  ],
176
- 'PreferredMaintenanceWindow' => [
261
+ "PreferredMaintenanceWindow" => [
177
262
  # can interrupt if apply immediately is set on api call but
178
263
  # no way to know
179
- UpdateCausesConditional.new('unknown', true),
264
+ UpdateCausesConditional.new("unknown", true),
180
265
  ],
181
266
  },
182
- 'AWS::RDS::DBClusterParameterGroup' => {
183
- 'Parameters' => [
267
+ "AWS::RDS::DBClusterParameterGroup" => {
268
+ "Parameters" => [
184
269
  # dependent on what parameters have been changed. doesn't
185
270
  # look like parameter modifications are applied immediately?
186
271
  # set as unknown for safety
187
- UpdateCausesConditional.new('unknown', true),
272
+ UpdateCausesConditional.new("unknown", true),
188
273
  ],
189
274
  },
190
- 'AWS::RDS::DBInstance' => {
191
- 'AutoMinorVersionUpgrade' => [
275
+ "AWS::RDS::DBInstance" => {
276
+ "AutoMinorVersionUpgrade" => [
192
277
  # can cause interrupts based on future actions (enables
193
278
  # auto patching) so leave as unknown for safety
194
- UpdateCausesConditional.new('unknown', true),
279
+ UpdateCausesConditional.new("unknown", true),
195
280
  ],
196
- 'BackupRetentionPeriod' => [
197
- UpdateCausesConditional.new('interrupt',
281
+ "BackupRetentionPeriod" => [
282
+ UpdateCausesConditional.new("interrupt",
198
283
  lambda { |final, original|
199
- fp = final.get('Properties', 'BackupRetentionPeriod').to_i
200
- op = original.get('Properties', 'BackupRetentionPeriod').to_i
284
+ fp = final.get("Properties", "BackupRetentionPeriod").to_i
285
+ op = original.get("Properties", "BackupRetentionPeriod").to_i
201
286
  (fp == 0 && op != 0) ||
202
287
  (op == 0 && fp != 0)
203
288
  }),
204
- UpdateCausesConditional.new('none', true),
289
+ UpdateCausesConditional.new("none", true),
205
290
  ],
206
- 'DBParameterGroupName' => [
291
+ "DBParameterGroupName" => [
207
292
  # changes are not applied until reboot, but it could
208
293
  # still be considered an interrupt? setting as unknown
209
294
  # for safety
210
- UpdateCausesConditional.new('unknown', true),
295
+ UpdateCausesConditional.new("unknown", true),
296
+ ],
297
+ "MonitoringInterval" => [
298
+ # There can be an interruption or nothing. No clear distinction
299
+ # on when an interrupt so just default to interrupt for safety
300
+ UpdateCausesConditional.new("interrupt", true),
211
301
  ],
212
- 'PreferredMaintenanceWindow' => [
302
+ "PreferredMaintenanceWindow" => [
213
303
  # can interrupt if apply immediately is set on api call but
214
304
  # no way to know
215
- UpdateCausesConditional.new('unknown', true),
305
+ UpdateCausesConditional.new("unknown", true),
216
306
  ],
217
307
  },
218
- 'AWS::RDS::DBParameterGroup' => {
219
- 'Parameters' => [
308
+ "AWS::RDS::DBParameterGroup" => {
309
+ "Parameters" => [
220
310
  # dependent on what parameters have been changed. doesn't
221
311
  # look like parameter modifications are applied immediately?
222
312
  # set as unknown for safety
223
- UpdateCausesConditional.new('unknown', true),
313
+ UpdateCausesConditional.new("unknown", true),
224
314
  ],
225
315
  },
226
- 'AWS::RDS::EventSubscription' => {
227
- 'SourceType' => [
228
- UpdateCausesConditional.new('replacement',
316
+ "AWS::RDS::EventSubscription" => {
317
+ "SourceType" => [
318
+ UpdateCausesConditional.new("replacement",
229
319
  lambda { |final, original|
230
- !final.get('Properties', 'SourceType')
320
+ !final.get("Properties", "SourceType")
231
321
  }),
232
- UpdateCausesConditional.new('none', true),
322
+ UpdateCausesConditional.new("none", true),
233
323
  ],
234
324
  },
235
- 'AWS::Route53::HostedZone' => {
236
- 'VPCs' => [
237
- UpdateCausesConditional.new('replacement',
325
+ "AWS::Route53::HostedZone" => {
326
+ "VPCs" => [
327
+ UpdateCausesConditional.new("replacement",
238
328
  lambda { |final, original|
239
- !final.get('Properties', 'VPCs') ||
240
- !original.get('Properties', 'VPCs')
329
+ !final.get("Properties", "VPCs") ||
330
+ !original.get("Properties", "VPCs")
241
331
  }),
242
- UpdateCausesConditional.new('none', true),
332
+ UpdateCausesConditional.new("none", true),
243
333
  ],
244
334
  },
245
335
  )
@@ -255,7 +345,7 @@ class SparkleFormation
255
345
  load(
256
346
  File.join(
257
347
  File.dirname(__FILE__),
258
- 'aws_resources.json'
348
+ "aws_resources.json"
259
349
  )
260
350
  )
261
351
  true