prima-twig 0.4.5 → 0.5.26
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +5 -5
- data/bin/twig-build +2000 -0
- data/bin/twig-deploy +126 -89
- data/bin/twig-feature +644 -561
- data/bin/twig-update-ami +197 -0
- data/lib/command.rb +37 -24
- data/lib/prima_aws_client.rb +494 -0
- data/lib/prima_twig.rb +25 -6
- metadata +174 -27
data/bin/twig-update-ami
ADDED
@@ -0,0 +1,197 @@
|
|
1
|
+
#!/usr/bin/env ruby
|
2
|
+
|
3
|
+
require 'rubygems'
|
4
|
+
require_relative '../lib/prima_twig.rb'
|
5
|
+
require_relative '../lib/prima_aws_client.rb'
|
6
|
+
require 'launchy'
|
7
|
+
require 'json'
|
8
|
+
require 'aws-sdk-s3'
|
9
|
+
|
10
|
+
# Builds a new AMI with packer and rolls it out to every CloudFormation
# stack that currently references the AMI being replaced.
class TwigUpdateAmi
  include Command
  include PrimaAwsClient

  def initialize
    output 'Controllo se ci sono aggiornamenti da fare...'
    # Re-exec through the updated gem when a newer prima-twig is available.
    exec "gem update prima-twig && twig update-ami #{ARGV.join ' '}" unless `gem outdated`.lines.grep(/^prima-twig \(.*\)/).empty?
    @s3 = Aws::S3::Client.new
    @s3_bucket = 'prima-deploy'
    @templates_base_url = 'https://s3-eu-west-1.amazonaws.com'
  end

  # args: [ami_template, ami_id, ami_name, ami_description, env]
  def execute!(args)
    update_amis(args[0], args[1], args[2], args[3], args[4])
  end

  # Help text shown for --help. Defined on the class so it can be printed
  # without instantiating (initialize shells out and builds AWS clients).
  def self.help_content
    <<-HELP

twig-update-ami
===========

Updates ami version and applies it to stacks on cloudformation

Synopsis
--------

twig-update-ami

Description
-----------

from artemide main folder run
`twig-update-ami ${AMI_TEMPLATE} ${AMI_ID} ${AMI_NAME} ${AMI_DESCRIPTION} ${ENV}`

Subcommand for Twig: <http://rondevera.github.io/twig/>
Author: Eugenio Laghi <https://github.com/eugeniolaghi>

    HELP
  end

  private

  # Runs packer for +ami_template+, updates the ami-mappings file on S3 and
  # updates (non-QA) stacks exporting one of the replaced AMI ids.
  def update_amis(ami_template, ami_id, ami_name, ami_description, env)
    output "updating instance definition #{ami_template}".light_green
    Dir.chdir 'ami'
    update_instance_name(ami_id, ami_name, ami_description, ami_template)
    output 'running packer update (this could take some time)'.light_green
    new_ami_id = update_packer(ami_template, env)
    Dir.chdir '..'
    stop_if(new_ami_id.to_s.empty?, 'Failed to generate AMI!'.red)
    output "new ami id: #{new_ami_id}"

    output 'searching for ami to update...'
    ami_mappings = JSON.parse(@s3.get_object(bucket: @s3_bucket, key: 'ami/ami-mappings.json').body.read)
    old_amis = update_ami_mappings(ami_mappings, ami_template, env, new_ami_id)
    stop_if(old_amis.empty?, "No ami to update! No #{ami_template} in env #{env}, exiting".yellow)

    output "retrieving stacks that uses old ami ids: #{old_amis}"
    exports = list_exports()
    stacks = get_stacks_from_exports(exports, old_amis)
    stop_if(stacks.empty?, "No stack to update found! This means that ami-mapping file is not in sync, please check manually")

    stacks.each do |stack|
      output "processing stack #{stack}"
      if stack.include?('qa')
        output "skipping stack #{stack} because is a qa"
        next
      else
        stack_tags = tags_to_hashes(get_stack_tags(stack))
        # Bump the template version so CloudFormation sees a change to apply.
        stack_tags['TemplateVersion'] = stack_tags['TemplateVersion'].to_i + 1

        if stack.include?('batch')
          stack_parameters = update_stack_parameters(get_stack_parameters(stack),
            [
              { parameter_key: 'AMIID', parameter_value: new_ami_id },
              { parameter_key: 'TemplateVersion', parameter_value: stack_tags['TemplateVersion'].to_s }
            ]
          )
          if stack.include?('offsite-backups')
            stack_template = File.read("./cloudformation/stacks/batch/compute-environment-offsite-backups.yml")
          else
            stack_template = File.read("./cloudformation/stacks/batch/compute-environment.yml")
          end
        else
          # ASG stacks also need the current desired capacity re-asserted.
          stack_parameters = update_stack_parameters(get_stack_parameters(stack),
            [
              { parameter_key: 'AMIID', parameter_value: new_ami_id },
              { parameter_key: 'DesiredCapacity', parameter_value: get_desired_capacity(stack).to_s },
              { parameter_key: 'TemplateVersion', parameter_value: stack_tags['TemplateVersion'].to_s }
            ]
          )
          stack_template = File.read("./cloudformation/stacks/asg/#{stack.to_s.split("/")[1]}.yml")
        end
        update_stack(stack, stack_template, stack_parameters, hashes_to_tags(stack_tags))
      end
    end

    # Second pass: wait for every updated (non-QA) stack to settle.
    stacks.each do |stack|
      if stack.include?('qa')
        next
      end
      wait_for_stack_ready(stack, ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'])
    end

    output 'writing new ami mapping'
    # Persist the updated mapping both locally and back to S3.
    File.open("ami/ami-mappings.json", 'w+') do |f|
      mapping_file = JSON.pretty_generate(ami_mappings)
      f.write(mapping_file)
      @s3.put_object(bucket: @s3_bucket, key: "ami/ami-mappings.json", body: mapping_file)
    end

    output 'Update finished! ( ͡° ͜ʖ ͡°)'
  end

  # Ids of all stacks whose exported value equals one of +old_amis+.
  def get_stacks_from_exports(exports, old_amis)
    stacks = []
    old_amis.each do |old_ami|
      exports.each do |export|
        stacks.insert(0, export.exporting_stack_id) if export.value.eql?(old_ami)
      end
    end
    stacks
  end

  # Rewrites ami_id in +mappings+ (in place) for every entry matching
  # +ami_template+ and +env+; returns the unique list of replaced ids.
  def update_ami_mappings(mappings, ami_template, env, new_ami_id)
    old_values = []
    mappings.each do |item|
      if item['ami_template'].eql?(ami_template) and item['env'].eql?(env)
        old_values.insert(0, item['ami_id'])
        item['ami_id'] = new_ami_id
      end
    end
    old_values.uniq
  end

  # Merges +new_parameters+ into +stack_parameters+, replacing any existing
  # parameter with the same key.
  def update_stack_parameters(stack_parameters, new_parameters)
    new_parameters.each do |new_param|
      # BUG FIX: the original discarded reject's return value and compared
      # string keys against symbol-keyed hashes, so the stale parameter was
      # kept and a duplicate pushed alongside it.
      stack_parameters = stack_parameters.reject { |param| param[:parameter_key] == new_param[:parameter_key] }
      stack_parameters.push(new_param)
    end
    stack_parameters
  end

  # Rewrites source_ami / ami_name / ami_description in the packer template
  # at +ecs_json_path+.
  def update_instance_name(ami_id, ami_name, ami_description, ecs_json_path)
    ecs_json = JSON.parse File.read(ecs_json_path)

    ecs_json['builders'][0]['source_ami'] = ami_id
    ecs_json['builders'][0]['ami_name'] = ami_name
    ecs_json['builders'][0]['ami_description'] = ami_description

    File.open ecs_json_path, 'w' do |f|
      f.write(JSON.pretty_generate(ecs_json))
    end
  end

  # Current desired capacity of the stack's ASG/EC2Fleet output.
  # Returns nil when no matching output is found.
  def get_desired_capacity(stack_name)
    stack_outputs = get_stack_outputs(stack_name)
    stack_outputs.each do |out|
      if out.export_name.include?('EC2Fleet') or out.export_name.include?('AutoScalingGroup')
        return get_autoscaling_capacity(out.output_value)
      end
    end
  end

  # Runs packer and extracts the generated AMI id from its machine-readable
  # build log.
  def update_packer(json_filename, env)
    execute_command "AWS_MAX_ATTEMPTS=90 AWS_POLL_DELAY_SECONDS=60 packer build -var datadog_apikey=`biscuit get -f ../configs/secrets/common.yml common_production_apikey_datadog` -var github_token=`biscuit get -f ../configs/secrets/common.yml common_private_repo_github_token` -var drone_key=\"`biscuit get -f ../configs/secrets/common.yml drone_license_key`\" -var env=#{env} -machine-readable ./#{json_filename} | tee build.log"
    `grep 'artifact,0,id' build.log | cut -d, -f6 | cut -d: -f2`.sub(/\n/, '')
  end
end

# Deep merge for nested hashes/arrays (arrays are unioned, nil-ish values
# on the right keep the left value).
class ::Hash
  def deep_merge(second)
    merger = proc { |key, v1, v2| Hash === v1 && Hash === v2 ? v1.merge(v2, &merger) : Array === v1 && Array === v2 ? v1 | v2 : [:undefined, nil, :nil].include?(v2) ? v1 : v2 }
    self.merge(second.to_h, &merger)
  end
end

args = ARGV.dup

if args.include?('--help')
  # BUG FIX: the original called the private *instance* method help_content
  # from the class body (self = the class), raising NameError on --help.
  puts TwigUpdateAmi.help_content
  exit
end

TwigUpdateAmi.new.execute!(args)
|
data/lib/command.rb
CHANGED
@@ -1,60 +1,73 @@
|
|
1
1
|
require 'colorize'
|
2
|
+
require 'English'
|
2
3
|
require 'optparse'
|
3
4
|
|
4
|
-
|
5
|
-
|
5
|
+
# Shared helpers for the twig binaries: shelling out, prefixed logging and
# bailing out of the process with a user-facing message.
module Command
  # Executes +command+ via backticks and exits the process with the child's
  # status when it is non-zero. +output+ overrides the default echo line.
  def exec_step(command, output = nil)
    if output
      puts output
    else
      puts 'exec > '.green + command.to_s.yellow
    end
    `#{command}`
    exit($CHILD_STATUS.exitstatus) unless $CHILD_STATUS.exitstatus.zero?
  end

  # Prefixed log line used by all twig binaries.
  def output(msg)
    puts 'twig binaries > '.black + msg
  end

  # Exits with status 1 printing +msg+ when +check+ is truthy. +msg+ may be
  # a String, a Symbol (see #symbol_message) or an Array (see #array_message).
  # When +command+ is given it is exec'd first.
  def stop_if(check, msg, command = '')
    if check
      output_msg =
        case msg
        when Symbol
          symbol_message msg
        when Array
          array_message msg
        else
          msg
        end
      # NOTE(review): Kernel#exec replaces the current process, so when a
      # command is supplied the lines below never run — confirm intended.
      command.empty? || exec(command)
      puts 'there was a problem > '.red + output_msg
      exit(1)
    end
  end

  # Maps a well-known symbol to its (Italian) user-facing message;
  # returns nil for unknown symbols.
  def symbol_message(s)
    case s
    when :clean
      'hai dei file non committati...non posso continuare'
    when :detached_head
      "repo in stato 'head detached'"
    when :wrong_args
      'argomento non corretto'
    end
  end

  # Formats [:wrong_args, [allowed, values]] into a readable message
  # listing the accepted values.
  def array_message(arr)
    case arr[0]
    when :wrong_args
      msg = symbol_message arr[0]
      msg += "\n"
      msg += ' valore possibile: '
      msg += '[ ' + arr[1].join(' | ').yellow + ' ]'
      msg
    end
  end

  # Inverse of #stop_if.
  def stop_unless(check, msg, command = '')
    stop_if !check, msg, command
  end

  # Executes +cmd+, echoes its output (green on success, red on failure)
  # and stops the process when the command failed.
  def execute_command(cmd)
    output "Eseguo #{cmd}".yellow
    res = `#{cmd}`
    color = $CHILD_STATUS.exitstatus.zero? ? 'green' : 'red'
    output res.send color
    stop_if (color == 'red'), "Errore durante la build dell'artifact".red
  end
end
@@ -0,0 +1,494 @@
|
|
1
|
+
require 'aws-sdk-autoscaling'
|
2
|
+
require 'aws-sdk-batch'
|
3
|
+
require 'aws-sdk-cloudformation'
|
4
|
+
require 'aws-sdk-cloudfront'
|
5
|
+
require 'aws-sdk-ec2'
|
6
|
+
require 'aws-sdk-ecs'
|
7
|
+
require 'aws-sdk-elasticloadbalancingv2'
|
8
|
+
require 'aws-sdk-s3'
|
9
|
+
require 'colorize'
|
10
|
+
#
|
11
|
+
# Thin wrappers around the AWS SDK clients used by the twig binaries.
# CloudFormation calls retry after 15 seconds when throttled.
module PrimaAwsClient
  def s3_client
    @s3 ||= Aws::S3::Client.new
  end

  def cf_client
    @cf ||= Aws::CloudFormation::Client.new
  end

  def asg_client
    @asg ||= Aws::AutoScaling::Client.new
  end

  def ec2_client
    @ec2 ||= Aws::EC2::Client.new
  end

  def alb_client
    @alb ||= Aws::ElasticLoadBalancingV2::Client.new
  end

  def ecs_client
    @ecs ||= Aws::ECS::Client.new
  end

  # All stacks whose name contains '-qa-', following pagination.
  def stack_list
    stacks = []
    next_token = ''
    loop do
      print '.'.yellow; STDOUT.flush
      options = next_token != '' ? { next_token: next_token } : {}
      begin
        resp = cf_client.describe_stacks(options)
      rescue Aws::CloudFormation::Errors::Throttling => e
        output 'Throttling, retrying in 15 seconds'.red
        sleep 15
        resp = cf_client.describe_stacks(options)
      end
      stacks += resp.stacks
      break unless resp.next_token
      next_token = resp.next_token
    end
    puts '.'.yellow; STDOUT.flush
    stacks.keep_if { |stack| stack.stack_name.include? '-qa-' }
    stacks
  end

  # All ECS QA cluster stacks (name contains 'ecs-cluster-qa-').
  def cluster_list
    stacks = []
    next_token = ''
    loop do
      print '.'.yellow; STDOUT.flush
      options = next_token != '' ? { next_token: next_token } : {}
      begin
        resp = cf_client.describe_stacks(options)
      rescue Aws::CloudFormation::Errors::Throttling => e
        output 'Throttling, retrying in 15 seconds'.red
        sleep 15
        resp = cf_client.describe_stacks(options)
      end
      stacks += resp.stacks
      break unless resp.next_token
      next_token = resp.next_token
    end
    puts '.'.yellow; STDOUT.flush
    stacks.keep_if { |stack| stack.stack_name.include? 'ecs-cluster-qa-' }
    stacks
  end

  # Every CloudFormation export in the account, following pagination.
  def list_exports
    exports = []
    next_token = ''
    loop do
      print '.'.yellow; STDOUT.flush
      options = next_token != '' ? { next_token: next_token } : {}
      begin
        resp = cf_client.list_exports(options)
      rescue Aws::CloudFormation::Errors::Throttling => e
        output 'Throttling, retrying in 15 seconds'.red
        sleep 15
        resp = cf_client.list_exports(options)
      end
      exports += resp.exports
      break unless resp.next_token
      next_token = resp.next_token
    end
    puts '.'.yellow; STDOUT.flush
    exports
  end

  # Kicks off stack creation (ROLLBACK on failure, CAPABILITY_IAM).
  def create_stack(stack_name, stack_body, parameters = [], tags = [], role = nil)
    cf_args = {
      stack_name: stack_name,
      template_body: stack_body,
      parameters: parameters,
      tags: tags,
      capabilities: ['CAPABILITY_IAM'],
      on_failure: 'ROLLBACK'
    }
    cf_args.merge!(role_arn: role) unless role.nil?

    begin
      cf_client.create_stack(cf_args)
    # BUG FIX: 'LimitExcedeedException' was a typo that never matched the
    # real CloudFormation LimitExceededException error class.
    rescue Aws::CloudFormation::Errors::Throttling, Aws::CloudFormation::Errors::LimitExceededException => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      # BUG FIX: the original retried with `parameters = [], tags = []`,
      # silently discarding the caller's parameters, tags and role.
      create_stack(stack_name, stack_body, parameters, tags, role)
    else
      output "La creazione dello stack #{stack_name} è stata avviata".green
    end
  end

  # Kicks off a stack update from an inline template body.
  def update_stack(stack_name, template_body, parameters = [], tags = [], role = nil)
    cf_args = {
      stack_name: stack_name,
      template_body: template_body,
      parameters: parameters,
      tags: tags,
      capabilities: ['CAPABILITY_IAM']
    }
    cf_args.merge!(role_arn: role) unless role.nil?

    begin
      cf_client.update_stack(cf_args)
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      # BUG FIX: retry with the caller's parameters/tags/role, not empties.
      update_stack(stack_name, template_body, parameters, tags, role)
    rescue Aws::CloudFormation::Errors::ValidationError => e
      raise e
    else
      output "L'update dello stack #{stack_name} è stato avviato".green
    end
  end

  # Kicks off a stack update from an S3 template URL.
  def update_stack_url(stack_name, template_url, parameters = [], tags = [], role = nil)
    cf_args = {
      stack_name: stack_name,
      template_url: template_url,
      parameters: parameters,
      tags: tags,
      capabilities: ['CAPABILITY_IAM']
    }
    cf_args.merge!(role_arn: role) unless role.nil?

    begin
      cf_client.update_stack(cf_args)
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      # BUG FIX: retry with the caller's parameters/tags/role, not empties.
      update_stack_url(stack_name, template_url, parameters, tags, role)
    rescue Aws::CloudFormation::Errors::ValidationError => e
      raise e
    else
      output "L'update dello stack #{stack_name} è stato avviato".green
    end
  end

  # True when the stack exists; false when CloudFormation reports
  # 'does not exist'; other validation errors are re-raised.
  def stack_exists?(stack_name)
    begin
      cf_client.describe_stacks(stack_name: stack_name)
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      stack_exists?(stack_name)
    rescue Aws::CloudFormation::Errors::ValidationError => e
      return false if e.message.include? 'does not exist'
      raise e
    else
      true
    end
  end

  # Kicks off stack deletion.
  def delete_stack(stack_name)
    begin
      cf_client.delete_stack(stack_name: stack_name)
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      delete_stack(stack_name)
    else
      output "Stack #{stack_name} spenta con successo\n".green
    end
  end

  # Polls (every ~13s) until the stack reaches a ready state; raises via
  # stack_ready? when one of +failed_statuses+ is hit.
  def wait_for_stack_ready(stack_name, failed_statuses = ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS'])
    ready = false
    sleep_seconds = 13
    output "Attendo che lo stack #{stack_name} finisca di essere inizializzato...\n".yellow
    until ready
      ready = true if stack_ready?(stack_name, failed_statuses)
      sleep_seconds.times do
        print '.'.yellow; STDOUT.flush
        sleep 1
      end
    end
    output "\nStack #{stack_name} pronto!\n".green
  end

  # Polls (every ~13s) until the stack is fully deleted.
  def wait_for_stack_removal(stack_name)
    ready = false
    sleep_seconds = 13
    sleep 10
    output "Attendo che lo stack #{stack_name} finisca di essere cancellato...\n".yellow
    until ready
      ready = true if stack_deleted?(stack_name)
      sleep_seconds.times do
        print '.'.yellow; STDOUT.flush
        sleep 1
      end
    end
    output "\nStack #{stack_name} eliminato!\n".green
  end

  # Tags of the given stack (Aws tag structs).
  def get_stack_tags(name)
    begin
      resp = cf_client.describe_stacks(stack_name: name)
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      get_stack_tags(name)
    else
      resp.stacks[0].tags
    end
  end

  # Parameters of the given stack (Aws parameter structs).
  def get_stack_parameters(name)
    begin
      resp = cf_client.describe_stacks(stack_name: name)
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      get_stack_parameters(name)
    else
      resp.stacks[0].parameters
    end
  end

  # Outputs of the given stack (Aws output structs).
  def get_stack_outputs(name)
    begin
      resp = cf_client.describe_stacks(stack_name: name)
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      get_stack_outputs(name)
    else
      resp.stacks[0].outputs
    end
  end

  # Raw template body of the given stack.
  def get_stack_template(name)
    begin
      resp = cf_client.get_template(stack_name: name)
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      get_stack_template(name)
    else
      resp.template_body
    end
  end

  # True when the stack is in a completed state; raises when it is in one
  # of +failed_statuses+; false (no raise) while still in progress or
  # when throttled.
  def stack_ready?(stack_name, failed_statuses = ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS'])
    begin
      resp = cf_client.describe_stacks(
        stack_name: stack_name
      )
      stack_status = resp.stacks[0].stack_status
    rescue Aws::CloudFormation::Errors::Throttling => e
      print 'Throttling'.red; STDOUT.flush
      return false
    end
    raise "The stack #{stack_name} errored out" if failed_statuses.include? stack_status
    ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'].include? stack_status
  end

  # True when the stack is gone (DELETE_COMPLETE or no longer describable).
  def stack_deleted?(stack_name, failed_statuses = ['ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_FAILED', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS'])
    begin
      resp = cf_client.describe_stacks(
        stack_name: stack_name
      )
      stack_status = resp.stacks[0].stack_status
    rescue Aws::CloudFormation::Errors::Throttling => e
      print 'Throttling'.red; STDOUT.flush
      return false
    rescue Aws::CloudFormation::Errors::ValidationError => e
      # Describe fails once the stack no longer exists.
      print 'Stack deleted'
      return true
    end
    raise "The stack #{stack_name} errored out" if failed_statuses.include? stack_status
    ['DELETE_COMPLETE'].include? stack_status
  end

  # True when at least one S3 object exists under +path+ in +bucket+.
  def artifact_exists?(bucket, path)
    resp = s3_client.list_objects(
      bucket: bucket,
      max_keys: 1,
      prefix: path
    )
    !resp.contents.empty?
  end

  # Uploads a local file to S3 (to @s3_bucket unless overridden).
  def upload_artifact(source_path, destination_path, bucket_name_override = nil)
    output "Upload dell'artifact in corso (#{(File.size(source_path).to_f / 2**20).round(2)} MiB)\n".yellow
    s3 = Aws::S3::Resource.new
    s3_bucket = bucket_name_override.nil? ? @s3_bucket : bucket_name_override
    puts s3_bucket
    obj = s3.bucket(s3_bucket).object(destination_path)
    obj.upload_file(source_path)

    output "#{s3_bucket}/#{destination_path} uploadato con successo!\n".green
  end

  # Polls (every ~13s, max 150 tries) until the artifact shows up on S3;
  # exits the process on timeout.
  def wait_for_artifact(bucket, path)
    ready = artifact_exists?(bucket, path)
    sleep_seconds = 13
    output "Attendo che lo sia pronto l'artefatto #{path}...\n".yellow
    retries = 0
    until ready
      ready = true if artifact_exists?(bucket, path)
      sleep_seconds.times do
        print '.'.yellow; STDOUT.flush
        sleep 1
      end
      retries += 1
      if retries > 150
        output "\n Timeout raggiunto aspettando #{path}\n".red
        exit
      end
    end
    output "\nArtefatto #{path} creato!\n".green
  end

  # Names of all stacks importing the given export, following pagination.
  def list_import_stacks(export_name)
    stacks = []
    next_token = ''
    loop do
      print '.'.yellow; STDOUT.flush
      options = next_token != '' ? { export_name: export_name, next_token: next_token } : { export_name: export_name }
      begin
        resp = cf_client.list_imports(options)
      rescue Aws::CloudFormation::Errors::Throttling => e
        output 'Throttling, retrying in 15 seconds'.red
        sleep 15
        resp = cf_client.list_imports(options)
      end
      stacks += resp.imports
      break unless resp.next_token
      next_token = resp.next_token
    end
    stacks
  end

  def describe_stack_resource(cluster_stack_name, logical_resource_id)
    begin
      resp = cf_client.describe_stack_resource({ stack_name: cluster_stack_name, logical_resource_id: logical_resource_id })
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      resp = describe_stack_resource(cluster_stack_name, logical_resource_id)
    end
  end

  def describe_instances(instance_ids)
    begin
      resp = ec2_client.describe_instances({ instance_ids: instance_ids })
    # NOTE(review): this is an EC2 call but rescues the CloudFormation
    # throttling class — the rescue likely never matches; confirm.
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      resp = describe_instances(instance_ids)
    end
  end

  def describe_auto_scaling_groups(auto_scaling_group_names, max_records)
    begin
      resp = asg_client.describe_auto_scaling_groups({
        auto_scaling_group_names: auto_scaling_group_names,
        max_records: max_records
      })
    # NOTE(review): AutoScaling call rescuing the CloudFormation throttling
    # class — likely never matches; confirm.
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      resp = describe_auto_scaling_groups(auto_scaling_group_names, max_records)
    end
  end

  def describe_load_balancers(load_balancer_arns)
    begin
      resp = alb_client.describe_load_balancers({ load_balancer_arns: load_balancer_arns })
    # NOTE(review): ELBv2 call rescuing the CloudFormation throttling
    # class — likely never matches; confirm.
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      resp = describe_load_balancers(load_balancer_arns)
    end
  end

  def update_ecs_service(cluster, service, deployment_configuration)
    begin
      resp = ecs_client.update_service(
        cluster: cluster,
        service: service,
        deployment_configuration: deployment_configuration
      )
    # NOTE(review): ECS call rescuing the CloudFormation throttling
    # class — likely never matches; confirm.
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      resp = update_ecs_service(cluster, service, deployment_configuration)
    end
  end

  def describe_ecs_tasks(cluster, tasks)
    begin
      resp = ecs_client.describe_tasks({
        cluster: cluster,
        tasks: tasks
      })
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      resp = describe_ecs_tasks(cluster, tasks)
    end
  end

  def run_ecs_task(cluster, task_definition, overrides, count)
    begin
      resp = ecs_client.run_task({
        cluster: cluster,
        task_definition: task_definition,
        overrides: overrides,
        count: count
      })
    rescue Aws::CloudFormation::Errors::Throttling => e
      output 'Throttling, retrying in 15 seconds'.red
      sleep 15
      resp = run_ecs_task(cluster, task_definition, overrides, count)
    end
  end

  # Desired capacity of the named autoscaling group.
  def get_autoscaling_capacity(asg_name)
    resp = asg_client.describe_auto_scaling_groups(auto_scaling_group_names: [asg_name])
    resp.auto_scaling_groups[0].desired_capacity
  end

  # Target capacity of the given spot fleet request.
  def get_spotfleet_capacity(fleet_arn)
    resp = ec2_client.describe_spot_fleet_requests(spot_fleet_request_ids: [fleet_arn])
    resp.spot_fleet_request_configs[0].spot_fleet_request_config.target_capacity
  end

  # {'Key' => value} hash -> CloudFormation tag list (values stringified,
  # in reverse key order — preserved from the original insert(0, ...)).
  def hashes_to_tags(hashes)
    hashes.map { |hkey, hvalue| { key: hkey, value: hvalue.to_s } }.reverse
  end

  # CloudFormation tag structs -> plain {'Key' => 'value'} hash.
  def tags_to_hashes(tags)
    tags.each_with_object({}) do |tags_obj, hash|
      hash[tags_obj.key] = tags_obj.value
    end
  end
end
|