prima-twig 0.34.7 → 0.34.126

Sign up to get free protection for your applications and to get access to all the features.
@@ -5,7 +5,7 @@ require_relative '../lib/prima_twig.rb'
5
5
  require_relative '../lib/prima_aws_client.rb'
6
6
  require 'launchy'
7
7
  require 'json'
8
- require 'aws-sdk'
8
+ require 'aws-sdk-s3'
9
9
 
10
10
  class TwigUpdateAmi
11
11
  include Command
@@ -15,69 +15,117 @@ class TwigUpdateAmi
15
15
  exec "gem update prima-twig && twig update-ami #{ARGV.join ' '}" unless `gem outdated`.lines.grep(/^prima-twig \(.*\)/).empty?
16
16
  @s3 = Aws::S3::Client.new
17
17
  @s3_bucket = 'prima-deploy'
18
- @instances = JSON.parse File.read('../twig-binaries/cloudformation.json')
18
+ @templates_base_url = "https://s3-eu-west-1.amazonaws.com"
19
19
  end
20
20
 
21
21
  def execute!(args)
22
- if args[3] == 'staging'
23
- update_amis args[0], args[1], args[2], true
24
- else
25
- update_amis args[0], args[1], args[2], false
26
- end
22
+ update_amis(args[0], args[1], args[2], args[3], args[4])
27
23
  end
28
24
 
29
25
  private
30
26
 
31
- def update_amis(ami_id, ami_name, ami_description, only_staging)
32
- @instances['amis'].each do |ami|
33
- if only_staging and ami['json'] != 'ecs.json'
34
- output "skipping #{ami['json']} because only_staging enabled".yellow
27
+ def update_amis(ami_template, ami_id, ami_name, ami_description, env)
28
+ output "updating instance definition #{ami_template}".light_green
29
+ Dir.chdir 'ami'
30
+ update_instance_name(ami_id, ami_name, ami_description, ami_template)
31
+ output 'running packer update (this could take some time)'.light_green
32
+ new_ami_id = update_packer(ami_template, env)
33
+ # new_ami_id = 'ami-026890988d91ee8c6'
34
+ Dir.chdir '..'
35
+ stop_if(new_ami_id.to_s.empty?, 'Failed to generate AMI!'.red)
36
+ output "new ami id: #{new_ami_id}"
37
+
38
+ output 'searching for ami to update...'
39
+ ami_mappings = JSON.parse(@s3.get_object(bucket: @s3_bucket, key: "ami/ami-mappings.json")["body"].read())
40
+ old_amis = update_ami_mappings(ami_mappings, ami_template, env, new_ami_id)
41
+ stop_if(old_amis.empty?, "No ami to update! No #{ami_template} in env #{env}, exiting".yellow)
42
+
43
+ output "retrieving stacks that uses old ami ids: #{old_amis}"
44
+ exports = list_exports()
45
+ stacks = get_stacks_from_exports(exports, old_amis)
46
+ stop_if(stacks.empty?, "No stack to update found! This means that ami-mapping file is not in sync, please check manually")
47
+
48
+ stacks.each do |stack|
49
+ output "processing stack #{stack}"
50
+ if stack.include?('qa')
51
+ output "skipping stack #{stack} because is a qa"
35
52
  next
36
- end
37
- output 'updating instance definition'.light_green
38
- Dir.chdir 'ami'
39
- update_instance_name(ami_id, ami_name, ami_description, ami['json'])
40
- output 'running packer update (this could take some time)'.light_green
41
- new_ami_id = update_packer ami['json']
42
- Dir.chdir '..'
43
- output 'new ami id: ' + new_ami_id
44
- ami['stacks'].each do |stack|
45
- if only_staging and not stack['stack_name'].include?('allinone-staging')
46
- output "skipping #{stack['stack_name']} because only_staging enabled".yellow
47
- next
48
- end
49
- output 'updating ' + stack['yaml_filename'] + ' and copying onto s3'
50
- update_yml_files(new_ami_id, stack['yaml_filename'])
51
- copy_yml_files_to_s3(stack['yaml_filename'], stack['s3_key'])
52
- output 'updating stack on cloudformation'
53
- if stack['stack_name'] and stack_exists?(stack['stack_name'])
54
- stack_parameters = get_stack_parameters(stack['stack_name'])
55
- stack_parameters.each do |param|
56
- if param.parameter_key.eql?('DesiredCapacity')
57
- desired_capacity = get_desired_capacity(stack['stack_name'])
58
- desired_capacity.nil? ? break : param.parameter_value.sub!(/[0-9]+/, desired_capacity.to_s)
59
- break
60
- end
53
+ else
54
+ stack_tags = tags_to_hashes(get_stack_tags(stack))
55
+ stack_tags['TemplateVersion'] = stack_tags['TemplateVersion'].to_i + 1
56
+
57
+ if stack.include?('batch')
58
+ stack_parameters = update_stack_parameters(get_stack_parameters(stack),
59
+ [
60
+ { parameter_key: 'AMIID', parameter_value: new_ami_id },
61
+ { parameter_key: 'TemplateVersion', parameter_value: stack_tags['TemplateVersion'].to_s }
62
+ ]
63
+ )
64
+ if stack.include?('offsite-backups')
65
+ stack_template = File.read("./cloudformation/stacks/batch/compute-environment-offsite-backups.yml")
66
+ else
67
+ stack_template = File.read("./cloudformation/stacks/batch/compute-environment.yml")
61
68
  end
62
- update_stack_url(stack['stack_name'], stack['template_url'], stack_parameters)
69
+ else
70
+ stack_parameters = update_stack_parameters(get_stack_parameters(stack),
71
+ [
72
+ { parameter_key: 'AMIID', parameter_value: new_ami_id },
73
+ { parameter_key: 'DesiredCapacity', parameter_value: get_desired_capacity(stack).to_s },
74
+ { parameter_key: 'TemplateVersion', parameter_value: stack_tags['TemplateVersion'].to_s }
75
+ ]
76
+ )
77
+ stack_template = File.read("./cloudformation/stacks/asg/#{stack.to_s.split("/")[1]}.yml")
63
78
  end
79
+ update_stack(stack, stack_template, stack_parameters, hashes_to_tags(stack_tags))
64
80
  end
81
+ end
65
82
 
66
- unless ami['batch_compute_environments'].nil? or only_staging
67
- ami['batch_compute_environments'].each do |ce|
68
- update_batch_compute_environment(ce, new_ami_id)
69
- end
83
+ stacks.each do |stack|
84
+ if stack.include?('qa')
85
+ next
70
86
  end
71
- ami['stacks'].each do |stack|
72
- if only_staging and not stack['stack_name'].include?('allinone-staging')
73
- output "skipping #{stack['stack_name']} because only_staging enabled".yellow
74
- next
75
- end
76
- if stack['stack_name'] and stack_exists?(stack['stack_name'])
77
- wait_for_stack_ready(stack['stack_name'], ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'])
87
+ wait_for_stack_ready(stack, ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'])
88
+ end
89
+
90
+ output 'writing new ami mapping'
91
+ File.open("ami/ami-mappings.json", 'w+') do |f|
92
+ mapping_file = JSON.pretty_generate(ami_mappings)
93
+ f.write(mapping_file)
94
+ @s3.put_object(bucket: @s3_bucket, key: "ami/ami-mappings.json", body: mapping_file)
95
+ end
96
+
97
+ output 'Update finished! ( ͡° ͜ʖ ͡°)'
98
+ end
99
+
100
+ def get_stacks_from_exports(exports, old_amis)
101
+ stacks = []
102
+ old_amis.each do |old_ami|
103
+ exports.each do |export|
104
+ if export.value.eql?(old_ami)
105
+ stacks.insert(0,export.exporting_stack_id)
78
106
  end
79
107
  end
80
108
  end
109
+ stacks
110
+ end
111
+
112
+ def update_ami_mappings(mappings, ami_template, env, new_ami_id)
113
+ old_values = []
114
+ mappings.each do |item|
115
+ if item['ami_template'].eql?(ami_template) and item['env'].eql?(env)
116
+ old_values.insert(0,item['ami_id'])
117
+ item['ami_id'] = new_ami_id
118
+ end
119
+ end
120
+ old_values.uniq
121
+ end
122
+
123
+ def update_stack_parameters(stack_parameters, new_parameters)
124
+ new_parameters.each do |new_param|
125
+ stack_parameters.reject{ |k| k["parameter_key"] == new_param["parameter_key"] }
126
+ stack_parameters.push(new_param)
127
+ end
128
+ stack_parameters
81
129
  end
82
130
 
83
131
  def update_instance_name(ami_id, ami_name, ami_description, ecs_json_path)
@@ -95,121 +143,17 @@ class TwigUpdateAmi
95
143
  def get_desired_capacity(stack_name)
96
144
  stack_outputs = get_stack_outputs(stack_name)
97
145
  stack_outputs.each do |out|
98
- if out.export_name.include?('ECSAutoScalingGroup')
146
+ if out.export_name.include?('EC2Fleet') or out.export_name.include?('AutoScalingGroup')
99
147
  return get_autoscaling_capacity(out.output_value)
100
- elsif out.export_name.include?('SpotFleet')
101
- return get_spotfleet_capacity(out.output_value)
102
148
  end
103
149
  end
104
- return nil
105
150
  end
106
151
 
107
- def update_packer(json_filename)
108
- execute_command "packer build -var datadog_apikey=`biscuit get -f ../configs/secrets/common.yml common_production_apikey_datadog` -machine-readable ./#{json_filename} | tee build.log"
152
+ def update_packer(json_filename, env)
153
+ execute_command "AWS_MAX_ATTEMPTS=90 AWS_POLL_DELAY_SECONDS=60 packer build -var datadog_apikey=`biscuit get -f ../configs/secrets/common.yml common_production_apikey_datadog` -var github_token=`biscuit get -f ../configs/secrets/common.yml common_private_repo_github_token` -var drone_key=\"`biscuit get -f ../configs/secrets/common.yml drone_license_key`\" -var env=#{env} -machine-readable ./#{json_filename} | tee build.log"
109
154
  `grep 'artifact,0,id' build.log | cut -d, -f6 | cut -d: -f2`.sub(/\n/, '')
110
155
  end
111
156
 
112
- def update_yml_files(ami_id, yaml_filename)
113
- file_content = File.read yaml_filename
114
-
115
- file_content.sub!(/ami-[0-9a-z]{8}/, ami_id)
116
-
117
- File.open yaml_filename, 'w' do |f|
118
- f.write file_content
119
- end
120
-
121
- if yaml_filename.include? 'spotfleet'
122
- old_handle = (File.read yaml_filename)[/InstanceReadyWaitHandleUpdate[0-9]+/]
123
- if old_handle
124
- handle_version = old_handle.sub('InstanceReadyWaitHandleUpdate', '').to_i + 1
125
-
126
- old_condition = (File.read yaml_filename)[/InstanceReadyWaitConditionUpdate[0-9]+/]
127
- condition_version = old_condition.sub('InstanceReadyWaitConditionUpdate', '').to_i + 1
128
-
129
- file_content = File.read yaml_filename
130
- file_content.gsub!(old_handle, 'InstanceReadyWaitHandleUpdate' + handle_version.to_s)
131
- file_content.gsub!(old_condition, 'InstanceReadyWaitConditionUpdate' + condition_version.to_s)
132
-
133
- File.open yaml_filename, 'w' do |f|
134
- f.write file_content
135
- end
136
- end
137
- end
138
- end
139
-
140
- def copy_yml_files_to_s3(source, s3_key)
141
- body = File.read source
142
- @s3.put_object(
143
- body: body,
144
- bucket: @s3_bucket,
145
- key: s3_key
146
- )
147
- end
148
-
149
- def update_batch_compute_environment(stack, ami_id)
150
- output 'updating ' + stack['yaml_filename'] + ' to add a new compute environment'
151
-
152
- file_content = File.read stack['yaml_filename']
153
- file_content.gsub!(/(\w+:\s+)!(\w+)/i, '\1QuaCeraUnPuntoEsclamativo\2')
154
- file_content_original = file_content.clone
155
- old_ce_name = file_content[/#{stack['label']}[0-9]*/]
156
- new_ce_version = old_ce_name.sub(stack['label'], '').to_i + 1
157
- new_ce_name = stack['label'] + new_ce_version.to_s
158
- file_content.gsub!(old_ce_name, new_ce_name)
159
- File.open stack['yaml_filename'] + 'new', 'w' do |f|
160
- f.write file_content
161
- end
162
- update_yml_files(ami_id, stack['yaml_filename'] + 'new')
163
-
164
- stack_body = YAML.load(file_content_original)
165
- stack_body_new = YAML.load_file(stack['yaml_filename'] + 'new')
166
- stack_body_merged = stack_body.deep_merge stack_body_new
167
- File.open(stack['yaml_filename'], 'w') do |file|
168
- file.write stack_body_merged.to_yaml.gsub('QuaCeraUnPuntoEsclamativo', '!')
169
- end
170
-
171
- output 'updating stack on cloudformation, (step 1)'
172
- copy_yml_files_to_s3(stack['yaml_filename'], stack['s3_key'])
173
- if not stack['stack_name'] or not stack_exists?(stack['stack_name'])
174
- return false
175
- end
176
-
177
- update_stack_url(stack['stack_name'], stack['template_url'], get_stack_parameters(stack['stack_name']))
178
- wait_for_stack_ready(stack['stack_name'], ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'])
179
-
180
- output "retrieving the list of stacks that are currently using the stack #{stack['stack_name']}"
181
- job_stacks = list_import_stacks old_ce_name + '-production'
182
- job_stacks.each do |job_stack_name|
183
- output "updating the stack #{job_stack_name} to use to the new compute environment"
184
- stack_body = get_stack_template(job_stack_name)
185
- stack_parameters = get_stack_parameters(job_stack_name).reject{ |k| k.parameter_key == 'ComputeEnvironmentExportName' }
186
- stack_parameters.push(
187
- {
188
- parameter_key: "ComputeEnvironmentExportName",
189
- parameter_value: new_ce_name
190
- }
191
- )
192
- update_stack(job_stack_name, stack_body, stack_parameters)
193
- end
194
- job_stacks.each do |job_stack_name|
195
- wait_for_stack_ready(job_stack_name, ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'])
196
- end
197
-
198
- file_content = File.read stack['yaml_filename'] + 'new'
199
- File.open stack['yaml_filename'], 'w' do |f|
200
- f.write file_content.gsub('QuaCeraUnPuntoEsclamativo', '!')
201
- end
202
-
203
- output "updating stack #{stack['stack_name']} on cloudformation to remove the old compute environment"
204
- copy_yml_files_to_s3(stack['yaml_filename'], stack['s3_key'])
205
- update_stack_url(stack['stack_name'], stack['template_url'], get_stack_parameters(stack['stack_name']))
206
- wait_for_stack_ready(stack['stack_name'], ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'])
207
-
208
- File.delete(stack['yaml_filename'] + 'new')
209
-
210
- output "cloudformation stack update for #{stack['stack_name']} done!"
211
- end
212
-
213
157
  def help_content
214
158
  <<-HELP
215
159
 
@@ -227,7 +171,7 @@ class TwigUpdateAmi
227
171
  -----------
228
172
 
229
173
  from artemide main folder run
230
- `twig-update-ami ${AMI_ID} ${AMI_NAME} ${AMI_DESCRIPTION}`
174
+ `twig-update-ami ${AMI_TEMPLATE} ${AMI_ID} ${AMI_NAME} ${AMI_DESCRIPTION} ${ENV}`
231
175
 
232
176
  Subcommand for Twig: <http://rondevera.github.io/twig/>
233
177
  Author: Eugenio Laghi <https://github.com/eugeniolaghi>
@@ -1,4 +1,11 @@
1
- require 'aws-sdk'
1
+ require 'aws-sdk-autoscaling'
2
+ require 'aws-sdk-batch'
3
+ require 'aws-sdk-cloudformation'
4
+ require 'aws-sdk-cloudfront'
5
+ require 'aws-sdk-ec2'
6
+ require 'aws-sdk-ecs'
7
+ require 'aws-sdk-elasticloadbalancingv2'
8
+ require 'aws-sdk-s3'
2
9
  require 'colorize'
3
10
  #
4
11
  module PrimaAwsClient
@@ -14,8 +21,16 @@ module PrimaAwsClient
14
21
  @asg ||= Aws::AutoScaling::Client.new
15
22
  end
16
23
 
17
- def appscaling_client
18
- @appscaling ||= Aws::ApplicationAutoScaling::Client.new
24
+ def ec2_client
25
+ @ec2 ||= Aws::EC2::Client.new
26
+ end
27
+
28
+ def alb_client
29
+ @alb ||= Aws::ElasticLoadBalancingV2::Client.new
30
+ end
31
+
32
+ def ecs_client
33
+ @ecs ||= Aws::ECS::Client.new
19
34
  end
20
35
 
21
36
  def stack_list
@@ -40,17 +55,66 @@ module PrimaAwsClient
40
55
  stacks
41
56
  end
42
57
 
43
- def create_stack(stack_name, stack_body, parameters = [], tags = [])
58
+ def cluster_list
59
+ stacks = []
60
+ next_token = ''
61
+ loop do
62
+ print '.'.yellow; STDOUT.flush
63
+ options = next_token != '' ? { next_token: next_token } : {}
64
+ begin
65
+ resp = cf_client.describe_stacks(options)
66
+ rescue Aws::CloudFormation::Errors::Throttling => e
67
+ output 'Throttling, retrying in 15 seconds'.red
68
+ sleep 15
69
+ resp = cf_client.describe_stacks(options)
70
+ end
71
+ stacks += resp.stacks
72
+ break unless resp.next_token
73
+ next_token = resp.next_token
74
+ end
75
+ puts '.'.yellow; STDOUT.flush
76
+ stacks.keep_if { |stack| stack.stack_name.include? 'ecs-cluster-qa-' }
77
+ stacks
78
+ end
79
+
80
+ def list_exports
81
+ exports = []
82
+ next_token = ''
83
+ loop do
84
+ print '.'.yellow; STDOUT.flush
85
+ options = next_token != '' ? { next_token: next_token } : {}
86
+ begin
87
+ resp = cf_client.list_exports(options)
88
+ rescue Aws::CloudFormation::Errors::Throttling => e
89
+ output 'Throttling, retrying in 15 seconds'.red
90
+ sleep 15
91
+ resp = cf_client.list_exports(options)
92
+ end
93
+ exports += resp.exports
94
+ break unless resp.next_token
95
+ next_token = resp.next_token
96
+ end
97
+ puts '.'.yellow; STDOUT.flush
98
+ exports
99
+ end
100
+
101
+ def create_stack(stack_name, stack_body, parameters = [], tags = [], role = nil)
102
+ cf_args = {
103
+ stack_name: stack_name,
104
+ template_body: stack_body,
105
+ parameters: parameters,
106
+ tags: tags,
107
+ capabilities: ['CAPABILITY_IAM'],
108
+ on_failure: 'ROLLBACK'
109
+ }
110
+
111
+ if role != nil then
112
+ cf_args.merge!(role_arn: role)
113
+ end
114
+
44
115
  begin
45
- cf_client.create_stack(
46
- stack_name: stack_name,
47
- template_body: stack_body,
48
- parameters: parameters,
49
- tags: tags,
50
- capabilities: ['CAPABILITY_IAM'],
51
- on_failure: 'ROLLBACK'
52
- )
53
- rescue Aws::CloudFormation::Errors::Throttling => e
116
+ cf_client.create_stack(cf_args)
117
+ rescue Aws::CloudFormation::Errors::Throttling, Aws::CloudFormation::Errors::LimitExceededException => e
54
118
  output 'Throttling, retrying in 15 seconds'.red
55
119
  sleep 15
56
120
  create_stack(stack_name, stack_body, parameters = [], tags = [])
@@ -59,15 +123,21 @@ module PrimaAwsClient
59
123
  end
60
124
  end
61
125
 
62
- def update_stack(stack_name, template_body, parameters = [], tags = [])
126
+ def update_stack(stack_name, template_body, parameters = [], tags = [], role = nil)
127
+ cf_args = {
128
+ stack_name: stack_name,
129
+ template_body: template_body,
130
+ parameters: parameters,
131
+ tags: tags,
132
+ capabilities: ['CAPABILITY_IAM']
133
+ }
134
+
135
+ if role != nil then
136
+ cf_args.merge!(role_arn: role)
137
+ end
138
+
63
139
  begin
64
- cf_client.update_stack(
65
- stack_name: stack_name,
66
- template_body: template_body,
67
- parameters: parameters,
68
- tags: tags,
69
- capabilities: ['CAPABILITY_IAM']
70
- )
140
+ cf_client.update_stack(cf_args)
71
141
  rescue Aws::CloudFormation::Errors::Throttling => e
72
142
  output 'Throttling, retrying in 15 seconds'.red
73
143
  sleep 15
@@ -79,15 +149,21 @@ module PrimaAwsClient
79
149
  end
80
150
  end
81
151
 
82
- def update_stack_url(stack_name, template_url, parameters = [], tags = [])
152
+ def update_stack_url(stack_name, template_url, parameters = [], tags = [], role = nil)
153
+ cf_args = {
154
+ stack_name: stack_name,
155
+ template_url: template_url,
156
+ parameters: parameters,
157
+ tags: tags,
158
+ capabilities: ['CAPABILITY_IAM']
159
+ }
160
+
161
+ if role != nil then
162
+ cf_args.merge!(role_arn: role)
163
+ end
164
+
83
165
  begin
84
- cf_client.update_stack(
85
- stack_name: stack_name,
86
- template_url: template_url,
87
- parameters: parameters,
88
- tags: tags,
89
- capabilities: ['CAPABILITY_IAM']
90
- )
166
+ cf_client.update_stack(cf_args)
91
167
  rescue Aws::CloudFormation::Errors::Throttling => e
92
168
  output 'Throttling, retrying in 15 seconds'.red
93
169
  sleep 15
@@ -143,6 +219,24 @@ module PrimaAwsClient
143
219
  output "\nStack #{stack_name} pronto!\n".green
144
220
  end
145
221
 
222
+ def wait_for_stack_removal(stack_name)
223
+ ready = false
224
+ sleep_seconds = 13
225
+ sleep 10
226
+ output "Attendo che lo stack #{stack_name} finisca di essere cancellato...\n".yellow
227
+ while !ready
228
+ ready = true if stack_deleted?(stack_name)
229
+ seconds_elapsed = 0
230
+ loop do
231
+ break if seconds_elapsed >= sleep_seconds
232
+ print '.'.yellow; STDOUT.flush
233
+ sleep 1
234
+ seconds_elapsed += 1
235
+ end
236
+ end
237
+ output "\nStack #{stack_name} eliminato!\n".green
238
+ end
239
+
146
240
  def get_stack_tags(name)
147
241
  begin
148
242
  resp = cf_client.describe_stacks(stack_name: name)
@@ -206,6 +300,23 @@ module PrimaAwsClient
206
300
  ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'].include? stack_status
207
301
  end
208
302
 
303
+ def stack_deleted?(stack_name, failed_statuses = ['ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_FAILED', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS'])
304
+ begin
305
+ resp = cf_client.describe_stacks(
306
+ stack_name: stack_name
307
+ )
308
+ stack_status = resp.stacks[0].stack_status
309
+ rescue Aws::CloudFormation::Errors::Throttling => e
310
+ print 'Throttling'.red; STDOUT.flush
311
+ return false
312
+ rescue Aws::CloudFormation::Errors::ValidationError => e
313
+ print 'Stack deleted'
314
+ return true
315
+ end
316
+ raise "The stack #{stack_name} errored out" if failed_statuses.include? stack_status
317
+ ['DELETE_COMPLETE'].include? stack_status
318
+ end
319
+
209
320
  def artifact_exists?(bucket, path)
210
321
  resp = s3_client.list_objects(
211
322
  bucket: bucket,
@@ -226,6 +337,29 @@ module PrimaAwsClient
226
337
  output "#{s3_bucket}/#{destination_path} uploadato con successo!\n".green
227
338
  end
228
339
 
340
+ def wait_for_artifact(bucket, path)
341
+ ready = artifact_exists?(bucket, path)
342
+ sleep_seconds = 13
343
+ output "Attendo che sia pronto l'artefatto #{path}...\n".yellow
344
+ retries = 0
345
+ while !ready
346
+ ready = true if artifact_exists?(bucket, path)
347
+ seconds_elapsed = 0
348
+ loop do
349
+ break if seconds_elapsed >= sleep_seconds
350
+ print '.'.yellow; STDOUT.flush
351
+ sleep 1
352
+ seconds_elapsed += 1
353
+ end
354
+ retries += 1
355
+ if retries > 150
356
+ output "\n Timeout raggiunto aspettando #{path}\n".red
357
+ exit
358
+ end
359
+ end
360
+ output "\nArtefatto #{path} creato!\n".green
361
+ end
362
+
229
363
  def list_import_stacks(export_name)
230
364
  stacks = []
231
365
  next_token = ''
@@ -246,13 +380,115 @@ module PrimaAwsClient
246
380
  stacks
247
381
  end
248
382
 
383
+ def describe_stack_resource(cluster_stack_name, logical_resource_id)
384
+ begin
385
+ resp = cf_client.describe_stack_resource({stack_name: cluster_stack_name, logical_resource_id: logical_resource_id})
386
+ rescue Aws::CloudFormation::Errors::Throttling => e
387
+ output 'Throttling, retrying in 15 seconds'.red
388
+ sleep 15
389
+ resp = describe_stack_resource(cluster_stack_name, logical_resource_id)
390
+ end
391
+ end
392
+
393
+ def describe_instances(instance_ids)
394
+ begin
395
+ resp = ec2_client.describe_instances({instance_ids: instance_ids})
396
+ rescue Aws::CloudFormation::Errors::Throttling => e
397
+ output 'Throttling, retrying in 15 seconds'.red
398
+ sleep 15
399
+ resp = describe_instances(instance_ids)
400
+ end
401
+ end
402
+
403
+ def describe_auto_scaling_groups(auto_scaling_group_names, max_records)
404
+ begin
405
+ resp = asg_client.describe_auto_scaling_groups({
406
+ auto_scaling_group_names: auto_scaling_group_names,
407
+ max_records: max_records
408
+ })
409
+ rescue Aws::CloudFormation::Errors::Throttling => e
410
+ output 'Throttling, retrying in 15 seconds'.red
411
+ sleep 15
412
+ resp = describe_auto_scaling_groups(auto_scaling_group_names, max_records)
413
+ end
414
+ end
415
+
416
+ def describe_load_balancers(load_balancer_arns)
417
+ begin
418
+ resp = alb_client.describe_load_balancers({load_balancer_arns: load_balancer_arns})
419
+ rescue Aws::ElasticLoadBalancingV2::Errors::Throttling => e
420
+ output 'Throttling, retrying in 15 seconds'.red
421
+ sleep 15
422
+ resp = describe_load_balancers(load_balancer_arns)
423
+ end
424
+ end
425
+
426
+ def update_ecs_service(cluster, service, deployment_configuration)
427
+ begin
428
+ resp = ecs_client.update_service(
429
+ cluster: cluster,
430
+ service: service,
431
+ deployment_configuration: deployment_configuration
432
+ )
433
+ rescue Aws::CloudFormation::Errors::Throttling => e
434
+ output 'Throttling, retrying in 15 seconds'.red
435
+ sleep 15
436
+ resp = update_ecs_service(cluster, service, deployment_configuration)
437
+ end
438
+ end
439
+
440
+ def describe_ecs_tasks(cluster, tasks)
441
+ begin
442
+ resp = ecs_client.describe_tasks({
443
+ cluster: cluster,
444
+ tasks: tasks
445
+ })
446
+ rescue Aws::CloudFormation::Errors::Throttling => e
447
+ output 'Throttling, retrying in 15 seconds'.red
448
+ sleep 15
449
+ resp = describe_ecs_tasks(cluster, tasks)
450
+ end
451
+ end
452
+
453
+ def run_ecs_task(cluster, task_definition, overrides, count)
454
+ begin
455
+ resp = ecs_client.run_task({
456
+ cluster: cluster,
457
+ task_definition: task_definition,
458
+ overrides: overrides,
459
+ count: count
460
+ })
461
+ rescue Aws::CloudFormation::Errors::Throttling => e
462
+ output 'Throttling, retrying in 15 seconds'.red
463
+ sleep 15
464
+ resp = run_ecs_task(cluster, task_definition, overrides, count)
465
+ end
466
+ end
467
+
249
468
  def get_autoscaling_capacity(asg_name)
250
469
  resp = asg_client.describe_auto_scaling_groups(auto_scaling_group_names: [asg_name])
251
470
  resp.auto_scaling_groups[0].desired_capacity
252
471
  end
253
472
 
254
473
  def get_spotfleet_capacity(fleet_arn)
255
- resp = appscaling_client.describe_scalable_targets(service_namespace: 'ec2', resource_ids: ["spot-fleet-request/#{fleet_arn}"])
256
- resp.scalable_targets[0].min_capacity
474
+ resp = ec2_client.describe_spot_fleet_requests(spot_fleet_request_ids: [fleet_arn])
475
+ resp.spot_fleet_request_configs[0].spot_fleet_request_config.target_capacity
476
+ end
477
+
478
+ def hashes_to_tags(hashes)
479
+ tags = []
480
+ hkeys = hashes.keys
481
+ hkeys.each do |hkey|
482
+ tags.insert(0, { key: hkey, value: hashes[hkey].to_s })
483
+ end
484
+ tags
485
+ end
486
+
487
+ def tags_to_hashes(tags)
488
+ hash = Hash.new
489
+ tags.each do |tags_obj|
490
+ hash[tags_obj.key] = tags_obj.value
491
+ end
492
+ hash
257
493
  end
258
494
  end