prima-twig 0.34.130 → 0.35.0

@@ -5,7 +5,7 @@ require_relative '../lib/prima_twig.rb'
  require_relative '../lib/prima_aws_client.rb'
  require 'launchy'
  require 'json'
- require 'aws-sdk-s3'
+ require 'aws-sdk'

  class TwigUpdateAmi
    include Command
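
Both files in this release make the same dependency change: the modular per-service gems (aws-sdk-s3 here, and the longer list in prima_aws_client.rb below) are replaced by the monolithic aws-sdk gem. The monolithic gem exposes every service client under the same Aws:: namespace, so existing call sites are unaffected; a minimal sketch:

    require 'aws-sdk'                   # one require instead of one per service

    s3  = Aws::S3::Client.new           # same class aws-sdk-s3 provided
    asg = Aws::AutoScaling::Client.new  # previously from aws-sdk-autoscaling
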
@@ -15,117 +15,70 @@ class TwigUpdateAmi
      exec "gem update prima-twig && twig update-ami #{ARGV.join ' '}" unless `gem outdated`.lines.grep(/^prima-twig \(.*\)/).empty?
      @s3 = Aws::S3::Client.new
      @s3_bucket = 'prima-deploy'
-     @templates_base_url = "https://s3-eu-west-1.amazonaws.com"
+     @instances = JSON.parse File.read('../twig-binaries/cloudformation.json')
    end

    def execute!(args)
-     update_amis(args[0], args[1], args[2], args[3], args[4])
+     if args[3] == 'staging'
+       update_amis args[0], args[1], args[2], true
+     else
+       update_amis args[0], args[1], args[2], false
+     end
    end

    private

-   def update_amis(ami_template, ami_id, ami_name, ami_description, env)
-     output "updating instance definition #{ami_template}".light_green
-     Dir.chdir 'ami'
-     update_instance_name(ami_id, ami_name, ami_description, ami_template)
-     output 'running packer update (this could take some time)'.light_green
-     new_ami_id = update_packer(ami_template, env)
-     # new_ami_id = 'ami-026890988d91ee8c6'
-     Dir.chdir '..'
-     stop_if(new_ami_id.to_s.empty?, 'Failed to generate AMI!'.red)
-     output "new ami id: #{new_ami_id}"
-
-     output 'searching for ami to update...'
-     ami_mappings = JSON.parse(@s3.get_object(bucket: @s3_bucket, key: "ami/ami-mappings.json")["body"].read())
-     old_amis = update_ami_mappings(ami_mappings, ami_template, env, new_ami_id)
-     stop_if(old_amis.empty?, "No ami to update! No #{ami_template} in env #{env}, exiting".yellow)
-
-     output "retrieving stacks that uses old ami ids: #{old_amis}"
-     exports = list_exports()
-     stacks = get_stacks_from_exports(exports, old_amis)
-     stop_if(stacks.empty?, "No stack to update found! This means that ami-mapping file is not in sync, please check manually")
-
-     stacks.each do |stack|
-       output "processing stack #{stack}"
-       if stack.include?('qa')
-         output "skipping stack #{stack} because is a qa"
+   def update_amis(ami_id, ami_name, ami_description, only_staging)
+     @instances['amis'].each do |ami|
+       if only_staging and ami['json'] != 'ecs.json'
+         output "skipping #{ami['json']} because only_staging enabled".yellow
          next
-       else
-         stack_tags = tags_to_hashes(get_stack_tags(stack))
-         stack_tags['TemplateVersion'] = stack_tags['TemplateVersion'].to_i + 1
-
-         if stack.include?('batch')
-           stack_parameters = update_stack_parameters(get_stack_parameters(stack),
-             [
-               { parameter_key: 'AMIID', parameter_value: new_ami_id },
-               { parameter_key: 'TemplateVersion', parameter_value: stack_tags['TemplateVersion'].to_s }
-             ]
-           )
-           if stack.include?('offsite-backups')
-             stack_template = File.read("./cloudformation/stacks/batch/compute-environment-offsite-backups.yml")
-           else
-             stack_template = File.read("./cloudformation/stacks/batch/compute-environment.yml")
+       end
+       output 'updating instance definition'.light_green
+       Dir.chdir 'ami'
+       update_instance_name(ami_id, ami_name, ami_description, ami['json'])
+       output 'running packer update (this could take some time)'.light_green
+       new_ami_id = update_packer ami['json']
+       Dir.chdir '..'
+       output 'new ami id: ' + new_ami_id
+       stop_if new_ami_id.to_s.empty?, 'Failed to generate AMI!'
+       ami['stacks'].each do |stack|
+         if only_staging and not stack['stack_name'].include?('allinone-staging')
+           output "skipping #{stack['stack_name']} because only_staging enabled".yellow
+           next
+         end
+         output 'updating ' + stack['yaml_filename'] + ' and copying onto s3'
+         update_yml_files(new_ami_id, stack['yaml_filename'])
+         copy_yml_files_to_s3(stack['yaml_filename'], stack['s3_key'])
+         output 'updating stack on cloudformation'
+         if stack['stack_name'] and stack_exists?(stack['stack_name'])
+           stack_parameters = get_stack_parameters(stack['stack_name'])
+           stack_parameters.each do |param|
+             if param.parameter_key.eql?('DesiredCapacity')
+               desired_capacity = get_desired_capacity(stack['stack_name'])
+               desired_capacity.nil? ? break : param.parameter_value.sub!(/[0-9]+/, desired_capacity.to_s)
+               break
+             end
            end
-         else
-           stack_parameters = update_stack_parameters(get_stack_parameters(stack),
-             [
-               { parameter_key: 'AMIID', parameter_value: new_ami_id },
-               { parameter_key: 'DesiredCapacity', parameter_value: get_desired_capacity(stack).to_s },
-               { parameter_key: 'TemplateVersion', parameter_value: stack_tags['TemplateVersion'].to_s }
-             ]
-           )
-           stack_template = File.read("./cloudformation/stacks/asg/#{stack.to_s.split("/")[1]}.yml")
+           update_stack_url(stack['stack_name'], stack['template_url'], stack_parameters)
          end
-         update_stack(stack, stack_template, stack_parameters, hashes_to_tags(stack_tags))
        end
-     end

-     stacks.each do |stack|
-       if stack.include?('qa')
-         next
+       unless ami['batch_compute_environments'].nil? or only_staging
+         ami['batch_compute_environments'].each do |ce|
+           update_batch_compute_environment(ce, new_ami_id)
+         end
        end
-       wait_for_stack_ready(stack, ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'])
-     end
-
-     output 'writing new ami mapping'
-     File.open("ami/ami-mappings.json", 'w+') do |f|
-       mapping_file = JSON.pretty_generate(ami_mappings)
-       f.write(mapping_file)
-       @s3.put_object(bucket: @s3_bucket, key: "ami/ami-mappings.json", body: mapping_file)
-     end
-
-     output 'Update finished! ( ͡° ͜ʖ ͡°)'
-   end
-
-   def get_stacks_from_exports(exports, old_amis)
-     stacks = []
-     old_amis.each do |old_ami|
-       exports.each do |export|
-         if export.value.eql?(old_ami)
-           stacks.insert(0,export.exporting_stack_id)
+       ami['stacks'].each do |stack|
+         if only_staging and not stack['stack_name'].include?('allinone-staging')
+           output "skipping #{stack['stack_name']} because only_staging enabled".yellow
+           next
+         end
+         if stack['stack_name'] and stack_exists?(stack['stack_name'])
+           wait_for_stack_ready(stack['stack_name'], ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'])
         end
       end
     end
-     stacks
-   end
-
-   def update_ami_mappings(mappings, ami_template, env, new_ami_id)
-     old_values = []
-     mappings.each do |item|
-       if item['ami_template'].eql?(ami_template) and item['env'].eql?(env)
-         old_values.insert(0,item['ami_id'])
-         item['ami_id'] = new_ami_id
-       end
-     end
-     old_values.uniq
-   end
-
-   def update_stack_parameters(stack_parameters, new_parameters)
-     new_parameters.each do |new_param|
-       stack_parameters.reject{ |k| k["parameter_key"] == new_param["parameter_key"] }
-       stack_parameters.push(new_param)
-     end
-     stack_parameters
    end

    def update_instance_name(ami_id, ami_name, ami_description, ecs_json_path)
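
The rewritten update_amis no longer receives a template name and environment from the command line; it iterates over the AMI definitions loaded in the constructor from ../twig-binaries/cloudformation.json. That file is not part of this diff, so the sketch below is only a hypothetical shape inferred from the keys the code reads (amis, json, stacks, stack_name, yaml_filename, s3_key, template_url, batch_compute_environments, label); every value here is invented:

    {
      "amis": [
        {
          "json": "ecs.json",
          "stacks": [
            {
              "stack_name": "ecs-allinone-staging",
              "yaml_filename": "cloudformation/stacks/ecs/allinone-staging.yml",
              "s3_key": "templates/ecs/allinone-staging.yml",
              "template_url": "https://s3-eu-west-1.amazonaws.com/prima-deploy/templates/ecs/allinone-staging.yml"
            }
          ],
          "batch_compute_environments": [
            {
              "label": "ComputeEnvironmentEcs",
              "stack_name": "batch-compute-ecs",
              "yaml_filename": "cloudformation/stacks/batch/compute-ecs.yml",
              "s3_key": "templates/batch/compute-ecs.yml",
              "template_url": "https://s3-eu-west-1.amazonaws.com/prima-deploy/templates/batch/compute-ecs.yml"
            }
          ]
        }
      ]
    }
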
@@ -143,17 +96,121 @@ class TwigUpdateAmi
    def get_desired_capacity(stack_name)
      stack_outputs = get_stack_outputs(stack_name)
      stack_outputs.each do |out|
-       if out.export_name.include?('EC2Fleet') or out.export_name.include?('AutoScalingGroup')
+       if out.export_name.include?('ECSAutoScalingGroup')
          return get_autoscaling_capacity(out.output_value)
+       elsif out.export_name.include?('SpotFleet')
+         return get_spotfleet_capacity(out.output_value)
        end
      end
+     return nil
    end

-   def update_packer(json_filename, env)
-     execute_command "AWS_MAX_ATTEMPTS=90 AWS_POLL_DELAY_SECONDS=60 packer build -var datadog_apikey=`biscuit get -f ../configs/secrets/common.yml common_production_apikey_datadog` -var github_token=`biscuit get -f ../configs/secrets/common.yml common_private_repo_github_token` -var drone_key=\"`biscuit get -f ../configs/secrets/common.yml drone_license_key`\" -var env=#{env} -machine-readable ./#{json_filename} | tee build.log"
+   def update_packer(json_filename)
+     execute_command "packer build -var datadog_apikey=`biscuit get -f ../configs/secrets/common.yml common_production_apikey_datadog` -machine-readable ./#{json_filename} | tee build.log"
      `grep 'artifact,0,id' build.log | cut -d, -f6 | cut -d: -f2`.sub(/\n/, '')
    end
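
update_packer drops the env variable, the extra biscuit secrets and the AWS_MAX_ATTEMPTS/AWS_POLL_DELAY_SECONDS overrides, but the AMI id is still scraped from packer's -machine-readable output. Those rows are CSV of the form timestamp,builder,type,data..., and the artifact,0,id row carries region:ami-id in its sixth field, which is exactly what the grep/cut pipeline extracts. A pure-Ruby equivalent, as a sketch (the helper name is invented):

    # e.g. 1514916800,amazon-ebs,artifact,0,id,eu-west-1:ami-0123456789abcdef0
    def ami_id_from_build_log(path = 'build.log')
      line = File.readlines(path).find { |l| l.include?('artifact,0,id') }
      line && line.chomp.split(',')[5].split(':')[1]  # field 6 -> "region:ami-id", then the id
    end
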

+   def update_yml_files(ami_id, yaml_filename)
+     file_content = File.read yaml_filename
+
+     file_content.sub!(/ami-[0-9a-z]{8}/, ami_id)
+
+     File.open yaml_filename, 'w' do |f|
+       f.write file_content
+     end
+
+     if yaml_filename.include? 'spotfleet'
+       old_handle = (File.read yaml_filename)[/InstanceReadyWaitHandleUpdate[0-9]+/]
+       if old_handle
+         handle_version = old_handle.sub('InstanceReadyWaitHandleUpdate', '').to_i + 1
+
+         old_condition = (File.read yaml_filename)[/InstanceReadyWaitConditionUpdate[0-9]+/]
+         condition_version = old_condition.sub('InstanceReadyWaitConditionUpdate', '').to_i + 1
+
+         file_content = File.read yaml_filename
+         file_content.gsub!(old_handle, 'InstanceReadyWaitHandleUpdate' + handle_version.to_s)
+         file_content.gsub!(old_condition, 'InstanceReadyWaitConditionUpdate' + condition_version.to_s)
+
+         File.open yaml_filename, 'w' do |f|
+           f.write file_content
+         end
+       end
+     end
+   end
+
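
One caveat in update_yml_files: ami-[0-9a-z]{8} matches only legacy 8-character AMI ids, while AWS has since moved to 17-character ids (ami- followed by 17 hex digits), which this substitution would leave untouched. A widened pattern would be (an assumption, not what this release ships):

    file_content.sub!(/ami-[0-9a-f]{8,17}/, ami_id)
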
+   def copy_yml_files_to_s3(source, s3_key)
+     body = File.read source
+     @s3.put_object(
+       body: body,
+       bucket: @s3_bucket,
+       key: s3_key
+     )
+   end
+
+   def update_batch_compute_environment(stack, ami_id)
+     output 'updating ' + stack['yaml_filename'] + ' to add a new compute environment'
+
+     file_content = File.read stack['yaml_filename']
+     file_content.gsub!(/(\w+:\s+)!(\w+)/i, '\1QuaCeraUnPuntoEsclamativo\2')
+     file_content_original = file_content.clone
+     old_ce_name = file_content[/#{stack['label']}[0-9]*/]
+     new_ce_version = old_ce_name.sub(stack['label'], '').to_i + 1
+     new_ce_name = stack['label'] + new_ce_version.to_s
+     file_content.gsub!(old_ce_name, new_ce_name)
+     File.open stack['yaml_filename'] + 'new', 'w' do |f|
+       f.write file_content
+     end
+     update_yml_files(ami_id, stack['yaml_filename'] + 'new')
+
+     stack_body = YAML.load(file_content_original)
+     stack_body_new = YAML.load_file(stack['yaml_filename'] + 'new')
+     stack_body_merged = stack_body.deep_merge stack_body_new
+     File.open(stack['yaml_filename'], 'w') do |file|
+       file.write stack_body_merged.to_yaml.gsub('QuaCeraUnPuntoEsclamativo', '!')
+     end
+
+     output 'updating stack on cloudformation, (step 1)'
+     copy_yml_files_to_s3(stack['yaml_filename'], stack['s3_key'])
+     if not stack['stack_name'] or not stack_exists?(stack['stack_name'])
+       return false
+     end
+
+     update_stack_url(stack['stack_name'], stack['template_url'], get_stack_parameters(stack['stack_name']))
+     wait_for_stack_ready(stack['stack_name'], ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'])
+
+     output "retrieving the list of stacks that are currently using the stack #{stack['stack_name']}"
+     job_stacks = list_import_stacks old_ce_name + '-production'
+     job_stacks.each do |job_stack_name|
+       output "updating the stack #{job_stack_name} to use to the new compute environment"
+       stack_body = get_stack_template(job_stack_name)
+       stack_parameters = get_stack_parameters(job_stack_name).reject{ |k| k.parameter_key == 'ComputeEnvironmentExportName' }
+       stack_parameters.push(
+         {
+           parameter_key: "ComputeEnvironmentExportName",
+           parameter_value: new_ce_name
+         }
+       )
+       update_stack(job_stack_name, stack_body, stack_parameters)
+     end
+     job_stacks.each do |job_stack_name|
+       wait_for_stack_ready(job_stack_name, ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'])
+     end
+
+     file_content = File.read stack['yaml_filename'] + 'new'
+     File.open stack['yaml_filename'], 'w' do |f|
+       f.write file_content.gsub('QuaCeraUnPuntoEsclamativo', '!')
+     end
+
+     output "updating stack #{stack['stack_name']} on cloudformation to remove the old compute environment"
+     copy_yml_files_to_s3(stack['yaml_filename'], stack['s3_key'])
+     update_stack_url(stack['stack_name'], stack['template_url'], get_stack_parameters(stack['stack_name']))
+     wait_for_stack_ready(stack['stack_name'], ['CREATE_FAILED', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'])
+
+     File.delete(stack['yaml_filename'] + 'new')
+
+     output "cloudformation stack update for #{stack['stack_name']} done!"
+   end
+
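
update_batch_compute_environment rolls a Batch compute environment forward in three phases: it bumps the numeric suffix on the resource matching stack['label'] and deploys a merged template containing both the old and the new compute environment; it then repoints every stack importing the old <name>-production export to the new ComputeEnvironmentExportName; finally it redeploys the original template so only the new environment survives. The QuaCeraUnPuntoEsclamativo placeholder (Italian for roughly "here there was an exclamation mark") exists because Psych does not round-trip CloudFormation's short-form intrinsics (!Ref, !Sub, ...): the tags are masked into plain scalars before the YAML.load/to_yaml round trip and restored afterwards. Note also that Hash#deep_merge is not core Ruby; it presumably comes from ActiveSupport or the deep_merge gem. A minimal round trip, assuming an invented one-line template:

    require 'yaml'

    template = "ImageId: !Ref AmiParam\n"
    masked   = template.gsub(/(\w+:\s+)!(\w+)/i, '\1QuaCeraUnPuntoEsclamativo\2')

    doc      = YAML.load(masked)  # the tag survives as a plain string, safe to merge and edit
    restored = doc.to_yaml.gsub('QuaCeraUnPuntoEsclamativo', '!')  # "ImageId: !Ref AmiParam" again
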
    def help_content
      <<-HELP

@@ -171,7 +228,7 @@ class TwigUpdateAmi
   -----------

   from artemide main folder run
-  `twig-update-ami ${AMI_TEMPLATE} ${AMI_ID} ${AMI_NAME} ${AMI_DESCRIPTION} ${ENV}`
+  `twig-update-ami ${AMI_ID} ${AMI_NAME} ${AMI_DESCRIPTION}`

   Subcommand for Twig: <http://rondevera.github.io/twig/>
   Author: Eugenio Laghi <https://github.com/eugeniolaghi>
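
The help text lists three positional arguments, but execute! above also honours an optional fourth one: passing the literal staging switches update_amis into only_staging mode (only ecs.json and the allinone-staging stacks are processed). A hypothetical invocation, with invented values:

    twig-update-ami ami-0a1b2c3d my-ecs-ami 'ECS cluster AMI' staging
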
@@ -1,11 +1,4 @@
- require 'aws-sdk-autoscaling'
- require 'aws-sdk-batch'
- require 'aws-sdk-cloudformation'
- require 'aws-sdk-cloudfront'
- require 'aws-sdk-ec2'
- require 'aws-sdk-ecs'
- require 'aws-sdk-elasticloadbalancingv2'
- require 'aws-sdk-s3'
+ require 'aws-sdk'
  require 'colorize'
  #
  module PrimaAwsClient
@@ -21,16 +14,8 @@ module PrimaAwsClient
      @asg ||= Aws::AutoScaling::Client.new
    end

-   def ec2_client
-     @ec2 ||= Aws::EC2::Client.new
-   end
-
-   def alb_client
-     @alb ||= Aws::ElasticLoadBalancingV2::Client.new
-   end
-
-   def ecs_client
-     @ecs ||= Aws::ECS::Client.new
+   def appscaling_client
+     @appscaling ||= Aws::ApplicationAutoScaling::Client.new
    end

    def stack_list
@@ -55,66 +40,17 @@ module PrimaAwsClient
      stacks
    end

-   def cluster_list
-     stacks = []
-     next_token = ''
-     loop do
-       print '.'.yellow; STDOUT.flush
-       options = next_token != '' ? { next_token: next_token } : {}
-       begin
-         resp = cf_client.describe_stacks(options)
-       rescue Aws::CloudFormation::Errors::Throttling => e
-         output 'Throttling, retrying in 15 seconds'.red
-         sleep 15
-         resp = cf_client.describe_stacks(options)
-       end
-       stacks += resp.stacks
-       break unless resp.next_token
-       next_token = resp.next_token
-     end
-     puts '.'.yellow; STDOUT.flush
-     stacks.keep_if { |stack| stack.stack_name.include? 'ecs-cluster-qa-' }
-     stacks
-   end
-
-   def list_exports
-     exports = []
-     next_token = ''
-     loop do
-       print '.'.yellow; STDOUT.flush
-       options = next_token != '' ? { next_token: next_token } : {}
-       begin
-         resp = cf_client.list_exports(options)
-       rescue Aws::CloudFormation::Errors::Throttling => e
-         output 'Throttling, retrying in 15 seconds'.red
-         sleep 15
-         resp = cf_client.list_exports(options)
-       end
-       exports += resp.exports
-       break unless resp.next_token
-       next_token = resp.next_token
-     end
-     puts '.'.yellow; STDOUT.flush
-     exports
-   end
-
-   def create_stack(stack_name, stack_body, parameters = [], tags = [], role = nil)
-     cf_args = {
-       stack_name: stack_name,
-       template_body: stack_body,
-       parameters: parameters,
-       tags: tags,
-       capabilities: ['CAPABILITY_IAM'],
-       on_failure: 'ROLLBACK'
-     }
-
-     if role != nil then
-       cf_args.merge!(role_arn: role)
-     end
-
+   def create_stack(stack_name, stack_body, parameters = [], tags = [])
      begin
-       cf_client.create_stack(cf_args)
-     rescue Aws::CloudFormation::Errors::Throttling, Aws::CloudFormation::Errors::LimitExcedeedException => e
+       cf_client.create_stack(
+         stack_name: stack_name,
+         template_body: stack_body,
+         parameters: parameters,
+         tags: tags,
+         capabilities: ['CAPABILITY_IAM'],
+         on_failure: 'ROLLBACK'
+       )
+     rescue Aws::CloudFormation::Errors::Throttling => e
        output 'Throttling, retrying in 15 seconds'.red
        sleep 15
        create_stack(stack_name, stack_body, parameters = [], tags = [])
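
create_stack loses the optional role_arn argument and the rescue of the (misspelled) LimitExcedeedException, keeping only the Throttling branch. Two details of the retry, common to both versions, are worth flagging: the rescue re-invokes create_stack(stack_name, stack_body, parameters = [], tags = []), and since an assignment inside a Ruby argument list evaluates to its right-hand side, a throttled retry resubmits with empty parameters and tags; and the same rescue/sleep/retry boilerplate recurs in every wrapper in this module. A hypothetical helper that would fix both (not part of the gem):

    def with_throttling_retry(delay = 15)
      yield
    rescue Aws::CloudFormation::Errors::Throttling
      output "Throttling, retrying in #{delay} seconds".red
      sleep delay
      retry  # re-runs the block with the original arguments intact
    end

    def create_stack(stack_name, stack_body, parameters = [], tags = [])
      with_throttling_retry do
        cf_client.create_stack(
          stack_name: stack_name,
          template_body: stack_body,
          parameters: parameters,
          tags: tags,
          capabilities: ['CAPABILITY_IAM'],
          on_failure: 'ROLLBACK'
        )
      end
    end
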
@@ -123,21 +59,15 @@ module PrimaAwsClient
      end
    end

-   def update_stack(stack_name, template_body, parameters = [], tags = [], role = nil)
-     cf_args = {
-       stack_name: stack_name,
-       template_body: template_body,
-       parameters: parameters,
-       tags: tags,
-       capabilities: ['CAPABILITY_IAM']
-     }
-
-     if role != nil then
-       cf_args.merge!(role_arn: role)
-     end
-
+   def update_stack(stack_name, template_body, parameters = [], tags = [])
      begin
-       cf_client.update_stack(cf_args)
+       cf_client.update_stack(
+         stack_name: stack_name,
+         template_body: template_body,
+         parameters: parameters,
+         tags: tags,
+         capabilities: ['CAPABILITY_IAM']
+       )
      rescue Aws::CloudFormation::Errors::Throttling => e
        output 'Throttling, retrying in 15 seconds'.red
        sleep 15
@@ -149,21 +79,15 @@ module PrimaAwsClient
      end
    end

-   def update_stack_url(stack_name, template_url, parameters = [], tags = [], role = nil)
-     cf_args = {
-       stack_name: stack_name,
-       template_url: template_url,
-       parameters: parameters,
-       tags: tags,
-       capabilities: ['CAPABILITY_IAM']
-     }
-
-     if role != nil then
-       cf_args.merge!(role_arn: role)
-     end
-
+   def update_stack_url(stack_name, template_url, parameters = [], tags = [])
      begin
-       cf_client.update_stack(cf_args)
+       cf_client.update_stack(
+         stack_name: stack_name,
+         template_url: template_url,
+         parameters: parameters,
+         tags: tags,
+         capabilities: ['CAPABILITY_IAM']
+       )
      rescue Aws::CloudFormation::Errors::Throttling => e
        output 'Throttling, retrying in 15 seconds'.red
        sleep 15
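
update_stack and update_stack_url differ only in passing template_body versus template_url to the same UpdateStack call: CloudFormation accepts a template either inline (TemplateBody, limited to 51,200 bytes) or from S3 (TemplateURL, up to 460,800 bytes), which is why twig-update-ami first copies each yml file to the prima-deploy bucket and then updates by URL. A sketch with an invented stack name and key:

    copy_yml_files_to_s3('cloudformation/stacks/ecs/allinone-staging.yml', 'templates/ecs/allinone-staging.yml')
    update_stack_url('ecs-allinone-staging',
                     'https://s3-eu-west-1.amazonaws.com/prima-deploy/templates/ecs/allinone-staging.yml',
                     get_stack_parameters('ecs-allinone-staging'))
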
@@ -219,24 +143,6 @@ module PrimaAwsClient
      output "\nStack #{stack_name} pronto!\n".green
    end

-   def wait_for_stack_removal(stack_name)
-     ready = false
-     sleep_seconds = 13
-     sleep 10
-     output "Attendo che lo stack #{stack_name} finisca di essere cancellato...\n".yellow
-     while !ready
-       ready = true if stack_deleted?(stack_name)
-       seconds_elapsed = 0
-       loop do
-         break if seconds_elapsed >= sleep_seconds
-         print '.'.yellow; STDOUT.flush
-         sleep 1
-         seconds_elapsed += 1
-       end
-     end
-     output "\nStack #{stack_name} eliminato!\n".green
-   end
-
    def get_stack_tags(name)
      begin
        resp = cf_client.describe_stacks(stack_name: name)
@@ -300,23 +206,6 @@ module PrimaAwsClient
      ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE', 'ROLLBACK_COMPLETE'].include? stack_status
    end

-   def stack_deleted?(stack_name, failed_statuses = ['ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'DELETE_FAILED', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS'])
-     begin
-       resp = cf_client.describe_stacks(
-         stack_name: stack_name
-       )
-       stack_status = resp.stacks[0].stack_status
-     rescue Aws::CloudFormation::Errors::Throttling => e
-       print 'Throttling'.red; STDOUT.flush
-       return false
-     rescue Aws::CloudFormation::Errors::ValidationError => e
-       print 'Stack deleted'
-       return true
-     end
-     raise "The stack #{stack_name} errored out" if failed_statuses.include? stack_status
-     ['DELETE_COMPLETE'].include? stack_status
-   end
-
    def artifact_exists?(bucket, path)
      resp = s3_client.list_objects(
        bucket: bucket,
@@ -337,29 +226,6 @@ module PrimaAwsClient
      output "#{s3_bucket}/#{destination_path} uploadato con successo!\n".green
    end

-   def wait_for_artifact(bucket, path)
-     ready = artifact_exists?(bucket, path)
-     sleep_seconds = 13
-     output "Attendo che sia pronto l'artefatto #{path}...\n".yellow
-     retries = 0
-     while !ready
-       ready = true if artifact_exists?(bucket, path)
-       seconds_elapsed = 0
-       loop do
-         break if seconds_elapsed >= sleep_seconds
-         print '.'.yellow; STDOUT.flush
-         sleep 1
-         seconds_elapsed += 1
-       end
-       retries += 1
-       if retries > 150
-         output "\n Timeout raggiunto aspettando #{path}\n".red
-         exit
-       end
-     end
-     output "\nArtefatto #{path} creato!\n".green
-   end
-
    def list_import_stacks(export_name)
      stacks = []
      next_token = ''
@@ -380,115 +246,13 @@ module PrimaAwsClient
      stacks
    end

-   def describe_stack_resource(cluster_stack_name, logical_resource_id)
-     begin
-       resp = cf_client.describe_stack_resource({stack_name: cluster_stack_name, logical_resource_id: logical_resource_id})
-     rescue Aws::CloudFormation::Errors::Throttling => e
-       output 'Throttling, retrying in 15 seconds'.red
-       sleep 15
-       resp = describe_stack_resource(cluster_stack_name, logical_resource_id)
-     end
-   end
-
-   def describe_instances(instance_ids)
-     begin
-       resp = ec2_client.describe_instances({instance_ids: instance_ids})
-     rescue Aws::CloudFormation::Errors::Throttling => e
-       output 'Throttling, retrying in 15 seconds'.red
-       sleep 15
-       resp = describe_instances(instance_ids)
-     end
-   end
-
-   def describe_auto_scaling_groups(auto_scaling_group_names, max_records)
-     begin
-       resp = asg_client.describe_auto_scaling_groups({
-         auto_scaling_group_names: auto_scaling_group_names,
-         max_records: max_records
-       })
-     rescue Aws::CloudFormation::Errors::Throttling => e
-       output 'Throttling, retrying in 15 seconds'.red
-       sleep 15
-       resp = describe_auto_scaling_groups(auto_scaling_group_names, max_records)
-     end
-   end
-
-   def describe_load_balancers(load_balancer_arns)
-     begin
-       resp = alb_client.describe_load_balancers({load_balancer_arns: load_balancer_arns})
-     rescue Aws::ElasticLoadBalancingV2::Errors::Throttling => e
-       output 'Throttling, retrying in 15 seconds'.red
-       sleep 15
-       resp = describe_load_balancers(load_balancer_arns)
-     end
-   end
-
-   def update_ecs_service(cluster, service, deployment_configuration)
-     begin
-       resp = ecs_client.update_service(
-         cluster: cluster,
-         service: service,
-         deployment_configuration: deployment_configuration
-       )
-     rescue Aws::CloudFormation::Errors::Throttling => e
-       output 'Throttling, retrying in 15 seconds'.red
-       sleep 15
-       resp = update_ecs_service(cluster, service, deployment_configuration)
-     end
-   end
-
-   def describe_ecs_tasks(cluster, tasks)
-     begin
-       resp = ecs_client.describe_tasks({
-         cluster: cluster,
-         tasks: tasks
-       })
-     rescue Aws::CloudFormation::Errors::Throttling => e
-       output 'Throttling, retrying in 15 seconds'.red
-       sleep 15
-       resp = describe_ecs_tasks(cluster, tasks)
-     end
-   end
-
-   def run_ecs_task(cluster, task_definition, overrides, count)
-     begin
-       resp = ecs_client.run_task({
-         cluster: cluster,
-         task_definition: task_definition,
-         overrides: overrides,
-         count: count
-       })
-     rescue Aws::CloudFormation::Errors::Throttling => e
-       output 'Throttling, retrying in 15 seconds'.red
-       sleep 15
-       resp = run_ecs_task(cluster, task_definition, overrides, count)
-     end
-   end
-
    def get_autoscaling_capacity(asg_name)
      resp = asg_client.describe_auto_scaling_groups(auto_scaling_group_names: [asg_name])
      resp.auto_scaling_groups[0].desired_capacity
    end

    def get_spotfleet_capacity(fleet_arn)
-     resp = ec2_client.describe_spot_fleet_requests(spot_fleet_request_ids: [fleet_arn])
-     resp.spot_fleet_request_configs[0].spot_fleet_request_config.target_capacity
-   end
-
-   def hashes_to_tags(hashes)
-     tags = []
-     hkeys = hashes.keys
-     hkeys.each do |hkey|
-       tags.insert(0, { key: hkey, value: hashes[hkey].to_s })
-     end
-     tags
-   end
-
-   def tags_to_hashes(tags)
-     hash = Hash.new
-     tags.each do |tags_obj|
-       hash[tags_obj.key] = tags_obj.value
-     end
-     hash
+     resp = appscaling_client.describe_scalable_targets(service_namespace: 'ec2', resource_ids: ["spot-fleet-request/#{fleet_arn}"])
+     resp.scalable_targets[0].min_capacity
    end
  end
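
get_spotfleet_capacity changes both data source and meaning: 0.34.130 asked EC2 for the fleet's live target_capacity, while 0.35.0 reads the Application Auto Scaling scalable target registered for the fleet and returns min_capacity, i.e. the configured floor, since a scalable target only exposes its min/max bounds. Side by side (a sketch; fleet_arn is a spot fleet request id, as in the method above):

    live  = Aws::EC2::Client.new
              .describe_spot_fleet_requests(spot_fleet_request_ids: [fleet_arn])
              .spot_fleet_request_configs[0].spot_fleet_request_config.target_capacity

    floor = Aws::ApplicationAutoScaling::Client.new
              .describe_scalable_targets(service_namespace: 'ec2',
                                         resource_ids: ["spot-fleet-request/#{fleet_arn}"])
              .scalable_targets[0].min_capacity
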