ecs_deploy 0.1.2 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +12 -4
- data/ecs_deploy.gemspec +1 -1
- data/lib/ecs_deploy/auto_scaler.rb +233 -86
- data/lib/ecs_deploy/capistrano.rb +3 -8
- data/lib/ecs_deploy/service.rb +4 -14
- data/lib/ecs_deploy/task_definition.rb +4 -1
- data/lib/ecs_deploy/version.rb +1 -1
- data/lib/ecs_deploy.rb +1 -0
- metadata +4 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a5027056d7641256dc566be9a2fd16dff8046f42
+  data.tar.gz: 009c12a42f0fdcd18f3c8a53b35f0e48055112a8
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c53395003a3e8953aef1d63cdf7a59d89b1f45e90b0acef1a00ff4420a4e96139e854427e204650aecfbf02f6a064348a6dea9f5919ef6b161a4e962e9d2b6bf
+  data.tar.gz: 82ce007f7e3e5ac6f4c30a705a6c16ca16889302d4c316a08990fae786bb403f9679582788584a23032d6894f04bcb6b321c82155c759844283c2400279162e2
data/README.md
CHANGED
@@ -96,10 +96,18 @@ set :ecs_tasks, [
 set :ecs_services, [
   {
     name: "myapp-#{fetch(:rails_env)}",
-
-
-
-
+    load_balancers: [
+      {
+        load_balancer_name: "service-elb-name",
+        container_port: 443,
+        container_name: "nginx",
+      },
+      {
+        target_group_arn: "alb_target_group_arn",
+        container_port: 443,
+        container_name: "nginx",
+      }
+    ],
     desired_count: 1,
     deployment_configuration: {maximum_percent: 200, minimum_healthy_percent: 50},
   },
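The new `load_balancers` array replaces the single-ELB options that 0.1.x services used (the `elb_*` keys removed from capistrano.rb and service.rb later in this diff) and is handed to ECS `create_service` unchanged, so each entry can target either a classic ELB (`load_balancer_name`) or an ALB target group (`target_group_arn`). A minimal migration sketch for one classic ELB; all values are placeholders, not part of the diff:

# 0.1.x style (removed in 0.2.0):
#   elb_name: "service-elb-name", elb_service_port: 443, elb_container_name: "nginx"
# 0.2.0 style: one hash per load balancer.
set :ecs_services, [
  {
    name: "myapp-#{fetch(:rails_env)}",
    load_balancers: [
      {
        load_balancer_name: "service-elb-name", # placeholder classic ELB name
        container_port: 443,
        container_name: "nginx",
      },
    ],
    desired_count: 1,
  },
]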
data/ecs_deploy.gemspec
CHANGED
@@ -18,7 +18,7 @@ Gem::Specification.new do |spec|
   spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
   spec.require_paths = ["lib"]

-  spec.add_runtime_dependency "aws-sdk", "~> 2.
+  spec.add_runtime_dependency "aws-sdk", "~> 2.4"
   spec.add_runtime_dependency "terminal-table"
   spec.add_runtime_dependency "paint"

data/lib/ecs_deploy/auto_scaler.rb
CHANGED
@@ -8,74 +8,83 @@ module EcsDeploy
 attr_reader :logger, :error_logger

 def run(yaml_path, log_file = nil, error_log_file = nil)
-  trap(:TERM) {
+  trap(:TERM) { @stop = true }
+  trap(:INT) { @stop = true }
   @logger = Logger.new(log_file || STDOUT)
+  @logger.level = Logger.const_get(ENV["ECS_AUTO_SCALER_LOG_LEVEL"].upcase) if ENV["ECS_AUTO_SCALER_LOG_LEVEL"]
   STDOUT.sync = true unless log_file
   @error_logger = Logger.new(error_log_file || STDERR)
+  @error_logger.level = Logger.const_get(ENV["ECS_AUTO_SCALER_LOG_LEVEL"].upcase) if ENV["ECS_AUTO_SCALER_LOG_LEVEL"]
   STDERR.sync = true unless error_log_file
   load_config(yaml_path)
+  service_configs
+  auto_scaling_group_configs

-
-
+  config_groups = service_configs.group_by { |s| [s.region, s.auto_scaling_group_name] }
+  ths = config_groups.map do |(region, auto_scaling_group_name), configs|
+    asg_config = auto_scaling_group_configs.find { |c| c.name == auto_scaling_group_name && c.region == region }
+    Thread.new(asg_config, configs, &method(:main_loop))
   end
-  end

-
-  @stop = true
+  ths.each(&:join)
 end

-def
-
-
-
-
-  s.upscale_triggers.each do |trigger|
-    step = trigger.step || s.step
-    next if difference >= step
+def main_loop(asg_config, configs)
+  loop_with_polling_interval("loop of #{asg_config.name}") do
+    ths = configs.map do |service_config|
+      Thread.new(service_config) do |s|
+        next if s.idle?

-
-    logger.info "Fire upscale trigger of #{s.name} by #{trigger.alarm_name} #{trigger.state}"
-    difference = step
-  end
-  end
+        @logger.debug "Start service scaling of #{s.name}"

-
-
-    if trigger.match?
-      logger.info "Fire downscale trigger of #{s.name} by #{trigger.alarm_name} #{trigger.state}"
+        difference = 0
+        s.upscale_triggers.each do |trigger|
           step = trigger.step || s.step
-
+          next if difference >= step
+
+          if trigger.match?
+            logger.info "Fire upscale trigger of #{s.name} by #{trigger.alarm_name} #{trigger.state}"
+            difference = step
+          end
         end
-      end
-    end

-
-
-
+        if difference == 0 && s.desired_count > s.current_min_task_count
+          s.downscale_triggers.each do |trigger|
+            next unless trigger.match?

-
-
-
+            logger.info "Fire downscale trigger of #{s.name} by #{trigger.alarm_name} #{trigger.state}"
+            step = trigger.step || s.step
+            difference = [difference, -step].min
+          end
+        end

-
-
+        if s.current_min_task_count > s.desired_count + difference
+          difference = s.current_min_task_count - s.desired_count
+        end
+
+        if difference >= 0 && s.desired_count > s.max_task_count.max
+          difference = s.max_task_count.max - s.desired_count
+        end
+
+        if difference != 0
+          s.update_service(difference)
+        end
+      end
     end
-  end

-
-    total_service_count = configs.inject(0) { |sum, s| sum + s.desired_count }
-    asg_config = auto_scaling_group_configs.find { |c| c.name == auto_scaling_group_name && c.region == region }
-    asg_config.update_auto_scaling_group(total_service_count)
-  end
+    ths.each(&:join)

-
+    @logger.debug "Start asg scaling of #{asg_config.name}"

-
+    total_service_count = configs.inject(0) { |sum, s| sum + s.desired_count }
+    asg_config.update_auto_scaling_group(total_service_count, configs[0])
+    asg_config.detach_and_terminate_orphan_instances(configs[0])
+  end
 end

 def load_config(yaml_path)
   @config = YAML.load_file(yaml_path)
-  @polling_interval = @config["polling_interval"]
+  @polling_interval = @config["polling_interval"] || 30
 end

 def service_configs
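The rewritten `run` loads the YAML once, groups `service_configs` by `[region, auto_scaling_group_name]`, and drives one `main_loop` thread per group until SIGTERM or SIGINT sets `@stop`. A hedged usage sketch; it assumes `AutoScaler` exposes `run` as a module function (which the `AutoScaler.logger` calls elsewhere in this file suggest), and the paths are placeholders:

require "ecs_deploy"
require "ecs_deploy/auto_scaler"

# Optional: picked up by the new logger-level lines above.
ENV["ECS_AUTO_SCALER_LOG_LEVEL"] ||= "debug"

# Blocks until SIGTERM/SIGINT; "polling_interval" in the YAML falls back to 30 seconds.
EcsDeploy::AutoScaler.run("config/ecs_auto_scaler.yml", "log/ecs_auto_scaler.log")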
@@ -85,15 +94,32 @@ module EcsDeploy
 def auto_scaling_group_configs
   @auto_scaling_group_configs ||= @config["auto_scaling_groups"].map(&AutoScalingConfig.method(:new))
 end
-end

-
-
-
-
+private
+
+def wait_polling_interval?(last_executed_at)
+  current = Process.clock_gettime(Process::CLOCK_MONOTONIC, :second)
+  diff = current - last_executed_at
+  diff <= @polling_interval
+end
+
+def loop_with_polling_interval(name)
+  @logger.debug "Start #{name}"
+
+  last_executed_at = 0
+  loop do
+    break if @stop
+    sleep 1
+    next if wait_polling_interval?(last_executed_at)
+    yield
+    last_executed_at = Process.clock_gettime(Process::CLOCK_MONOTONIC, :second)
   end
+
+  @logger.debug "Stop #{name}"
 end
+end

+module ConfigBase
   def initialize(attributes = {})
     attributes.each do |key, val|
       send("#{key}=", val)
@@ -104,7 +130,6 @@ module EcsDeploy
 SERVICE_CONFIG_ATTRIBUTES = %i(name cluster region auto_scaling_group_name step max_task_count min_task_count idle_time scheduled_min_task_count cooldown_time_for_reach_max upscale_triggers downscale_triggers desired_count)
 ServiceConfig = Struct.new(*SERVICE_CONFIG_ATTRIBUTES) do
   include ConfigBase
-  extend ConfigBase::ClassMethods

   def initialize(attributes = {})
     super(attributes)
@@ -123,16 +148,22 @@ module EcsDeploy
   end

   def client
-
+    Thread.current["ecs_auto_scaler_ecs_#{region}"] ||= Aws::ECS::Client.new(
       access_key_id: EcsDeploy.config.access_key_id,
       secret_access_key: EcsDeploy.config.secret_access_key,
       region: region
     )
   end

+  def clear_client
+    Thread.current["ecs_auto_scaler_ecs_#{region}"] = nil
+  end
+
   def idle?
     return false unless @last_updated_at
-
+
+    diff = Process.clock_gettime(Process::CLOCK_MONOTONIC, :second) - @last_updated_at
+    diff < idle_time
   end

   def current_min_task_count
@@ -147,7 +178,7 @@ module EcsDeploy

   def overheat?
     return false unless @reach_max_at
-    (
+    (Process.clock_gettime(Process::CLOCK_MONOTONIC, :second) - @reach_max_at) > cooldown_time_for_reach_max
   end

   def fetch_service
@@ -156,10 +187,11 @@ module EcsDeploy
     res.services[0]
   rescue => e
     AutoScaler.error_logger.error(e)
-
+    clear_client
   end

-  def update_service(
+  def update_service(difference)
+    next_desired_count = desired_count + difference
     current_level = max_task_level(desired_count)
     next_level = max_task_level(next_desired_count)
     if current_level < next_level && overheat? # next max
@@ -168,12 +200,14 @@ module EcsDeploy
       AutoScaler.logger.info "Service \"#{name}\" is overheat, uses next max count"
     elsif current_level < next_level && !overheat? # wait cooldown
       level = current_level
-
-
+      now = Process.clock_gettime(Process::CLOCK_MONOTONIC, :second)
+      @reach_max_at ||= now
+      AutoScaler.logger.info "Service \"#{name}\" waits cooldown elapsed #{(now - @reach_max_at).to_i}sec"
     elsif current_level == next_level && next_desired_count >= max_task_count[current_level] # reach current max
       level = current_level
-
-
+      now = Process.clock_gettime(Process::CLOCK_MONOTONIC, :second)
+      @reach_max_at ||= now
+      AutoScaler.logger.info "Service \"#{name}\" waits cooldown elapsed #{(now - @reach_max_at).to_i}sec"
     elsif current_level == next_level && next_desired_count < max_task_count[current_level]
       level = current_level
       @reach_max_at = nil
@@ -190,12 +224,38 @@ module EcsDeploy
       service: name,
       desired_count: next_desired_count,
     )
-
+    client.wait_until(:services_stable, cluster: cluster, services: [name]) do |w|
+      w.before_wait do
+        AutoScaler.logger.debug "wait service stable [#{name}]"
+      end
+    end if difference < 0
+    @last_updated_at = Process.clock_gettime(Process::CLOCK_MONOTONIC, :second)
     self.desired_count = next_desired_count
     AutoScaler.logger.info "Update service \"#{name}\": desired_count -> #{next_desired_count}"
   rescue => e
     AutoScaler.error_logger.error(e)
-
+    clear_client
+  end
+
+  def fetch_container_instances
+    arns = []
+    resp = nil
+    loop do
+      options = {cluster: cluster}
+      options.merge(next_token: resp.next_token) if resp && resp.next_token
+      resp = client.list_container_instances(options)
+      arns.concat(resp.container_instance_arns)
+      break unless resp.next_token
+    end
+
+    chunk_size = 50
+    container_instances = []
+    arns.each_slice(chunk_size) do |arn_chunk|
+      is = client.describe_container_instances(cluster: cluster, container_instances: arn_chunk).container_instances
+      container_instances.concat(is)
+    end
+
+    container_instances
   end

   private
@@ -207,69 +267,156 @@ module EcsDeploy

 TriggerConfig = Struct.new(:alarm_name, :region, :state, :step) do
   include ConfigBase
-  extend ConfigBase::ClassMethods
-
-  def self.alarm_cache
-    @alarm_cache ||= {}
-  end
-
-  def self.clear_alarm_cache
-    @alarm_cache.clear if @alarm_cache
-  end

   def client
-
+    Thread.current["ecs_auto_scaler_cloud_watch_#{region}"] ||= Aws::CloudWatch::Client.new(
       access_key_id: EcsDeploy.config.access_key_id,
       secret_access_key: EcsDeploy.config.secret_access_key,
       region: region
     )
   end

+  def clear_client
+    Thread.current["ecs_auto_scaler_cloud_watch_#{region}"] = nil
+  end
+
   def match?
     fetch_alarm.state_value == state
   end

   def fetch_alarm
-    alarm_cache = self.class.alarm_cache
-    return alarm_cache[region][alarm_name] if alarm_cache[region] && alarm_cache[region][alarm_name]
-
     res = client.describe_alarms(alarm_names: [alarm_name])
+
     raise "Alarm \"#{alarm_name}\" is not found" if res.metric_alarms.empty?
     res.metric_alarms[0].tap do |alarm|
-      AutoScaler.logger.debug(alarm.
-      alarm_cache[region] ||= {}
-      alarm_cache[region][alarm_name] = alarm
+      AutoScaler.logger.debug("#{alarm.alarm_name} state is #{alarm.state_value}")
     end
   rescue => e
     AutoScaler.error_logger.error(e)
-
+    clear_client
   end
 end

 AutoScalingConfig = Struct.new(:name, :region, :buffer) do
   include ConfigBase
-  extend ConfigBase::ClassMethods

   def client
-
+    Thread.current["ecs_auto_scaler_auto_scaling_#{region}"] ||= Aws::AutoScaling::Client.new(
       access_key_id: EcsDeploy.config.access_key_id,
       secret_access_key: EcsDeploy.config.secret_access_key,
       region: region
     )
   end

-  def
+  def clear_client
+    Thread.current["ecs_auto_scaler_auto_scaling_#{region}"] = nil
+  end
+
+  def ec2_client
+    Thread.current["ecs_auto_scaler_ec2_#{region}"] ||= Aws::EC2::Client.new(
+      access_key_id: EcsDeploy.config.access_key_id,
+      secret_access_key: EcsDeploy.config.secret_access_key,
+      region: region
+    )
+  end
+
+  def clear_ec2_client
+    Thread.current["ecs_auto_scaler_ec2_#{region}"] = nil
+  end
+
+  def instances(reload: false)
+    if reload || @instances.nil?
+      resp = client.describe_auto_scaling_groups({
+        auto_scaling_group_names: [name],
+      })
+      @instances = resp.auto_scaling_groups[0].instances
+    else
+      @instances
+    end
+  end
+
+  def update_auto_scaling_group(total_service_count, service_config)
     desired_capacity = total_service_count + buffer.to_i
-
+
+    current_asg = client.describe_auto_scaling_groups({
+      auto_scaling_group_names: [name],
+    }).auto_scaling_groups[0]
+
+    if current_asg.desired_capacity > desired_capacity
+      diff = current_asg.desired_capacity - desired_capacity
+      container_instances = service_config.fetch_container_instances
+      deregisterable_instances = container_instances.select do |i|
+        i.pending_tasks_count == 0 && i.running_tasks_count == 0
+      end
+
+      AutoScaler.logger.info "Fetch deregisterable instances: #{deregisterable_instances.map(&:ec2_instance_id).inspect}"
+
+      deregistered_instance_ids = []
+      deregisterable_instances.each do |i|
+        break if deregistered_instance_ids.size >= diff
+
+        begin
+          service_config.client.deregister_container_instance(cluster: service_config.cluster, container_instance: i.container_instance_arn, force: false)
+          deregistered_instance_ids << i.ec2_instance_id
+        rescue Aws::ECS::Errors::InvalidParameterException
+        end
+      end
+
+      AutoScaler.logger.info "Deregistered instances: #{deregistered_instance_ids.inspect}"
+
+      detach_and_terminate_instances(deregistered_instance_ids)
+
+      AutoScaler.logger.info "Update auto scaling group \"#{name}\": desired_capacity -> #{desired_capacity}"
+    elsif current_asg.desired_capacity < desired_capacity
+      client.update_auto_scaling_group(
+        auto_scaling_group_name: name,
+        min_size: 0,
+        max_size: [current_asg.max_size, desired_capacity].max,
+        desired_capacity: desired_capacity,
+      )
+      AutoScaler.logger.info "Update auto scaling group \"#{name}\": desired_capacity -> #{desired_capacity}"
+    end
+  rescue => e
+    AutoScaler.error_logger.error(e)
+    clear_client
+  end
+
+  def detach_and_terminate_instances(instance_ids)
+    return if instance_ids.empty?
+
+    client.detach_instances(
       auto_scaling_group_name: name,
-
-
-      desired_capacity: desired_capacity,
+      instance_ids: instance_ids,
+      should_decrement_desired_capacity: true
     )
-
+
+    AutoScaler.logger.info "Detach instances from ASG #{name}: #{instance_ids.inspect}"
+    sleep 3
+
+    ec2_client.terminate_instances(instance_ids: instance_ids)
+
+    AutoScaler.logger.info "Terminated instances: #{instance_ids.inspect}"
+  rescue => e
+    AutoScaler.error_logger.error(e)
+    clear_client
+    clear_ec2_client
+  end
+
+  def detach_and_terminate_orphan_instances(service_config)
+    container_instance_ids = service_config.fetch_container_instances.map(&:ec2_instance_id)
+    orphans = instances(reload: true).reject { |i| container_instance_ids.include?(i.instance_id) }.map(&:instance_id)
+
+    return if orphans.empty?
+
+    targets = ec2_client.describe_instances(instance_ids: orphans).reservations[0].instances.select do |i|
+      (Time.now - i.launch_time) > 600
+    end
+
+    detach_and_terminate_instances(targets.map(&:instance_id))
   rescue => e
     AutoScaler.error_logger.error(e)
-
+    clear_client
+    clear_ec2_client
   end
 end
 end
data/lib/ecs_deploy/capistrano.rb
CHANGED
@@ -27,6 +27,7 @@ namespace :ecs do
       region: r,
       task_definition_name: t[:name],
       container_definitions: t[:container_definitions],
+      task_role_arn: t[:task_role_arn],
       volumes: t[:volumes]
     )
     task_definition.register
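With this hunk each `ecs_tasks` entry can carry an IAM task role that is forwarded to the task definition. A short Capistrano sketch; the role ARN and container definition are placeholders:

set :ecs_tasks, [
  {
    name: "myapp-#{fetch(:rails_env)}",
    # New in 0.2.0: forwarded to the task definition as task_role_arn (optional).
    task_role_arn: "arn:aws:iam::123456789012:role/myapp-task-role",
    container_definitions: [
      {name: "nginx", image: "nginx:latest", cpu: 128, memory: 128, essential: true},
    ],
    volumes: [],
  },
]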
@@ -58,10 +59,7 @@ namespace :ecs do
       cluster: service[:cluster] || fetch(:ecs_default_cluster),
       service_name: service[:name],
       task_definition_name: service[:task_definition_name],
-
-      elb_service_port: service[:elb_service_port],
-      elb_healthcheck_port: service[:elb_healthcheck_port],
-      elb_container_name: service[:elb_container_name],
+      load_balancers: service[:load_balancers],
       desired_count: service[:desired_count],
     }
     service_options[:deployment_configuration] = service[:deployment_configuration] if service[:deployment_configuration]
@@ -121,10 +119,7 @@ namespace :ecs do
       cluster: service[:cluster] || fetch(:ecs_default_cluster),
       service_name: service[:name],
       task_definition_name: rollback_arn,
-
-      elb_service_port: service[:elb_service_port],
-      elb_healthcheck_port: service[:elb_healthcheck_port],
-      elb_container_name: service[:elb_container_name],
+      load_balancers: service[:load_balancers],
       desired_count: service[:desired_count],
     }
     service_options[:deployment_configuration] = service[:deployment_configuration] if service[:deployment_configuration]
data/lib/ecs_deploy/service.rb
CHANGED
@@ -7,17 +7,14 @@ module EcsDeploy

   def initialize(
     cluster:, service_name:, task_definition_name: nil, revision: nil,
-
+    load_balancers: nil,
     desired_count: nil, deployment_configuration: {maximum_percent: 200, minimum_healthy_percent: 100},
     region: nil
   )
     @cluster = cluster
     @service_name = service_name
     @task_definition_name = task_definition_name || service_name
-    @
-    @elb_service_port = elb_service_port
-    @elb_healthcheck_port = elb_healthcheck_port
-    @elb_container_name = elb_container_name
+    @load_balancers = load_balancers
     @desired_count = desired_count
     @deployment_configuration = deployment_configuration
     @revision = revision
@@ -44,16 +41,10 @@ module EcsDeploy
       service_name: @service_name,
       desired_count: @desired_count.to_i,
     })
-    if @
+    if @load_balancers
       service_options.merge!({
         role: EcsDeploy.config.ecs_service_role,
-        load_balancers:
-          {
-            load_balancer_name: @elb_name,
-            container_name: @elb_container_name,
-            container_port: @elb_service_port,
-          }
-        ],
+        load_balancers: @load_balancers,
       })
     end
     @response = @client.create_service(service_options)
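`Service` now takes the `load_balancers` array verbatim and only merges `role:` and `load_balancers:` into the `create_service` options when the array is set. Outside Capistrano the class can be driven directly; a hedged sketch in which the cluster, names, region and target group ARN are placeholders, and `#deploy` as the entry point is assumed from the Capistrano tasks rather than shown in this diff:

service = EcsDeploy::Service.new(
  cluster: "my-cluster",
  service_name: "myapp-production",
  load_balancers: [
    {
      target_group_arn: "arn:aws:elasticloadbalancing:ap-northeast-1:123456789012:targetgroup/myapp/0123456789abcdef", # placeholder
      container_name: "nginx",
      container_port: 443,
    },
  ],
  desired_count: 2,
  region: "ap-northeast-1",
)
service.deploy # assumed entry point; the Capistrano tasks build the same options hash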
@@ -70,7 +61,6 @@ module EcsDeploy
     return if @response.nil?

     service = @response.service
-    deployment = nil

     @client.wait_until(:services_stable, cluster: @cluster, services: [service.service_name]) do |w|
       w.delay = 10
data/lib/ecs_deploy/task_definition.rb
CHANGED
@@ -11,9 +11,11 @@ module EcsDeploy

   def initialize(
     task_definition_name:, region: nil,
-    volumes: [], container_definitions: []
+    volumes: [], container_definitions: [],
+    task_role_arn: nil
   )
     @task_definition_name = task_definition_name
+    @task_role_arn = task_role_arn
     @region = region || EcsDeploy.config.default_region || ENV["AWS_DEFAULT_REGION"]

     @container_definitions = container_definitions.map do |cd|
@@ -45,6 +47,7 @@ module EcsDeploy
       family: @task_definition_name,
       container_definitions: @container_definitions,
       volumes: @volumes,
+      task_role_arn: @task_role_arn,
     })
     EcsDeploy.logger.info "register task definition [#{@task_definition_name}] [#{@region}] [#{Paint['OK', :green]}]"
   end
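The same option is available when registering a task definition directly; a short sketch mirroring the keyword list above, with placeholder names and role ARN:

task_definition = EcsDeploy::TaskDefinition.new(
  task_definition_name: "myapp-production",
  container_definitions: [
    {name: "nginx", image: "nginx:latest", cpu: 128, memory: 128, essential: true}, # placeholder
  ],
  volumes: [],
  task_role_arn: "arn:aws:iam::123456789012:role/myapp-task-role", # new in 0.2.0
  region: "ap-northeast-1",
)
task_definition.register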
data/lib/ecs_deploy/version.rb
CHANGED
data/lib/ecs_deploy.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ecs_deploy
 version: !ruby/object:Gem::Version
-  version: 0.1.2
+  version: 0.2.0
 platform: ruby
 authors:
 - joker1007
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2016-
+date: 2016-10-30 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aws-sdk
@@ -16,14 +16,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '2.
+        version: '2.4'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '2.
+        version: '2.4'
 - !ruby/object:Gem::Dependency
   name: terminal-table
   requirement: !ruby/object:Gem::Requirement