sumomo 0.1.2 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/data/sumomo/custom_resource_utils.js +1 -1
- data/lib/sumomo.rb +9 -3
- data/lib/sumomo/ec2.rb +123 -4
- data/lib/sumomo/ecs.rb +113 -0
- data/lib/sumomo/version.rb +1 -1
- metadata +4 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 16ab3fda49b8241a0a627ac9e65a4fb293c32d59
+  data.tar.gz: 2f4dac5aa63120ffc1f9bea9fac71a0c0d3830e0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 230fa5661da19213203825fb697c17836a56a0c71f67b6b6bf45c2a66ff5b45b3b64eb7f125fd78e736c8c722cc73b35b974f40275539c8ef1e58d6297a1f3ab
+  data.tar.gz: 3ff20b4d96d683b2706ee9377f26a07a33dd1ce7601e56ac9c4c77ef5e9d40336634385983d4a0d2f38ba4332037801ea0f45fe16c8285a7c213c5e153f1db61
data/data/sumomo/custom_resource_utils.js
CHANGED
@@ -9,7 +9,7 @@ Cloudformation.send = function(request, context, responseStatus, responseData, r
     var responseBody = JSON.stringify({
         Status: responseStatus,
         Reason: reason + " Log Stream: " + context.logStreamName,
-        PhysicalResourceId: physicalResourceId
+        PhysicalResourceId: physicalResourceId === undefined ? context.logStreamName : physicalResourceId,
         StackId: request.StackId,
         RequestId: request.RequestId,
         LogicalResourceId: request.LogicalResourceId,
data/lib/sumomo.rb
CHANGED
@@ -6,6 +6,7 @@ require 'yaml'

 require "sumomo/version"
 require 'sumomo/ec2'
+require 'sumomo/ecs'
 require 'sumomo/stack'
 require 'sumomo/network'
 require 'sumomo/momo_extensions/resource'
@@ -105,9 +106,14 @@ module Sumomo
     begin
       cf.update_stack(update_options)
     rescue => e
-
-
-
+      if e.message.end_with? "does not exist"
+        update_options[:timeout_in_minutes] = 30
+        update_options[:notification_arns] = sns_arn if sns_arn
+        cf.create_stack(update_options)
+      else
+        p e
+        puts "Error: #{e.message}"
+      end
     end
   end

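The new rescue branch makes deployment idempotent: sumomo attempts update_stack first and, when CloudFormation reports that the stack does not exist, reuses the same options for create_stack. Below is a minimal standalone sketch of that pattern, assuming the aws-sdk CloudFormation client; the stack name, template, and region are illustrative, not taken from the gem.

require 'aws-sdk'

# Hypothetical values; sumomo builds these from its own stack DSL.
cf = Aws::CloudFormation::Client.new(region: "ap-northeast-1")
update_options = {
  stack_name: "my-stack",
  template_body: File.read("template.json")
}
sns_arn = nil # optional notification topic ARN

begin
  cf.update_stack(update_options)
rescue => e
  if e.message.end_with? "does not exist"
    # First deployment: fall back to create_stack with the same options.
    update_options[:timeout_in_minutes] = 30
    update_options[:notification_arns] = [sns_arn] if sns_arn # the SDK expects an array here
    cf.create_stack(update_options)
  else
    puts "Error: #{e.message}"
  end
end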
data/lib/sumomo/ec2.rb
CHANGED
@@ -126,6 +126,7 @@ module Sumomo
       def initialize(bucket_name, &block)
         @script = ""
         @bucket_name = bucket_name
+        @tags = []
         instance_eval(&block) if block
       end

@@ -144,6 +145,103 @@ aws s3 cp s3://#{@bucket_name}/uploads/#{name} #{local_path}
       def script
         @script
       end
+
+      def tags
+        @tags
+      end
+
+      def tag(name, value)
+        @tags << [name, value]
+      end
+    end
+
+    def make_spotter(
+      price:,
+      network:,
+      layer:,
+      ec2_sns_arn:nil,
+      ecs_cluster:nil,
+      eip:nil,
+      &block)
+      update_time = Time.now.to_i
+
+      spot = make "Custom::SelectSpot" do
+        DateTime update_time
+        ExcludeString "1.,2.,small,micro"
+        LookBack 3
+        TargetPrice price
+      end
+
+      switcher1_src = define_custom_resource(name: "ASGSelector1", code: <<-CODE
+
+        store.get("num1", function(num) {
+          num = parseInt(num);
+          if (request.RequestType != "Delete")
+          {
+            store.put("num1", String(num+1));
+          }
+          else
+          {
+            store.put("num1", String(0));
+          }
+
+          Cloudformation.send(request, context, Cloudformation.SUCCESS, {Num: String(num)}, "Success", String(num % 2));
+        }, function() {
+          store.put("num1", String(1));
+          Cloudformation.send(request, context, Cloudformation.SUCCESS, {Num: 1}, "Success", String(1));
+        });
+      CODE
+      )
+
+      switcher2_src = define_custom_resource(name: "ASGSelector2", code: <<-CODE
+        store.get("num2", function(num) {
+          num = parseInt(num);
+          if (request.RequestType != "Delete")
+          {
+            store.put("num2", String(num+1));
+          }
+          else
+          {
+            store.put("num1", String(0));
+          }
+
+          Cloudformation.send(request, context, Cloudformation.SUCCESS, {Num: String(num)}, "Success", String((num + 1) % 2));
+        }, function() {
+          store.put("num2", String(1));
+          Cloudformation.send(request, context, Cloudformation.SUCCESS, {Num: 1}, "Success", String(0));
+        });
+      CODE
+      )
+
+      size_1 = make_custom switcher1_src, name: "ASGSelector1Value" do
+        DateTime update_time
+      end
+
+      size_2 = make_custom switcher2_src, name: "ASGSelector2Value" do
+        DateTime update_time
+      end
+
+      make_autoscaling_group(
+        type: spot,
+        network: network,
+        layer: "ecs",
+        zone: spot.Zone,
+        spot_price: price,
+        min_size: size_1,
+        ec2_sns_arn: ec2_sns_arn,
+        ecs_cluster: ecs_cluster,
+        eip: eip, &block)
+
+      make_autoscaling_group(
+        type: spot,
+        network: network,
+        layer: "ecs",
+        zone: spot.Zone,
+        spot_price: price,
+        min_size: size_2,
+        ec2_sns_arn: ec2_sns_arn,
+        ecs_cluster: ecs_cluster,
+        eip: eip)
     end

     def make_autoscaling_group(
@@ -163,8 +261,8 @@ aws s3 cp s3://#{@bucket_name}/uploads/#{name} #{local_path}
       egress:nil,
       machine_tag:nil,
       ec2_sns_arn:nil,
-      ami_name
-      ebs_root_device
+      ami_name:nil,
+      ebs_root_device:nil,
       spot_price:nil,
       script: nil,
       ecs_cluster: nil,
@@ -174,7 +272,23 @@ aws s3 cp s3://#{@bucket_name}/uploads/#{name} #{local_path}
      eip:nil,
      &block)

-
+      if ami_name == nil
+
+        @ami_lookup_resources ||= {}
+
+        if !@ami_lookup_resources[type]
+          @ami_lookup_resources[type] = make "Custom::AMILookup" do
+            InstanceType type
+          end
+        end
+
+        ami_name = @ami_lookup_resources[type]
+        ebs_root_device = @ami_lookup_resources[type].RootDeviceName if ebs_root_device == nil
+      end
+
+      tasks = EC2Tasks.new(@bucket_name, &block)
+
+      task_script = tasks.script

       ingress ||= [ allow(:all) ]
       egress ||= [ allow(:all) ]
@@ -184,7 +298,7 @@ aws s3 cp s3://#{@bucket_name}/uploads/#{name} #{local_path}

       bucket_name = @bucket_name

-      script += "\n#{
+      script += "\n#{task_script}\n"

       if ecs_cluster
         script += <<-ECS_START
@@ -405,6 +519,11 @@ ECS_ENGINE_AUTH_DATA={"https://index.docker.io/v1/":{"username":"{{docker_userna
        end

        tag "Name", machine_tag, propagate_at_launch: true
+
+       tasks.tags.each do |t|
+         tag t[0], t[1], propagate_at_launch: true
+       end
+
      end

      asg
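Taken together, make_spotter provisions a Custom::SelectSpot lookup and two autoscaling groups whose min_size values (ASGSelector1Value and ASGSelector2Value) alternate between 0 and 1 on each stack update, so a replacement spot group comes up while the previous one is still running. A hedged usage sketch based only on the keyword arguments visible in this diff; the surrounding stack definition, the network and cluster objects, and the values are illustrative.

# Inside a sumomo stack definition (assumed context).
make_spotter(
  price: 0.10,              # TargetPrice for the Custom::SelectSpot lookup
  network: network,         # a network object assumed to exist in the stack
  layer: "web",
  ec2_sns_arn: nil,         # optional SNS topic ARN for instance notifications
  ecs_cluster: cluster,     # optional: register the instances into an ECS cluster
  eip: nil) do
  # The block is instance_eval'd by EC2Tasks; tag() is new in 0.2.0 and the
  # tags it records are propagated to the instances launched by both groups.
  tag "Role", "spot-worker"
end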
data/lib/sumomo/ecs.rb
ADDED
@@ -0,0 +1,113 @@
+
+module Sumomo
+  module Stack
+
+    def sluggify(str)
+      str.gsub(/[^0-9a-zA-Z]/, "_")
+    end
+
+    def make_ecs_cluster(name:"ECSCluster",services:[],machine_config:{},log_retention:30)
+
+      ecs = make "AWS::ECS::Cluster", name: "#{name}"
+
+      volumes = []
+      machine_volume_locations = {}
+
+      service_count = 0
+
+      services.each do |service|
+
+        service_count += 1
+
+        containers = service[:containers]
+        service_name = service[:name] || "Service#{service_count}"
+        service_count = service[:count] || 1
+
+        container_defs = containers.map do |container|
+          definition = {}
+
+          definition["Name"] = "#{sluggify(container[:image]).camelize}"
+          definition["Name"] = container[:name] if container[:name]
+
+          definition["Memory"] = 1024
+
+          loggroup = make "AWS::Logs::LogGroup", name: "#{name}#{definition["Name"]}Logs" do
+            LogGroupName "#{definition["Name"].underscore}_logs"
+            RetentionInDays log_retention
+          end
+
+          definition["LogConfiguration"] = {
+            "LogDriver" => "awslogs",
+            "Options" => {
+              "awslogs-group" => loggroup,
+              "awslogs-region" => ref("AWS::Region")
+            }
+          }
+
+          definition["MountPoints"] = container[:files].map do |file, destination|
+
+            s3_location = "container_files/#{sluggify(service_name)}/#{definition["Name"]}/#{file}"
+            volume_name = sluggify("#{definition["Name"].underscore}_#{destination}").camelize
+
+            upload_file s3_location, File.read(file)
+
+            machine_volume_locations[s3_location] = "/opt/s3/#{s3_location}"
+
+            volumes << {
+              "Name" => volume_name,
+              "Host" => { "SourcePath" => machine_volume_locations[s3_location] }
+            }
+
+            {
+              "ContainerPath" => destination,
+              "SourceVolume" => volume_name
+            }
+          end
+
+          container.each do |key, value|
+            if key != :files
+              definition["#{key}".camelize] = value
+            end
+          end
+
+          definition
+        end
+
+
+        deployment_config = {
+          "MaximumPercent" => 200,
+          "MinimumHealthyPercent" => 50
+        }
+
+        ecs_task = make "AWS::ECS::TaskDefinition", name: "#{name}#{service_name}Task" do
+          ContainerDefinitions container_defs
+          Volumes volumes
+        end
+
+        ecs_service = make "AWS::ECS::Service", name: "#{name}#{service_name}" do
+          Cluster ecs
+          DesiredCount service_count
+          TaskDefinition ecs_task
+          DeploymentConfiguration deployment_config
+        end
+      end
+
+      machine_config[:methods].each do |method_name|
+        parameters = {ecs_cluster: ecs}
+
+        method(method_name).parameters.each do |param|
+          parameters[param[1]] = machine_config[param[1]] if (param[0] == :keyreq or param[0] == :key) and machine_config[param[1]]
+        end
+
+        method(method_name).call(parameters) do
+          machine_volume_locations.each do |s3_loc, machine_loc|
+            mkdir File.dirname(machine_loc)
+            download_file s3_loc, machine_loc
+          end
+        end
+      end
+
+      ecs
+    end
+  end
+end
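The new make_ecs_cluster helper takes a list of service descriptions, builds a log group, task definition, and ECS service for each, uploads any listed container files to S3 so the machines can mount them back as host volumes under /opt/s3, and then calls the machine-building methods named in machine_config with ecs_cluster set to the new cluster. A hedged usage sketch assembled from the hashes this file reads; the image names, file paths, and machine settings are illustrative.

# Inside a sumomo stack definition (assumed context).
make_ecs_cluster(
  name: "AppCluster",
  log_retention: 14,
  services: [
    {
      name: "Web",
      count: 2,                   # becomes the service's DesiredCount
      containers: [
        {
          image: "nginx",         # keys other than :files are camelized into the container definition
          memory: 512,            # overrides the 1024 MB default set above
          files: { "conf/nginx.conf" => "/etc/nginx/nginx.conf" }
          # each file is uploaded to S3 and mounted into the container via a host volume
        }
      ]
    }
  ],
  machine_config: {
    methods: [:make_spotter],     # each method is called with ecs_cluster: <the new cluster>
    price: 0.10,                  # plus any of its keyword arguments found in this hash
    network: network              # assumed to exist in the stack definition
  })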
data/lib/sumomo/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: sumomo
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.2.0
 platform: ruby
 authors:
 - David Siaw
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2016-
+date: 2016-09-03 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -161,6 +161,7 @@ files:
 - data/sumomo/sources/spot-watcher.sh
 - lib/sumomo.rb
 - lib/sumomo/ec2.rb
+- lib/sumomo/ecs.rb
 - lib/sumomo/momo_extensions/resource.rb
 - lib/sumomo/momo_extensions/stack.rb
 - lib/sumomo/network.rb
@@ -187,7 +188,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.4.
+rubygems_version: 2.4.6
 signing_key:
 specification_version: 4
 summary: An advanced infrastructure description language for AWS