ecsutil 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,7 @@
+ ---
+ SHA256:
+   metadata.gz: aeb2f9770b7183831199b073f64629a5c7d804bc3cbe3b17c95dda700d57d93c
+   data.tar.gz: 83f7201fe909f6379c3ca0b5851925f269bd992a320906154f27e5db758c61f2
+ SHA512:
+   metadata.gz: aeda8bbb5d7a008583aa6edd4a814081312acbd0929f62d7f0358065b853a05feca66fda7deef85eae03d4e43ff8e8aea9909d2417363933e3a5f77f9d5323bd
+   data.tar.gz: '0093c78233a8467aa795e9ef72cfcc866fdaff876de97caeaa39d2c09cb1de00ac0c5ad1090a185e8890d25d805ed2336d64c7ef856a7bcc5e3f26ebcaf3f17e'
@@ -0,0 +1,18 @@
+ *.gem
+ *.rbc
+ .bundle
+ .config
+ .yardoc
+ Gemfile.lock
+ InstalledFiles
+ _yardoc
+ coverage
+ doc/
+ lib/bundler/man
+ pkg
+ rdoc
+ spec/reports
+ test/tmp
+ test/version_tmp
+ tmp
+ .DS_Store
@@ -0,0 +1,118 @@
+ # ecsutil
+
+ A tool to simplify deployments to ECS/Fargate.
+
+ ## Overview
+
+ - You bring your own infrastructure resources using Terraform (optional)
+ - `ecsutil` manages ECS task definitions, scheduled tasks, services, and secrets
+ - Deployment config is YAML-based, with the ability to reference Terraform outputs
+ - Cloud secrets are stored in AWS Parameter Store, encrypted by KMS
+ - Local secrets are encrypted via Ansible Vault (optional)
+
+ ## Requirements
+
+ - AWS CLI
+ - Terraform (optional)
+
+ ## Usage
+
+ ```
+ Usage: ecsutil <stage> <command>
+
+ Available commands:
+ * deploy - Perform a deployment
+ * run - Run a task
+ * scale - Change service quantities
+ * status - Show current status
+ * secrets - Manage secrets
+ * destroy - Delete all cloud resources
+ ```
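+
+ For example, assuming a stage named `staging` is configured, running `ecsutil staging deploy` performs a deployment against that stage.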
+
+ ## Config
+
+ Example deployment configuration:
+
+ ```yaml
+ app: myapp
+ env: staging
+
+ cluster: staging
+ repository: your-ecr-repo-url
+ subnets:
+   - a
+   - b
+   - c
+
+ roles:
+   task: role ARN
+   execution: role ARN
+   schedule: role ARN
+
+ tasks:
+   web:
+     command: bundle exec ruby app.rb
+     env:
+       PORT: 4567
+     security_groups:
+       - sg1
+       - sg2
+     ports:
+       - 4567
+     awslogs:
+       region: us-east-1
+       group: myapp-staging
+       prefix: web
+
+ scheduled_tasks:
+   hourly:
+     task: web
+     command: bundle exec rake worker
+     expression: rate(1 hour)
+
+ services:
+   web:
+     task: web
+     desired_count: 3
+     max_percent: 200
+     min_healthy_percent: 100
+     lb:
+       target_group: load balancer target group ARN
+       container_name: web
+       container_port: 4567
+ ```
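+
+ Entries under `tasks` are registered as Fargate task definitions, entries under `scheduled_tasks` become CloudWatch Events rules that run the referenced task on the given schedule `expression`, and entries under `services` are created or updated as ECS services with the specified counts and deployment settings.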
+
+ ### Reference Terraform outputs
+
+ Given a `./terraform/<stage>` directory (e.g. `./terraform/staging` or `./terraform/production`)
+ that contains all stage-specific configuration and resources, you can add an outputs file
+ `outputs.tf` whose values can be referenced in the deployment config. Here's an example:
+
+ ```tf
+ // Output for subnets
+ // You can use regular terraform resources here
+ output "subnets" {
+   value = [
+     "subnet-a",
+     "subnet-b",
+     "subnet-c"
+   ]
+ }
+
+ // Output for "web" security group
+ output "sg_web" {
+   value = aws_security_group.web.id
+ }
+ ```
+
+ Once `terraform apply` has been executed, your state file (or remote state) will include
+ the `subnets` and `sg_web` outputs. Reference them in the deployment config:
+
+ ```yaml
+ # ...
+ subnets: $tf.subnets
+ # ...
+ tasks:
+   web:
+     security_groups: $tf.sg_web
+ ```
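+
+ How `$tf.` references get resolved is internal to `ecsutil`. Purely as an illustration (and not the gem's actual implementation), a lookup along these lines could read the value from `terraform output -json`:
+
+ ```ruby
+ # Illustrative sketch only: resolve "$tf.<name>" by shelling out to Terraform
+ # in the stage directory and reading the named output's value.
+ require "json"
+
+ def resolve_tf_reference(ref, stage_dir)
+   name = ref.sub("$tf.", "")
+   outputs = JSON.parse(`terraform -chdir=#{stage_dir} output -json`)
+   outputs.fetch(name).fetch("value")
+ end
+
+ resolve_tf_reference("$tf.sg_web", "./terraform/staging") # => security group id
+ ```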
@@ -0,0 +1,9 @@
+ require "bundler/gem_tasks"
+ require "rspec/core/rake_task"
+
+ RSpec::Core::RakeTask.new(:test) do |t|
+   t.pattern = "spec/**/*_spec.rb"
+   t.verbose = false
+ end
+
+ task default: :test
@@ -0,0 +1,9 @@
+ #!/usr/bin/env ruby
+
+ lib = File.expand_path(File.dirname(__FILE__) + '/../lib')
+ $LOAD_PATH.unshift(lib) if File.directory?(lib) && !$LOAD_PATH.include?(lib)
+
+ require "rubygems"
+ require "ecsutil"
+
+ ECSUtil::Runner.new(Dir.pwd, ARGV).run
@@ -0,0 +1,22 @@
+ require File.expand_path("../lib/ecsutil/version", __FILE__)
+
+ Gem::Specification.new do |s|
+   s.name = "ecsutil"
+   s.version = ECSUtil::VERSION
+   s.summary = "TBD"
+   s.description = "TBD"
+   s.homepage = ""
+   s.authors = ["Dan Sosedoff"]
+   s.email = ["dan.sosedoff@gmail.com"]
+   s.license = "MIT"
+
+   s.add_development_dependency "rake", "~> 10"
+   s.add_dependency "json", "~> 2"
+   s.add_dependency "ansible-vault", "~> 0.2"
+   s.add_dependency "hashie", "~> 4"
+
+   s.files = `git ls-files`.split("\n")
+   s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
+   s.executables = `git ls-files -- bin/*`.split("\n").map { |f| File.basename(f) }
+   s.require_paths = ["lib"]
+ end
@@ -0,0 +1,6 @@
+ require "ecsutil/version"
+ require "ecsutil/command"
+ require "ecsutil/runner"
+
+ module ECSUtil
+ end
@@ -0,0 +1,258 @@
+ module ECSUtil
+   module AWS
+     def aws_call(service, method, data)
+       input = data.is_a?(String) ? data : "--cli-input-json file://#{json_file(data)}"
+
+       result = `aws #{service} #{method} #{input}`.strip
+       unless $?.success?
+         fail "#{service} #{method} failed!"
+       end
+       JSON.load(result)
+     end
+
+     def generate_event_rule(config)
+       {
+         Name: config[:name],
+         ScheduleExpression: config[:expression],
+         State: config[:enabled] ? "ENABLED" : "DISABLED",
+         Tags: array_hash(config[:tags] || {}, "Key", "Value")
+       }
+     end
+
+     def generate_event_target(config, task_name, schedule_name)
+       task = config["tasks"][task_name]
+       schedule = config["scheduled_tasks"][schedule_name]
+       input = {}
+
+       if schedule["command"]
+         input = {
+           "containerOverrides": [
+             {
+               "name": task_name,
+               "command": schedule["command"].split(" ")
+             }
+           ]
+         }
+       end
+
+       {
+         Rule: schedule["rule_name"],
+         Targets: [
+           {
+             Id: "default",
+             Arn: config["cluster"],
+             RoleArn: config["roles"]["schedule"],
+             Input: JSON.dump(input),
+             EcsParameters: {
+               TaskDefinitionArn: task["arn"],
+               TaskCount: 1,
+               LaunchType: "FARGATE",
+               PlatformVersion: "LATEST",
+               NetworkConfiguration: {
+                 awsvpcConfiguration: {
+                   Subnets: [config["subnets"]].flatten,
+                   SecurityGroups: [task["security_groups"]].flatten,
+                   AssignPublicIp: "ENABLED"
+                 }
+               }
+             }
+           }
+         ]
+       }
+     end
+
+     def generate_task_definition(config, task_name)
+       task = config["tasks"][task_name]
+       service_name = config["app"]
+       service_env = config["env"]
+       env = array_hash(task["env"] || {}, :name)
+
+       secrets = (config["secrets_data"] || []).map do |item|
+         {
+           name: item[:key],
+           valueFrom: item[:name]
+         }
+       end
+
+       log_config = nil
+
+       if awslogs = task["awslogs"]
+         log_config = {
+           logDriver: "awslogs",
+           options: {
+             "awslogs-group": awslogs["group"],
+             "awslogs-region": awslogs["region"],
+             "awslogs-stream-prefix": awslogs["prefix"] || task_name
+           }
+         }
+       end
+
+       if sumo = task["sumologs"]
+         log_config = {
+           logDriver: "splunk",
+           options: {
+             "splunk-url": sumo["url"],
+             "splunk-token": sumo["token"],
+             "splunk-source": sumo["source"] || "",
+             "splunk-sourcetype": sumo["sourcetype"] || ""
+           }
+         }
+       end
+
+       port_mappings = nil
+       ports = [task["ports"]].flatten.compact.uniq
+       if ports.any?
+         port_mappings = ports.map do |p|
+           {
+             containerPort: p,
+             hostPort: p,
+             protocol: "tcp"
+           }
+         end
+       end
+
+       {
+         family: "#{service_name}-#{service_env}-#{task_name}",
+         taskRoleArn: config["roles"]["task"],
+         executionRoleArn: config["roles"]["execution"],
+         networkMode: "awsvpc",
+         requiresCompatibilities: ["FARGATE"],
+         cpu: (task["cpu"] || "256").to_s,
+         memory: (task["memory"] || "512").to_s,
+         containerDefinitions: [
+           {
+             name: task_name,
+             command: task["command"] ? task["command"].split(" ") : nil,
+             image: "#{config["repository"]}:#{config["git_commit"]}",
+             environment: env,
+             secrets: secrets,
+             logConfiguration: log_config,
+             portMappings: port_mappings
+           }.compact
+         ]
+       }
+     end
+
+     def deregister_task_definition(arn)
+       aws_call("ecs", "deregister-task-definition", "--task-definition #{arn}")
+     end
+
+     def register_task_definition(data)
+       aws_call("ecs", "register-task-definition", data)["taskDefinition"]
+     end
+
+     def put_rule(data)
+       aws_call("events", "put-rule", data)
+     end
+
+     def put_targets(data)
+       aws_call("events", "put-targets", data)
+     end
+
+     def delete_rule(name)
+       aws_call("events", "remove-targets", "--rule=#{name} --ids=default")
+       aws_call("events", "delete-rule", "--name=#{name}")
+     end
+
+     def list_active_task_definitions
+       aws_call("ecs", "list-task-definitions", "--status=ACTIVE --max-items=100")["taskDefinitionArns"]
+     end
+
+     def list_services(cluster)
+       aws_call("ecs", "list-services", "--cluster=#{cluster}")["serviceArns"].map do |s|
+         s.split("/", 3).last
+       end
+     end
+
+     def list_rules
+       aws_call("events", "list-rules", "")["Rules"]
+     end
+
+     def fetch_parameter_store_keys(prefix, process = true)
+       result = aws_call("ssm", "get-parameters-by-path", "--path=#{prefix} --with-decryption")
+       result["Parameters"].map do |p|
+         {
+           name: p["Name"],
+           key: p["Name"].split("/").last,
+           value: p["Value"]
+         }
+       end
+     end
+
+     def generate_service(config, service_name)
+       service = config["services"][service_name]
+       task = config["tasks"][service["task"]]
+       full_service_name = sprintf("%s-%s-%s", config["app"], config["env"], service_name)
+       exists = service["exists"] == true
+
+       data = {
+         cluster: config["cluster"],
+         taskDefinition: task["arn"],
+         desiredCount: service["desired_count"] || 0,
+         deploymentConfiguration: {
+           maximumPercent: service["max_percent"] || 100,
+           minimumHealthyPercent: service["min_healthy_percent"] || 50
+         },
+         networkConfiguration: {
+           awsvpcConfiguration: {
+             subnets: [config["subnets"]].flatten,
+             securityGroups: [task["security_groups"]].flatten,
+             assignPublicIp: "ENABLED"
+           }
+         }
+       }
+
+       if exists
+         data.merge!(
+           service: full_service_name,
+           forceNewDeployment: service["force_deployment"] == true
+         )
+       else
+         data.merge!(
+           serviceName: full_service_name,
+           propagateTags: "SERVICE",
+           enableECSManagedTags: true,
+           schedulingStrategy: "REPLICA",
+           launchType: "FARGATE"
+         )
+
+         if lb = service["lb"]
+           data.merge!(
+             loadBalancers: [
+               {
+                 targetGroupArn: lb["target_group"],
+                 loadBalancerName: lb["name"],
+                 containerName: lb["container_name"],
+                 containerPort: lb["container_port"]
+               }.compact
+             ]
+           )
+         end
+       end
+
+       data
+     end
+
+     def describe_service(config, service_name)
+       full_service_name = sprintf("%s-%s-%s", config["app"], config["env"], service_name)
+       result = aws_call("ecs", "describe-services", "--cluster=#{config["cluster"]} --services=#{full_service_name}")
+       result["services"].first
+     end
+
+     def describe_services(config, names)
+       aws_call("ecs", "describe-services", "--cluster=#{config.cluster} --services=#{names.join(",")}")["services"]
+     end
+
+     def create_service(config, service_name)
+       aws_call("ecs", "create-service", generate_service(config, service_name))
+     end
+
+     def update_service(config, service_name)
+       aws_call("ecs", "update-service", generate_service(config, service_name))
+     end
+
+     def delete_service(config, service_name)
+       aws_call("ecs", "update-service", "--cluster=#{config["cluster"]} --service=#{service_name} --desired-count 0")
+       aws_call("ecs", "delete-service", "--cluster=#{config["cluster"]} --service=#{service_name}")
+     end
+   end
+ end