enscalator 0.4.0.pre.alpha.pre.16
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.gitignore +15 -0
- data/.rubocop.yml +9 -0
- data/.rubocop_todo.yml +59 -0
- data/.travis.yml +22 -0
- data/CODE_OF_CONDUCT.md +13 -0
- data/Gemfile +4 -0
- data/LICENSE.txt +21 -0
- data/README.md +148 -0
- data/Rakefile +43 -0
- data/bin/console +11 -0
- data/bin/setup +7 -0
- data/enscalator.gemspec +57 -0
- data/exe/enscalator +13 -0
- data/lib/enscalator/core/cf_parameters.rb +146 -0
- data/lib/enscalator/core/cf_resources.rb +225 -0
- data/lib/enscalator/core/instance_type.rb +205 -0
- data/lib/enscalator/core/network_config.rb +21 -0
- data/lib/enscalator/core.rb +10 -0
- data/lib/enscalator/enapp.rb +248 -0
- data/lib/enscalator/helpers/dns.rb +62 -0
- data/lib/enscalator/helpers/stack.rb +107 -0
- data/lib/enscalator/helpers/sub_process.rb +72 -0
- data/lib/enscalator/helpers/wrappers.rb +55 -0
- data/lib/enscalator/helpers.rb +127 -0
- data/lib/enscalator/plugins/amazon_linux.rb +93 -0
- data/lib/enscalator/plugins/auto_scale.rb +80 -0
- data/lib/enscalator/plugins/core_os.rb +88 -0
- data/lib/enscalator/plugins/couchbase.rb +98 -0
- data/lib/enscalator/plugins/debian.rb +71 -0
- data/lib/enscalator/plugins/elastic_beanstalk.rb +74 -0
- data/lib/enscalator/plugins/elasticache.rb +168 -0
- data/lib/enscalator/plugins/elasticsearch_amazon.rb +75 -0
- data/lib/enscalator/plugins/elasticsearch_bitnami.rb +198 -0
- data/lib/enscalator/plugins/elasticsearch_opsworks.rb +225 -0
- data/lib/enscalator/plugins/elb.rb +139 -0
- data/lib/enscalator/plugins/nat_gateway.rb +71 -0
- data/lib/enscalator/plugins/rds.rb +141 -0
- data/lib/enscalator/plugins/redis.rb +38 -0
- data/lib/enscalator/plugins/rethink_db.rb +21 -0
- data/lib/enscalator/plugins/route53.rb +143 -0
- data/lib/enscalator/plugins/ubuntu.rb +85 -0
- data/lib/enscalator/plugins/user-data/elasticsearch +367 -0
- data/lib/enscalator/plugins/vpc_peering_connection.rb +48 -0
- data/lib/enscalator/plugins.rb +30 -0
- data/lib/enscalator/rich_template_dsl.rb +209 -0
- data/lib/enscalator/templates/vpc_peering.rb +112 -0
- data/lib/enscalator/templates.rb +20 -0
- data/lib/enscalator/version.rb +5 -0
- data/lib/enscalator/vpc.rb +11 -0
- data/lib/enscalator/vpc_with_nat_gateway.rb +311 -0
- data/lib/enscalator/vpc_with_nat_instance.rb +402 -0
- data/lib/enscalator.rb +103 -0
- metadata +427 -0
@@ -0,0 +1,367 @@
|
|
1
|
+
#cloud-config

# User-data for Bitnami Elasticsearch instances: install tooling, disable the
# stock init scripts, then reconfigure and restart the stack in a controlled order.
packages:
  - ruby

# Keep monit and the bitnami stack from auto-starting; runcmd below manages them.
bootcmd:
  - update-rc.d monit disable
  - update-rc.d bitnami disable

runcmd:
  - gem install aws-sdk semantic backports elasticsearch --no-ri --no-rdoc
  - . /usr/local/bin/configure-boot-env
  - /etc/init.d/monit stop
  - /opt/bitnami/ctlscript.sh stop apache
  - /opt/bitnami/ctlscript.sh stop elasticsearch
  - sleep 5
  - kill $(pgrep -f elasticsearch)
  - /usr/local/bin/configure-cluster
  - /etc/init.d/monit start
  - /opt/bitnami/ctlscript.sh start apache
  - /opt/bitnami/ctlscript.sh start elasticsearch
  - echo "Verifying if hourly cronjob was installed correctly.." && run-parts --test /etc/cron.hourly
  - /usr/local/bin/backup-cluster --restore-snapshot
write_files:
# Shell snippet sourced during boot so subsequent commands can find the
# bitnami-bundled elasticsearch, apache, and java binaries on PATH.
- path: /usr/local/bin/configure-boot-env
  owner: root:root
  permissions: '0755'
  content: |
    #!/usr/bin/env bash
    #
    # Configure boot environment to properly detect bitnami stack

    BITNAMI_DIR=/opt/bitnami
    ELASTICSEARCH_DIR=$BITNAMI_DIR/elasticsearch
    APACHE2_DIR=$BITNAMI_DIR/apache2
    BITNAMI_COMMON_DIR=$BITNAMI_DIR/common
    JAVA_DIR=$BITNAMI_DIR/java
    SYSTEM_PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

    export PATH=$ELASTICSEARCH_DIR/bin:$APACHE2_DIR/bin:$BITNAMI_COMMON_DIR/bin:$JAVA_DIR/bin:$SYSTEM_PATH
    export JAVA_HOME=$JAVA_DIR
# Ruby script that rewrites elasticsearch.yml from EC2 instance tags and
# installs the cloud-aws / analysis-kuromoji plugins.
- path: /usr/local/bin/configure-cluster
  owner: root:root
  permissions: '0755'
  content: |
    #!/usr/bin/env ruby
    #
    # Elasticsearch configuration
    #
    # PURPOSE: update elasticsearch configuration using values from instance tags
    #
    # WARNING: since this script also modifies ip address settings, make sure
    # to stop elasticsearch BEFORE using this script

    require 'yaml'
    require 'open-uri'
    require 'json'
    require 'ostruct'
    require 'fileutils'
    require 'aws-sdk'
    require 'semantic'
    require 'backports'

    BITNAMI_INSTALL_PATH='/opt/bitnami'
    ELASTICSEARCH_INSTALL_PATH=File.join(BITNAMI_INSTALL_PATH, 'elasticsearch')
    ELASTICSEARCH_CONFIG_FILE=File.join(ELASTICSEARCH_INSTALL_PATH, 'config', 'elasticsearch.yml')

    # Wraps installation of a single elasticsearch plugin, resolving the plugin
    # version from a {es_version => plugin_version} mapping keyed by major.minor.
    class Plugin
      attr_reader :install_prefix, :install_link, :mapping, :es_version

      # @param [String] install_prefix elasticsearch installation directory
      # @param [String] link plugin name/link passed to `bin/plugin install`
      # @param [Hash] mapping es version => plugin version lookup table
      # @param [String] es_version elasticsearch version string
      def initialize(install_prefix, link, mapping, es_version)
        # Plain assignment: the original used `||=`, which is a no-op on
        # freshly-initialized (always nil) instance variables.
        @install_prefix = install_prefix
        @install_link = link
        @mapping = mapping.map { |k, v| [Semantic::Version.new(k.to_s), v] }.to_h
        @es_version = Semantic::Version.new(es_version)
      end

      # Plugin version whose key matches the cluster's major.minor es version
      # (nil when the mapping is empty or has no match).
      def find_plugin_version
        @mapping[@mapping.keys.select { |v| v.major == @es_version.major && v.minor == @es_version.minor }.first]
      end

      # Run `bin/plugin install <link>/<version>` and return its captured output.
      def install
        path = [@install_link, find_plugin_version].join('/')
        cmd = [File.join(@install_prefix, 'bin/plugin'), 'install', path].join(' ')
        IO.popen(cmd) { |io| io.read }
      end
    end

    # Read the instance identity document from the EC2 metadata service.
    document = 'http://169.254.169.254/latest/dynamic/instance-identity/document/'
    metadata = OpenStruct.new(JSON.parse(open(document) { |d| d.read }))
    ec2 = Aws::EC2::Client.new(region: metadata.region)

    # Collect tags belonging to this instance (paginated responses).
    tags = nil
    ec2.describe_tags.each do |response|
      tags = response[:tags].select { |t| t.resource_id == metadata.instanceId && t.resource_type == 'instance' }
    end

    cluster_name = tags.select { |t| t.key == 'ClusterName' }.first.value
    cluster_version = tags.select { |t| t.key == 'Version' }.first.value

    # generate elasticsearch configuration using instance metadata
    config = {}
    config['cluster.name'] = cluster_name
    config['network.host'] = metadata.privateIp
    config['http.port'] = 9200
    config['transport.tcp.port'] = 9300
    config['node.max_local_storage_nodes'] = 1
    config['plugin.mandatory'] = 'cloud-aws'
    config['discovery.type'] = 'ec2'
    config['discovery.zen.ping.multicast.enabled'] = false
    config['discovery.ec2.tag.ClusterName'] = cluster_name
    config['discovery.ec2.ping_timeout'] = '20s'

    # if configuration file already present, create its backup and remove original file
    # (File.exist? replaces the deprecated File.exists? alias)
    if File.exist?(ELASTICSEARCH_CONFIG_FILE)
      FileUtils.move(ELASTICSEARCH_CONFIG_FILE, [ELASTICSEARCH_CONFIG_FILE, 'bak'].join('.'))
    end

    # write generated configuration file
    File.open(ELASTICSEARCH_CONFIG_FILE, 'w+') { |file| file.write(config.to_yaml) }

    # install necessary plugins
    aws = Plugin.new(ELASTICSEARCH_INSTALL_PATH, 'cloud-aws', {}, cluster_version)
    kuromoji = Plugin.new(ELASTICSEARCH_INSTALL_PATH, 'analysis-kuromoji', {}, cluster_version)

    # execute plugin install command
    [aws, kuromoji].map(&:install).each { |r| puts r }
# Ruby CLI that snapshots / restores the elasticsearch cluster to an S3
# repository. Fixes vs. original: take_snapshot discarded defaults.merge(opts);
# restore_from_last_snapshot took no args but was invoked with options;
# restore_snapshot's argument guard was logically inverted.
- path: /usr/local/bin/backup-cluster
  owner: root:root
  permissions: '0755'
  content: |
    #!/usr/bin/env ruby

    require 'elasticsearch'
    require 'optparse'
    require 'ostruct'
    require 'open-uri'
    require 'json'
    require 'backports'

    module Storage
      # Manage snapshot repository, snapshot creation and restore for a cluster.
      class Backup
        attr_reader :instance, :repository, :config

        # @param [String] host elasticsearch host
        # @param [Hash] opts :port, :repository name, :config repository settings
        def initialize(host, opts = {})
          defaults = {
            port: 9200,
            repository: 'backup',
            config: nil
          }
          opts = defaults.merge(opts)

          @instance = ESInstance.new(host, opts[:port])
          @instance.wait_for_cluster_to_start
          # Fall back to the S3 repository settings unless explicit config given.
          @config = opts[:config].nil? || opts[:config].empty? ? s3_config : opts[:config]
          @repository = opts[:repository]
        end

        # Local filesystem repository settings (unused default alternative).
        def fs_config
          {
            type: 'fs',
            settings: {
              location: '/tmp/backup',
              compress: true
            }
          }
        end

        # Instance identity document from the EC2 metadata service.
        def instance_metadata
          document = 'http://169.254.169.254/latest/dynamic/instance-identity/document/'
          OpenStruct.new(JSON.parse(open(document) { |d| d.read }))
        end

        # TODO: adjust configuration to be more flexible
        def s3_config
          metadata = instance_metadata
          path_in_bucket = get_cluster_name # for now use cluster name
          {
            type: 's3',
            settings: {
              bucket: "elasticsearch-bitnami-#{metadata.region}-#{metadata.accountId}",
              region: metadata.region,
              base_path: path_in_bucket
            }
          }
        end

        # Create/Init snapshot repository
        def create_repository
          @instance.client
                   .snapshot
                   .create_repository({repository: @repository,
                                       body: @config})
        end

        # Get list of registered snapshots
        def get_all_snapshots
          raw_snapshots = @instance.client
                                   .snapshot
                                   .get(repository: @repository,
                                        snapshot: '_all')
          raw_snapshots['snapshots'].map { |rs| OpenStruct.new(rs) }
        end

        # Determine last snapshot in the list and invoke restore call with it
        # TODO: algorithm to determine which snapshot to use, should take into account failures
        # TODO: provide ability to override/use specific snapshot when instance gets created
        #
        # @param [Hash] opts forwarded to restore_snapshot (the original took no
        #   arguments but was invoked with master_timeout, raising ArgumentError)
        def restore_from_last_snapshot(opts = {})
          snapshots = get_all_snapshots.select { |s| s.state.eql? 'SUCCESS' }
          last = snapshots.sort_by { |x| [x.start_time_in_millis, x.end_time_in_millis] }.last
          if last
            restore_snapshot(last, opts)
          else
            puts 'Failed to find a list of valid snapshots for the cluster'
          end
        end

        def get_cluster_name
          @instance.client.cluster.state['cluster_name']
        end

        # Create new snapshot
        # snapshot pattern: %{cluster_name}_%{increment}
        def take_snapshot(opts = {})
          defaults = {
            create_opts: nil,
            master_timeout: nil,
            wait_for_completion: false
          }
          # BUGFIX: merge result was previously discarded, so defaults never applied.
          opts = defaults.merge(opts)

          cluster_name = get_cluster_name
          registered_snapshots = get_all_snapshots.map(&:snapshot)
          default_index = 1
          # Next index = (highest numeric suffix among existing snapshots) + 1.
          index = if !registered_snapshots.empty?
                    last_index = registered_snapshots
                                 .map { |s| s.split('_').last }
                                 .select { |s| s =~ /^[0-9][0-9]*/ }.sort.last
                    last_index.to_i + 1
                  else
                    default_index
                  end
          options = {}
          options[:body] = opts[:create_opts] if opts[:create_opts].instance_of?(Hash) && !opts[:create_opts].empty?
          options[:repository] = @repository
          options[:snapshot] = [cluster_name, index].join('_')
          options[:master_timeout] = opts[:master_timeout] if opts[:master_timeout]
          options[:wait_for_completion] = opts[:wait_for_completion]
          @instance.client.snapshot.create(options)
        end

        # Restore given snapshot
        #
        # @param [OpenStruct] snapshot entry as returned by get_all_snapshots
        # @raise [ArgumentError] unless snapshot is an OpenStruct with a non-empty name
        def restore_snapshot(snapshot, opts = {})
          # BUGFIX: original condition (`unless a || b`) accepted invalid input
          # and rejected valid snapshots; require an OpenStruct with a name.
          raise ArgumentError, 'instance with snapshot fields is required' unless snapshot.instance_of?(OpenStruct) && !snapshot.snapshot.to_s.empty?
          defaults = {
            restore_opts: nil,
            master_timeout: nil,
            wait_for_completion: false
          }

          opts = defaults.merge(opts)

          options = {}
          options[:repository] = @repository
          options[:snapshot] = snapshot.snapshot
          options[:body] = opts[:restore_opts] if opts[:restore_opts].instance_of?(Hash) && !opts[:restore_opts].empty?
          options[:master_timeout] = opts[:master_timeout] if opts[:master_timeout]
          options[:wait_for_completion] = opts[:wait_for_completion]
          @instance.client.snapshot.restore(options)
        end

      end

      # Create elasticsearch instance reference and setup elasticsearch client
      class ESInstance
        attr_reader :host, :port, :client

        def initialize(host, port)
          @host = host
          @port = port
          @client = es_client(host, port)
        end

        def es_client(host, port, opts = {})
          defaults = {
            debug: true
          }
          opts = defaults.merge(opts)
          Elasticsearch::Client.new({host: "#{host}:#{port}", log: opts[:debug]})
        end

        # Wait for cluster to become alive and ready to accept writes
        # NOTE(review): the rescue/retry on connection errors is unbounded;
        # `retries` only limits polling while the cluster status is red.
        def wait_for_cluster_to_start(retries = 10)

          # check until cluster start to receive web requests
          health =
            begin
              @client.cluster.health
            rescue
              sleep 5
              retry
            end

          if health['status'] == 'red'
            sleep 5
            retries == 0 && puts('Reached limit of retries')
            retries > 0 && wait_for_cluster_to_start(retries - 1)
          else
            puts 'Cluster is ready to accept requests'
          end

        end

      end

    end

    # Using built-in command-line options parser to reduce dependencies
    options = {}
    opt_parser = OptionParser.new do |opts|

      opts.on('-c', '--create-snapshot', 'Create elasticsearch snapshot') do |cs|
        options[:create_snapshot] = cs
      end
      opts.on('-r', '--restore-snapshot', 'Restore elasticsearch snapshot') do |rs|
        options[:restore_snapshot] = rs
      end

    end

    begin
      opt_parser.parse!

      document = 'http://169.254.169.254/latest/dynamic/instance-identity/document/'
      metadata = OpenStruct.new(JSON.parse(open(document) { |d| d.read }))
      backup = Storage::Backup.new(metadata.privateIp)

      if options[:create_snapshot]
        puts 'Creating new snapshot'
        backup.create_repository
        backup.take_snapshot(master_timeout: 60)
      end

      if options[:restore_snapshot]
        puts 'Trying to restore cluster state from snapshot'
        backup.create_repository
        backup.restore_from_last_snapshot(master_timeout: 60)
      end

      if options.empty?
        puts opt_parser
        exit 1
      end

    rescue RuntimeError => e
      puts e
    end
# Hourly cron job: take a fresh cluster snapshot via the backup-cluster CLI.
- path: /etc/cron.hourly/take_cluster_snapshot
  owner: root:root
  permissions: '0755'
  content: |
    #!/usr/bin/env bash

    /usr/local/bin/backup-cluster --create-snapshot
@@ -0,0 +1,48 @@
module Enscalator
  module Plugins
    # VPC Peering Connection Plugin
    module VPCPeeringConnection
      # Template parameter for VPC ID
      #
      # @param [String] name parameter name
      # @param [String] description parameter description
      # @param [String, nil] default_value optional default VPC id
      def parameter_vpc_id(name, description, default_value = nil)
        options = {
          Description: description,
          Type: 'String',
          AllowedPattern: 'vpc-[a-zA-Z0-9]*',
          ConstraintDescription: 'must be valid VPC id (vpc-*).'
        }
        # A bare truthiness check is equivalent to the original's redundant
        # `default_value && !default_value.nil?`.
        options[:Default] = default_value if default_value
        parameter name, options
      end

      # Create new vpc peering connection
      #
      # @param [String] conn_name connection name
      # @param [Array<String>] tags list of tags
      # @return [String] the resource name (conn_name)
      def vpc_peering_init(conn_name, tags: [])
        options = {}
        # Both vpc ids are referenced from parameters named after the connection.
        options[:Properties] = {
          VpcId: ref("#{conn_name}VpcId"),
          PeerVpcId: ref("#{conn_name}PeerVpcId")
        }

        # Set plugin tags
        options[:Properties][:Tags] = tags if tags && !tags.empty?

        resource conn_name,
                 {
                   Type: 'AWS::EC2::VPCPeeringConnection'
                 }.merge(options)

        output conn_name,
               Description: 'VPC Peering connection name',
               Value: ref(conn_name)

        # return resource name
        conn_name
      end
    end # module VPCPeeringConnection
  end # module Plugins
end # module Enscalator
@@ -0,0 +1,30 @@
require 'shellwords'
require 'open-uri'
require 'nokogiri'
require 'semantic'

# Load every plugin module under Enscalator::Plugins.
# NOTE: the original listed plugins/core_os twice; require_relative is
# idempotent, so the duplicate was a no-op and has been removed.
require_relative 'plugins/route53'
require_relative 'plugins/core_os'
require_relative 'plugins/elb'
require_relative 'plugins/auto_scale'
require_relative 'plugins/couchbase'
require_relative 'plugins/ubuntu'
require_relative 'plugins/redis'
require_relative 'plugins/debian'
require_relative 'plugins/rethink_db'
require_relative 'plugins/rds'
require_relative 'plugins/elastic_beanstalk'
require_relative 'plugins/elasticache'
require_relative 'plugins/elasticsearch_amazon'
require_relative 'plugins/elasticsearch_bitnami'
require_relative 'plugins/elasticsearch_opsworks'
require_relative 'plugins/amazon_linux'
require_relative 'plugins/vpc_peering_connection'
require_relative 'plugins/nat_gateway'

module Enscalator
  # Namespace for enscalator plugins
  module Plugins
  end
end
@@ -0,0 +1,209 @@
require 'cloudformation-ruby-dsl/cfntemplate'

module Enscalator
  # DSL specific for enscalator
  class RichTemplateDSL < TemplateDSL
    include Enscalator::Core::CfParameters
    include Enscalator::Core::CfResources
    include Enscalator::Helpers
    include Enscalator::Plugins::Route53

    # Cloudformation limit when sending template body directly
    TEMPLATE_BODY_LIMIT = 51_200

    # Create new RichTemplateDSL instance
    #
    # @param [Hash] options command-line arguments
    def initialize(options = {})
      @options = options
      init_aws_config(@options[:region], profile_name: @options[:profile])
      super(parse_params(@options[:parameters]), @options[:stack_name], @options[:region], false, &proc { tpl })
    end

    # Parse semicolon separated parameter string
    #
    # @param [String] raw_parameters raw parameter string (e.g. "a=1;b=2")
    # @return [Hash] parameter hash
    def parse_params(raw_parameters)
      Hash[(raw_parameters || '').split(/;/).map { |pair| pair.split(/=/, 2) }]
    end

    # Helper method to check if the current command is to create the stack
    #
    # @return [Truthy] truthful if currently creating a stack
    def creating?
      @options[:create_stack]
    end

    # Helper method to provide accessor for `region`
    #
    # @return [String] region
    def region
      aws_region
    end

    # Helper method to provide value accessor for `vpc_stack_name`
    #
    # @return [String] vpc_stack_name
    # @raise [RuntimeError] if vpc-stack-name was not given
    def vpc_stack_name
      @options[:vpc_stack_name] || fail('Requires vpc-stack-name')
    end

    # Adds trailing dot to make it proper FQDN
    #
    # @param [String] str fqdn string
    # @return [String] fqdn with trailing dot
    def handle_trailing_dot(str)
      str.end_with?('.') ? str : str + '.'
    end

    # @deprecated
    # Hosted zone accessor
    #
    # @return [String] hosted zone, and ensure ending with a '.'
    # @raise [RuntimeError] if hosted zone is accessed but it's not configured
    def hosted_zone
      ActiveSupport::Deprecation.warn('hosted_zone is deprecated (use private_hosted_zone instead)')
      private_hosted_zone
    end

    # Private hosted zone accessor
    #
    # @return [String] private hosted zone
    # @raise [RuntimeError] if private hosted zone was accessed before it was configured
    def private_hosted_zone
      # TODO: adjust other templates/plugins to use private_hosted_zone
      # Explicit guard clause; the original computed `@options[...] || fail`
      # and discarded the value.
      fail('Private hosted zone has to be configured') unless @options[:private_hosted_zone]
      handle_trailing_dot(@options[:private_hosted_zone])
    end

    # Public hosted zone accessor
    #
    # @return [String] public hosted zone
    # @raise [RuntimeError] if hosted zone was accessed before it was configured
    def public_hosted_zone
      fail('Public hosted zone has to be configured') unless @options[:public_hosted_zone]
      handle_trailing_dot(@options[:public_hosted_zone])
    end

    # Get a list of availability zones for the given region
    #
    # @return [Hash] zone suffix symbol => full zone name
    # @raise [RuntimeError] when the requested zone is not available in region
    def read_availability_zones
      az = @options[:availability_zone].to_sym
      supported_az = ec2_client(region).describe_availability_zones.availability_zones
      alive_az = supported_az.select { |zone| zone.state == 'available' }
      # Key each zone by its single-letter suffix (e.g. :a => "us-east-1a").
      az_list = alive_az.collect(&:zone_name).map { |n| [n.last.to_sym, n] }.to_h

      # use all zones, specific one, or fail if zone is not supported in given region
      if az == :all
        az_list
      elsif az_list.key?(az)
        az_list.select { |k, _| k == az }
      else
        fail("Requested zone #{az} is not supported in #{region}, supported ones are #{az_list.keys.join(',')}")
      end
    end

    # Availability zones accessor (memoized)
    def availability_zones
      @availability_zones ||= read_availability_zones
    end

    # Pre-run hook
    #
    # @param [Proc] block hook body
    def pre_run(&block)
      (@pre_run_blocks ||= []) << block if block_given?
    end

    # Post-run hook
    #
    # @param [Proc] block hook body
    def post_run(&block)
      (@post_run_blocks ||= []) << block if block_given?
    end

    # Convert tags to properties
    #
    # @param [Hash] tags collection of tags
    # @return [Array] list of properties
    def tags_to_properties(tags)
      tags.map { |k, v| { Key: k, Value: v } }
    end

    # Template description
    #
    # @param [String] desc template description
    def description(desc)
      value(Description: desc)
    end

    # Network interface
    #
    # @param [String] device_index network interface device index
    # @param [Hash] options
    def network_interface(device_index, options: {})
      options[:DeviceIndex] = device_index
      options
    end

    # Resource
    #
    # @param [String] name of the resource
    # @param [Hash] options options
    def resource(name, options)
      super
      # Only EC2 instances additionally export their private ip as an output.
      return nil unless options[:Type] && %w(AWS::EC2::Instance).include?(options[:Type])
      output "#{name}PrivateIpAddress",
             Description: "#{name} Private IP Address",
             Value: get_att(name, 'PrivateIp')
    end

    # Dynamically define methods to access related parameters
    #
    # @param [String] name parameter key
    # @param [Hash] options options
    def parameter(name, options)
      default(:Parameters, {})[name] = options
      @parameters[name] ||= options[:Default]
      # e.g. parameter "KeyName" gains a ref_key_name accessor.
      self.class.send(:define_method, :"ref_#{name.underscore}") do
        ref(name)
      end
    end

    # Adds block to the run queue
    #
    # @param [Array] items list of blocks
    def enqueue(items)
      (@run_queue ||= []).concat(items || [])
    end

    # Determine content of run queue and execute each block in queue in sequence
    def exec!
      init_assets_dir
      enqueue(@pre_run_blocks) if @options[:pre_run]
      enqueue([@options[:expand] ? proc { STDOUT.puts(JSON.pretty_generate(self)) } : proc { STDOUT.puts(deploy(self)) }])
      enqueue(@post_run_blocks) if @options[:post_run]
      @run_queue.each(&:call) if @run_queue
    end

    # Pass generated template to underlying cloudformation client to actually create/update stack
    # @param [TemplateDSL] template instance of template
    # @return [String] stack id of the created/updated stack
    # @raise [RuntimeError] when generated template exceeds 51200 size limit
    def deploy(template)
      template_body = template.to_json
      if template_body.bytesize > TEMPLATE_BODY_LIMIT
        fail("Unable to deploy template exceeding #{TEMPLATE_BODY_LIMIT} limit: #{template_body.bytesize}")
      end
      options = {
        stack_name: stack_name,
        capabilities: [@options[:capabilities]],
        template_body: template_body
      }
      options[:parameters] = parameters.map { |k, v| { parameter_key: k, parameter_value: v } } unless parameters.empty?
      action = @options[:update_stack] ? :update_stack : :create_stack
      resp = cfn_client(region).send(action, options)
      resp.stack_id
    end
  end # class RichTemplateDSL
end # module Enscalator