capify-ec2 1.4.11 → 1.5.0.pre
- data/Changelog.md +11 -0
- data/capify-ec2.gemspec +1 -1
- data/lib/capify-ec2.rb +78 -52
- data/lib/capify-ec2/capistrano.rb +2 -2
- data/lib/capify-ec2/version.rb +1 -1
- data/readme.md +170 -43
- metadata +18 -15
data/Changelog.md
CHANGED
@@ -1,3 +1,14 @@
+## 1.5.0.pre (Jul 19, 2013)
+
+Features:
+
+  - Added Support for the [Capistrano Multistage Extension](https://github.com/capistrano/capistrano/wiki/2.x-Multistage-Extension).
+  - Allow the use of IAM roles to authenticate with AWS, rather than an access key id and secret access key.
+
+Bugfixes:
+
+  - Fixed an issue executing cap tasks on individual instances.
+
 ## 1.4.11 (Jun 13, 2013)
 
 Features:
data/capify-ec2.gemspec
CHANGED
@@ -18,7 +18,7 @@ Gem::Specification.new do |s|
   s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
   s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
   s.require_paths = ["lib"]
-  s.add_dependency('fog', '
+  s.add_dependency('fog', '>= 1.12.0')
   s.add_dependency('colored', '=1.2')
   s.add_dependency('capistrano', '~> 2.14')
 end
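For reference, a hedged sketch of the Gemfile line that would pull in this prerelease (the fog constraint above is resolved automatically by Bundler):

```ruby
# Illustrative Gemfile entry: the prerelease must be pinned explicitly,
# and it pulls in fog >= 1.12.0 via the gemspec change above.
gem 'capify-ec2', '1.5.0.pre'
```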
data/lib/capify-ec2.rb
CHANGED
@@ -8,12 +8,12 @@ require File.expand_path(File.dirname(__FILE__) + '/capify-ec2/server')
 class CapifyEc2
 
   attr_accessor :load_balancer, :instances, :ec2_config
-
+
   unless const_defined? :SLEEP_COUNT
     SLEEP_COUNT = 5
   end
-
-  def initialize(ec2_config = "config/ec2.yml")
+
+  def initialize(ec2_config = "config/ec2.yml", stage = '')
     case ec2_config
     when Hash
       @ec2_config = ec2_config
@@ -22,6 +22,7 @@ class CapifyEc2
     else
       raise ArgumentError, "Invalid ec2_config: #{ec2_config.inspect}"
     end
+    @ec2_config[:stage] = stage
 
     # Maintain backward compatibility with previous config format
     @ec2_config[:project_tags] ||= []
@@ -31,41 +32,48 @@
     @ec2_config[:aws_roles_tag] ||= "Roles"
     # User can change the Options tag string.
     @ec2_config[:aws_options_tag] ||= "Options"
+    # User can change the Stages tag string
+    @ec2_config[:aws_stages_tag] ||= "Stages"
 
     @ec2_config[:project_tags] << @ec2_config[:project_tag] if @ec2_config[:project_tag]
-
+
     regions = determine_regions()
-
+
     @instances = []
 
     regions.each do |region|
       begin
-        servers = Fog::Compute.new( :provider => 'AWS',
-                                    :aws_access_key_id => aws_access_key_id,
-                                    :aws_secret_access_key => aws_secret_access_key,
-                                    :region => region
-                                  ).servers
+        servers = Fog::Compute.new( {:provider => 'AWS', :region => region}.merge!(security_credentials) ).servers
       rescue => e
        puts "[Capify-EC2] Unable to connect to AWS: #{e}.".red.bold
        exit 1
      end
-
+
      servers.each do |server|
        @instances << server if server.ready?
      end
    end
-  end
+  end
+
+  def security_credentials
+    if @ec2_config[:use_iam_profile]
+      { :use_iam_profile => true }
+    else
+      { :aws_access_key_id => @ec2_config[:aws_access_key_id],
+        :aws_secret_access_key => @ec2_config[:aws_secret_access_key] }
+    end
+  end
 
   def determine_regions()
     @ec2_config[:aws_params][:regions] || [@ec2_config[:aws_params][:region]]
   end
 
   def aws_access_key_id
-    @ec2_config[:aws_access_key_id] || Fog.credentials[:aws_access_key_id] || ENV['AWS_ACCESS_KEY_ID'] || raise("Missing AWS Access Key ID")
+    @ec2_config[:aws_access_key_id] || Fog.credentials[:aws_access_key_id] || ENV['AWS_ACCESS_KEY_ID'] || @ec2_config[:use_iam_profile] || raise("Missing AWS Access Key ID")
   end
 
   def aws_secret_access_key
-    @ec2_config[:aws_secret_access_key] || Fog.credentials[:aws_secret_access_key] || ENV['AWS_SECRET_ACCESS_KEY'] || raise("Missing AWS Secret Access Key")
+    @ec2_config[:aws_secret_access_key] || Fog.credentials[:aws_secret_access_key] || ENV['AWS_SECRET_ACCESS_KEY'] || @ec2_config[:use_iam_profile] || raise("Missing AWS Secret Access Key")
   end
 
   def display_instances
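A standalone sketch of the credential-selection logic that the new `security_credentials` method introduces (the `credentials_for` name and the sample values are illustrative, not part of the gem):

```ruby
# Prefer the IAM instance profile when :use_iam_profile is set; otherwise pass
# the explicit key pair from the configuration straight through to Fog.
def credentials_for(config)
  if config[:use_iam_profile]
    { :use_iam_profile => true }
  else
    { :aws_access_key_id     => config[:aws_access_key_id],
      :aws_secret_access_key => config[:aws_secret_access_key] }
  end
end

p credentials_for(:use_iam_profile => true)
# => {:use_iam_profile=>true}
p credentials_for(:aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')
# => {:aws_access_key_id=>"KEY", :aws_secret_access_key=>"SECRET"}
```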
@@ -73,67 +81,85 @@
       puts "[Capify-EC2] No instances were found using your 'ec2.yml' configuration.".red.bold
       return
     end
-
+
     # Set minimum widths for the variable length instance attributes.
-    column_widths = { :name_min => 4, :type_min => 4, :dns_min => 5, :roles_min => @ec2_config[:aws_roles_tag].length, :options_min => @ec2_config[:aws_options_tag].length }
+    column_widths = { :name_min => 4, :type_min => 4, :dns_min => 5, :roles_min => @ec2_config[:aws_roles_tag].length, :stages_min => @ec2_config[:aws_stages_tag].length, :options_min => @ec2_config[:aws_options_tag].length }
 
     # Find the longest attribute across all instances, to format the columns properly.
     column_widths[:name] = desired_instances.map{|i| i.name.to_s.ljust( column_widths[:name_min] ) || ' ' * column_widths[:name_min] }.max_by(&:length).length
     column_widths[:type] = desired_instances.map{|i| i.flavor_id || ' ' * column_widths[:type_min] }.max_by(&:length).length
     column_widths[:dns] = desired_instances.map{|i| i.contact_point.to_s.ljust( column_widths[:dns_min] ) || ' ' * column_widths[:dns_min] }.max_by(&:length).length
     column_widths[:roles] = desired_instances.map{|i| i.tags[@ec2_config[:aws_roles_tag]].to_s.ljust( column_widths[:roles_min] ) || ' ' * column_widths[:roles_min] }.max_by(&:length).length
+    column_widths[:stages] = desired_instances.map{|i| i.tags[@ec2_config[:aws_stages_tag]].to_s.ljust( column_widths[:stages_min] ) || ' ' * column_widths[:stages_min] }.max_by(&:length).length
     column_widths[:options] = desired_instances.map{|i| i.tags[@ec2_config[:aws_options_tag]].to_s.ljust( column_widths[:options_min] ) || ' ' * column_widths[:options_min] }.max_by(&:length).length
 
-
-
-
-      'Num' .bold,
-      'Name' .ljust( column_widths[:name] ).bold,
-      'ID' .ljust( 10 ).bold,
-      'Type' .ljust( column_widths[:type] ).bold,
-      'DNS' .ljust( column_widths[:dns] ).bold,
-      'Zone' .ljust( 10 ).bold,
-      @ec2_config[:aws_roles_tag] .ljust( column_widths[:roles] ).bold,
-      @ec2_config[:aws_options_tag].ljust( column_widths[:options] ).bold
+    roles_present   = desired_instances.map{|i| i.tags[@ec2_config[:aws_roles_tag]].to_s}.max_by(&:length).length > 0
+    options_present = desired_instances.map{|i| i.tags[@ec2_config[:aws_options_tag]].to_s}.max_by(&:length).length > 0
+    stages_present  = desired_instances.map{|i| i.tags[@ec2_config[:aws_stages_tag]].to_s}.max_by(&:length).length > 0
 
+    # Project and Stages info..
+    info_label_width = [@ec2_config[:aws_project_tag], @ec2_config[:aws_stages_tag]].map(&:length).max
+    puts "#{@ec2_config[:aws_project_tag].rjust( info_label_width ).bold}: #{@ec2_config[:project_tags].join(', ')}." if @ec2_config[:project_tags].any?
+    puts "#{@ec2_config[:aws_stages_tag].rjust( info_label_width ).bold}: #{@ec2_config[:stage]}." unless @ec2_config[:stage].empty?
+
+    # Title row.
+    status_output = []
+    status_output << 'Num' .bold
+    status_output << 'Name' .ljust( column_widths[:name] ).bold
+    status_output << 'ID' .ljust( 10 ).bold
+    status_output << 'Type' .ljust( column_widths[:type] ).bold
+    status_output << 'DNS' .ljust( column_widths[:dns] ).bold
+    status_output << 'Zone' .ljust( 10 ).bold
+    status_output << @ec2_config[:aws_stages_tag] .ljust( column_widths[:stages] ).bold if stages_present
+    status_output << @ec2_config[:aws_roles_tag] .ljust( column_widths[:roles] ).bold if roles_present
+    status_output << @ec2_config[:aws_options_tag].ljust( column_widths[:options] ).bold if options_present
+    puts status_output.join(" ")
+
     desired_instances.each_with_index do |instance, i|
-
-
-
-
-
-
-
-
-
+      status_output = []
+      status_output << "%02d:" % i
+      status_output << (instance.name || '') .ljust( column_widths[:name] ).green
+      status_output << instance.id .ljust( 2 ).red
+      status_output << instance.flavor_id .ljust( column_widths[:type] ).cyan
+      status_output << instance.contact_point .ljust( column_widths[:dns] ).blue.bold
+      status_output << instance.availability_zone .ljust( 10 ).magenta
+      status_output << (instance.tags[@ec2_config[:aws_stages_tag]] || '').ljust( column_widths[:stages] ).yellow if stages_present
+      status_output << (instance.tags[@ec2_config[:aws_roles_tag]] || '').ljust( column_widths[:roles] ).yellow if roles_present
+      status_output << (instance.tags[@ec2_config[:aws_options_tag]] || '').ljust( column_widths[:options] ).yellow if options_present
+      puts status_output.join(" ")
     end
   end
 
   def server_names
     desired_instances.map {|instance| instance.name}
   end
-
+
   def project_instances
     @instances.select {|instance| @ec2_config[:project_tags].include?(instance.tags[@ec2_config[:aws_project_tag]])}
   end
-
+
   def desired_instances(region = nil)
-    @ec2_config[:project_tags].empty? ? @instances : project_instances
+    instances = @ec2_config[:project_tags].empty? ? @instances : project_instances
+    @ec2_config[:stage].empty? ? instances : get_instances_by_stage(instances)
   end
-
+
   def get_instances_by_role(role)
     desired_instances.select {|instance| instance.tags[@ec2_config[:aws_roles_tag]].split(%r{,\s*}).include?(role.to_s) rescue false}
   end
-
+
+  def get_instances_by_stage(instances=@instances)
+    instances.select {|instance| instance.tags[@ec2_config[:aws_stages_tag]].split(%r{,\s*}).include?(@ec2_config[:stage].to_s) rescue false}
+  end
+
   def get_instances_by_region(roles, region)
     return unless region
     desired_instances.select {|instance| instance.availability_zone.match(region) && instance.tags[@ec2_config[:aws_roles_tag]].split(%r{,\s*}).include?(roles.to_s) rescue false}
-  end
-
+  end
+
   def get_instance_by_name(name)
     desired_instances.select {|instance| instance.name == name}.first
   end
-
+
   def get_instance_by_dns(dns)
     desired_instances.select {|instance| instance.dns_name == dns}.first
   end
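The new stage filter matches a comma-separated 'Stages' tag against the current stage; a standalone illustration of that matching, using made-up hashes in place of Fog server objects:

```ruby
# Illustrative only: mimics get_instances_by_stage with plain hashes.
instances = [
  { :name => 'server-1', 'Stages' => 'production' },
  { :name => 'server-2', 'Stages' => 'production,staging' },
  { :name => 'server-3', 'Stages' => 'staging' }
]

stage    = 'staging'
matching = instances.select do |i|
  i['Stages'].split(%r{,\s*}).include?(stage) rescue false
end

p matching.map { |i| i[:name] }  # => ["server-2", "server-3"]
```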
@@ -141,10 +167,10 @@
   def instance_health(load_balancer, instance)
     elb.describe_instance_health(load_balancer.id, instance.id).body['DescribeInstanceHealthResult']['InstanceStates'][0]['State']
   end
-
+
   def elb
-    Fog::AWS::ELB.new(:
-  end
+    Fog::AWS::ELB.new({:region => @ec2_config[:aws_params][:region]}.merge!(security_credentials))
+  end
 
   def get_load_balancer_by_instance(instance_id)
     hash = elb.load_balancers.inject({}) do |collect, load_balancer|
@@ -162,7 +188,7 @@
     lbs[load_balancer_name]
 
   end
-
+
   def deregister_instance_from_elb(instance_name)
     return unless @ec2_config[:load_balanced]
     instance = get_instance_by_name(instance_name)
@@ -172,7 +198,7 @@
 
     elb.deregister_instances_from_load_balancer(instance.id, @@load_balancer.id)
   end
-
+
   def register_instance_in_elb(instance_name, load_balancer_name = '')
     return if !@ec2_config[:load_balanced]
     instance = get_instance_by_name(instance_name)
@@ -185,7 +211,7 @@
     fail_after = @ec2_config[:fail_after] || 30
     state = instance_health(load_balancer, instance)
     time_elapsed = 0
-
+
     while time_elapsed < fail_after
       break if state == "InService"
       sleep SLEEP_COUNT
@@ -256,8 +282,8 @@
 
     protocol = options[:https] ? 'https://' : 'http://'
     uri = URI("#{protocol}#{dns}:#{port}#{path}")
-
-    puts "[Capify-EC2] Checking '#{uri}' for the content '#{expected_response.inspect}'..."
+
+    puts "[Capify-EC2] Checking '#{uri}' for the content '#{expected_response.inspect}'..."
 
     http = Net::HTTP.new(uri.host, uri.port)
 
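The healthcheck context above builds its target URL from the DNS name, port and path; a quick illustration with placeholder values:

```ruby
require 'uri'

# Placeholder values, for illustration only.
protocol = 'https://'   # chosen when options[:https] is set
dns      = 'ec2-198-51-100-1.compute-1.amazonaws.com'
port     = 443
path     = '/status'

puts URI("#{protocol}#{dns}:#{port}#{path}")
# => https://ec2-198-51-100-1.compute-1.amazonaws.com:443/status
```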
data/lib/capify-ec2/capistrano.rb
CHANGED
@@ -4,7 +4,7 @@ require 'pp'
 
 Capistrano::Configuration.instance(:must_exist).load do
   def capify_ec2
-    @capify_ec2 ||= CapifyEc2.new(fetch(:ec2_config, 'config/ec2.yml'))
+    @capify_ec2 ||= CapifyEc2.new(fetch(:ec2_config, 'config/ec2.yml'), fetch(:stage, ''))
   end
 
   namespace :ec2 do
@@ -213,7 +213,7 @@ Capistrano::Configuration.instance(:must_exist).load do
       if named_instance.respond_to?(:roles)
         roles = named_instance.roles
       else
-        roles = [named_instance.tags[
+        roles = [named_instance.tags[ capify_ec2.ec2_config[:aws_roles_tag] ]].flatten
       end
 
       roles.each do |role|
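Because `capify_ec2` now forwards Capistrano's `:stage` variable, a multistage setup only needs the usual requires; a minimal `deploy.rb` sketch (stage names are examples):

```ruby
# config/deploy.rb -- minimal sketch; stage names below are examples.
require 'capistrano/ext/multistage'
require 'capify-ec2/capistrano'

set :stages,        %w(production staging)
set :default_stage, 'staging'
```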
data/lib/capify-ec2/version.rb
CHANGED
data/readme.md
CHANGED
@@ -10,7 +10,7 @@ Capify-EC2 is used to generate Capistrano namespaces and tasks from Amazon EC2 i
 
 or add the gem to your project's Gemfile.
 
-You will need to create a YML configuration file at
+You will need to create a YML configuration file at `config/ec2.yml` that looks like the following:
 
 ```ruby
 :aws_access_key_id: "YOUR ACCESS KEY"
@@ -21,7 +21,7 @@ You will need to create a YML configuration file at 'config/ec2.yml' that looks
 :project_tag: "YOUR APP NAME"
 ```
 
-Finally, add the gem to your
+Finally, add the gem to your `deploy.rb`:
 
 ```ruby
 require "capify-ec2/capistrano"
@@ -31,54 +31,67 @@ require "capify-ec2/capistrano"
 
 #### Configuration
 
-Note:
+Note: `:aws_access_key_id` and `:aws_secret_access_key` are required, unless you provide them via the two alternative methods detailed below under 'AWS Credentials' or have the `:use_iam_profile` option set to use IAM roles. `:region` is also required. Other settings are optional.
 
 * :project_tag
 
 If this is defined, Capify-EC2 will only create namespaces and tasks for the EC2 instances that have a matching 'Project' tag. By default, all instances available to the specified AWS access key will be used.
 
-It is possible to include multiple projects simultaneously by using the :project_tags parameter, like so:
+It is possible to include multiple projects simultaneously by using the :project_tags parameter, like so:
 
 ```ruby
-:project_tags:
+:project_tags:
 - "YOUR APP NAME"
 - "YOUR OTHER APP NAME"
 ```
 
 * :aws_project_tag
 
-Use this option to change which EC2 instance tag Capify-EC2 uses to determine instance project. Defaults to 'Project' if
+Use this option to change which EC2 instance tag Capify-EC2 uses to determine instance project. Defaults to 'Project' if omitted.
 
 * :aws_roles_tag
 
-Use this option to change which EC2 instance tag Capify-EC2 uses to determine instance roles. Defaults to 'Roles' if
+Use this option to change which EC2 instance tag Capify-EC2 uses to determine instance roles. Defaults to 'Roles' if omitted.
 
 * :aws_options_tag
 
-Use this option to change which EC2 instance tag Capify-EC2 uses to determine instance options. Defaults to 'Options' if
+Use this option to change which EC2 instance tag Capify-EC2 uses to determine instance options. Defaults to 'Options' if omitted.
+
+* :aws_stages_tag
+
+Use this option to change which EC2 instance tag Capify-EC2 uses to determine which instances belong to a stage. Should be used in conjunction with the [Capistrano Multistage Extension](https://github.com/capistrano/capistrano/wiki/2.x-Multistage-Extension). Defaults to 'Stages' if omitted.
 
 * :load_balanced
 
 When ':load_balanced' is set to 'true', Capify-EC2 uses pre and post-deploy hooks to deregister the instance from an associated Elastic Load Balancer, perform the actual deploy, then finally reregister with the ELB and validated the instance health.
-Note: This options only applies to deployments made to an individual instance, using the command
+Note: This options only applies to deployments made to an individual instance, using the command `cap INSTANCE_NAME_HERE deploy` - it doesn't apply to roles.
+
+* :use_iam_profile
+
+Use this option to use IAM roles for authentication, rather than an access key id and secret access key.
+
 
 ##### AWS Credentials
 
 ###### Via YML Configuration
 
-By default, Capify-EC2 will attempt to use the credentials found in your
+By default, Capify-EC2 will attempt to use the credentials found in your `ec2.yml` configuration as detailed above.
 
 ###### Via Fog Configuration
 
-If you wish, you can have Capify-EC2 use the AWS credentials found in your Fog configuration, instead of instead of specifying
+If you wish, you can have Capify-EC2 use the AWS credentials found in your Fog configuration, instead of instead of specifying `:aws_access_key_id` and `:aws_secret_access_key` in the YML configuration file. Refer to the Fog documentation for details on specifying AWS credentials.
 
 ###### Via Environment Variables
 
-If you wish, you can define AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY as environment variables, instead of specifying
+If you wish, you can define AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY as environment variables, instead of specifying `:aws_access_key_id` and `:aws_secret_access_key` in the YML configuration file.
+
+###### Via AWS IAM Roles
+
+If you have IAM roles set up on your box to allow querying EC2 information you tell Fog to use IAM roles and you will not need to provide any credentials at runtime. For more information on IAM roles read Amazon's [IAM documentation](http://aws.amazon.com/iam/).
 
 ###### Ordering
 
-Capify-EC2 will attempt to load your AWS credentials first from the
+Capify-EC2 will attempt to load your AWS credentials first from the `ec2.yml` configuration file, then from your Fog configuration file, and finally from environment variables. It will display an error if no credentials are found by any of these methods.
 
 
 
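Since `CapifyEc2.new` also accepts a Hash via the `:ec2_config` Capistrano variable, the new options can be sketched together like this (values are illustrative; the same keys work in `config/ec2.yml`):

```ruby
# Illustrative only: Hash form of the configuration, fetched via :ec2_config.
set :ec2_config, {
  :use_iam_profile => true,                        # authenticate via the instance's IAM role
  :aws_params      => { :region => 'eu-west-1' },  # example region
  :project_tag     => 'YOUR APP NAME',
  :aws_stages_tag  => 'Stages'                     # default tag name, shown for clarity
}
```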
@@ -88,7 +101,7 @@ You will need to create instance tags using the AWS Management Console or API, t
 
 * Tag 'Project'
 
-Used with the
+Used with the `:project_tag` option in `config/ec2.yml` to limit Capify-EC2's functionality to a subset of your instances.
 
 * Tag 'Roles'
 
@@ -131,7 +144,7 @@ In our examples, imagine that you have three servers on EC2 named and tagged as
 
 #### Single Roles
 
-You need to add a call to
+You need to add a call to `ec2_roles` in your `deploy.rb`, like so:
 
 ```ruby
 ec2_roles :web
@@ -154,7 +167,7 @@ task :web do
 end
 ```
 
-Note that there are no tasks created for 'server-2', as it does not have the role 'web'. If we were to change the
+Note that there are no tasks created for 'server-2', as it does not have the role 'web'. If we were to change the `ec2_roles` definition in your `deploy.rb` to the following:
 
 ```ruby
 ec2_roles :db
@@ -181,7 +194,7 @@ end
 
 #### Multiple Roles
 
-If you want to create tasks for servers using multiple roles, you can call
+If you want to create tasks for servers using multiple roles, you can call `ec2_roles` multiple times in your `deploy.rb` as follows:
 
 ```ruby
 ec2_roles :web
@@ -274,7 +287,7 @@ For example, if we had the following group of instances in EC2:
 </tr>
 </table>
 
-You could then create a task in your
+You could then create a task in your `deploy.rb` that will only be executed on the worker machine, like so:
 
 ```ruby
 task :reload_workers, :only=>{:worker=>true} do
@@ -290,13 +303,13 @@ end
 
 ##### Via Role Definitions
 
-As well as defining Options at an instance level via EC2 tags, you can define an Option in your
+As well as defining Options at an instance level via EC2 tags, you can define an Option in your `deploy.rb` at the same time as defining the role, as follows:
 
 ```ruby
 ec2_roles {:name=>"web", :options=>{:worker=>"server-C"}}
 ```
 
-In this case, you set the value of
+In this case, you set the value of `:worker` equal to the instance name you want to be a worker. The task definition remains the same:
 
 ```ruby
 task :reload_workers => :web, :only=>{:worker=>true} do
@@ -308,7 +321,7 @@ end
 
 #### Deploying
 
-Once you have defined the various roles used by your application, you can deploy to it as you normally would a namespace, for example if you define the following in your
+Once you have defined the various roles used by your application, you can deploy to it as you normally would a namespace, for example if you define the following in your `deploy.rb`:
 
 ```ruby
 ec2_roles :web
@@ -327,11 +340,18 @@ If you've defined multiple roles, you can deploy to them all by chaining the tas
 cap web app deploy
 ```
 
+You can also deploy to individual instances by specifying their 'Name' tag, for example, with the sample servers above:
+
+```ruby
+cap server-1 deploy
+```
+
+would deploy only to the instance named 'server-1'.
 
 
 ##### Default Deploys
 
-You can set a role as the default so that it will be included when you run
+You can set a role as the default so that it will be included when you run `cap deploy` without specifying any roles, for example in your `deploy.rb`:
 
 ```ruby
 ec2_roles :name=>"web", :options => {:default=>true}
@@ -343,7 +363,7 @@ Then run:
 cap deploy
 ```
 
-You can set multiple roles as defaults, so they are all included when you run
+You can set multiple roles as defaults, so they are all included when you run `cap deploy`, like so:
 
 ```ruby
 ec2_roles :name=>"web", :options => {:default=>true}
@@ -378,14 +398,14 @@ cap web db rolling_deploy
 
 ##### Usage with Healthchecks
 
-When defining a role with the
+When defining a role with the `ec2_role` call, if you configure a healthcheck for that role as follows, it will automatically be used during the rolling deployment:
 
 ```ruby
 ec2_roles :name => "web",
-          :variables => {
+          :variables => {
             :healthcheck => {
               :path => '/status',
-              :port => 80,
+              :port => 80,
               :result => 'OK'
             }
           }
@@ -425,16 +445,16 @@ ec2_roles :name => "web",
           }
 ```
 
-The default timeout of 60 seconds can be overridden by setting
+The default timeout of 60 seconds can be overridden by setting `:timeout` to a custom value in seconds. The protocol used defaults to 'http://', however you can switch to 'https://' by setting `:https` equal to 'true'. For example:
 
 ```ruby
 ec2_roles :name => "web",
-          :variables => {
+          :variables => {
             :healthcheck => {
               :path => '/status',
-              :port => 80,
+              :port => 80,
               :result => 'OK'
-              :https => true,
+              :https => true,
               :timeout => 10
             }
           }
@@ -446,14 +466,14 @@ You can run multiple different healthchecks for a role by specifying the healthc
 
 ```ruby
 ec2_roles :name => "web",
-          :variables => {
+          :variables => {
             :healthcheck => [{
               :path => '/status',
-              :port => 80,
+              :port => 80,
               :result => 'OK'
             }, {
               :path => '/other_status',
-              :port => 81,
+              :port => 81,
               :result => 'OK'
             }]
           }
@@ -461,7 +481,7 @@ ec2_roles :name => "web",
 
 ##### Usage with Elastic Load Balancers
 
-You can have Capify-EC2 automatically deregister and reregister an instance from whichever ELB it is associated with, before and after the deployment, by setting
+You can have Capify-EC2 automatically deregister and reregister an instance from whichever ELB it is associated with, before and after the deployment, by setting `:load_balanced` to 'true' in the role definition, for example:
 
 ```ruby
 ec2_roles :name => "web",
@@ -479,7 +499,7 @@ ec2_roles :name => "web",
           :variables => {
             :healthcheck => {
               :path => '/status',
-              :port => 80,
+              :port => 80,
               :result => 'OK'
             }
           :load_balanced => true
@@ -488,9 +508,9 @@ ec2_roles :name => "web",
 
 In this example, the instance will be deregistered from the ELB it is associated with and then deployed to. A healthcheck will then be performed, and providing this passes, the instance will be reregistered with the ELB and verified.
 
-If an instance has been tagged with multiple roles, this behaviour will apply if
+If an instance has been tagged with multiple roles, this behaviour will apply if `:load_balanced` is set to 'true' in at least one of those roles.
 
-If an instance is not associated with any ELBs, then the behaviour will be skipped silently, even if
+If an instance is not associated with any ELBs, then the behaviour will be skipped silently, even if `:load_balanced` is set to 'true'.
 
 
 
@@ -516,19 +536,19 @@ cap SERVER_NAME_HERE ec2:deregister_instance
 cap SERVER_NAME_HERE ec2:register_instance -s loadbalancer=ELB_NAME_HERE
 ```
 
-You need to specify the ELB when reregistering an instance, but not when deregistering. This can also be done automatically using the
+You need to specify the ELB when reregistering an instance, but not when deregistering. This can also be done automatically using the `:load_balanced` setting (see the 'Configuration' section above).
 
 
 
 #### Connecting to an Instance via SSH
 
-Using the
+Using the `cap ec2:ssh` command, you can quickly connect to a specific instance, by checking the listing from `ec2:status` and using the instance number as a parameter, for example:
 
 ```ruby
 cap ec2:ssh 1
 ```
 
-will attempt to connect to instance number 1 (as shown in
+will attempt to connect to instance number 1 (as shown in `ec2:status`), using the public DNS address provided by AWS.
 
 
 
@@ -552,15 +572,21 @@ Will restrict the 'date' command so it is only run on instances that are tagged
 cap web db ec2:date
 ```
 
+You can use the 'Name' tag of an EC2 instance to limit the scope of the command to an individual instance:
+
+```ruby
+cap server-1 ec2:date
+```
+
 ##### Cap Invoke
 
-You can use the standard Capistrano
+You can use the standard Capistrano `invoke` task to run an arbitrary command on your instances, for example:
 
 ```ruby
 cap COMMAND='uptime' invoke
 ```
 
-Will run the 'uptime' command on all instances that match your configuration (projects and roles). As with the
+Will run the 'uptime' command on all instances that match your configuration (projects and roles). As with the `ec2:date` command, you can further limit this by using a role, like so:
 
 ```ruby
 cap web COMMAND='uptime' invoke
@@ -572,9 +598,15 @@ You can also chain many roles together to increase the scope of the command:
 cap web db COMMAND='uptime' invoke
 ```
 
+As with ec2:date, you can use the 'Name' tag of an EC2 instance to limit the scope of the command to an individual instance:
+
+```ruby
+cap server-1 COMMAND='uptime' invoke
+```
+
 ##### Cap Shell
 
-You can use the standard Capistrano
+You can use the standard Capistrano `shell` task to open an interactive terminal session with your instances, for example:
 
 ```ruby
 cap shell
@@ -591,6 +623,101 @@ cap web db shell
 ```
 
 
+### Multistage
+
+You can use the [Capistrano Multistage Extension](https://github.com/capistrano/capistrano/wiki/2.x-Multistage-Extension) to manage deployments to multiple environments.
+
+##### Configuration
+
+You can set the tag name that Capify-EC2 will use to filter your server list by using the `:aws_stages_tag`. It defaults to 'Stages'.
+
+##### Usage
+
+In our examples, imagine that you have three servers on EC2 named and tagged as follows:
+
+<table>
+  <tr>
+    <td>'Name' Tag</td>
+    <td>'Roles' Tag</td>
+    <td>'Options' Tag</td>
+    <td>'Stages' Tag</td>
+  </tr>
+  <tr>
+    <td>server-1</td>
+    <td>web</td>
+    <td>cron,resque</td>
+    <td>production</td>
+  </tr>
+  <tr>
+    <td>server-2</td>
+    <td>db</td>
+    <td></td>
+    <td>production</td>
+  </tr>
+  <tr>
+    <td>server-3</td>
+    <td>web,db</td>
+    <td></td>
+    <td>staging</td>
+  </tr>
+</table>
+
+And you have the following 2 stages setup using Capistrano Multistage.
+
+production.rb
+
+```ruby
+ec2_roles name: :web
+ec2_roles name: :db
+```
+
+staging.rb
+
+```ruby
+ec2_roles name: :web
+ec2_roles name: :db
+```
+
+This will generate the following for production
+
+```ruby
+namespace :production do
+  task :server-1 do
+    role :web, SERVER-1_EC2_PUBLIC_DNS_HERE, :cron=>true, :resque=>true
+  end
+
+  task :server-2 do
+    role :db, SERVER-2_EC2_PUBLIC_DNS_HERE
+  end
+
+  task :web do
+    role :web, SERVER-1_EC2_PUBLIC_DNS_HERE, :cron=>true, :resque=>true
+  end
+
+  task :db do
+    role :db, SERVER-2_EC2_PUBLIC_DNS_HERE
+  end
+end
+
+namespace :staging do
+  task :server-3 do
+    role :web, SERVER-3_EC2_PUBLIC_DNS_HERE
+    role :db, SERVER-3_EC2_PUBLIC_DNS_HERE
+  end
+
+  task :web do
+    role :web, SERVER-3_EC2_PUBLIC_DNS_HERE
+  end
+
+  task :db do
+    role :db, SERVER-3_EC2_PUBLIC_DNS_HERE
+  end
+end
+```
+
+With the above config you can deploy to production using `cap production deploy`.
+
+You will also need to add the environment when running other ec2 commands like `ec2:status`, for example: `cap production ec2:status`
 
 ### Development
 
metadata
CHANGED
@@ -1,13 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: capify-ec2
 version: !ruby/object:Gem::Version
-  hash:
-  prerelease:
+  hash: -2020557639
+  prerelease: 6
   segments:
   - 1
-  -
-  -
-
+  - 5
+  - 0
+  - pre
+  version: 1.5.0.pre
 platform: ruby
 authors:
 - Noah Cantor
@@ -18,7 +19,7 @@ autorequire:
 bindir: bin
 cert_chain: []
 
-date: 2013-
+date: 2013-07-19 00:00:00 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: fog
@@ -26,14 +27,14 @@ dependencies:
   requirement: &id001 !ruby/object:Gem::Requirement
     none: false
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
-        hash:
+        hash: 39
         segments:
         - 1
-        -
-        -
-        version: 1.
+        - 12
+        - 0
+        version: 1.12.0
   type: :runtime
   version_requirements: *id001
 - !ruby/object:Gem::Dependency
@@ -107,12 +108,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
 required_rubygems_version: !ruby/object:Gem::Requirement
   none: false
   requirements:
-  - - "
+  - - ">"
     - !ruby/object:Gem::Version
-      hash:
+      hash: 25
       segments:
-      -
-
+      - 1
+      - 3
+      - 1
+      version: 1.3.1
 requirements: []
 
 rubyforge_project: capify-ec2