capify-ec2 1.4.0.pre2 → 1.4.1.pre3

@@ -1,3 +1,14 @@
+## 1.4.1.pre3 (Mar 1, 2013)
+
+Features:
+
+- Added the ability to automatically deregister and reregister an instance from its associated Elastic Load Balancer when using the rolling deployment feature.
+
+Bugfixes:
+
+- Instance options are now properly retained when performing a rolling deployment.
+- Fixed a range of errors in the documentation.
+
 ## 1.4.0.pre2 (Feb 15, 2013)

 Features:
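
For orientation, the rolling-deploy ELB feature listed above is switched on per role via a ':load_balanced' variable; the readme changes further down show it in context, and the minimal shape is roughly:

```ruby
# Enables automatic ELB deregistration/reregistration for this role during a rolling deployment.
ec2_roles :name => "web",
  :variables => { :load_balanced => true }
```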
@@ -104,6 +104,10 @@ class CapifyEc2
     desired_instances.select {|instance| instance.name == name}.first
   end

+  def get_instance_by_dns(dns)
+    desired_instances.select {|instance| instance.dns_name == dns}.first
+  end
+
   def instance_health(load_balancer, instance)
     elb.describe_instance_health(load_balancer.id, instance.id).body['DescribeInstanceHealthResult']['InstanceStates'][0]['State']
   end
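
As a rough usage sketch of the new get_instance_by_dns lookup added above (not part of the diff; it assumes a CapifyEc2 instance built from the usual 'config/ec2.yml'):

```ruby
# Sketch: find an instance by its public DNS name, then use the existing helpers
# to locate its ELB and report its current health.
capify_ec2 = CapifyEc2.new("config/ec2.yml")
instance   = capify_ec2.get_instance_by_dns("ec2-54-0-0-1.compute-1.amazonaws.com")
if instance
  load_balancer = capify_ec2.get_load_balancer_by_instance(instance.id)
  puts capify_ec2.instance_health(load_balancer, instance) if load_balancer
end
```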
@@ -111,7 +115,7 @@ class CapifyEc2
   def elb
     Fog::AWS::ELB.new(:aws_access_key_id => @ec2_config[:aws_access_key_id], :aws_secret_access_key => @ec2_config[:aws_secret_access_key], :region => @ec2_config[:aws_params][:region])
   end
-
+
   def get_load_balancer_by_instance(instance_id)
     hash = elb.load_balancers.inject({}) do |collect, load_balancer|
       load_balancer.instances.each {|load_balancer_instance_id| collect[load_balancer_instance_id] = load_balancer}
@@ -119,7 +123,7 @@ class CapifyEc2
     end
     hash[instance_id]
   end
-
+
   def get_load_balancer_by_name(load_balancer_name)
     lbs = {}
     elb.load_balancers.each do |load_balancer|
@@ -166,6 +170,49 @@ class CapifyEc2
     end
   end

+  def deregister_instance_from_elb_by_dns(server_dns)
+    instance = get_instance_by_dns(server_dns)
+    load_balancer = get_load_balancer_by_instance(instance.id)
+
+    if load_balancer
+      puts "[Capify-EC2] Removing instance from ELB '#{load_balancer.id}'..."
+
+      result = elb.deregister_instances_from_load_balancer(instance.id, load_balancer.id)
+      raise "Unable to remove instance from ELB '#{load_balancer.id}'..." unless result.status == 200
+
+      return load_balancer
+    end
+    false
+  end
+
+  def reregister_instance_with_elb_by_dns(server_dns, load_balancer, timeout)
+    instance = get_instance_by_dns(server_dns)
+
+    sleep 10
+
+    puts "[Capify-EC2] Re-registering instance with ELB '#{load_balancer.id}'..."
+    result = elb.register_instances_with_load_balancer(instance.id, load_balancer.id)
+
+    raise "Unable to re-register instance with ELB '#{load_balancer.id}'..." unless result.status == 200
+
+    state = nil
+
+    begin
+      Timeout::timeout(timeout) do
+        begin
+          state = instance_health(load_balancer, instance)
+          raise "Instance not ready" unless state == 'InService'
+        rescue => e
+          puts "[Capify-EC2] Unexpected response: #{e}..."
+          sleep 1
+          retry
+        end
+      end
+    rescue Timeout::Error => e
+    end
+    state ? state == 'InService' : false
+  end
+
   def instance_health_by_url(dns, port, path, expected_response, options = {})
     protocol = options[:https] ? 'https://' : 'http://'
     uri = URI("#{protocol}#{dns}:#{port}#{path}")
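
The two new ELB methods above are intended to be paired around a single-instance deploy, roughly as the Capistrano task changes further down do (a sketch; 'capify_ec2' and 'server_dns' are stand-ins for the task's own variables):

```ruby
# Sketch of the intended call sequence around one instance's deploy.
load_balancer = capify_ec2.deregister_instance_from_elb_by_dns(server_dns)

# ... deploy to this single instance here ...

if load_balancer
  ok = capify_ec2.reregister_instance_with_elb_by_dns(server_dns, load_balancer, 60)
  raise "ELB registration timeout exceeded" unless ok
end
```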
@@ -209,4 +256,4 @@ class CapifyEC2RollingDeployError < Exception
     super(msg)
     @dns = dns
   end
-end
+end
@@ -70,26 +70,34 @@ Capistrano::Configuration.instance(:must_exist).load do
       role[1].servers.each do |s|
         all_servers[ s.host.to_s ] ||= []
         all_servers[ s.host.to_s ] << role[0]
-        all_roles[ role[0] ] = {:options => {:healthcheck => s.options[:healthcheck] ||= nil}} unless all_roles[ role[0] ]
+        all_roles[ role[0] ] = {:options => (s.options ||= nil)} unless all_roles[ role[0] ]
       end
     end

     successful_deploys = []
     failed_deploys = []

+    # Here outside of the scope of the rescue so we can refer to it if a general exception is raised.
+    load_balancer_to_reregister = nil
+
     begin
       all_servers.each do |server_dns,server_roles|

         roles.clear

-        server_roles.each do |a_role|
-          #TODO: Look at defining any options associated to the specific role, maybe through calling 'ec2_role'.
-          role a_role, server_dns
+        load_balancer_to_reregister = nil # Set to nil again here, to ensure it always starts off nil for every iteration.
+        is_load_balanced = false
+
+        server_roles.each do |a_role|
+          role a_role, server_dns, all_roles[a_role][:options]
+          is_load_balanced = true if all_roles[a_role][:options][:load_balanced]
         end

         puts "[Capify-EC2]"
         puts "[Capify-EC2] Beginning deployment to #{instance_dns_with_name_tag(server_dns)} with #{server_roles.count > 1 ? 'roles' : 'role'} '#{server_roles.join(', ')}'...".bold

+        load_balancer_to_reregister = capify_ec2.deregister_instance_from_elb_by_dns(server_dns) if is_load_balanced
+
         # Call the standard 'cap deploy' task with our redefined role containing a single server.
         top.deploy.default

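
To illustrate the "instance options are now properly retained" fix in the hunk above: previously only ':healthcheck' was copied into 'all_roles', whereas now the whole server options hash is kept. The resulting shape looks something like this (values assumed for illustration only):

```ruby
# Assumed example of what all_roles can now carry for each role.
all_roles = {
  "web" => { :options => { :healthcheck   => { :path => '/status', :port => 80, :result => 'OK' },
                           :load_balanced => true } },
  "db"  => { :options => {} }
}

all_roles["web"][:options][:load_balanced]   # => true, which sets is_load_balanced above
```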
@@ -115,6 +123,16 @@ Capistrano::Configuration.instance(:must_exist).load do
           end

         end
+
+        if load_balancer_to_reregister
+          reregistered = capify_ec2.reregister_instance_with_elb_by_dns(server_dns, load_balancer_to_reregister, 60)
+          if reregistered
+            puts "[Capify-EC2] Instance registration with ELB '#{load_balancer_to_reregister.id}' successful.".green.bold
+          else
+            puts "[Capify-EC2] Instance registration with ELB '#{load_balancer_to_reregister.id}' failed!".red.bold
+            raise CapifyEC2RollingDeployError.new("ELB registration timeout exceeded", server_dns)
+          end
+        end

         puts "[Capify-EC2] Deployment successful to #{instance_dns_with_name_tag(server_dns)}.".green.bold
         successful_deploys << server_dns
@@ -124,16 +142,15 @@ Capistrano::Configuration.instance(:must_exist).load do
       failed_deploys << e.dns
       puts "[Capify-EC2]"
       puts "[Capify-EC2] Deployment aborted due to error: #{e}!".red.bold
-
+      puts "[Capify-EC2] Note: Instance '#{instance_dns_with_name_tag(e.dns)}' was removed from the ELB '#{load_balancer_to_reregister.id}' and should be manually checked and reregistered.".red.bold if load_balancer_to_reregister
     rescue => e
       failed_deploys << roles.values.first.servers.first.host
       puts "[Capify-EC2]"
       puts "[Capify-EC2] Deployment aborted due to error: #{e}!".red.bold
-
+      puts "[Capify-EC2] Note: Instance '#{instance_dns_with_name_tag(roles.values.first.servers.first.host)}' was removed from the ELB '#{load_balancer_to_reregister.id}' and should be manually checked and reregistered.".red.bold if load_balancer_to_reregister
     else
       puts "[Capify-EC2]"
       puts "[Capify-EC2] Rolling deployment completed.".green.bold
-
     end

     puts "[Capify-EC2]"
@@ -1,6 +1,6 @@
 module Capify
   module Ec2
-    VERSION = "1.4.0.pre2"
+    VERSION = "1.4.1.pre3"
   end
 end

data/readme.md CHANGED
@@ -74,11 +74,32 @@ You will need to create instance tags using the AWS Management Console or API, t

 ### Usage

-In our examples, imagine that you have three servers on EC2 defined as follows:
+In our examples, imagine that you have three servers on EC2 named and tagged as follows:
+
+<table>
+<tr>
+<td>'Name' Tag</td>
+<td>'Roles' Tag</td>
+<td>'Options' Tag</td>
+</tr>
+<tr>
+<td>server-1</td>
+<td>web</td>
+<td>cron,resque</td>
+</tr>
+<tr>
+<td>server-2</td>
+<td>db</td>
+<td></td>
+</tr>
+<tr>
+<td>server-3</td>
+<td>web,db,app</td>
+<td></td>
+</tr>
+</table>
+

-server-1 Tags: Name => "server-1", Roles => "web", Options => "cron,resque"
-server-2 Tags: Name => "server-2", Roles => "db"
-server-3 Tags: Name => "server-3", Roles => "web,db,app"

 #### Single Roles

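
Given tags like those in the table above, the role definitions in 'deploy.rb' stay short; a sketch of the corresponding setup (the require line follows the gem's standard installation instructions):

```ruby
# deploy.rb (sketch): roles are matched against the EC2 'Roles' tags shown above.
require 'capify-ec2/capistrano'

ec2_roles :web   # server-1 and server-3
ec2_roles :db    # server-2 and server-3
ec2_roles :app   # server-3
```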
@@ -253,7 +274,7 @@ cap web app deploy

 You can set a role as the default so that it will be included when you run 'cap deploy' without specifying any roles, for example in your 'deploy.rb':
 ```ruby
-ec2_roles {:name=>"web", :options{:default=>true}
+ec2_roles :name=>"web", :options => {:default=>true}
 ```

 Then run:
@@ -265,8 +286,8 @@ cap deploy

 You can set multiple roles as defaults, so they are all included when you run 'cap deploy', like so:
 ```ruby
-ec2_roles {:name=>"web", :options{:default=>true}
-ec2_roles {:name=>"db", :options{:default=>true}
+ec2_roles :name=>"web", :options => {:default=>true}
+ec2_roles :name=>"db", :options => {:default=>true}
 ```


@@ -275,6 +296,8 @@ ec2_roles {:name=>"db", :options{:default=>true}

 This feature allows you to deploy your code to instances one at a time, rather than simultaneously. This becomes useful for more complex applications that may take longer to startup after a deployment. Capistrano will perform a full deploy (including any custom hooks) against a single instance, optionally perform a HTTP healthcheck against the instance, then proceed to the next instance if deployment was successful.

+After deployment a status report is displayed, indicating on which instances deployment succeeded, failed or did not begin. With some failures, further action may need to be taken manually; for example if an instance is removed from an ELB (see the 'Usage with Elastic Load Balancers' section below) and the deployment fails, the instance will not be reregistered with the ELB, for safety reasons.
+
 ##### Usage

 To use the rolling deployment feature without a healthcheck, simple run your deployments with the following command:
@@ -298,13 +321,13 @@ cap web db rolling_deploy
 When defining a role with the 'ec2_role' command, if you configure a healthcheck for that role as follows, it will automatically be used during the rolling deployment:

 ```ruby
-ec2_roles { :name => "web",
-  :variables => {
-    :healthcheck => {
-      :path => '/status',
-      :port => 80,
-      :result => 'OK'
-    }
+ec2_roles :name => "web",
+  :variables => {
+    :healthcheck => {
+      :path => '/status',
+      :port => 80,
+      :result => 'OK'
+    }
   }
 ```

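
For the configuration above, the healthcheck amounts to an HTTP GET against each freshly deployed instance and a comparison of the body with ':result'; a simplified sketch (not the gem's exact implementation, and 'instance_public_dns' is a placeholder):

```ruby
# Simplified sketch of the per-instance healthcheck performed during a rolling deployment.
require 'net/http'
require 'uri'

uri      = URI("http://#{instance_public_dns}:80/status")
response = Net::HTTP.get_response(uri) rescue nil
healthy  = response && response.body.include?('OK')   # retried every second until the timeout
```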
@@ -316,36 +339,55 @@ http://EC2_INSTANCE_PUBLIC_DNS_HERE:80/status

 And the contents of the page at that URL must match 'OK' for the healthcheck to pass. If unsuccessful, the healthcheck is repeated every second, until a timeout of 60 seconds is reached, at which point the rolling deployment is aborted, and a progress summary displayed.

-The default timeout is 60 seconds, which can be overridden by setting ':timeout' to a custom value in seconds. The protocol used defaults to 'http://', however you can switch to 'https://' by setting ':https' equal to 'true'. For example:
+The default timeout of 60 seconds can be overridden by setting ':timeout' to a custom value in seconds. The protocol used defaults to 'http://', however you can switch to 'https://' by setting ':https' equal to 'true'. For example:

 ```ruby
-ec2_roles { :name => "web",
-  :variables => {
-    :healthcheck => {
-      :path => '/status',
-      :port => 80,
-      :result => 'OK',
-      :https => true,
-      :timeout => 10
-    }
+ec2_roles :name => "web",
+  :variables => {
+    :healthcheck => {
+      :path => '/status',
+      :port => 80,
+      :result => 'OK',
+      :https => true,
+      :timeout => 10
+    }
   }
 ```

 Sets a 10 second timeout, and performs the health check over HTTPS.

-#### Managing Load Balancers
+##### Usage with Elastic Load Balancers

-You can use the following commands to deregister and reregister instances in an Elastic Load Balancer.
+You can have Capify-EC2 automatically deregister and reregister an instance from whichever ELB it is associated with, before and after the deployment, by setting ':load_balanced' to 'true' in the role definition, for example:

 ```ruby
-cap SERVER_NAME_HERE ec2:deregister_instance
+ec2_roles :name => "web",
+  :variables => {
+    :load_balanced => true
+  }
 ```

+In this example, when an instance with the role 'web' is deployed to, Capify-EC2 will attempt to find which ELB the instance is currently registered with and deregister it. The deploy will then proceed as usual. When it is complete, the instance will be reregistered with the same ELB, and the status verified as 'InService' before the deployment is deemed successful. Note: you can only use this feature with instances that are registered with a single ELB; if you have instances registered with multiple ELBs, you are advised not to use this feature.
+
+You can also combine this feature with a healthcheck like so:
+
 ```ruby
-cap SERVER_NAME_HERE ec2:register_instance -s loadbalancer=ELB_NAME_HERE
+ec2_roles :name => "web",
+  :variables => {
+    :healthcheck => {
+      :path => '/status',
+      :port => 80,
+      :result => 'OK'
+    },
+    :load_balanced => true
+  }
 ```

-You need to specify the ELB when reregistering an instance, but not when deregistering. This can also be done automatically using the ':load_balanced' setting (see the 'Configuration' section above).
+In this example, the instance will be deregistered from the ELB it is associated with and then deployed to. A healthcheck will then be performed, and providing this passes, the instance will be reregistered with the ELB and verified.
+
+If an instance has been tagged with multiple roles, this behaviour will apply if ':load_balanced' is set to 'true' in at least one of those roles.
+
+If an instance is not associated with any ELBs, then the behaviour will be skipped silently, even if ':load_balanced' is set to 'true'.



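
A small sketch of the multiple-roles case described in the readme changes above (illustrative only):

```ruby
# An instance tagged with both 'web' and 'app' is still cycled out of its ELB,
# because at least one of its roles sets :load_balanced => true.
ec2_roles :name => "web", :variables => { :load_balanced => true }
ec2_roles :name => "app"
```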
@@ -359,6 +401,22 @@ cap ec2:status



+#### Managing Load Balancers
+
+You can use the following commands to deregister and reregister instances in an Elastic Load Balancer.
+
+```ruby
+cap SERVER_NAME_HERE ec2:deregister_instance
+```
+
+```ruby
+cap SERVER_NAME_HERE ec2:register_instance -s loadbalancer=ELB_NAME_HERE
+```
+
+You need to specify the ELB when reregistering an instance, but not when deregistering. This can also be done automatically using the ':load_balanced' setting (see the 'Configuration' section above).
+
+
+
 #### Connecting to an Instance via SSH

 Using the 'cap ec2:ssh' command, you can quickly connect to a specific instance, by checking the listing from 'ec2:status' and using the instance number as a parameter, for example:
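
Returning to the 'Managing Load Balancers' commands added in the hunk above: the fog ELB operations involved (as used by the library methods added earlier in this diff) look roughly like the following sketch, with placeholder credentials, instance ID and ELB name:

```ruby
# Sketch of the underlying fog calls; all values here are placeholders.
require 'fog'

elb = Fog::AWS::ELB.new(:aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET', :region => 'eu-west-1')
elb.deregister_instances_from_load_balancer('i-12345678', 'ELB_NAME_HERE')
elb.register_instances_with_load_balancer('i-12345678', 'ELB_NAME_HERE')
```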
@@ -448,4 +506,4 @@ Report Issues/Feature requests on [GitHub Issues](http://github.com/forward/capi

 ### Copyright

-Copyright (c) 2011, 2012, 2013 Forward. See [LICENSE](https://github.com/forward/capify-ec2/blob/master/LICENSE) for details.
+Copyright (c) 2011, 2012, 2013 Forward. See [LICENSE](https://github.com/forward/capify-ec2/blob/master/LICENSE) for details.
metadata CHANGED
@@ -1,15 +1,15 @@
 --- !ruby/object:Gem::Specification
 name: capify-ec2
 version: !ruby/object:Gem::Version
-  hash: -1103184854
+  hash: 382858687
   prerelease: 6
   segments:
   - 1
   - 4
-  - 0
+  - 1
   - pre
-  - 2
-  version: 1.4.0.pre2
+  - 3
+  version: 1.4.1.pre3
 platform: ruby
 authors:
 - Noah Cantor
@@ -18,7 +18,7 @@ autorequire:
 bindir: bin
 cert_chain: []

-date: 2013-02-15 00:00:00 Z
+date: 2013-03-01 00:00:00 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: fog