ec2launcher 1.4.3 → 1.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGELOG.md +4 -0
- data/lib/ec2launcher.rb +138 -57
- data/lib/ec2launcher/backoff_runner.rb +19 -0
- data/lib/ec2launcher/block_device_builder.rb +3 -1
- data/lib/ec2launcher/dsl/application.rb +13 -0
- data/lib/ec2launcher/dsl/block_device.rb +43 -12
- data/lib/ec2launcher/terminator.rb +68 -9
- data/lib/ec2launcher/version.rb +1 -1
- data/startup-scripts/setup.rb +2 -0
- data/startup-scripts/setup_instance.rb +395 -309
- metadata +2 -2
data/CHANGELOG.md
CHANGED
data/lib/ec2launcher.rb
CHANGED
@@ -175,7 +175,6 @@ module EC2Launcher
       missing_security_groups = []
       security_groups.each do |sg_name|
         missing_security_groups << sg_name unless sg_map.has_key?(sg_name)
-        puts sg_name
         security_group_ids << sg_map[sg_name].security_group_id
       end
 
@@ -280,6 +279,7 @@ module EC2Launcher
       gems = []
       gems += @environment.gems unless @environment.gems.nil?
       gems += @application.gems unless @application.gems.nil?
+      gems << "ec2launcher"
 
       ##############################
       # Packages - preinstall
@@ -340,9 +340,11 @@ module EC2Launcher
         if block_device_mappings[key] =~ /^ephemeral/
           @log.info " Block device : #{key}, #{block_device_mappings[key]}"
         else
-
-
-
+          block_device_text = " Block device : #{key}, #{block_device_mappings[key][:volume_size]}GB, "
+          block_device_text += "#{block_device_mappings[key][:snapshot_id]}" if block_device_mappings[key][:snapshot_id]
+          block_device_text += ", (#{block_device_mappings[key][:delete_on_termination] ? 'auto-delete' : 'no delete'}), "
+          block_device_text += "(#{block_device_mappings[key][:iops].nil? ? 'standard' : block_device_mappings[key][:iops].to_s} IOPS)"
+          @log.info block_device_text
         end
       end
     end
@@ -352,10 +354,35 @@ module EC2Launcher
         exit 3
       end
 
+      # Launch options
+      launch_options = {
+        :ami => ami.ami_id,
+        :availability_zone => availability_zone,
+        :aws_keyfile => aws_keyfile,
+        :block_device_mappings => block_device_mappings,
+        :chef_validation_pem_url => chef_validation_pem_url,
+        :email_notifications => email_notifications,
+        :environment => @environment.name,
+        :gems => gems,
+        :iam_profile => iam_profile,
+        :instance_type => instance_type,
+        :key => key_name,
+        :packages => packages,
+        :provisioned_iops => @application.has_provisioned_iops?(),
+        :roles => roles,
+        :security_group_ids => security_group_ids,
+        :subnet => subnet
+      }
+
       # Quit if we're only displaying the defaults
       if @options.show_defaults || @options.show_user_data
         if @options.show_user_data
-          user_data = build_launch_command(
+          user_data = build_launch_command(
+            launch_options.merge({
+              :fqdn => fqdn_names[0],
+              :short_name => short_hostnames[0]
+            })
+          )
           @log.info ""
           @log.info "---user-data---"
           @log.info user_data
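
For context on the hunk above: the refactor collects what used to be a long positional argument list into one launch_options hash that is built once per run and then specialized per host further down. A minimal standalone sketch of that pattern (all values below are hypothetical placeholders, not taken from the gem):

    # Shared settings assembled once per run ...
    launch_options = {
      :ami           => "ami-12345678",
      :instance_type => "m1.large",
      :gems          => ["ec2launcher"]
    }

    # ... then merged with per-host names before building user-data and
    # launching, mirroring the merge/merge! calls in this diff.
    %w[web1.example.com web2.example.com].each do |fqdn|
      per_host = launch_options.merge(:fqdn => fqdn, :short_name => fqdn.split(".").first)
      puts "would launch #{per_host[:fqdn]} as #{per_host[:instance_type]}"
    end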
@@ -371,9 +398,14 @@ module EC2Launcher
       instances = []
       fqdn_names.each_index do |i|
         block_device_tags = block_device_builder.generate_device_tags(fqdn_names[i], short_hostnames[i], @environment.name, @application.block_devices)
-
-
-
+        launch_options.merge!({
+          :fqdn => fqdn_names[i],
+          :short_name => short_hostnames[i],
+          :block_device_tags => block_device_tags,
+        })
+        user_data = build_launch_command(launch_options)
+
+        instance = launch_instance(launch_options, user_data)
         instances << instance
 
         public_dns_name = get_instance_dns(instance, true)
@@ -513,37 +545,58 @@ module EC2Launcher
 
     # Launches an EC2 instance.
     #
-    #
-    #
-    #
-    #
-    #
-    #
-    #
-    #
-    #
-    #
-    #
+    # launch_options = {
+    #   :ami
+    #   :availability_zone
+    #   :aws_keyfile
+    #   :block_device_mappings
+    #   :block_device_tags
+    #   :chef_validation_pem_url
+    #   :email_notifications
+    #   :fqdn
+    #   :gems
+    #   :iam_profile
+    #   :instance_type
+    #   :key
+    #   :packages
+    #   :roles
+    #   :security_group_ids
+    #   :short_name
+    #   :subnet
+    # }
     #
     # @return [AWS::EC2::Instance] newly created EC2 instance or nil if the launch failed.
-    def launch_instance(
-      @log.warn "Launching instance... #{
+    def launch_instance(launch_options, user_data)
+      @log.warn "Launching instance... #{launch_options[:fqdn]}"
       new_instance = nil
       run_with_backoff(30, 1, "launching instance") do
         launch_mapping = {
-          :image_id =>
-          :availability_zone => availability_zone,
-          :key_name =>
-          :security_group_ids => security_group_ids,
+          :image_id => launch_options[:ami],
+          :availability_zone => launch_options[:availability_zone],
+          :key_name => launch_options[:key],
+          :security_group_ids => launch_options[:security_group_ids],
           :user_data => user_data,
-          :instance_type => instance_type
+          :instance_type => launch_options[:instance_type]
         }
-        unless block_device_mappings.nil? || block_device_mappings.keys.empty?
-
+        unless launch_options[:block_device_mappings].nil? || launch_options[:block_device_mappings].keys.empty?
+          if launch_options[:provisioned_iops]
+            # Only include ephemeral devices if we're using provisioned IOPS for the EBS volumes
+            launch_mapping[:block_device_mappings] = {}
+            launch_options[:block_device_mappings].keys.sort.each do |block_device_name|
+              if block_device_name =~ /^ephemeral/
+                launch_mapping[:block_device_mappings][block_device_name] = launch_options[:block_device_mappings][block_device_name]
+              end
+            end
+          else
+            launch_mapping[:block_device_mappings] = launch_options[:block_device_mappings]
+          end
+
+          # Remove the block_device_mappings entry if it's empty. Otherwise the AWS API will throw an error.
+          launch_mapping.delete(:block_device_mappings) if launch_mapping[:block_device_mappings].keys.empty?
         end
 
-        launch_mapping[:iam_instance_profile] = iam_profile if iam_profile
-        launch_mapping[:subnet] = vpc_subnet if vpc_subnet
+        launch_mapping[:iam_instance_profile] = launch_options[:iam_profile] if launch_options[:iam_profile]
+        launch_mapping[:subnet] = launch_options[:vpc_subnet] if launch_options[:vpc_subnet]
 
         new_instance = @ec2.instances.create(launch_mapping)
       end
@@ -568,21 +621,24 @@ module EC2Launcher
       ##############################
       # Tag instance
       @log.info "Tagging instance..."
-      run_with_backoff(30, 1, "tag #{new_instance.id}, tag: name, value: #{
-      run_with_backoff(30, 1, "tag #{new_instance.id}, tag: short_name, value: #{
+      run_with_backoff(30, 1, "tag #{new_instance.id}, tag: name, value: #{launch_options[:fqdn]}") { new_instance.add_tag("Name", :value => launch_options[:fqdn]) }
+      run_with_backoff(30, 1, "tag #{new_instance.id}, tag: short_name, value: #{launch_options[:short_name]}") { new_instance.add_tag("short_name", :value => launch_options[:short_name]) }
       run_with_backoff(30, 1, "tag #{new_instance.id}, tag: environment, value: #{@environment.name}") { new_instance.add_tag("environment", :value => @environment.name) }
       run_with_backoff(30, 1, "tag #{new_instance.id}, tag: application, value: #{@application.name}") { new_instance.add_tag("application", :value => @application.name) }
+      if @options.clone_host
+        run_with_backoff(30, 1, "tag #{new_instance.id}, tag: cloned_from, value: #{@options.clone_host}") { new_instance.add_tag("cloned_from", :value => @options.clone_host) }
+      end
 
       ##############################
       # Tag volumes
-      unless block_device_tags.empty?
+      unless launch_options[:provisioned_iops] || launch_options[:block_device_tags].empty?
         @log.info "Tagging volumes..."
         AWS.start_memoizing
-        block_device_tags.keys.each do |device|
+        launch_options[:block_device_tags].keys.each do |device|
           v = new_instance.block_device_mappings[device].volume
-          block_device_tags[device].keys.each do |tag_name|
-            run_with_backoff(30, 1, "tag #{v.id}, tag: #{tag_name}, value: #{block_device_tags[device][tag_name]}") do
-              v.add_tag(tag_name, :value => block_device_tags[device][tag_name])
+          launch_options[:block_device_tags][device].keys.each do |tag_name|
+            run_with_backoff(30, 1, "tag #{v.id}, tag: #{tag_name}, value: #{launch_options[:block_device_tags][device][tag_name]}") do
+              v.add_tag(tag_name, :value => launch_options[:block_device_tags][device][tag_name])
             end
           end
         end
@@ -592,8 +648,8 @@ module EC2Launcher
       ##############################
       # Add to Route53
       if @route53
-        @log.info "Adding A record to Route53: #{
-        @route53.create_record(
+        @log.info "Adding A record to Route53: #{launch_options[:fqdn]} => #{new_instance.private_ip_address}"
+        @route53.create_record(launch_options[:fqdn], new_instance.private_ip_address, 'A')
       end
 
       new_instance
@@ -628,26 +684,42 @@ module EC2Launcher
 
     # Builds the launch scripts that should run on the new instance.
     #
-    #
-    #
-    #
-    #
-    #
-    #
-    #
+    # launch_options = {
+    #   :ami
+    #   :availability_zone
+    #   :aws_keyfile
+    #   :block_device_mappings
+    #   :block_device_tags
+    #   :chef_validation_pem_url
+    #   :email_notifications
+    #   :fqdn
+    #   :gems
+    #   :iam_profile
+    #   :instance_type
+    #   :key
+    #   :packages
+    #   :roles
+    #   :security_group_ids
+    #   :short_name
+    #   :subnet
+    # }
     #
     # @return [String] Launch commands to pass into new instance as userdata
-    def build_launch_command(
+    def build_launch_command(launch_options)
       # Build JSON for setup scripts
+
+      # Require ec2launcher gem if cloning and using provisioned IOPS
       setup_json = {
-        'hostname' => fqdn,
-        'short_hostname' =>
-        '
+        'hostname' => launch_options[:fqdn],
+        'short_hostname' => launch_options[:short_name],
+        'block_device_mappings' => launch_options[:block_device_mappings],
+        'roles' => launch_options[:roles],
         'chef_server_url' => @environment.chef_server_url,
-        'chef_validation_pem_url' => chef_validation_pem_url,
-        'aws_keyfile' => aws_keyfile,
-        'gems' => gems,
-        'packages' => packages
+        'chef_validation_pem_url' => launch_options[:chef_validation_pem_url],
+        'aws_keyfile' => launch_options[:aws_keyfile],
+        'gems' => launch_options[:gems],
+        'packages' => launch_options[:packages],
+        'provisioned_iops' => false
       }
       setup_json["gem_path"] = @instance_paths.gem_path
       setup_json["ruby_path"] = @instance_paths.ruby_path
@@ -656,9 +728,16 @@ module EC2Launcher
 
       unless @application.block_devices.nil? || @application.block_devices.empty?
         setup_json['block_devices'] = @application.block_devices
+
+        @application.block_devices.each do |bd|
+          if bd.provisioned_iops?
+            setup_json['provisioned_iops'] = true
+            break
+          end
+        end
       end
-      unless email_notifications.nil?
-        setup_json['email_notifications'] = email_notifications
+      unless launch_options[:email_notifications].nil?
+        setup_json['email_notifications'] = launch_options[:email_notifications]
       end
 
       ##############################
@@ -711,7 +790,9 @@ EOF
       user_data += "\nchmod +x /tmp/setup.rb"
       # user_data += "\nrm -f /tmp/setup.rb.gz.base64"
 
-      user_data += "\
+      user_data += "\ngem install ec2launcher --no-ri --no-rdoc"
+
+      user_data += "\n#{setup_json['ruby_path']} /tmp/setup.rb -e #{@environment.name} -a #{@application.name} -h #{launch_options[:fqdn]} /tmp/setup.json"
       user_data += " -c #{@options.clone_host}" unless @options.clone_host.nil?
       user_data += " 2>&1 > /var/log/cloud-startup.log"
     end
data/lib/ec2launcher/backoff_runner.rb
CHANGED
@@ -35,5 +35,24 @@ module EC2Launcher
       true
     end
 
+    # Runs a block that returns true or false. If the block returns
+    # false, retries the request after sleeping. Repeated failures
+    # trigger an exponential backoff in sleep time.
+    #
+    # @return [Boolean] True if the request suceeded, False otherwise.
+    #
+    def test_with_backoff(max_time, sleep_time, message, &block)
+      if sleep_time < max_time
+        result = block.call
+        unless result
+          puts "Retrying #{message} in #{sleep_time} seconds"
+          sleep sleep_time
+          result = test_with_backoff(max_time, sleep_time * 2, message, &block)
+        end
+        result
+      else
+        false
+      end
+    end
   end
 end
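
The new test_with_backoff helper complements run_with_backoff: it repeatedly evaluates a boolean block, doubling the sleep between attempts, and gives up (returning false) once the next sleep would reach max_time. A hedged usage sketch, assuming the require path follows the gem's data/lib layout and that the caller mixes in the module the way the terminator and setup script do; FakeVolume stands in for an AWS::EC2::Volume-like object:

    require 'ec2launcher/backoff_runner'   # path assumed from the file layout

    class DetachWaiter
      include EC2Launcher::BackoffRunner

      # Polls with sleeps of 1, 2, 4, 8 ... seconds, up to the 120-second cap.
      def wait_for_detach(volume)
        test_with_backoff(120, 1, "waiting for #{volume.id} to detach") do
          volume.status == :available
        end
      end
    end

    FakeVolume = Struct.new(:id, :status)
    DetachWaiter.new.wait_for_detach(FakeVolume.new("vol-123", :available))  # => true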
data/lib/ec2launcher/block_device_builder.rb
CHANGED
@@ -118,8 +118,10 @@ module EC2Launcher
 
         block_device_mappings["/dev/#{device_name}"] = {
           :volume_size => volume_size,
-          :delete_on_termination =>
+          :delete_on_termination => block_device.iops.nil?
         }
+
+        block_device_mappings["/dev/#{device_name}"][:iops] = block_device.iops if block_device.iops
       end
     end
   end
data/lib/ec2launcher/dsl/application.rb
CHANGED
@@ -141,6 +141,19 @@ module EC2Launcher
       end
     end
 
+    def has_provisioned_iops?()
+      return false unless @block_devices
+
+      provisioned_iops = false
+      @block_devices.each do |bd|
+        if bd.provisioned_iops?
+          provisioned_iops = true
+          break
+        end
+      end
+      provisioned_iops
+    end
+
     # IAM profile role name to use for new instances.
     #
     # Expects one param in the form of either:
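
has_provisioned_iops? simply reports whether any configured block device carries an IOPS setting; the per-device check lives in the DSL class changed next. A small illustrative sketch (assumes the ec2launcher gem is on the load path; the device values are made up):

    require 'ec2launcher/dsl/block_device'

    standard = EC2Launcher::DSL::BlockDevice.new("name" => "data", "size" => 50)
    piops    = EC2Launcher::DSL::BlockDevice.new("name" => "db", "size" => 100, "iops" => 1000)

    standard.provisioned_iops?  # => false
    piops.provisioned_iops?     # => true
    # An application whose block_devices include `piops` therefore answers
    # has_provisioned_iops? with true.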
data/lib/ec2launcher/dsl/block_device.rb
CHANGED
@@ -2,6 +2,7 @@
 # Copyright (c) 2012 Sean Laurent
 #
 require 'ec2launcher/dsl/helper'
+require 'json'
 
 module EC2Launcher
   module DSL
@@ -16,26 +17,56 @@ module EC2Launcher
       dsl_accessor :owner
       dsl_accessor :raid_level
       dsl_accessor :size
+      dsl_accessor :iops
 
-      def initialize()
-
-
-
+      def initialize(option_hash = nil)
+        if option_hash
+          @name = option_hash["name"]
+          @count = option_hash["count"]
+          @size = option_hash["size"]
+          @iops = option_hash["iops"]
+          @raid_level = option_hash["raid_level"]
+          @mount = option_hash["mount_point"]
+          @owner = option_hash["owner"]
+          @group = option_hash["group"]
+        end
+
+        # Default values
+        @count ||= 1
+        @group ||= "root"
+        @user ||= "root"
       end
 
       def is_raid?()
         @raid_level.nil?
       end
 
-      def
+      def provisioned_iops?()
+        ! @iops.nil? || @iops == 0
+      end
+
+      def as_json(*)
         {
-
-          "
-
-
-
-
-
+          JSON.create_id => self.class.name,
+          "data" => {
+            "name" => @name,
+            "count" => @count,
+            "size" => @size,
+            "iops" => @iops,
+            "raid_level" => @raid_level,
+            "mount_point" => @mount,
+            "owner" => @owner,
+            "group" => @group
+          }
+        }
+      end
+
+      def to_json(*a)
+        as_json.to_json(*a)
+      end
+
+      def self.json_create(o)
+        new(o['data'])
       end
     end
   end
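
The as_json / to_json / json_create trio added above lets a BlockDevice round-trip through the JSON document handed to new instances. A hedged sketch of that round trip using Ruby's stdlib json gem (class path taken from the gem; option values are placeholders):

    require 'json'
    require 'ec2launcher/dsl/block_device'

    original = EC2Launcher::DSL::BlockDevice.new(
      "name" => "db", "count" => 2, "size" => 100, "iops" => 1000,
      "raid_level" => 0, "mount_point" => "/db", "owner" => "mysql", "group" => "mysql"
    )

    payload = original.to_json
    # payload carries JSON.create_id ("json_class" by default) plus the "data" hash.

    # create_additions makes JSON.parse call BlockDevice.json_create on tagged hashes.
    restored = JSON.parse(payload, :create_additions => true)
    restored.class                          # => EC2Launcher::DSL::BlockDevice
    restored.instance_variable_get(:@iops)  # => 1000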
data/lib/ec2launcher/terminator.rb
CHANGED
@@ -78,9 +78,19 @@ module EC2Launcher
       aws_route53 = AWS::Route53.new if @environment.route53_zone_id
       route53 = EC2Launcher::Route53.new(aws_route53, @environment.route53_zone_id, @log)
 
-
+      ##############################
+      # EBS Volumes
+      ##############################
+      # Find EBS volumes
+      attachments = nil
       AWS.memoize do
-
+        attachments = instance.block_device_mappings.values
+
+        # Remove snapshots
+        remove_snapshots(ec2, attachments) if snapshot_removal
+
+        # Remove volumes, if necessary
+        remove_volumes(ec2, attachments)
       end
 
       private_ip_address = instance.private_ip_address
@@ -104,20 +114,69 @@ module EC2Launcher
       end
     end
 
-    def remove_snapshots(ec2,
-      # Find EBS volumes for instance
-      volumes = instance.block_device_mappings.values
-
+    def remove_snapshots(ec2, attachments)
       # Iterate over over volumes to find snapshots
       @log.info("Searching for snapshots...")
       snapshots = []
-
-        volume_snaps = ec2.snapshots.filter("volume-id",
+      attachments.each do |attachment|
+        volume_snaps = ec2.snapshots.filter("volume-id", attachment.volume.id)
         volume_snaps.each {|volume_snapshot| snapshots << volume_snapshot }
       end
 
       @log.info("Deleting #{snapshots.size} snapshots...")
-      snapshots.each
+      snapshots.each do |snap|
+        run_with_backoff(30, 1, "Deleting snapshot #{snap.id}") do
+          snap.delete
+        end
+      end
+    end
+
+    def remove_volume(ec2, instance, device, volume)
+      @log.info(" Detaching #{volume.id}...")
+      run_with_backoff(30, 1, "detaching #{volume.id}") do
+        volume.detach_from(instance, device)
+      end
+
+      # Wait for volume to fully detach
+      detached = test_with_backoff(120, 1, "waiting for #{volume.id} to detach") do
+        volume.status == :available
+      end
+
+      # Volume failed to detach - do a force detatch instead
+      unless detached
+        @log.info(" Failed to detach #{volume.id}")
+        run_with_backoff(60, 1, "force detaching #{volume.id}") do
+          unless volume.status == :available
+            volume.detach_from(instance, device, {:force => true})
+          end
+        end
+        # Wait for volume to fully detach
+        detached = test_with_backoff(120, 1, "waiting for #{volume.id} to force detach") do
+          volume.status == :available
+        end
+      end
+
+      @log.info(" Deleting volume #{volume.id}")
+      run_with_backoff(30, 1, "delete volume #{volume.id}") do
+        volume.delete
+      end
+    end
+
+    def remove_volumes(ec2, attachments)
+      @log.info("Cleaning up volumes...")
+
+      AWS.memoize do
+        removal_threads = []
+        attachments.each do |attachment|
+          if attachment.exists? && ! attachment.delete_on_termination
+            removal_threads << Thread.new {
+              remove_volume(ec2, attachment.instance, attachment.device, attachment.volume)
+            }
+          end
+        end
+
+        removal_threads.each {|t| t.join }
+      end
     end
   end
 end
data/lib/ec2launcher/version.rb
CHANGED
data/startup-scripts/setup.rb
CHANGED
@@ -10,6 +10,8 @@ require 'json'
 
 require 'aws-sdk'
 
+require 'ec2launcher'
+
 AWS_KEYS = "/etc/aws/startup_runner_keys"
 
 class InitOptions
data/startup-scripts/setup_instance.rb
CHANGED
@@ -67,351 +69,435 @@ class InitOptions
   end
 end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  puts f.gets
+class InstanceSetup
+  include EC2Launcher::AWSInitializer
+  include EC2Launcher::BackoffRunner
+
+  def initialize(args)
+    option_parser = InitOptions.new
+    @options = option_parser.parse(args)
+
+    @setup_json_filename = args[0]
+
+    # Load the AWS access keys
+    properties = {}
+    File.open(AWS_KEYS, 'r') do |file|
+      file.read.each_line do |line|
+        line.strip!
+        if (line[0] != ?# and line[0] != ?=)
+          i = line.index('=')
+          if (i)
+            properties[line[0..i - 1].strip] = line[i + 1..-1].strip
+          else
+            properties[line] = ''
+          end
+        end
+      end
     end
+    @AWS_ACCESS_KEY = properties["AWS_ACCESS_KEY"].gsub('"', '')
+    @AWS_SECRET_ACCESS_KEY = properties["AWS_SECRET_ACCESS_KEY"].gsub('"', '')
+
+    ##############################
+    # Find current instance data
+    @EC2_INSTANCE_TYPE = `wget -T 5 -q -O - http://169.254.169.254/latest/meta-data/instance-type`
+    @AZ = `wget -T 5 -q -O - http://169.254.169.254/latest/meta-data/placement/availability-zone`
+    @INSTANCE_ID = `wget -T 5 -q -O - http://169.254.169.254/latest/meta-data/instance-id`
   end
-  $?
-end
 
-
-
+  def setup()
+    initialize_aws(@AWS_ACCESS_KEY, @AWS_SECRET_ACCESS_KEY)
+
+    # Read the setup JSON file
+    instance_data = JSON.parse(File.read(@setup_json_filename))
+
+    ##############################
+    # EBS VOLUMES
+    ##############################
+    # Create and setup EBS volumes =
+    setup_ebs_volumes(instance_data) unless instance_data["block_devices"].nil?
+
+    ##############################
+    # EPHEMERAL VOLUMES
+    ##############################
+    system_arch = `uname -p`.strip
+    default_fs_type = system_arch == "x86_64" ? "xfs" : "ext4"
+
+    # Process ephemeral devices first
+    ephemeral_drive_count = case EC2_INSTANCE_TYPE
+      when "m1.small" then 1
+      when "m1.medium" then 1
+      when "m2.xlarge" then 1
+      when "m2.2xlarge" then 1
+      when "c1.medium" then 1
+      when "m1.large" then 2
+      when "m2.4xlarge" then 2
+      when "cc1.4xlarge" then 2
+      when "cg1.4xlarge" then 2
+      when "m1.xlarge" then 4
+      when "c1.xlarge" then 4
+      when "cc2.8xlarge" then 4
+      else 0
+    end
 
-
+    # Partition the ephemeral drives
+    partition_list = []
+    build_block_devices(ephemeral_drive_count, "xvdf") do |device_name, index|
+      partition_list << "/dev/#{device_name}"
+    end
+    partition_devices(partition_list)
 
-  #
-
-
-
-
-
-
-        if (i)
-          properties[line[0..i - 1].strip] = line[i + 1..-1].strip
-        else
-          properties[line] = ''
+    # Format and mount the ephemeral drives
+    build_block_devices(ephemeral_drive_count, "xvdf") do |device_name, index|
+      format_filesystem(system_arch, "/dev/#{device_name}1")
+
+      mount_point = case index
+        when 0 then "/mnt"
+        else "/mnt/extra#{index - 1}"
       end
+      mount_device("/dev/#{device_name}1", mount_point, "root", "root", default_fs_type)
     end
-      end
-    end
-    AWS_ACCESS_KEY = properties["AWS_ACCESS_KEY"].gsub('"', '')
-    AWS_SECRET_ACCESS_KEY = properties["AWS_SECRET_ACCESS_KEY"].gsub('"', '')
-
-    ##############################
-    # Find current instance data
-    EC2_INSTANCE_TYPE = `wget -T 5 -q -O - http://169.254.169.254/latest/meta-data/instance-type`
-
-    # Read the setup JSON file
-    instance_data = JSON.parse(File.read(setup_json_filename))
-
-    ##############################
-    # Block devices
-    ##############################
-
-    # Creates filesystem on a device
-    # XFS on 64-bit
-    # ext4 on 32-bit
-    def format_filesystem(system_arch, device)
-      fs_type = system_arch == "x86_64" ? "XFS" : "ext4"
-      puts "Formatting #{fs_type} filesystem on #{device} ..."
-
-      command = case system_arch
-        when "x86_64" then "/sbin/mkfs.xfs -f #{device}"
-        else "/sbin/mkfs.ext4 -F #{device}"
-      end
-      IO.popen(command) do |f|
-        while ! f.eof
-          puts f.gets
-        end
-      end
-    end
 
-
-    #
-
-      partitions = device_list.collect {|device| "#{device}1" }
+    ##############################
+    # CHEF SETUP
+    ##############################
 
-
-
-
-      puts `#{command}`
+    # Path to executables
+    chef_path = instance_data["chef_path"]
+    knife_path = instance_data["knife_path"]
 
-
-
+    ##############################
+    # Create knife configuration
+    knife_config = <<EOF
+log_level :info
+log_location STDOUT
+node_name '#{options.hostname}'
+client_key '/etc/chef/client.pem'
+validation_client_name 'chef-validator'
+validation_key '/etc/chef/validation.pem'
+chef_server_url '#{instance_data["chef_server_url"]}'
+cache_type 'BasicFile'
+cache_options( :path => '/etc/chef/checksums' )
+EOF
+    home_folder = `echo $HOME`.strip
+    `mkdir -p #{home_folder}/.chef && chown 700 #{home_folder}/.chef`
+    File.open("#{home_folder}/.chef/knife.rb", "w") {|f| f.puts knife_config }
+    `chmod 600 #{home_folder}/.chef/knife.rb`
+
+    ##############################
+    # Add roles
+    instance_data["roles"].each do |role|
+      cmd = "#{knife_path} node run_list add #{options.hostname} \"role[#{role}]\""
+      puts cmd
+      puts `#{cmd}`
+    end
 
-
-
-
-
-
-
-
+    result = run_chef_client(chef_path)
+    unless result == 0
+      puts "***** ERROR running chef-client. Relaunching chef-client in 30 seconds."
+      sleep(30)
+      result = run_chef_client(chef_path)
+    end
+    unless result == 0
+      puts "***** ERROR running chef-client. Relaunching chef-client in 30 seconds."
+      sleep(30)
+      result = run_chef_client(chef_path)
+    end
 
-
-
-
-
-
-
-
+    ##############################
+    # EMAIL NOTIFICATION
+    ##############################
+    if instance_data["email_notifications"]
+      # Email notification through SES
+      puts "Email notification through SES..."
+      AWS.config({
+        :access_key_id => instance_data["email_notifications"]["ses_access_key"],
+        :secret_access_key => instance_data["email_notifications"]["ses_secret_key"]
+      })
+      ses = AWS::SimpleEmailService.new
+      ses.send_email(
+        :from => instance_data["email_notifications"]["from"],
+        :to => instance_data["email_notifications"]["to"],
+        :subject => "Server setup complete: #{options.hostname}",
+        :body_text => "Server setup is complete for Host: #{options.hostname}, Environment: #{options.environ}, Application: #{options.application}",
+        :body_html => "<div>Server setup is complete for:</div><div><strong>Host:</strong> #{options.hostname}</div><div><strong>Environment:</strong> #{options.environ}</div><div><strong>Application:</strong> #{options.application}</div>"
+      )
+    else
+      puts "Skipping email notification."
+    end
 
-
-    sleep 10
-  end
+  end
 
-  ##############################
-  #
-  def
-
-
-
-
-
+  ##############################
+  # Launch Chef
+  def run_chef_client(chef_path)
+    result = 0
+    last_line = nil
+    Open3.popen3(chef_path) do |stdin, stdout, stderr, wait_thr|
+      stdout.each do |line|
+        last_line = line
+        puts line
+      end
+      result = wait_thr.value if wait_thr
+    end
+    if last_line =~ /[ ]ERROR[:][ ]/
+      result = -1
+    end
 
-
-  # Partitions & formats new volumes.
-  # Returns the RAID device name.
-  def setup_attached_raid_array(system_arch, devices, raid_device = '/dev/md0', raid_type = 0, clone = false)
-    partitions = devices.collect {|device| "#{device}1" }
-
-    unless clone
-      partition_devices(devices)
-      initialize_raid_array(system_arch, devices, raid_device, raid_type)
-    else
-      assemble_raid_array(partitions, raid_device, raid_type)
-    end
-    `echo DEVICE #{partitions.join(' ')} |tee -a /etc/mdadm.conf`
-
-    # RAID device name can be a symlink on occasion, so we
-    # want to de-reference the symlink to keep everything clear.
-    raid_info = "/dev/md0"
-    raid_scan_info = `/sbin/mdadm --detail --scan 2>&1`
-    puts "RAID Scan Info: #{raid_scan_info}"
-    if raid_scan_info =~ /cannot open/
-      # This happens occasionally on CentOS 6:
-      # $ /sbin/mdadm --detail --scan
-      # mdadm: cannot open /dev/md/0_0: No such file or directory
-      # mdadm: cannot open /dev/md/1_0: No such file or directory
-      #
-      # This is tied to how the raid array was created, especially if the array was created with an older version of mdadm.
-      # See https://bugzilla.redhat.com/show_bug.cgi?id=606481 for a lengthy discussion. We should really be naming RAID
-      # arrays correctly and using the HOMEHOST setting to re-assemble it.
-      #
-      # As a stop-gap, try to use the specified raid_device name passed into this method.
-      raid_info = raid_device
-
-      # We need to manually retrieve the UUID of the array
-      array_uuid = `mdadm --detail #{raid_device}|grep UUID|awk '// { print $3; }'`.strip
-
-      # We have to manually update mdadm.conf as well
-      #`echo ARRAY #{raid_device} level=raid#{raid_type.to_s} num-devices=#{devices.count.to_s} meta-data=0.90 UUID=#{array_uuid} |tee -a /etc/mdadm.conf`
-      `echo ARRAY #{raid_device} level=raid#{raid_type.to_s} num-devices=#{devices.count.to_s} UUID=#{array_uuid} |tee -a /etc/mdadm.conf`
-    else
-      raid_info = raid_scan_info.split("\n")[-1].split()[1]
+    result
   end
-    raid_device_real_path = Pathname.new(raid_info).realpath.to_s
-    puts "Using raid device: #{raid_info}. Real path: #{raid_device_real_path}"
-
-    raid_device_real_path
-  end
 
-
-
-
-
-
+  # Runs a command and displays the output line by line
+  def run_command(cmd)
+    IO.popen(cmd) do |f|
+      while ! f.eof
+        puts f.gets
+      end
+    end
+    $?
   end
-end
 
-
-
-
-  # Process ephemeral devices first
-  ephemeral_drive_count = case EC2_INSTANCE_TYPE
-    when "m1.small" then 1
-    when "m1.medium" then 1
-    when "m2.xlarge" then 1
-    when "m2.2xlarge" then 1
-    when "c1.medium" then 1
-    when "m1.large" then 2
-    when "m2.4xlarge" then 2
-    when "cc1.4xlarge" then 2
-    when "cg1.4xlarge" then 2
-    when "m1.xlarge" then 4
-    when "c1.xlarge" then 4
-    when "cc2.8xlarge" then 4
-    else 0
-  end
+  def attach_volume(instance, device_name, volume)
+    ec2 = AWS::EC2.new
 
-
-
-
-    partition_list << "/dev/#{device_name}"
-  end
-  partition_devices(partition_list)
+    volume_available = test_with_backoff(120, 1, "check EBS volume available #{device_name} (#{volume.id})") do
+      volume.status == :available
+    end
 
-  #
-  build_block_devices(ephemeral_drive_count, "xvdf") do |device_name, index|
-    format_filesystem(system_arch, "/dev/#{device_name}1")
+    # TODO: Handle when volume is still not available
 
-
-
-
-
-
-
+    # Attach volume
+    attachment = nil
+    run_with_backoff(60, 1, "attaching volume #{volume.id} to #{device_name}") do
+      attachment = volume.attach_to(instance, device_name)
+    end
+    volume_attached = test_with_backoff(60, 1, "check EBS volume attached #{device_name} (#{volume.id})") do
+      attachment.status == :attached
+    end
+
+    # TODO: Handle when volume fails to attach
 
-
-
-
-
-
-
-
-
+    attachment
+  end
+
+  def setup_ebs_volumes(instance_data)
+    # Install mdadm if we have any RAID devices
+    raid_required = false
+    instance_data["block_devices"].each do |block_device|
+      unless block_device.raid_level.nil?
+        raid_required = true
+        break
+      end
+    end
+    if raid_required
+      result = run_command("yum install mdadm -y")
+      unless result == 0
+        run_command("yum clean all")
+        run_command("yum install mdadm -y")
+      end
+    end
+
+    # Create and attach the EBS volumes, if necessary
+    if instance_data["provisioned_iops"]
+      puts "Setup requires EBS volumes with provisioned IOPS."
+
+      ec2 = AWS::EC2.new
+      instance = ec2.instances[@INSTANCE_ID]
+
+      volumes = {}
+      block_creation_threads = []
+      instance_data["block_device_mappings"].keys.sort.each do |device_name|
+        block_data = instance_data["block_device_mappings"][device_name]
+        next if block_data =~ /^ephemeral/
+
+        block_info = {}
+        block_info[:availability_zone] = @AZ
+        block_info[:size] = block_data["volume_size"]
+        block_info[:snapshot_id] = block_data["snapshot_id"] if block_data["snapshot_id"]
+        if block_data["iops"]
+          block_info[:iops] = block_data["iops"]
+          block_info[:volume_type] = "io1"
+        end
+
+        # Create volume
+        block_device_text = "Creating EBS volume: #{device_name}, #{block_info[:volume_size]}GB, "
+        block_device_text += "#{block_info[:snapshot_id]}" if block_info[:snapshot_id]
+        block_device_text += "#{block_info[:iops].nil? ? 'standard' : block_info[:iops].to_s} IOPS"
+        puts block_device_text
+        volume = nil
+        run_with_backoff(60, 1, "creating ebs volume") do
+          volume = ec2.volumes.create(block_info)
+        end
+
+        volumes[device_name] = volume
+
+        block_creation_threads << Thread.new {
+          attach_volume(instance, device_name, volume)
+        }
+      end
+
+      block_creation_threads.each do |t|
+        t.join
+      end
+
+      AWS.memoize do
+        block_device_builder = EC2Launcher::BlockDeviceBuilder.new(ec2, 60)
+        block_device_tags = block_device_builder.generate_device_tags(instance_data["hostname"], instance_data["short_hostname"], instance_data["environment"], instance_data["block_devices"])
+        unless block_device_tags.empty?
+          puts "Tagging volumes"
+          AWS.memoize do
+            block_device_tags.keys.each do |device_name|
+              volume = volumes[device_name]
+              block_device_tags[device_name].keys.each do |tag_name|
+                run_with_backoff(30, 1, "tag #{volume.id}, tag: #{tag_name}, value: #{block_device_tags[device_name][tag_name]}") do
+                  volume.add_tag(tag_name, :value => block_device_tags[device_name][tag_name])
+                end
+              end
+            end
+          end
+        end
+      end
+    end # provisioned iops
+
+    raid_array_count = 0
+    next_device_name = "xvdj"
+    instance_data["block_devices"].each do |block_device_json|
+      if block_device_json["raid_level"].nil?
+        # If we're not cloning an existing snapshot, then we need to partition and format the drive.
+        if options.clone_host.nil?
+          partition_devices([ "/dev/#{next_device_name}" ])
+          format_filesystem(system_arch, "/dev/#{next_device_name}1")
+        end
+        mount_device("/dev/#{next_device_name}1", block_device_json["mount_point"], block_device_json["owner"], block_device_json["group"], default_fs_type)
+        next_device_name.next!
+      else
+        raid_devices = []
+        build_block_devices(block_device_json["count"], next_device_name) do |device_name, index|
+          raid_devices << "/dev/#{device_name}"
+          next_device_name = device_name
+        end
+        puts "Setting up attached raid array... system_arch = #{system_arch}, raid_devices = #{raid_devices}, device = /dev/md#{(127 - raid_array_count).to_s}"
+        raid_device_name = setup_attached_raid_array(system_arch, raid_devices, "/dev/md#{(127 - raid_array_count).to_s}", block_device_json["raid_level"].to_i, ! options.clone_host.nil?)
+        mount_device(raid_device_name, block_device_json["mount_point"], block_device_json["owner"], block_device_json["group"], default_fs_type)
+        raid_array_count += 1
+      end
     end
   end
-
-
-
-
-
+
+  # Creates filesystem on a device
+  # XFS on 64-bit
+  # ext4 on 32-bit
+  def format_filesystem(system_arch, device)
+    fs_type = system_arch == "x86_64" ? "XFS" : "ext4"
+    puts "Formatting #{fs_type} filesystem on #{device} ..."
+
+    command = case system_arch
+      when "x86_64" then "/sbin/mkfs.xfs -f #{device}"
+      else "/sbin/mkfs.ext4 -F #{device}"
+    end
+    IO.popen(command) do |f|
+      while ! f.eof
+        puts f.gets
+      end
    end
  end
 
-
-
-
-
-      # If we're not cloning an existing snapshot, then we need to partition and format the drive.
-      if options.clone_host.nil?
-        partition_devices([ "/dev/#{next_device_name}" ])
-        format_filesystem(system_arch, "/dev/#{next_device_name}1")
-      end
-      mount_device("/dev/#{next_device_name}1", block_device_json["mount_point"], block_device_json["owner"], block_device_json["group"], default_fs_type)
-      next_device_name.next!
-    else
-      raid_devices = []
-      build_block_devices(block_device_json["count"], next_device_name) do |device_name, index|
-        raid_devices << "/dev/#{device_name}"
-        next_device_name = device_name
-      end
-      puts "Setting up attached raid array... system_arch = #{system_arch}, raid_devices = #{raid_devices}, device = /dev/md#{(127 - raid_array_count).to_s}"
-      raid_device_name = setup_attached_raid_array(system_arch, raid_devices, "/dev/md#{(127 - raid_array_count).to_s}", block_device_json["raid_level"].to_i, ! options.clone_host.nil?)
-      mount_device(raid_device_name, block_device_json["mount_point"], block_device_json["owner"], block_device_json["group"], default_fs_type)
-      raid_array_count += 1
-    end
-  end
-end
+  # Creates and formats a RAID array, given a
+  # list of partitioned devices
+  def initialize_raid_array(system_arch, device_list, raid_device = '/dev/md0', raid_type = 0)
+    partitions = device_list.collect {|device| "#{device}1" }
 
-
-  #
-
+    puts "Creating RAID-#{raid_type.to_s} array #{raid_device} ..."
+    command = "/sbin/mdadm --create #{raid_device} --level #{raid_type.to_s} --raid-devices #{partitions.length} #{partitions.join(' ')}"
+    puts command
+    puts `#{command}`
 
-
-
-  knife_path = instance_data["knife_path"]
+    format_filesystem(system_arch, raid_device)
+  end
 
-
-
-
-
-
-
-
-  validation_client_name 'chef-validator'
-  validation_key '/etc/chef/validation.pem'
-  chef_server_url '#{instance_data["chef_server_url"]}'
-  cache_type 'BasicFile'
-  cache_options( :path => '/etc/chef/checksums' )
-  EOF
-  home_folder = `echo $HOME`.strip
-  `mkdir -p #{home_folder}/.chef && chown 700 #{home_folder}/.chef`
-  File.open("#{home_folder}/.chef/knife.rb", "w") do |f|
-    f.puts knife_config
-  end
-  `chmod 600 #{home_folder}/.chef/knife.rb`
-
-  ##############################
-  # Add roles
-  instance_data["roles"].each do |role|
-    cmd = "#{knife_path} node run_list add #{options.hostname} \"role[#{role}]\""
-    puts cmd
-    puts `#{cmd}`
-  end
+  # Creates a mount point, mounts the device and adds it to fstab
+  def mount_device(device, mount_point, owner, group, fs_type)
+    puts `echo #{device} #{mount_point} #{fs_type} noatime 0 0|tee -a /etc/fstab`
+    puts `mkdir -p #{mount_point}`
+    puts `mount #{mount_point}`
+    puts `chown #{owner}:#{owner} #{mount_point}`
+  end
 
-
-
-
-
-
-
-    stdout.each do |line|
-      last_line = line
-      puts line
+  # Partitions a list of mounted EBS volumes
+  def partition_devices(device_list)
+    puts "Partioning devices ..."
+    device_list.each do |device|
+      puts " * #{device}"
+      `echo 0|sfdisk #{device}`
    end
-
+
+    puts "Sleeping 10 seconds to reload partition tables ..."
+    sleep 10
  end
-
-
+
+  ##############################
+  # Assembles a set of existing partitions into a RAID array.
+  def assemble_raid_array(partition_list, raid_device = '/dev/md0', raid_type = 0)
+    puts "Assembling cloned RAID-#{raid_type.to_s} array #{raid_device} ..."
+    command = "/sbin/mdadm --assemble #{raid_device} #{partition_list.join(' ')}"
+    puts command
+    puts `#{command}`
  end
 
-
-
+  # Initializes a raid array with existing EBS volumes that are already attached to the instace.
+  # Partitions & formats new volumes.
+  # Returns the RAID device name.
+  def setup_attached_raid_array(system_arch, devices, raid_device = '/dev/md0', raid_type = 0, clone = false)
+    partitions = devices.collect {|device| "#{device}1" }
+
+    unless clone
+      partition_devices(devices)
+      initialize_raid_array(system_arch, devices, raid_device, raid_type)
+    else
+      assemble_raid_array(partitions, raid_device, raid_type)
+    end
+    `echo DEVICE #{partitions.join(' ')} |tee -a /etc/mdadm.conf`
+
+    # RAID device name can be a symlink on occasion, so we
+    # want to de-reference the symlink to keep everything clear.
+    raid_info = "/dev/md0"
+    raid_scan_info = `/sbin/mdadm --detail --scan 2>&1`
+    puts "RAID Scan Info: #{raid_scan_info}"
+    if raid_scan_info =~ /cannot open/
+      # This happens occasionally on CentOS 6:
+      # $ /sbin/mdadm --detail --scan
+      # mdadm: cannot open /dev/md/0_0: No such file or directory
+      # mdadm: cannot open /dev/md/1_0: No such file or directory
+      #
+      # This is tied to how the raid array was created, especially if the array was created with an older version of mdadm.
+      # See https://bugzilla.redhat.com/show_bug.cgi?id=606481 for a lengthy discussion. We should really be naming RAID
+      # arrays correctly and using the HOMEHOST setting to re-assemble it.
+      #
+      # As a stop-gap, try to use the specified raid_device name passed into this method.
+      raid_info = raid_device
+
+      # We need to manually retrieve the UUID of the array
+      array_uuid = `mdadm --detail #{raid_device}|grep UUID|awk '// { print $3; }'`.strip
+
+      # We have to manually update mdadm.conf as well
+      #`echo ARRAY #{raid_device} level=raid#{raid_type.to_s} num-devices=#{devices.count.to_s} meta-data=0.90 UUID=#{array_uuid} |tee -a /etc/mdadm.conf`
+      `echo ARRAY #{raid_device} level=raid#{raid_type.to_s} num-devices=#{devices.count.to_s} UUID=#{array_uuid} |tee -a /etc/mdadm.conf`
+    else
+      raid_info = raid_scan_info.split("\n")[-1].split()[1]
+    end
+    raid_device_real_path = Pathname.new(raid_info).realpath.to_s
+    puts "Using raid device: #{raid_info}. Real path: #{raid_device_real_path}"
+
+    raid_device_real_path
+  end
 
-
-
-
-
-
-  end
-
-  puts "***** ERROR running chef-client. Relaunching chef-client in 30 seconds."
-  sleep(30)
-  result = run_chef_client(chef_path)
+  def build_block_devices(count, device = "xvdj", &block)
+    device_name = device
+    0.upto(count - 1).each do |index|
+      yield device_name, index
+      device_name.next!
+    end
+  end
 end
 
-
-
-  ##############################
-  if instance_data["email_notifications"]
-    # Email notification through SES
-    puts "Email notification through SES..."
-    AWS.config({
-      :access_key_id => instance_data["email_notifications"]["ses_access_key"],
-      :secret_access_key => instance_data["email_notifications"]["ses_secret_key"]
-    })
-    ses = AWS::SimpleEmailService.new
-    ses.send_email(
-      :from => instance_data["email_notifications"]["from"],
-      :to => instance_data["email_notifications"]["to"],
-      :subject => "Server setup complete: #{options.hostname}",
-      :body_text => "Server setup is complete for Host: #{options.hostname}, Environment: #{options.environ}, Application: #{options.application}",
-      :body_html => "<div>Server setup is complete for:</div><div><strong>Host:</strong> #{options.hostname}</div><div><strong>Environment:</strong> #{options.environ}</div><div><strong>Application:</strong> #{options.application}</div>"
-    )
-  else
-    puts "Skipping email notification."
-  end
+instance_setup = InstanceSetup.new(ARGV)
+instance_setup.setup()
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ec2launcher
 version: !ruby/object:Gem::Version
-  version: 1.4.3
+  version: 1.5.0
 prerelease:
 platform: ruby
 authors:
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2012-
+date: 2012-12-07 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aws-sdk