auser-poolparty 0.0.8 → 0.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGELOG +8 -0
- data/README.txt +10 -10
- data/Rakefile +30 -21
- data/{web/static/site/images → assets}/clouds.png +0 -0
- data/bin/instance +39 -34
- data/bin/pool +44 -29
- data/bin/poolnotify +34 -0
- data/config/haproxy.conf +1 -1
- data/config/heartbeat_authkeys.conf +1 -1
- data/config/monit/haproxy.monit.conf +2 -1
- data/config/nginx.conf +1 -1
- data/config/reconfigure_instances_script.sh +28 -9
- data/config/sample-config.yml +1 -1
- data/lib/core/string.rb +3 -0
- data/lib/modules/ec2_wrapper.rb +47 -22
- data/lib/modules/file_writer.rb +38 -0
- data/lib/modules/sprinkle_overrides.rb +32 -0
- data/lib/modules/vlad_override.rb +5 -4
- data/lib/poolparty.rb +14 -10
- data/lib/poolparty/application.rb +33 -19
- data/lib/poolparty/master.rb +227 -105
- data/lib/poolparty/optioner.rb +8 -4
- data/lib/poolparty/plugin.rb +34 -4
- data/lib/poolparty/provider/packages/haproxy.rb +0 -15
- data/lib/poolparty/provider/packages/heartbeat.rb +1 -1
- data/lib/poolparty/provider/packages/ruby.rb +6 -6
- data/lib/poolparty/provider/packages/s3fuse.rb +9 -2
- data/lib/poolparty/provider/provider.rb +65 -25
- data/lib/poolparty/remote_instance.rb +95 -74
- data/lib/poolparty/remoter.rb +48 -37
- data/lib/poolparty/remoting.rb +41 -17
- data/lib/poolparty/scheduler.rb +4 -4
- data/lib/poolparty/tasks.rb +1 -1
- data/lib/poolparty/tasks/package.rake +53 -0
- data/lib/poolparty/tasks/plugins.rake +1 -1
- data/poolparty.gemspec +50 -58
- data/spec/application_spec.rb +28 -0
- data/spec/core_spec.rb +9 -0
- data/spec/ec2_wrapper_spec.rb +87 -0
- data/spec/file_writer_spec.rb +73 -0
- data/spec/files/describe_response +37 -0
- data/spec/files/multi_describe_response +69 -0
- data/spec/files/remote_desc_response +37 -0
- data/spec/helpers/ec2_mock.rb +3 -0
- data/spec/master_spec.rb +302 -78
- data/spec/monitors/cpu_monitor_spec.rb +2 -1
- data/spec/monitors/memory_spec.rb +1 -0
- data/spec/monitors/misc_monitor_spec.rb +1 -0
- data/spec/monitors/web_spec.rb +1 -0
- data/spec/optioner_spec.rb +12 -0
- data/spec/plugin_manager_spec.rb +10 -10
- data/spec/plugin_spec.rb +6 -3
- data/spec/pool_binary_spec.rb +3 -0
- data/spec/poolparty_spec.rb +12 -7
- data/spec/provider_spec.rb +1 -0
- data/spec/remote_instance_spec.rb +18 -18
- data/spec/remoter_spec.rb +4 -2
- data/spec/remoting_spec.rb +10 -2
- data/spec/scheduler_spec.rb +0 -6
- data/spec/spec_helper.rb +13 -0
- metadata +83 -52
- data/Manifest +0 -115
- data/lib/poolparty/tmp.rb +0 -46
- data/misc/basics_tutorial.txt +0 -142
- data/web/static/conf/nginx.conf +0 -22
- data/web/static/site/images/balloon.png +0 -0
- data/web/static/site/images/cb.png +0 -0
- data/web/static/site/images/railsconf_preso_img.png +0 -0
- data/web/static/site/index.html +0 -71
- data/web/static/site/javascripts/application.js +0 -3
- data/web/static/site/javascripts/corner.js +0 -178
- data/web/static/site/javascripts/jquery-1.2.6.pack.js +0 -11
- data/web/static/site/misc.html +0 -42
- data/web/static/site/storage/pool_party_presentation.pdf +0 -0
- data/web/static/site/stylesheets/application.css +0 -100
- data/web/static/site/stylesheets/reset.css +0 -17
- data/web/static/src/layouts/application.haml +0 -25
- data/web/static/src/pages/index.haml +0 -25
- data/web/static/src/pages/misc.haml +0 -5
- data/web/static/src/stylesheets/application.sass +0 -100
data/lib/modules/ec2_wrapper.rb
CHANGED
@@ -16,9 +16,11 @@ module PoolParty
         :maxCount => 1,
         :key_name => Application.keypair,
         :size => "#{Application.size}")
-
-
-
+      begin
+        item = instance#.instancesSet.item
+        EC2ResponseObject.get_hash_from_response(item)
+      rescue Exception => e
+      end
     end
     # Shutdown the instance by instance_id
     def terminate_instance!(instance_id)
@@ -29,9 +31,9 @@ module PoolParty
     end
     # Instance description
     def describe_instance(id)
-      instance = ec2.describe_instances(:instance_id => id)
-      item = instance.reservationSet.item.first.instancesSet.item.first
-      EC2ResponseObject.get_hash_from_response(item)
+      # instance = ec2.describe_instances(:instance_id => id)
+      # item = instance.reservationSet.item.first.instancesSet.item.first
+      EC2ResponseObject.get_hash_from_response(ec2.describe_instances(:instance_id => id))
     end
     # Get instance by id
     def get_instance_by_id(id)
@@ -55,28 +57,51 @@ module PoolParty
   end
   # Provides a simple class to wrap around the amazon responses
   class EC2ResponseObject
-    def self.get_descriptions(resp)
-      rs = resp
-
+    def self.get_descriptions(resp)
+      rs = get_response_from(resp)
+
+      # puts rs.methods.sort - rs.ancestors.methods
       out = begin
-        rs.
+        if rs.respond_to?(:instancesSet)
+          [EC2ResponseObject.get_hash_from_response(rs.instancesSet.item)]
+        else
+          rs.collect {|r|
+            if r.instancesSet.item.class == Array
+              r.instancesSet.item.map {|t| EC2ResponseObject.get_hash_from_response(t)}
+            else
+              [EC2ResponseObject.get_hash_from_response(r.instancesSet.item)]
+            end
+          }.flatten.reject {|a| a.nil? }
+        end
       rescue Exception => e
-
-
-        rs.reject {|a| a.empty? }.collect {|r| EC2ResponseObject.get_hash_from_response(r)}.reject {|a| a.nil? }
-      rescue Exception => e
-        []
-      end
+        # Really weird bug with amazon's ec2 gem
+        rs.collect {|r| EC2ResponseObject.get_hash_from_response(r)}.reject {|a| a.nil? } rescue []
       end
+
       out
     end
+    def self.get_response_from(resp)
+      begin
+        rs = resp.reservationSet.item unless resp.reservationSet.nil?
+        rs ||= resp.DescribeInstancesResponse.reservationSet.item
+        rs ||= rs.respond_to?(:instancesSet) ? rs.instancesSet : rs
+        rs.reject! {|a| a.nil? || a.empty? }
+      rescue Exception => e
+      end
+      rs
+    end
     def self.get_hash_from_response(resp)
-
-
-
-
-
-
+      begin
+        {
+          :instance_id => resp.instanceId,
+          :ip => resp.dnsName,
+          :status => resp.instanceState.name,
+          :launching_time => resp.launchTime,
+          :keypair => resp.keyName
+        }
+      rescue Exception => e
+        nil
+      end
     end
   end
 end
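For orientation, a minimal sketch (not part of the diff) of how the reworked response wrapper is driven. It assumes the amazon-ec2 gem's EC2::Base client (the "EC2" require in poolparty.rb) and valid AWS credentials in the environment:

# Sketch: flattening a raw describe_instances response into the plain hashes
# PoolParty works with (keys per get_hash_from_response above).
require "EC2"

ec2 = EC2::Base.new(:access_key_id     => ENV["AWS_ACCESS_KEY_ID"],
                    :secret_access_key => ENV["AWS_SECRET_ACCESS_KEY"])

PoolParty::EC2ResponseObject.get_descriptions(ec2.describe_instances).each do |inst|
  # => {:instance_id => "i-...", :ip => "...", :status => "running", ...}
  puts "#{inst[:instance_id]} (#{inst[:status]}) -> #{inst[:ip]}"
end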
data/lib/modules/file_writer.rb
ADDED
@@ -0,0 +1,38 @@
+module PoolParty
+  module FileWriter
+    def write_to_file_for(f="haproxy", node=nil, str="", &block)
+      make_base_directory
+      File.open("#{base_tmp_dir}/#{node ? "#{node.name}-" : ""}#{f}", "w+") do |file|
+        file << str
+        file << block.call if block_given?
+      end
+    end
+    # Write a temp file with the content str
+    def write_to_temp_file(str="")
+      tempfile = Tempfile.new("#{base_tmp_dir}/poolparty-#{rand(1000)}-#{rand(1000)}")
+      tempfile.print(str)
+      tempfile.flush
+      tempfile
+    end
+    def with_temp_file(str="", &block)
+      Tempfile.open "#{base_tmp_dir}/poolparty-#{rand(10000)}" do |fp|
+        fp.puts str
+        fp.flush
+        block.call(fp)
+      end
+    end
+
+    def base_tmp_dir
+      File.join(user_dir, "tmp")
+    end
+    def remote_base_tmp_dir
+      "~/tmp"
+    end
+    def make_base_directory
+      `mkdir -p #{base_tmp_dir}` unless File.directory?(base_tmp_dir)
+    end
+    def clear_base_directory
+      `rm -rf #{base_tmp_dir}/*` if File.directory?(base_tmp_dir)
+    end
+  end
+end
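A quick usage sketch (host class and node are hypothetical; FileWriter expects a user_dir method, and per-node files an object responding to #name):

# Sketch: building a per-node config file under #{user_dir}/tmp, the staging
# directory that Master later rsyncs out to #{remote_base_tmp_dir} on each node.
require "tempfile"

class Builder
  include PoolParty::FileWriter
  def user_dir; File.expand_path("~/.poolparty"); end
end

node = Struct.new(:name).new("node0")
Builder.new.write_to_file_for("haproxy", node) { "listen web 0.0.0.0:80" }
# => writes ~/.poolparty/tmp/node0-haproxy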
data/lib/modules/sprinkle_overrides.rb
ADDED
@@ -0,0 +1,32 @@
+require "sprinkle"
+module Sprinkle
+  module Installers
+    class Source < Installer
+
+      def custom_dir(dir)
+        @custom_dir = dir
+      end
+
+      def base_dir
+        if @custom_dir
+          return @custom_dir
+        elsif @source.split('/').last =~ /(.*)\.(tar\.gz|tgz|tar\.bz2|tb2)/
+          return $1
+        end
+        raise "Unknown base path for source archive: #{@source}, please update code knowledge"
+      end
+    end
+
+    class Gem < Installer
+      protected
+      def install_sequence
+        cmd = "gem install -y #{gem}"
+        cmd << " --version '#{version}'" if version
+        cmd << " --source #{source}" if source
+        cmd
+      end
+    end
+
+
+  end
+end
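The Source override adds an escape hatch for archives whose unpacked directory doesn't match the tarball name, which base_dir otherwise guesses from the file extension. A sketch of how it would be used from Sprinkle's package DSL (URL and directory name hypothetical):

# Sketch: custom_dir overrides base_dir's guess, which only understands
# .tar.gz/.tgz/.tar.bz2/.tb2 archives named after their top-level directory.
package :haproxy do
  source "http://example.com/haproxy-1.3-dev.tar.gz" do
    custom_dir "haproxy-1.3"  # actual unpacked directory name
  end
end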
data/lib/modules/vlad_override.rb
CHANGED
@@ -1,12 +1,13 @@
 require "vlad"
-class Rake::RemoteTask < Rake::Task
+class Rake::RemoteTask < Rake::Task
   def run command
-    cmd = [ssh_cmd, ssh_flags, target_host, command].compact
+    cmd = [ssh_cmd, ssh_flags, target_host].compact
     result = []

-
+    commander = cmd.join(" ") << " \"#{command}\""
+    warn commander if $TRACE

-    pid, inn, out, err = popen4(*cmd)
+    pid, inn, out, err = popen4(commander)

     inn.sync = true
     streams = [out, err]
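The effect of the override: the remote command is appended as one quoted string instead of a separate argv element, so compound shell commands survive the trip intact. An illustrative sketch using vlad's remote_task DSL (task body hypothetical):

# Sketch: with the override, this executes roughly
#   ssh <ssh_flags> <target_host> "cd /var/www/app && touch tmp/restart.txt"
# and, when rake runs with tracing ($TRACE), echoes that command line first.
remote_task :restart, :roles => :app do
  run "cd /var/www/app && touch tmp/restart.txt"
end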
data/lib/poolparty.rb
CHANGED
@@ -8,19 +8,20 @@ require 'rubygems'
 require "aws/s3"
 require "sqs"
 require "EC2"
+require "aska"
+require 'sprinkle'
+
 require 'thread'
 require "pp"
 require "tempfile"
-
-require "vlad"
+
 begin
   require 'fastthread'
-  require 'thin'
   require 'system_timer'
-
+  @@timer = SystemTimer
 rescue LoadError
   require 'timeout'
-
+  @@timer = Timeout
 end

 ## Load PoolParty
@@ -41,11 +42,14 @@ end

 module PoolParty
   module Version #:nodoc:
-
-
-
+    @major = 0
+    @minor = 0
+    @tiny = 9

-    STRING = [
+    STRING = [@major, @minor, @tiny].join('.')
+  end
+  def timer
+    @@timer
   end
   # PoolParty options
   def options(opts={})
@@ -83,7 +87,7 @@ module PoolParty
     end
   end
   def load_plugins
-    Dir["#{plugin_dir}/**/init.rb"].each {|a| require a}
+    Dir["#{plugin_dir}/**/init.rb"].each {|a| require a} if File.directory?(plugin_dir)
   end
   def reset!
     @@installed_plugins = nil
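The new @@timer indirection prefers the C-backed system_timer gem when it's installed and degrades to the stdlib's Timeout otherwise; both are used through the same .timeout(seconds) call, which Application#local_user_data relies on below. The pattern in isolation:

# Sketch of the same fallback, outside PoolParty (mirrors the diff's
# @@timer.timeout usage):
begin
  require 'system_timer'
  timer = SystemTimer
rescue LoadError
  require 'timeout'
  timer = Timeout
end

timer.timeout(5) do
  # any call that must not hang, e.g. fetching EC2 instance user-data
end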
data/lib/poolparty/application.rb
CHANGED
@@ -17,17 +17,21 @@ module PoolParty
       loading_options = opts.delete(:optsparse) || {}
       loading_options.merge!( {:argv => opts.delete(:argv)} )

-
-      # default_options.merge!(opts)
+      config_file_location = (default_options[:config_file] || opts[:config_file])
       # If the config_file options are specified and not empty
-      unless
+      unless config_file_location.nil? || config_file_location.empty?
         require "yaml"
         # Try loading the file if it exists
-        filedata = open(
-
+        filedata = File.open("#{config_file_location}").read if File.file?("#{config_file_location}")
+        # We want the command-line to overwrite the config file
+        default_options.merge!( YAML.load(filedata) ) if filedata
       end

-
+      default_options.merge!(opts)
+      load_options!(loading_options) # Load command-line options
+      default_options.merge!(local_user_data) unless local_user_data == {}
+
+      OpenStruct.new(default_options)
     end

     # Load options via commandline
@@ -48,15 +52,16 @@ module PoolParty
       op.on('-m monitors', '--monitors names', "Monitor instances using (default: 'web,memory,cpu')") {|s| default_options[:monitor_load_on] = s }
       op.on('-o port', '--client_port port', "Run on specific client_port (default: 7788)") { |client_port| default_options[:client_port] = client_port }
       op.on('-O os', '--os os', "Configure for os (default: ubuntu)") { |os| default_options[:os] = os }
-      op.on('-e env', '--environment env', "Run on the specific environment (default: development)") { |env| default_options[:
+      op.on('-e env', '--environment env', "Run on the specific environment (default: development)") { |env| default_options[:environment] = env }
       op.on('-a address', '--public-ip address', "Associate this public address with the master node") {|s| default_options[:public_ip] = s}
       op.on('-s size', '--size size', "Run specific sized instance") {|s| default_options[:size] = s}
-      op.on('-
+      op.on('-a name', '--name name', "Application name") {|n| default_options[:app_name] = n}
       op.on('-u username', '--username name', "Login with the user (default: root)") {|s| default_options[:user] = s}
       op.on('-d user-data','--user-data data', "Extra data to send each of the instances (default: "")") { |data| default_options[:user_data] = data }
+      op.on('-i', '--install-on-boot', 'Install the PoolParty and custom software on boot (default: false)') {|b| default_options[:install_on_load] = true}
       op.on('-t seconds', '--polling-time', "Time between polling in seconds (default 50)") {|t| default_options[:polling_time] = t }
       op.on('-v', '--[no-]verbose', 'Run verbosely (default: false)') {|v| default_options[:verbose] = true}
-      op.on('-
+      op.on('-n number', '--minimum-instances', "The minimum number of instances to run at all times (default 1)") {|i| default_options[:minimum_instances] = i.to_i}
       op.on('-x number', '--maximum-instances', "The maximum number of instances to run (default 3)") {|x| default_options[:maximum_instances] = x.to_i}

       op.on_tail("-V", "Show version") do
@@ -103,24 +108,33 @@ module PoolParty
       :keypair => ENV["KEYPAIR_NAME"],
       :ami => 'ami-4a46a323',
       :shared_bucket => "",
-      :services => "",
       :expand_when => "web_usage < 1.5\n memory > 0.85",
       :contract_when => "cpu < 0.20\n memory < 0.10",
       :os => "ubuntu",
       :plugin_dir => "vendor",
-      :install_on_load =>
+      :install_on_load => false
     }
   end
   # Services monitored by Heartbeat
-  # Always at least monitors haproxy
-  def managed_services
-    "#{services}"
-  end
   def master_managed_services
-    "cloud_master_takeover
+    "cloud_master_takeover"
   end
+  alias_method :managed_services, :master_managed_services
   def launching_user_data
-    {:polling_time => polling_time
+    {:polling_time => polling_time,
+     :access_key => access_key,
+     :secret_access_key => secret_access_key,
+     :user_data => user_data}.to_yaml
+  end
+  def local_user_data
+    @local_user_data ||=
+      begin
+        @@timer.timeout(5.seconds) do
+          YAML.load(open("http://169.254.169.254/latest/user-data").read)
+        end
+      rescue Exception => e
+        {}
+      end
   end
   # Keypair path
   # Idiom:
@@ -156,8 +170,8 @@ module PoolParty
     def version
       PoolParty::Version::STRING
     end
-    def install_on_load?
-      options.install_on_load == true
+    def install_on_load?(bool=false)
+      options.install_on_load == true || bool
    end
     # Call the options from the Application
     def method_missing(m,*args)
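Option precedence after this change, lowest to highest: built-in defaults, then the YAML config file, then options passed in code, then command-line flags, then instance user-data fetched from the EC2 metadata service. A standalone sketch of the same layering (file names hypothetical):

# Sketch: defaults < config.yml < explicit opts < ARGV flags < user-data.
require "yaml"
require "ostruct"

defaults = {:user => "root", :polling_time => 50}
defaults.merge!(YAML.load_file("config.yml")) if File.file?("config.yml")
defaults.merge!(:polling_time => 30)       # opts passed in code
# ... OptionParser merges ARGV flags here, then user-data fetched from
# http://169.254.169.254/latest/user-data ({} if the fetch times out)
options = OpenStruct.new(defaults)
options.polling_time  # => 30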
data/lib/poolparty/master.rb
CHANGED
@@ -6,7 +6,10 @@ module PoolParty
     include Aska
     include Callbacks
     include Monitors
+    # ############################
     include Remoter
+    # ############################
+    include FileWriter

     def initialize
       super
@@ -24,6 +27,17 @@ module PoolParty
       message "Launching minimum_instances"
       launch_minimum_instances
       message "Waiting for master to boot up"
+
+      wait_for_all_instances_to_boot
+
+      setup_cloud
+    end
+    def setup_cloud
+      install_cloud
+      configure_cloud
+    end
+    alias_method :start, :start!
+    def wait_for_all_instances_to_boot
       reset!
       while !number_of_pending_instances.zero?
         wait "2.seconds" unless Application.test?
@@ -34,20 +48,49 @@ module PoolParty
         message "Give some time for the instance ssh to start up"
         wait "15.seconds"
       end
-      install_cloud if Application.install_on_load?
-      configure_cloud
     end
-
+    def wait_for_all_instances_to_terminate
+      reset!
+      while !list_of_terminating_instances.size.zero?
+        wait "2.seconds" unless Application.test?
+        waited = true
+        reset!
+      end
+      unless Application.test? || waited.nil?
+        message "Give some time for the instance ssh to start up"
+        wait "15.seconds"
+      end
+      reset!
+    end
     # Configure the master because the master will take care of the rest after that
     def configure_cloud
       message "Configuring master"
-
-
-
-      def install_cloud
+      build_and_send_config_files_in_temp_directory
+      remote_configure_instances
+
       Master.with_nodes do |node|
-        node.
+        node.configure
       end
-
+    end
+    def install_cloud(bool=false)
+      if Application.install_on_load? || bool
+        # Just in case, add the new ubuntu apt-sources as well as updating and fixing the
+        # update packages.
+        update_apt_string =<<-EOE
+          touch /etc/apt/sources.list
+          echo 'deb http://mirrors.cs.wmich.edu/ubuntu hardy main universe' >> /etc/apt/sources.list
+          sudo apt-get update --fix-missing
+        EOE
+
+        execute_tasks do
+          ssh(update_apt_string)
+        end
+        Provider.install_poolparty(cloud_ips)
+        Provider.install_userpackages(cloud_ips)
+      end
+    end
+    def cloud_ips
+      @ips ||= nodes.collect {|a| a.ip }
     end
     # Launch the minimum number of instances.
     def launch_minimum_instances
@@ -74,7 +117,6 @@ module PoolParty
     end
     alias_method :start_monitor, :start_monitor!
     def user_tasks
-      puts "in user_tasks"
     end
     # Sole purpose to check the stats, mainly in a plugin
     def check_stats
@@ -90,30 +132,85 @@ module PoolParty
     # This is a basic check against the local store of the instances that have the
     # stack installed.
     def reconfigure_cloud_when_necessary
-
+      configure_cloud if number_of_unconfigured_nodes > 0
     end
-    alias_method :reconfiguration, :reconfigure_cloud_when_necessary
     def number_of_unconfigured_nodes
       nodes.reject {|a| a.stack_installed? }.size
     end
-    def
-
-
+    def grow_by(num=1)
+      request_launch_new_instances(num)
+
+      wait_for_all_instances_to_boot
+
+      reset!
+      configure_cloud
+    end
+    def shrink_by(num=1)
+      num.times do |i|
+        node = nodes.reject {|a| a.master? }[-1]
+        request_termination_of_instance(node.instance_id) if node
+      end
+      wait_for_all_instances_to_terminate
+      configure_cloud
     end
-
-
-
+
+    def build_and_send_config_files_in_temp_directory
+      require 'ftools'
+      File.copy(get_config_file_for("cloud_master_takeover"), "#{base_tmp_dir}/cloud_master_takeover")
+      File.copy(get_config_file_for("heartbeat.conf"), "#{base_tmp_dir}/ha.cf")
+
+      File.copy(Application.config_file, "#{base_tmp_dir}/config.yml") if Application.config_file && File.exists?(Application.config_file)
+      File.copy(Application.keypair_path, "#{base_tmp_dir}/keypair") if File.exists?(Application.keypair_path)
+
+      File.copy(get_config_file_for("monit.conf"), "#{base_tmp_dir}/monitrc")
+
+      copy_config_files_in_directory_to_tmp_dir("config/resource.d")
+      copy_config_files_in_directory_to_tmp_dir("config/monit.d")
+
+      build_and_copy_heartbeat_authkeys_file
+      build_haproxy_file
+      Master.build_user_global_files
+
+      Master.with_nodes do |node|
+        build_hosts_file_for(node)
+        build_reconfigure_instances_script_for(node)
+        Master.build_user_node_files_for(node)
+
+        if Master.requires_heartbeat?
+          build_heartbeat_config_file_for(node)
+          build_heartbeat_resources_file_for(node)
+        end
+      end
+    end
+    def cleanup_tmp_directory(c)
+      Dir["#{base_tmp_dir}/*"].each {|f| FileUtils.rm_rf f} if File.directory?("tmp/")
+    end
+    before :build_and_send_config_files_in_temp_directory, :cleanup_tmp_directory
+    # Send the files to the nodes
+    def send_config_files_to_nodes(c)
+      run_array_of_tasks(rsync_tasks("#{base_tmp_dir}/*", "#{remote_base_tmp_dir}"))
+    end
+    after :build_and_send_config_files_in_temp_directory, :send_config_files_to_nodes
+    def remote_configure_instances
+      arr = []
+      Master.with_nodes do |node|
+        script_file = "#{remote_base_tmp_dir}/#{node.name}-configuration"
+        str=<<-EOC
+          chmod +x #{script_file}
+          /bin/sh #{script_file}
+        EOC
+        arr << "#{self.class.ssh_string} #{node.ip} '#{str.strip.runnable}'"
+      end
+      run_array_of_tasks(arr)
     end
     # Add an instance if the load is high
     def add_instance_if_load_is_high
-
+      grow_by(1) if expand?
     end
     alias_method :add_instance, :add_instance_if_load_is_high
     # Teardown an instance if the load is pretty low
-    def terminate_instance_if_load_is_low
-      if contract?
-        shrink_by_one
-      end
+    def terminate_instance_if_load_is_low
+      shrink_by(1) if contract?
     end
     alias_method :terminate_instance, :terminate_instance_if_load_is_low
     # FOR MONITORING
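grow_by and shrink_by are now the single entry points for scaling, and the monitor-driven add_instance/terminate_instance simply delegate to them. A sketch of driving them directly (assumes a configured pool and working EC2 credentials):

# Sketch: manual scaling through the new entry points.
master = PoolParty::Master.new
master.grow_by(2)    # request 2 instances, wait for boot, reconfigure the cloud
master.shrink_by(1)  # terminate the last non-master node, wait, reconfigure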
@@ -129,42 +226,68 @@ module PoolParty
         node.restart_with_monit
       end
     end
-    # Reconfigure the running instances
-    # Since we are using vlad, running configure on one of the instances
-    # should configure all of the instances. We set the hosts in this file
-    def reconfigure_running_instances
-      # nodes.each do |node|
-      #   node.configure if node.status =~ /running/
-      # end
-      master = get_node(0)
-      master.configure
-    end
     # Build the basic haproxy config file from the config file in the config directory and return a tempfile
     def build_haproxy_file
-
+      write_to_file_for("haproxy") do
+        servers=<<-EOS
 #{nodes.collect {|node| node.haproxy_entry}.join("\n")}
-
-
-
-    # Build the hosts file and return a tempfile
-    def build_hosts_file
-      write_to_temp_file(nodes.collect {|a| a.hosts_entry }.join("\n"))
+        EOS
+        open(Application.haproxy_config_file).read.strip ^ {:servers => servers, :host_port => Application.host_port}
+      end
     end
     # Build host file for a specific node
     def build_hosts_file_for(n)
-
-      #{nodes.collect {|node| node.ip == n.ip ? node.local_hosts_entry : node.hosts_entry}.join("\n")}
-
-      servers
+      write_to_file_for("hosts", n) do
+        "#{nodes.collect {|node| node.ip == n.ip ? node.local_hosts_entry : node.hosts_entry}.join("\n")}"
+      end
     end
     # Build the basic auth file for the heartbeat
-    def
-
+    def build_and_copy_heartbeat_authkeys_file
+      write_to_file_for("authkeys") do
+        open(Application.heartbeat_authkeys_config_file).read
+      end
     end
     # Build heartbeat config file
     def build_heartbeat_config_file_for(node)
-
-
+      write_to_file_for("heartbeat", node) do
+        servers = "#{node.node_entry}\n#{get_next_node(node).node_entry}" rescue ""
+        open(Application.heartbeat_config_file).read.strip ^ {:nodes => servers}
+      end
+    end
+    def build_heartbeat_resources_file_for(node)
+      write_to_file_for("haresources", node) do
+        "#{node.haproxy_resources_entry}\n#{get_next_node(node).haproxy_resources_entry}" rescue ""
+      end
+    end
+    # Build basic configuration script for the node
+    def build_reconfigure_instances_script_for(node)
+      write_to_file_for("configuration", node) do
+        open(Application.sh_reconfigure_instances_script).read.strip ^ node.configure_tasks
+      end
+    end
+
+    # Try the user's directory before the master directory
+    def get_config_file_for(name)
+      if File.exists?("#{user_dir}/config/#{name}")
+        "#{user_dir}/config/#{name}"
+      else
+        "#{root_dir}/config/#{name}"
+      end
+    end
+    # Copy all the files in the directory to the dest
+    def copy_config_files_in_directory_to_tmp_dir(dir)
+      dest_dir = "#{base_tmp_dir}/#{File.basename(dir)}"
+      FileUtils.mkdir_p dest_dir
+
+      if File.directory?("#{user_dir}/#{dir}")
+        Dir["#{user_dir}/#{dir}/*"].each do |file|
+          File.copy(file, dest_dir)
+        end
+      else
+        Dir["#{root_dir}/#{dir}/*"].each do |file|
+          File.copy(file, dest_dir)
+        end
+      end
     end
     # Return a list of the nodes and cache them
     def nodes
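The builders above all funnel through write_to_file_for plus a String#^ templating helper from the gem's core string extensions (data/lib/core/string.rb) that fills :key placeholders from a hash. A hypothetical minimal version of that helper, for illustration only:

# Hypothetical sketch of String#^ as used above; the real definition lives
# in data/lib/core/string.rb and may differ.
class String
  def ^(substitutions={})
    substitutions.inject(self) do |str, (key, value)|
      str.gsub(":#{key}", value.to_s)
    end
  end
end

"listen web :host_port\n:servers" ^ {:host_port => 8080, :servers => "server node0 192.168.0.1"}
# => "listen web 8080\nserver node0 192.168.0.1"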
@@ -172,6 +295,18 @@ module PoolParty
         RemoteInstance.new(inst.merge({:number => i}))
       end
     end
+    # Return a list of the nodes for each keypair and cache them
+    def cloud_nodes
+      @cloud_nodes ||= begin
+        nodes_list = []
+        cloud_keypairs.each {|keypair|
+          list_of_nonterminated_instances(list_of_instances(keypair)).collect_with_index { |inst, i|
+            nodes_list << RemoteInstance.new(inst.merge({:number => i}))
+          }
+        }
+        nodes_list
+      end
+    end
     # Get the node at the specific index from the cached nodes
     def get_node(i=0)
       nodes.select {|a| a.number == i.to_i}.first
@@ -179,7 +314,7 @@ module PoolParty
     # Get the next node in sequence, so we can configure heartbeat to monitor the next node
     def get_next_node(node)
       i = node.number + 1
-      i = 0 if i >=
+      i = 0 if i >= nodes.size
       get_node(i)
     end
     # On exit command
@@ -195,10 +330,28 @@ module PoolParty
       end
       out
     end
+    def clouds_list
+      if number_of_all_pending_and_running_instances > 0
+        out = "-- ALL CLOUDS (#{number_of_all_pending_and_running_instances})--\n"
+        keypair = nil
+        out << cloud_nodes.collect {|node|
+          str = ""
+          if keypair != node.keypair
+            keypair = node.keypair;
+            str = "key pair: #{keypair} (#{number_of_pending_and_running_instances(keypair)})\n"
+          end
+          str += "\t"+node.description if !node.description.nil?
+        }.join("\n")
+      else
+        out = "Clouds are not running"
+      end
+      out
+    end
     # Reset and clear the caches
     def reset!
       @cached_descriptions = nil
       @nodes = nil
+      @cloud_nodes = nil
     end

     class << self
@@ -231,50 +384,11 @@ module PoolParty
     end
     # Build a heartbeat resources file from the config directory and return a tempfile
     def build_heartbeat_resources_file_for(node)
-      return nil unless node
-
-
-
-
-      new.build_hosts_file_for(node)
-    end
-    # Build the scp script for the specific node
-    def build_scp_instances_script_for(node)
-      authkeys_file = write_to_temp_file(open(Application.heartbeat_authkeys_config_file).read.strip)
-      if Master.requires_heartbeat?
-        ha_d_file = Master.build_heartbeat_config_file_for(node)
-        haresources_file = Master.build_heartbeat_resources_file_for(node)
-      end
-      haproxy_file = Master.build_haproxy_file
-      hosts_file = Master.build_hosts_file_for(node)
-
-      str = open(Application.sh_scp_instances_script).read.strip ^ {
-        :cloud_master_takeover => "#{node.scp_string("#{root_dir}/config/cloud_master_takeover", "/etc/ha.d/resource.d/", :dir => "/etc/ha.d/resource.d")}",
-        :config_file => "#{node.scp_string(Application.config_file, "~/.config")}",
-        :authkeys => "#{node.scp_string(authkeys_file.path, "/etc/ha.d/authkeys", :dir => "/etc/ha.d/")}",
-        :resources => "#{node.scp_string("#{root_dir}/config/resource.d/*", "/etc/ha.d/resource.d/", {:switches => "-r"})}",
-        :monitrc => "#{node.scp_string(Application.monit_config_file, "/etc/monit/monitrc", :dir => "/etc/monit")}",
-        :monit_d => "#{node.scp_string("#{File.dirname(Application.monit_config_file)}/monit/*", "/etc/monit.d/", {:switches => "-r", :dir => "/etc/monit.d/"})}",
-        :haproxy => "#{node.scp_string(haproxy_file, "/etc/haproxy.cfg")}",
-
-        :ha_d => Master.requires_heartbeat? ? "#{node.scp_string(ha_d_file, "/etc/ha.d/ha.cf")}" : "",
-        :haresources => Master.requires_heartbeat? ? "#{node.scp_string(haresources_file, "/etc/ha.d/ha.cf")}" : "",
-
-        :hosts => "#{node.scp_string(hosts_file, "/etc/hosts")}"
-      }
-      write_to_temp_file(str)
-    end
-    # Build basic configuration script for the node
-    def build_reconfigure_instances_script_for(node)
-      str = open(Application.sh_reconfigure_instances_script).read.strip ^ {
-        :config_master => "#{node.update_plugin_string}",
-        :start_pool_maintain => "pool maintain -c ~/.config -l ~/plugins",
-        :set_hostname => "hostname -v #{node.name}",
-        :start_s3fs => "/usr/bin/s3fs #{Application.shared_bucket} -o accessKeyId=#{Application.access_key} -o secretAccessKey=#{Application.secret_access_key} -o nonempty /data"
-      }
-      write_to_temp_file(str)
-    end
-
+      return nil unless node && get_next_node(node)
+      new.write_to_file_for("haresources", node) do
+        "#{node.haproxy_resources_entry}\n#{get_next_node(node).haproxy_resources_entry}"
+      end
+    end
     def set_hosts(c, remotetask=nil)
       unless remotetask.nil?
         rt = remotetask
@@ -309,20 +423,28 @@ module PoolParty
       EOS
       open(Application.haproxy_config_file).read.strip ^ {:servers => servers, :host_port => Application.host_port}
     end
-
-
-
-
-
-
-    end
-    def
-
-
-
-
+
+    # Placeholders
+    def build_user_global_files
+      global_user_files.each do |arr|
+        write_to_file_for(arr[0]) &arr[1]
+      end
+    end
+    def build_user_node_files_for(node)
+      user_node_files.each do |arr|
+        write_to_file_for(arr[0], node) do
+          arr[1].call(node)
+        end
       end
     end
+    def define_global_user_file(name, &block)
+      global_user_files << [name, block]
+    end
+    def global_user_files;@global_user_files ||= [];end
+    def define_node_user_file(name, &block)
+      user_node_files << [name, block]
+    end
+    def user_node_files;@user_node_files ||= [];end
   end

 end