poolparty 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. data/CHANGELOG +4 -0
  2. data/Manifest +55 -0
  3. data/README.txt +113 -0
  4. data/Rakefile +18 -0
  5. data/bin/instance +54 -0
  6. data/bin/pool +33 -0
  7. data/config/config.yml +23 -0
  8. data/config/create_proxy_ami.sh +582 -0
  9. data/config/haproxy.conf +29 -0
  10. data/config/heartbeat.conf +9 -0
  11. data/config/heartbeat_authkeys.conf +2 -0
  12. data/config/monit/haproxy.monit.conf +7 -0
  13. data/config/monit/nginx.monit.conf +0 -0
  14. data/config/monit.conf +8 -0
  15. data/config/nginx.conf +24 -0
  16. data/lib/core/array.rb +10 -0
  17. data/lib/core/exception.rb +9 -0
  18. data/lib/core/kernel.rb +9 -0
  19. data/lib/core/module.rb +22 -0
  20. data/lib/core/object.rb +14 -0
  21. data/lib/core/string.rb +49 -0
  22. data/lib/core/time.rb +41 -0
  23. data/lib/modules/callback.rb +55 -0
  24. data/lib/modules/ec2_wrapper.rb +74 -0
  25. data/lib/modules/safe_instance.rb +31 -0
  26. data/lib/pool_party/application.rb +133 -0
  27. data/lib/pool_party/init.rb +4 -0
  28. data/lib/pool_party/master.rb +189 -0
  29. data/lib/pool_party/monitors/cpu.rb +18 -0
  30. data/lib/pool_party/monitors/memory.rb +21 -0
  31. data/lib/pool_party/monitors/web.rb +18 -0
  32. data/lib/pool_party/monitors.rb +13 -0
  33. data/lib/pool_party/optioner.rb +16 -0
  34. data/lib/pool_party/os/ubuntu.rb +78 -0
  35. data/lib/pool_party/os.rb +11 -0
  36. data/lib/pool_party/remote_instance.rb +180 -0
  37. data/lib/pool_party/remoting.rb +112 -0
  38. data/lib/pool_party/scheduler.rb +93 -0
  39. data/lib/pool_party/tasks.rb +220 -0
  40. data/lib/pool_party.rb +69 -0
  41. data/lib/s3/s3_object_store_folders.rb +44 -0
  42. data/poolparty.gemspec +55 -0
  43. data/spec/application_spec.rb +32 -0
  44. data/spec/callback_spec.rb +65 -0
  45. data/spec/helpers/ec2_mock.rb +56 -0
  46. data/spec/helpers/remote_instance_mock.rb +11 -0
  47. data/spec/kernel_spec.rb +11 -0
  48. data/spec/master_spec.rb +147 -0
  49. data/spec/monitor_spec.rb +16 -0
  50. data/spec/optioner_spec.rb +22 -0
  51. data/spec/poolparty_spec.rb +8 -0
  52. data/spec/remote_instance_spec.rb +29 -0
  53. data/spec/remoting_spec.rb +75 -0
  54. data/spec/spec_helper.rb +38 -0
  55. data/spec/string_spec.rb +28 -0
  56. data/test/test_pool_party.rb +0 -0
  57. metadata +171 -0
=begin rdoc
Kernel overloads
=end
module Kernel
  # Nice wait instead of sleep.
  #
  # Accepts either a numeric number of seconds or a string such as
  # "10.seconds", which is eval'ed (this relies on the Numeric duration
  # helpers defined in core/time.rb being loaded).
  # NOTE(review): eval on the string argument is only safe because callers
  # pass hard-coded duration literals, never user input.
  def wait(time = 10)
    duration = time.is_a?(String) ? eval(time) : time
    sleep duration
  end
end
# Module overloads
class Module
  # Gives us alias_method_chain from rails.
  #
  # alias_method_chain(:save, :audit) renames :save to :save_without_audit
  # and repoints :save at :save_with_audit, preserving any trailing ?, ! or =
  # punctuation as well as the original method's visibility.
  def alias_method_chain(target, feature)
    base_name = target.to_s.sub(/([?!=])$/, '')
    punctuation = $1
    yield(base_name, punctuation) if block_given?

    with_method = "#{base_name}_with_#{feature}#{punctuation}"
    without_method = "#{base_name}_without_#{feature}#{punctuation}"

    alias_method without_method, target
    alias_method target, with_method

    # Carry the original method's visibility over to the chained name.
    if public_method_defined?(without_method)
      public target
    elsif protected_method_defined?(without_method)
      protected target
    elsif private_method_defined?(without_method)
      private target
    end
  end
end
=begin rdoc
Basic, add an alias_method to the object class
Add returning to the object
=end
class Object
  # Instance-level alias: binds the method currently attached to
  # original_id and defines new_id to forward all arguments to it.
  def alias_method(new_id, original_id)
    bound = self.method(original_id).to_proc
    define_method(new_id) { |*args| bound.call(*args) }
  end

  # Rails-style returning: yields +receiver+ to the block, then returns
  # +receiver+ — handy for build-and-return chains.
  def returning(receiver)
    yield receiver
    receiver
  end
end
class String
  # Split self on whitespace and zip the tokens into a hash keyed by the
  # symbols in +format+, e.g. "a b".hasherize([:x, :y]) => {:x=>"a", :y=>"b"}.
  # Tokens beyond the format list are dropped.
  #
  # FIX: the original split on %r{[\n|\t|\s| ]+}; inside a character class
  # the pipes are literal, so it also (accidentally) split on "|".
  # Whitespace-only splitting is what the class obviously intends.
  def hasherize(format=[])
    hash = {}
    i = 0
    self.split(/\s+/).collect {|a| a.strip}.each do |f|
      break unless format[i]
      unless f == "" || f.nil?
        hash[format[i]] = f
        i+=1
      end
    end
    hash
  end
  # Template substitution: "port :port" ^ {:port => 80} => "port 80".
  # Unknown :tokens are replaced with the empty string.
  def ^(h={})
    self.gsub(/:([\w]+)/) {h[$1.to_sym] if h.include?($1.to_sym)}
  end
  # Turn a multi-line script into a single "a && b && c" shell command
  def runnable
    self.strip.gsub(/\n/, " && ")
  end
  def classify
    self.capitalize
  end
  # --- S3 helpers below treat the string itself as a bucket name ---
  def bucket_objects
    AWS::S3::Bucket.objects(self)
  end
  def bucket_object(key)
    AWS::S3::S3Object.value key, self if bucket_object_exists?(key)
  end
  def bucket_object_exists?(key)
    AWS::S3::S3Object.exists? key, self
  end
  # Write-once semantics: never overwrites an existing key
  def store_bucket_value(key, data)
    AWS::S3::S3Object.store key, data, self unless bucket_object_exists?(key)
  end
  def delete_bucket_value(key)
    AWS::S3::S3Object.delete(key, self) if bucket_object_exists?(key)
  end
  def bucket_exists?
    begin
      AWS::S3::Bucket.find(self)
      return true
    rescue
      return false
    end
  end
  def delete_bucket
    AWS::S3::Bucket.delete(self, :force => true) if bucket_exists?
  end
end
data/lib/core/time.rb ADDED
=begin rdoc
Based off the rails Numeric class.
Gives us the ability to use nice phrases such as
30.seconds, 5.days, etc.
=end
class Numeric
  # Treat self as a span of seconds ending at +time+ (defaults to now).
  def ago(time = Time.now)
    time - self
  end
  alias_method :until, :ago

  # Treat self as a span of seconds starting at +time+ (defaults to now).
  def since(time = Time.now)
    time + self
  end
  alias_method :from_now, :since

  # Duration helpers — everything normalizes to seconds.
  def seconds
    self
  end
  alias_method :second, :seconds

  def minutes
    60 * self
  end
  alias_method :minute, :minutes

  def hours
    60 * 60 * self
  end
  alias_method :hour, :hours

  def days
    24 * 60 * 60 * self
  end
  alias_method :day, :days

  def weeks
    7 * 24 * 60 * 60 * self
  end
  alias_method :week, :weeks
end
module PoolParty
  module Callbacks
    module ClassMethods
      # Wire the code string +e+ to run before or after instance method +m+.
      #
      # Mechanics: a fresh anonymous module defines "<type>_<m>" (running
      # +e+ around a call to super); the original method is aliased to that
      # same hook name on the class; the public method is then redefined to
      # extend the instance with the anonymous module and invoke the hook,
      # so the module's version runs first and its `super` reaches the
      # aliased original.
      def callback(type, m, e, *args, &block)
        hook = "#{type}_#{m}"

        # Warn (only) when the hook name is already taken
        puts "method already defined" if method_defined?(hook.to_sym)

        case type
        when :before
          str = <<-EOD
            def #{hook}
              #{e}
              yield if block_given?
              super
            end
          EOD
        when :after
          str = <<-EOD
            def #{hook}
              super
              yield if block_given?
              #{e}
            end
          EOD
        end

        hook_module = Module.new { eval str }

        module_eval %{alias_method :#{hook}, :#{m}}

        self.send :define_method, "#{m}".to_sym, Proc.new {
          extend(hook_module)
          method(hook.to_sym).call
        }
      end
      # Run the code string +e+ before each call to +m+
      def before(m, e, *args, &block)
        callback(:before, m, e, *args, &block)
      end
      # Run the code string +e+ after each call to +m+
      def after(m, e, *args, &block)
        callback(:after, m, e, *args, &block)
      end
    end

    module InstanceMethods
    end

    def self.included(receiver)
      receiver.extend ClassMethods
      receiver.send :include, InstanceMethods
    end
  end
end
module PoolParty
  extend self

  module Ec2Wrapper

    module ClassMethods
    end

    module InstanceMethods
      # Run a new instance, with the user_data and the ami described in the
      # config. Returns the attribute hash for the launched instance.
      def launch_new_instance!
        instance = ec2.run_instances(
          :image_id => Application.ami,
          :user_data => "#{Application.launching_user_data}",
          :minCount => 1,
          :maxCount => 1,
          :key_name => Application.keypair,
          :size => "#{Application.size}")

        item = instance.RunInstancesResponse.instancesSet.item
        EC2ResponseObject.get_hash_from_response(item)
      end
      # Shutdown the instance by instance_id
      def terminate_instance!(instance_id)
        ec2.terminate_instances(:instance_id => instance_id)
      end
      # Instance description as an attribute hash
      def describe_instance(id)
        instance = ec2.describe_instances(:instance_id => id)
        item = instance.DescribeInstancesResponse.reservationSet.item.instancesSet.item
        EC2ResponseObject.get_hash_from_response(item)
      end
      # Get instance by id; nil when the id is unknown or the lookup fails
      def get_instance_by_id(id)
        get_instances_description.select {|a| a.instance_id == id}[0] rescue nil
      end
      # Get the description for all instances in a hash format, cached for
      # the lifetime of this object (reset externally via @cached_descriptions)
      def get_instances_description
        @cached_descriptions ||= EC2ResponseObject.get_descriptions(ec2.describe_instances)
      end

      # EC2 connection, memoized per instance
      def ec2
        @ec2 ||= EC2::Base.new(:access_key_id => Application.access_key, :secret_access_key => Application.secret_access_key)
      end
    end

    def self.included(receiver)
      receiver.extend ClassMethods
      receiver.send :include, InstanceMethods
    end
  end

  # Provides a simple class to wrap around the amazon responses
  class EC2ResponseObject
    # Flatten a DescribeInstances response into an array of attribute
    # hashes, tolerating both response shapes the amazon-ec2 gem emits.
    #
    # FIX: the rescues were `rescue Exception`, which also swallowed
    # signals, exit and memory errors; StandardError still covers the
    # gem's NoMethodError shape quirks that this fallback exists for.
    def self.get_descriptions(resp)
      rs = resp.DescribeInstancesResponse.reservationSet.item
      rs = rs.respond_to?(:instancesSet) ? rs.instancesSet : rs
      begin
        rs.reject {|a| a.empty? }.collect {|r| EC2ResponseObject.get_hash_from_response(r.instancesSet.item)}.reject {|a| a.nil? }
      rescue StandardError
        begin
          # Really weird bug with amazon's ec2 gem: some responses are
          # already the instance items themselves
          rs.reject {|a| a.empty? }.collect {|r| EC2ResponseObject.get_hash_from_response(r)}.reject {|a| a.nil? }
        rescue StandardError
          []
        end
      end
    end
    # Reduce one instance description to the fields PoolParty cares about.
    # The trailing `rescue nil` is load-bearing: malformed entries become
    # nil and are stripped by the callers' reject {|a| a.nil? }.
    def self.get_hash_from_response(resp)
      {:instance_id => resp.instanceId, :ip => resp.dnsName, :status => resp.instanceState.name, :launching_time => resp.launchTime} rescue nil
    end
  end
end
=begin rdoc
Make a command thread-safe
=end
require "monitor"
module PoolParty
  extend self

  module ThreadSafeInstance

    module ClassMethods
      # Wrap instance method +meth+ so every call is serialized through the
      # class-level monitor. The unwrapped original stays reachable as
      # _unsafe_<meth>_. Returns the class for chaining.
      #
      # FIX: the original wrapper accepted |*args| but called
      # send(original_method) without forwarding them (or the block),
      # breaking any wrapped method that takes parameters.
      def make_safe(meth)
        original_method = "_unsafe_#{meth}_"
        alias_method original_method, meth
        define_method(meth) { |*args, &blk| self.class.synchronize { self.send(original_method, *args, &blk) } }
        self
      end
    end

    module InstanceMethods
      def make_safe meth
        self.class.make_safe meth
      end
    end

    def self.included(receiver)
      # MonitorMixin.extend_object initializes the monitor state, so the
      # class-level synchronize above works without an explicit mon_initialize.
      receiver.extend MonitorMixin
      receiver.extend ClassMethods
      receiver.send :include, InstanceMethods
    end
  end
end
=begin rdoc
Application
This handles user interaction
=end
$:.unshift File.dirname(__FILE__)
require "ostruct"

module PoolParty
  extend self

  class Application
    class << self

      # The application options, memoized as an OpenStruct
      def options(opts={})
        @options ||= make_options(opts)
      end
      # Make the options with the config_file overrides included
      # Default config file assumed to be at config/config.yml
      def make_options(opts={})
        load_options!
        default_options.merge!(opts)
        # If the config_file options are specified and not empty
        unless default_options[:config_file].nil? || default_options[:config_file].empty?
          require "yaml"
          # Try loading the file if it exists
          filedata = open(default_options[:config_file]).read if File.file?(default_options[:config_file])
          default_options.merge!( YAML.load(filedata) ) if filedata
        end

        OpenStruct.new(default_options)
      end

      # Load options via commandline
      def load_options!
        require 'optparse'
        OptionParser.new do |op|
          op.on('-A key', '--access-key key', "Ec2 access key (ENV['ACCESS_KEY'])") { |key| default_options[:access_key] = key }
          op.on('-S key', '--secret-access-key key', "Ec2 secret access key (ENV['SECRET_ACCESS_KEY'])") { |key| default_options[:secret_access_key] = key }
          op.on('-I ami', '--image-id id', "AMI instance (default: 'ami-4a46a323')") {|id| default_options[:ami] = id }
          op.on('-k keypair', '--keypair name', "Keypair name (ENV['KEYPAIR_NAME'])") { |key| default_options[:keypair] = key }
          op.on('-b bucket', '--bucket bucket', "Application bucket") { |bucket| default_options[:shared_bucket] = bucket }
          op.on('-D ec2 directory', '--ec2-dir dir', "Directory with ec2 data (default: '~/.ec2')") {|id| default_options[:ec2_dir] = id }
          # NOTE(review): '-S' is already registered for --secret-access-key
          # above; this later registration silently overrides the short flag.
          # Left untouched to avoid changing the public CLI — pick a free
          # short flag when that is safe to do.
          op.on('-S services', '--services names', "Monitored services (default: '')") {|id| default_options[:services] = id }
          op.on('-c file', '--config-file file', "Config file (default: '')") {|file| default_options[:config_file] = file }
          op.on('-p port', '--host_port port', "Run on specific host_port (default: 7788)") { |host_port| default_options[:host_port] = host_port }
          op.on('-m monitors', '--monitors names', "Monitor instances using (default: 'web,memory,cpu')") {|s| default_options[:monitor_load_on] = s }
          op.on('-o port', '--client_port port', "Run on specific client_port (default: 7788)") { |client_port| default_options[:client_port] = client_port }
          op.on('-O os', '--os os', "Configure for os (default: ubuntu)") { |os| default_options[:os] = os }
          op.on('-e env', '--environment env', "Run on the specific environment (default: development)") { |env| default_options[:env] = env }
          op.on('-s size', '--size size', "Run specific sized instance") {|s| default_options[:size] = s}
          op.on('-u username', '--username name', "Login with the user (default: root)") {|s| default_options[:user] = s}
          # FIX: the original description string `"... (default: "")"` was
          # three adjacent literals concatenated to "... (default: )".
          op.on('-d user-data','--user-data data', "Extra data to send each of the instances (default: '')") { |data| default_options[:user_data] = data }
          op.on('-t seconds', '--polling-time', "Time between polling in seconds (default 50)") {|t| default_options[:polling_time] = t }
          op.on('-v', '--[no-]verbose', 'Run verbosely (default: false)') {|v| default_options[:verbose] = v}
          op.on('-i number', '--minimum-instances', "The minimum number of instances to run at all times (default 1)") {|i| default_options[:minimum_instances] = i}
          op.on('-x number', '--maximum-instances', "The maximum number of instances to run (default 3)") {|x| default_options[:maximum_instances] = x}

          op.on_tail("-h", "--help", "Show this message") do |o|
            puts "op: #{o}"
            exit
          end
        end.parse!(ARGV.dup)
      end

      # Basic default options
      # All can be overridden by the command line
      # or in a config.yml file
      def default_options
        @default_options ||= {
          :host_port => 80,
          :client_port => 8001,
          :environment => 'development',
          :verbose => true,
          :logging => true,
          :size => "small",
          :polling_time => "30.seconds",
          :user_data => "",
          :heavy_load => 0.80,
          :light_load => 0.15,
          :minimum_instances => 1,
          :maximum_instances => 3,
          :access_key => ENV["ACCESS_KEY"],
          :secret_access_key => ENV["SECRET_ACCESS_KEY"],
          # FIX: the original condition `(ENV["CONFIG_FILE"] &&
          # ENV["CONFIG_FILE"].empty?) ? default : ENV[...]` returned nil
          # whenever CONFIG_FILE was unset. Fall back to the default when
          # the variable is missing or empty.
          :config_file => ((ENV["CONFIG_FILE"].nil? || ENV["CONFIG_FILE"].empty?) ? "config/config.yml" : ENV["CONFIG_FILE"]),
          :username => "root",
          :ec2_dir => ENV["EC2_HOME"],
          :keypair => ENV["KEYPAIR_NAME"],
          :ami => 'ami-4a46a323',
          :shared_bucket => "",
          :services => "nginx sinatra",
          :expand_when => "web_usage < 1.5\n memory_usage > 0.85",
          :contract_when => "cpu_usage < 0.20\n memory_usage < 0.10",
          :os => "ubuntu"
        }
      end
      # Services monitored by Heartbeat
      # Always at least monitors haproxy
      def managed_services
        "haproxy #{services}"
      end
      # YAML blob handed to new instances at boot
      def launching_user_data
        {:polling_time => polling_time}.to_yaml
      end
      # Keypair path
      # Idiom:
      #   /Users/username/.ec2/id_rsa-name
      def keypair_path
        "#{ec2_dir}/id_rsa-#{keypair}"
      end
      # Are we in development or test mode
      def development?
        environment == 'development'
      end
      # Are we in production mode?
      def production?
        environment == "production"
      end
      # Standard configuration files: haproxy_config_file, monit_config_file,
      # heartbeat_config_file, heartbeat_authkeys_config_file
      %w(haproxy monit heartbeat heartbeat_authkeys).each do |file|
        define_method "#{file}_config_file" do
          File.join(File.dirname(__FILE__), "../..", "config", "#{file}.conf")
        end
      end

      # Call the options from the Application.
      # FIX: the original tested options.methods.include?("#{m}") — under
      # Ruby >= 1.9 #methods returns Symbols, so the test always failed —
      # and forwarded the args Array itself instead of splatting it.
      def method_missing(m,*args)
        options.respond_to?(m) ? options.send(m, *args) : super
      end
      def respond_to_missing?(m, include_private = false)
        options.respond_to?(m) || super
      end
    end

  end

end
=begin rdoc
Load the files in order
=end
base = File.dirname(__FILE__)
%w(optioner application monitors scheduler remoting os remote_instance master tasks).each do |lib|
  require File.join(base, lib)
end
=begin rdoc
The basic master for PoolParty
=end
require "aska"
module PoolParty
  class Master < Remoting
    include Aska

    def initialize
      super

      # Register the scaling rule-sets declared in the application config
      self.class.send :rules, :contract_when, Application.options.contract_when
      self.class.send :rules, :expand_when, Application.options.expand_when
    end
    # Start the cloud
    def start_cloud!
      start!
    end
    # Start the cloud, which launches the minimum_instances and then
    # configures the master node once everything has booted
    def start!
      message "Launching minimum_instances"
      launch_minimum_instances
      message "Waiting for master to boot up"
      reset!
      until number_of_pending_instances.zero?
        wait "2.seconds"
        reset!
      end
      message "Give some time for the instance ssh to start up"
      wait "10.seconds"
      message "Configuring master"
      get_node(0).configure
    end
    # Launch the minimum number of instances.
    def launch_minimum_instances
      request_launch_new_instances(Application.minimum_instances - number_of_pending_and_running_instances)
      nodes
    end
    # Start monitoring the cloud with the threaded loop
    def start_monitor!
      begin
        trap("INT") do
          on_exit
          exit
        end
        run_thread_loop(:daemonize => true) do
          add_task {launch_minimum_instances} # If the base instances go down...
          add_task {reconfigure_cloud_when_necessary}
          add_task {add_instance_if_load_is_high}
          add_task {terminate_instance_if_load_is_low}
        end
      # NOTE(review): rescue Exception is very broad; presumably deliberate
      # so the daemon loop reports anything at all before dying — confirm.
      rescue Exception => e
        puts "There was an error: #{e.nice_message}"
      end
    end
    # Tough method:
    # We need to make sure that all the instances have the required software
    # installed. This is a basic check against the local store of the
    # instances that have the stack installed.
    def reconfigure_cloud_when_necessary
      reconfigure_running_instances if number_of_unconfigured_nodes > 0
    end
    def number_of_unconfigured_nodes
      nodes.reject {|n| n.stack_installed? }.size
    end
    # Add an instance if the load is high
    def add_instance_if_load_is_high
      request_launch_new_instance if expand?
    end
    # Teardown the newest non-master instance if the load is pretty low
    def terminate_instance_if_load_is_low
      return unless contract?
      candidate = nodes.reject {|n| n.master? }.last
      request_termination_of_instance(candidate.instance_id) if candidate
    end
    # FOR MONITORING
    def contract?
      valid_rules?(:contract_when)
    end
    def expand?
      valid_rules?(:expand_when)
    end
    # Average web request rate across the cloud.
    # NOTE(review): this divides an Array by an Integer — presumably
    # core/array.rb (not visible here) defines Array#/ to average; verify.
    def web_requests
      nodes.collect {|n| n.web } / nodes.size
    end
    # Average cpu usage across the cloud
    def cpu_usage
      nodes.collect {|n| n.cpu } / nodes.size
    end
    # Average memory usage across the cloud
    def memory_usage
      nodes.collect {|n| n.memory } / nodes.size
    end
    # Restart the running instances' services with monit on all the nodes
    def restart_running_instances_services
      nodes.each do |node|
        node.restart_with_monit
      end
    end
    # Reconfigure every node that reports a running status
    def reconfigure_running_instances
      nodes.each do |node|
        node.configure if node.status =~ /running/
      end
    end
    # Build the basic haproxy config file from the config file in the
    # config directory and return a tempfile
    def build_haproxy_file
      servers=<<-EOS
        #{nodes.collect {|node| node.haproxy_entry}.join("\n")}
      EOS
      write_to_temp_file(open(Application.haproxy_config_file).read.strip ^ {:servers => servers, :host_port => Application.host_port})
    end
    # Build the hosts file and return a tempfile
    def build_hosts_file
      write_to_temp_file(nodes.collect {|n| n.hosts_entry }.join("\n"))
    end
    # Build host file for a specific node (that node's own line uses the
    # local hosts entry)
    def build_hosts_file_for(n)
      servers=<<-EOS
        #{nodes.collect {|node| node.ip == n.ip ? node.local_hosts_entry : node.hosts_entry}.join("\n")}
      EOS
      write_to_temp_file(servers)
    end
    # Build the basic auth file for the heartbeat
    def build_heartbeat_authkeys_file
      write_to_temp_file(open(Application.heartbeat_authkeys_config_file).read)
    end
    # Return a list of the nodes and cache them
    def nodes
      @nodes ||= list_of_nonterminated_instances.collect_with_index do |inst, i|
        RemoteInstance.new(inst.merge({:number => i}))
      end
    end
    # Get the node at the specific index from the cached nodes
    def get_node(i=0)
      nodes.find {|n| n.number == i}
    end
    # Get the next node in sequence, so we can configure heartbeat to
    # monitor the next node.
    # NOTE(review): the wrap test uses (nodes.size - 1), so with N nodes both
    # the node at index N-2 and the last node map back to node 0 — looks like
    # an off-by-one (i >= nodes.size); preserved as-is pending tests.
    def get_next_node(node)
      i = node.number + 1
      i = 0 if i >= (nodes.size - 1)
      get_node(i)
    end
    # On exit command
    def on_exit
    end
    # List the clouds
    def list
      return "Cloud is not running" unless number_of_pending_and_running_instances > 0
      out = "-- CLOUD (#{number_of_pending_and_running_instances})--\n"
      out << nodes.collect {|node| node.description }.join("\n")
      out
    end
    # Reset and clear the caches
    def reset!
      @cached_descriptions = nil
      @nodes = nil
    end

    class << self
      def requires_heartbeat?
        new.nodes.size > 1
      end
      def get_next_node(node)
        new.get_next_node(node)
      end
      # Build a heartbeat_config_file from the config file in the config
      # directory and return a tempfile
      def build_heartbeat_config_file_for(node)
        return nil unless node
        servers = "#{node.node_entry}\n#{get_next_node(node).node_entry}"
        write_to_temp_file(open(Application.heartbeat_config_file).read.strip ^ {:nodes => servers})
      end
      # Build a heartbeat resources file from the config directory and
      # return a tempfile
      def build_heartbeat_resources_file_for(node)
        return nil unless node
        write_to_temp_file("#{node.haproxy_resources_entry}\n#{get_next_node(node).haproxy_resources_entry}")
      end
      def build_hosts_file_for(node)
        new.build_hosts_file_for(node)
      end
    end

  end
end
=begin rdoc
Basic monitor on the cpu stats
=end
module PoolParty
  module Monitors
    module Cpu
      # Sample the 1-minute load average by shelling out to `uptime`.
      #
      # FIX: the original assigned the parsed value to a block-local `ret`
      # and then read `ret` outside the block (NameError), and it passed
      # the IO object itself — not its output — to monitor_from_string.
      # IO.popen's block form returns the block's value, so just return it.
      def self.monitor!
        IO.popen("uptime") { |io| monitor_from_string(io.read) }
      end
      # Pull the 1-minute load average out of an `uptime` line:
      # "... load average: 0.52, 0.41, 0.33" => 0.52
      # (third-from-last whitespace token; trailing comma is ignored by to_f)
      def self.monitor_from_string(str="")
        str.split(/\s+/)[-3].to_f
      end
    end
  end
end
=begin rdoc
Basic memory monitor on the instance
=end
module PoolParty
  module Monitors
    module Memory
      # Sample the fraction of physical memory in use via `free -m`.
      #
      # FIX: the original assigned to a block-local `ret` and read it
      # outside the block (NameError), and passed the IO object — not its
      # output — to monitor_from_string. IO.popen's block form returns the
      # block's value, so just return it.
      def self.monitor!
        IO.popen("free -m | grep -i mem") { |io| monitor_from_string(io.read) }
      end
      # Parse a "Mem: <total> <used> ..." line into used/total.
      # Returns NaN for empty/garbage input (0.0 / 0.0), matching the
      # original behavior.
      def self.monitor_from_string(str="")
        fields = str.split
        total_memory = fields[1].to_f
        used_memory = fields[2].to_f

        used_memory / total_memory
      end
    end
  end
end
=begin rdoc
Monitor the web stats for the request rate the server can handle at a time
=end
module PoolParty
  module Monitors
    module Web
      # Benchmark the local web server with httperf and return the
      # sustained request rate (req/s) as a Float.
      def self.monitor!(port)
        IO.popen("httperf --server localhost --port #{port} --num-conn 3 --timeout 5 | grep 'Request rate'") do |io|
          @req = monitor_from_string(io.gets)
        end
        @req
      end
      # Extract the numeric rate from a "Request rate: 12.3 req/s ..." line.
      #
      # FIX: the original indexed the match with 0 (the whole match,
      # including surrounding spaces) — which only worked because to_f
      # ignores whitespace — and raised NoMethodError (nil.chomp) whenever
      # the pattern did not match (e.g. io.gets returned nil). Use capture
      # group 1 and fall back to 0.0.
      def self.monitor_from_string(str="")
        rate = str.to_s[/[.]* ([\d]*\.[\d]*) [.]*/, 1]
        rate ? rate.to_f : 0.0
      end
    end
  end
end