dreadpiratepj-poolparty 0.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGELOG +12 -0
- data/Manifest +115 -0
- data/README.txt +140 -0
- data/Rakefile +27 -0
- data/bin/instance +61 -0
- data/bin/pool +62 -0
- data/config/cloud_master_takeover +17 -0
- data/config/create_proxy_ami.sh +582 -0
- data/config/haproxy.conf +29 -0
- data/config/heartbeat.conf +8 -0
- data/config/heartbeat_authkeys.conf +2 -0
- data/config/installers/ubuntu_install.sh +77 -0
- data/config/monit.conf +9 -0
- data/config/monit/haproxy.monit.conf +7 -0
- data/config/monit/nginx.monit.conf +0 -0
- data/config/nginx.conf +24 -0
- data/config/reconfigure_instances_script.sh +18 -0
- data/config/sample-config.yml +23 -0
- data/config/scp_instances_script.sh +12 -0
- data/lib/core/array.rb +13 -0
- data/lib/core/exception.rb +9 -0
- data/lib/core/float.rb +13 -0
- data/lib/core/hash.rb +11 -0
- data/lib/core/kernel.rb +12 -0
- data/lib/core/module.rb +22 -0
- data/lib/core/object.rb +18 -0
- data/lib/core/proc.rb +15 -0
- data/lib/core/string.rb +49 -0
- data/lib/core/time.rb +41 -0
- data/lib/modules/callback.rb +133 -0
- data/lib/modules/ec2_wrapper.rb +82 -0
- data/lib/modules/safe_instance.rb +31 -0
- data/lib/modules/vlad_override.rb +82 -0
- data/lib/poolparty.rb +105 -0
- data/lib/poolparty/application.rb +170 -0
- data/lib/poolparty/init.rb +6 -0
- data/lib/poolparty/master.rb +329 -0
- data/lib/poolparty/monitors.rb +13 -0
- data/lib/poolparty/monitors/cpu.rb +19 -0
- data/lib/poolparty/monitors/memory.rb +26 -0
- data/lib/poolparty/monitors/web.rb +23 -0
- data/lib/poolparty/optioner.rb +16 -0
- data/lib/poolparty/plugin.rb +43 -0
- data/lib/poolparty/plugin_manager.rb +67 -0
- data/lib/poolparty/provider.rb +2 -0
- data/lib/poolparty/provider/packages/essential.rb +6 -0
- data/lib/poolparty/provider/packages/git.rb +4 -0
- data/lib/poolparty/provider/packages/haproxy.rb +20 -0
- data/lib/poolparty/provider/packages/heartbeat.rb +4 -0
- data/lib/poolparty/provider/packages/monit.rb +6 -0
- data/lib/poolparty/provider/packages/rsync.rb +4 -0
- data/lib/poolparty/provider/packages/ruby.rb +37 -0
- data/lib/poolparty/provider/packages/s3fuse.rb +11 -0
- data/lib/poolparty/provider/provider.rb +60 -0
- data/lib/poolparty/remote_instance.rb +216 -0
- data/lib/poolparty/remoter.rb +106 -0
- data/lib/poolparty/remoting.rb +112 -0
- data/lib/poolparty/scheduler.rb +103 -0
- data/lib/poolparty/tasks.rb +29 -0
- data/lib/poolparty/tasks/cloud.rake +57 -0
- data/lib/poolparty/tasks/development.rake +38 -0
- data/lib/poolparty/tasks/ec2.rake +20 -0
- data/lib/poolparty/tasks/instance.rake +63 -0
- data/lib/poolparty/tasks/plugins.rake +30 -0
- data/lib/poolparty/tasks/server.rake +42 -0
- data/lib/poolparty/tmp.rb +46 -0
- data/lib/s3/s3_object_store_folders.rb +44 -0
- data/misc/basics_tutorial.txt +142 -0
- data/poolparty.gemspec +72 -0
- data/spec/application_spec.rb +39 -0
- data/spec/callback_spec.rb +194 -0
- data/spec/core_spec.rb +15 -0
- data/spec/helpers/ec2_mock.rb +44 -0
- data/spec/kernel_spec.rb +11 -0
- data/spec/master_spec.rb +203 -0
- data/spec/monitors/cpu_monitor_spec.rb +38 -0
- data/spec/monitors/memory_spec.rb +50 -0
- data/spec/monitors/misc_monitor_spec.rb +50 -0
- data/spec/monitors/web_spec.rb +39 -0
- data/spec/optioner_spec.rb +22 -0
- data/spec/plugin_manager_spec.rb +31 -0
- data/spec/plugin_spec.rb +101 -0
- data/spec/pool_binary_spec.rb +10 -0
- data/spec/poolparty_spec.rb +15 -0
- data/spec/provider_spec.rb +17 -0
- data/spec/remote_instance_spec.rb +149 -0
- data/spec/remoter_spec.rb +65 -0
- data/spec/remoting_spec.rb +84 -0
- data/spec/scheduler_spec.rb +75 -0
- data/spec/spec_helper.rb +39 -0
- data/spec/string_spec.rb +28 -0
- data/web/static/conf/nginx.conf +22 -0
- data/web/static/site/images/balloon.png +0 -0
- data/web/static/site/images/cb.png +0 -0
- data/web/static/site/images/clouds.png +0 -0
- data/web/static/site/images/railsconf_preso_img.png +0 -0
- data/web/static/site/index.html +71 -0
- data/web/static/site/javascripts/application.js +3 -0
- data/web/static/site/javascripts/corner.js +178 -0
- data/web/static/site/javascripts/jquery-1.2.6.pack.js +11 -0
- data/web/static/site/misc.html +42 -0
- data/web/static/site/storage/pool_party_presentation.pdf +0 -0
- data/web/static/site/stylesheets/application.css +100 -0
- data/web/static/site/stylesheets/reset.css +17 -0
- data/web/static/src/layouts/application.haml +25 -0
- data/web/static/src/pages/index.haml +25 -0
- data/web/static/src/pages/misc.haml +5 -0
- data/web/static/src/stylesheets/application.sass +100 -0
- metadata +260 -0
@@ -0,0 +1,329 @@
|
|
1
|
+
=begin rdoc
The basic master for PoolParty

Coordinates the cloud: launches and terminates instances, runs the
monitor loop, and builds the configuration payloads (haproxy, heartbeat,
hosts files) that are shipped to each node.
=end
module PoolParty
  class Master < Remoting
    include Aska
    include Callbacks
    include Monitors
    include Remoter

    def initialize
      super

      # Register the user's scaling rule-sets so contract?/expand? can
      # evaluate them later via valid_rules?.
      self.class.send :rules, :contract_when, Application.options.contract_when
      self.class.send :rules, :expand_when, Application.options.expand_when
    end
    # Start the cloud
    def start_cloud!
      start!
    end
    alias_method :start_cloud, :start_cloud!
    # Start the cloud, which launches the minimum_instances
    def start!
      message "Launching minimum_instances"
      launch_minimum_instances
      message "Waiting for master to boot up"
      reset!
      # Poll until no instance is still pending, refreshing the cached
      # node list on every pass.
      until number_of_pending_instances.zero?
        wait "2.seconds" unless Application.test?
        waited = true
        reset!
      end
      # Only pause for sshd if we actually had to wait for a boot above
      # (waited stays nil when the loop never ran).
      unless Application.test? || waited.nil?
        message "Give some time for the instance ssh to start up"
        wait "15.seconds"
      end
      install_cloud if Application.install_on_load?
      configure_cloud
    end
    alias_method :start, :start!
    # Configure the cloud by configuring the master node; vlad propagates
    # the configuration to the rest of the nodes (see reconfigure_running_instances).
    def configure_cloud
      message "Configuring master"
      master = get_node 0
      master.configure
    end
    # Log in to every node once (primes ssh), then install the PoolParty
    # stack on all of them.
    def install_cloud
      Master.with_nodes do |node|
        node.login_once
      end
      Provider.install_poolparty(nodes.collect {|a| a.ip })
    end
    # Launch the minimum number of instances.
    def launch_minimum_instances
      request_launch_new_instances(Application.minimum_instances - number_of_pending_and_running_instances)
      nodes
    end
    # Start monitoring the cloud with the threaded loop
    def start_monitor!
      begin
        trap("INT") do
          on_exit
          exit
        end
        # Daemonize only if we are not in the test environment
        run_thread_loop(:daemonize => !Application.test?) do
          add_task {launch_minimum_instances} # If the base instances go down...
          add_task {reconfigure_cloud_when_necessary}
          add_task {scale_cloud!}
          add_task {check_stats}
        end
      rescue StandardError => e
        # Was `rescue Exception`, which also swallowed the SystemExit raised
        # by the INT trap's `exit` above; StandardError lets exits and
        # signals through while still reporting runtime errors.
        puts "There was an error: #{e.nice_message}"
      end
    end
    alias_method :start_monitor, :start_monitor!
    def user_tasks
      puts "in user_tasks"
    end
    # Sole purpose to check the stats, mainly in a plugin
    def check_stats
    end
    # Add an instance if the cloud needs one or terminate one if necessary
    def scale_cloud!
      add_instance_if_load_is_high
      terminate_instance_if_load_is_low
    end
    alias_method :scale_cloud, :scale_cloud!
    # Tough method:
    # We need to make sure that all the instances have the required software installed
    # This is a basic check against the local store of the instances that have the
    # stack installed.
    def reconfigure_cloud_when_necessary
      reconfigure_running_instances if number_of_unconfigured_nodes > 0
    end
    alias_method :reconfiguration, :reconfigure_cloud_when_necessary
    # Number of nodes that do not yet report the stack as installed.
    def number_of_unconfigured_nodes
      nodes.reject {|a| a.stack_installed? }.size
    end
    # Grow the cloud by a single instance and reconfigure via the master.
    def grow_by_one
      request_launch_new_instance
      self.class.get_master.configure
    end
    # Shrink the cloud by terminating the last non-master node, if any.
    def shrink_by_one
      node = nodes.reject {|a| a.master? }[-1]
      request_termination_of_instance(node.instance_id) if node
    end
    # Add an instance if the load is high
    def add_instance_if_load_is_high
      request_launch_new_instance if expand?
    end
    alias_method :add_instance, :add_instance_if_load_is_high
    # Teardown an instance if the load is pretty low
    def terminate_instance_if_load_is_low
      if contract?
        shrink_by_one
      end
    end
    alias_method :terminate_instance, :terminate_instance_if_load_is_low
    # FOR MONITORING
    def contract?
      valid_rules?(:contract_when)
    end
    def expand?
      valid_rules?(:expand_when)
    end
    # Restart the running instances services with monit on all the nodes
    def restart_running_instances_services
      nodes.each do |node|
        node.restart_with_monit
      end
    end
    # Reconfigure the running instances
    # Since we are using vlad, running configure on one of the instances
    # should configure all of the instances. We set the hosts in this file
    def reconfigure_running_instances
      # nodes.each do |node|
      #   node.configure if node.status =~ /running/
      # end
      master = get_node(0)
      master.configure
    end
    # Build the basic haproxy config file from the config file in the config directory and return a tempfile
    def build_haproxy_file
      servers=<<-EOS
#{nodes.collect {|node| node.haproxy_entry}.join("\n")}
      EOS
      open(Application.haproxy_config_file).read.strip ^ {:servers => servers, :host_port => Application.host_port}
    end
    # Build the hosts file and return a tempfile
    def build_hosts_file
      write_to_temp_file(nodes.collect {|a| a.hosts_entry }.join("\n"))
    end
    # Build host file for a specific node
    def build_hosts_file_for(n)
      servers=<<-EOS
#{nodes.collect {|node| node.ip == n.ip ? node.local_hosts_entry : node.hosts_entry}.join("\n")}
      EOS
      servers
    end
    # Build the basic auth file for the heartbeat
    def build_heartbeat_authkeys_file
      write_to_temp_file(open(Application.heartbeat_authkeys_config_file).read)
    end
    # Build heartbeat config file
    def build_heartbeat_config_file_for(node)
      servers = "#{node.node_entry}\n#{get_next_node(node).node_entry}"
      open(Application.heartbeat_config_file).read.strip ^ {:nodes => servers}
    end
    # Return a list of the nodes and cache them
    def nodes
      @nodes ||= list_of_nonterminated_instances.collect_with_index do |inst, i|
        RemoteInstance.new(inst.merge({:number => i}))
      end
    end
    # Get the node at the specific index from the cached nodes
    def get_node(i=0)
      nodes.select {|a| a.number == i.to_i}.first
    end
    # Get the next node in sequence, so we can configure heartbeat to monitor the next node
    def get_next_node(node)
      i = node.number + 1
      # Wrap around past the last node. This must compare against
      # nodes.size, not nodes.size - 1: with two nodes the old comparison
      # made node 0's "next" node 0 itself, pairing a node with itself
      # in the heartbeat config.
      i = 0 if i >= nodes.size
      get_node(i)
    end
    # On exit command
    def on_exit
    end
    # List the clouds
    def list
      if number_of_pending_and_running_instances > 0
        out = "-- CLOUD (#{number_of_pending_and_running_instances})--\n"
        out << nodes.collect {|node| node.description }.join("\n")
      else
        out = "Cloud is not running"
      end
      out
    end
    # Reset and clear the caches
    def reset!
      @cached_descriptions = nil
      @nodes = nil
    end

    class << self
      include PoolParty

      # Yield each node of a freshly-built Master to the block.
      def with_nodes(&block)
        new.nodes.each(&block)
      end

      # Map over the nodes of a freshly-built Master.
      def collect_nodes(&block)
        new.nodes.collect(&block)
      end

      # Heartbeat is only needed when there is more than one node.
      def requires_heartbeat?
        new.nodes.size > 1
      end
      def is_master_responding?
        `ping -c1 -t5 #{get_master.ip}`
      end
      def get_master
        new.nodes[0]
      end
      def get_next_node(node)
        new.get_next_node(node)
      end
      # Build a heartbeat_config_file from the config file in the config directory and return a tempfile
      def build_heartbeat_config_file_for(node)
        return nil unless node
        new.build_heartbeat_config_file_for(node)
      end
      # Build a heartbeat resources file from the config directory and return a tempfile
      def build_heartbeat_resources_file_for(node)
        return nil unless node
        "#{node.haproxy_resources_entry}\n#{get_next_node(node).haproxy_resources_entry}"
      end
      # Build hosts files for a specific node
      def build_hosts_file_for(node)
        new.build_hosts_file_for(node)
      end
      # Build the scp script for the specific node
      def build_scp_instances_script_for(node)
        authkeys_file = write_to_temp_file(open(Application.heartbeat_authkeys_config_file).read.strip)
        if Master.requires_heartbeat?
          ha_d_file = Master.build_heartbeat_config_file_for(node)
          haresources_file = Master.build_heartbeat_resources_file_for(node)
        end
        haproxy_file = Master.build_haproxy_file
        hosts_file = Master.build_hosts_file_for(node)

        str = open(Application.sh_scp_instances_script).read.strip ^ {
          :cloud_master_takeover => "#{node.scp_string("#{root_dir}/config/cloud_master_takeover", "/etc/ha.d/resource.d/", :dir => "/etc/ha.d/resource.d")}",
          :config_file => "#{node.scp_string(Application.config_file, "~/.config")}",
          :authkeys => "#{node.scp_string(authkeys_file.path, "/etc/ha.d/authkeys", :dir => "/etc/ha.d/")}",
          :resources => "#{node.scp_string("#{root_dir}/config/resource.d/*", "/etc/ha.d/resource.d/", {:switches => "-r"})}",
          :monitrc => "#{node.scp_string(Application.monit_config_file, "/etc/monit/monitrc", :dir => "/etc/monit")}",
          :monit_d => "#{node.scp_string("#{File.dirname(Application.monit_config_file)}/monit/*", "/etc/monit.d/", {:switches => "-r", :dir => "/etc/monit.d/"})}",
          :haproxy => "#{node.scp_string(haproxy_file, "/etc/haproxy.cfg")}",

          :ha_d => Master.requires_heartbeat? ? "#{node.scp_string(ha_d_file, "/etc/ha.d/ha.cf")}" : "",
          # BUGFIX: the haresources file used to be copied to /etc/ha.d/ha.cf
          # as well, clobbering the heartbeat config shipped on the line
          # above; heartbeat reads resources from /etc/ha.d/haresources.
          :haresources => Master.requires_heartbeat? ? "#{node.scp_string(haresources_file, "/etc/ha.d/haresources")}" : "",

          :hosts => "#{node.scp_string(hosts_file, "/etc/hosts")}"
        }
        write_to_temp_file(str)
      end
      # Build basic configuration script for the node
      def build_reconfigure_instances_script_for(node)
        str = open(Application.sh_reconfigure_instances_script).read.strip ^ {
          :config_master => "#{node.update_plugin_string}",
          :start_pool_maintain => "pool maintain -c ~/.config -l ~/plugins",
          :set_hostname => "hostname -v #{node.name}",
          :start_s3fs => "/usr/bin/s3fs #{Application.shared_bucket} -o accessKeyId=#{Application.access_key} -o secretAccessKey=#{Application.secret_access_key} -o nonempty /data"
        }
        write_to_temp_file(str)
      end

      # Configure a vlad RemoteTask with ssh/rsync flags and the running
      # hosts of this cloud.
      # NOTE(review): `rt` is nil when remotetask is not given, so the
      # rt.set calls below would raise on nil — presumably callers always
      # pass a remotetask; confirm before tightening.
      def set_hosts(c, remotetask=nil)
        unless remotetask.nil?
          rt = remotetask
        end

        ssh_location = `which ssh`.gsub(/\n/, '')
        rsync_location = `which rsync`.gsub(/\n/, '')
        rt.set :user, Application.username
        # rt.set :domain, "#{Application.user}@#{ip}"
        rt.set :application, Application.app_name
        rt.set :ssh_flags, "-i #{Application.keypair_path} -o StrictHostKeyChecking=no"
        rt.set :rsync_flags , ['-azP', '--delete', "-e '#{ssh_location} -l #{Application.username} -i #{Application.keypair_path} -o StrictHostKeyChecking=no'"]

        master = get_master
        rt.set :domain, "#{master.ip}" if master
        Master.with_nodes { |node|
          rt.host "#{Application.username}@#{node.ip}",:app if node.status =~ /running/
        }
      end

      # Shell snippet run over ssh to (re)configure a single node.
      def ssh_configure_string_for(node)
        cmd=<<-EOC
#{node.update_plugin_string(node)}
pool maintain -c ~/.config -l #{PoolParty.plugin_dir}
hostname -v #{node.name}
/usr/bin/s3fs #{Application.shared_bucket} -o accessKeyId=#{Application.access_key} -o secretAccessKey=#{Application.secret_access_key} -o nonempty /data
        EOC
      end
      def build_haproxy_file
        servers=<<-EOS
#{collect_nodes {|node| node.haproxy_entry}.join("\n")}
        EOS
        open(Application.haproxy_config_file).read.strip ^ {:servers => servers, :host_port => Application.host_port}
      end
      # Write a temp file with the content str
      def write_to_temp_file(str="")
        tempfile = Tempfile.new("pool-party-#{rand(1000)}-#{rand(1000)}")
        tempfile.print(str)
        tempfile.flush
        tempfile
      end
      def with_temp_file(str="", &block)
        Tempfile.open "pool-party-#{rand(10000)}" do |fp|
          fp.puts str
          fp.flush
          block.call(fp)
        end
      end
    end

  end
end
|
@@ -0,0 +1,19 @@
|
|
1
|
+
=begin rdoc
Basic monitor on the cpu stats
=end
module Cpu
  module Master
    # Average one-minute load across every node in the cloud; 0.0 when
    # there are no nodes.
    def cpu
      return 0.0 if nodes.size <= 0
      total = nodes.inject(0) { |sum, node| sum += node.cpu }
      total / nodes.size
    end
  end

  module Remote
    # One-minute load average of this node, scraped from `uptime` over
    # ssh; falls back to 0.0 if the ssh call or the parse fails.
    def cpu
      begin
        ssh("uptime").split(/\s+/)[-3].to_f
      rescue
        0.0
      end
    end
  end

end

PoolParty.register_monitor Cpu
|
@@ -0,0 +1,26 @@
|
|
1
|
+
=begin rdoc
Basic monitor on the memory stats
=end
module Memory
  module Master
    # Get the average memory usage over the cloud
    # (mean of each node's Remote#memory fraction; 0.0 when no nodes).
    def memory
      nodes.size > 0 ? nodes.inject(0) {|i,a| i += a.memory } / nodes.size : 0.0
    end
  end

  module Remote
    # Fraction of physical memory in use on this node, parsed from
    # `free -m` over ssh; any ssh/parse error yields 0.0.
    # NOTE(review): if total_memory parses as 0 the division returns
    # Float::NAN rather than raising — confirm callers tolerate that.
    def memory
      str = ssh("free -m | grep -i mem")
      total_memory = str.split[1].to_f
      used_memory = str.split[2].to_f

      used_memory / total_memory
    rescue
      0.0
    end
  end

end

PoolParty.register_monitor Memory
|
@@ -0,0 +1,23 @@
|
|
1
|
+
=begin rdoc
Basic monitor on the web request stats
=end
module Web
  module Master
    # Get the average web request capabilities over the cloud
    # (mean of each node's Remote#web rate; 0.0 when no nodes).
    def web
      nodes.size > 0 ? nodes.inject(0) {|i,a| i += a.web } / nodes.size : 0.0
    end
  end

  module Remote
    # Requests/sec this node can serve, measured with a short httperf run
    # against localhost and parsed from the "Request rate" line; any
    # ssh/parse failure yields 0.0.
    def web
      str = ssh("httperf --server localhost --port #{Application.client_port} --num-conn 3 --timeout 5 | grep 'Request rate'")
      # Pull the space-delimited decimal out of e.g. "Request rate: 12.3 req/s".
      # BUGFIX: the original used capture index 0 (the whole match, spaces
      # included, via a regex whose [.]* matched literal dots) and only
      # worked because to_f ignores leading whitespace; use group 1.
      str[/ ([\d]*\.[\d]*) /, 1].to_f
    rescue
      0.0
    end
  end

end

PoolParty.register_monitor Web
|
@@ -0,0 +1,16 @@
|
|
1
|
+
module PoolParty
  class Optioner
    # Parse the command line options for options without a switch.
    #
    # Walks +argv+ collecting every token that contains no "-". When a
    # switch-like token (one containing "-") is seen and is not listed in
    # +safe+, the token following it is removed from +argv+ in place so it
    # is neither scanned nor collected; values of +safe+ switches are kept
    # in argv (and therefore collected). Returns the collected tokens.
    def self.parse(argv, safe=[])
      collected = []
      idx = 0
      # Manual cursor: argv shrinks while we walk it, so re-check the
      # size on every pass (this mirrors each_with_index over a mutating
      # array).
      while idx < argv.size
        current = argv[idx]
        if current.index("-")
          # Drop the switch's value unless this switch is whitelisted.
          argv.delete_at(idx + 1) unless safe.include?(current)
        else
          collected << current
        end
        idx += 1
      end
      collected
    end
  end
end
|
@@ -0,0 +1,43 @@
|
|
1
|
+
=begin rdoc
Allow for plugins based in callbacks

A plugin should be able to hook into any method and run their command
either before or after the plugin.
=end
module PoolParty
  class Plugin

    # Create a class-level method for the name on the class
    # For instance:
    #   create_methods :install, RemoteInstance
    # will give the following methods to the class
    #   before_install and after_install on the RemoteInstance
    #
    # Implementation: for each of "before"/"after" this appends the source
    # of a generated class method (e.g. before_install) to a string, then
    # `eval`s the whole string in the current context. Calling a generated
    # method class_evals into +klass+ and registers each given method name
    # as a callback via the Callbacks-style `before`/`after` hooks, keyed
    # to the calling plugin class's name.
    def self.create_methods(name, klass)
      str = ""
      %w(before after).each do |time|
        str << <<-EOE
          def self.#{time}_#{name}(*meth)
            callee = self
            #{klass}.class_eval do
              meth.each {|m| #{time} :#{name}, {m.to_sym => callee.to_s}}
            end
          end
        EOE
      end
      eval str
    end

    # Hook points exposed for RemoteInstance lifecycle methods
    # (before_install/after_install, before_configure/..., etc.).
    %w(install configure associate_public_ip become_master).each do |method|
      create_methods method, RemoteInstance
    end
    # Hook points exposed for Master lifecycle/monitor methods.
    %w(start start_monitor scale_cloud reconfiguration add_instance terminate_instance check_stats).each do |method|
      create_methods method, Master
    end
    # Hook point exposed for Tasks.
    %w(define_tasks).each do |method|
      create_methods method, Tasks
    end
    # Hook point exposed for Scheduler.
    %w(run_tasks).each do |method|
      create_methods method, Scheduler
    end
  end
end
|