poolparty 0.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGELOG +4 -0
- data/Manifest +55 -0
- data/README.txt +113 -0
- data/Rakefile +18 -0
- data/bin/instance +54 -0
- data/bin/pool +33 -0
- data/config/config.yml +23 -0
- data/config/create_proxy_ami.sh +582 -0
- data/config/haproxy.conf +29 -0
- data/config/heartbeat.conf +9 -0
- data/config/heartbeat_authkeys.conf +2 -0
- data/config/monit/haproxy.monit.conf +7 -0
- data/config/monit/nginx.monit.conf +0 -0
- data/config/monit.conf +8 -0
- data/config/nginx.conf +24 -0
- data/lib/core/array.rb +10 -0
- data/lib/core/exception.rb +9 -0
- data/lib/core/kernel.rb +9 -0
- data/lib/core/module.rb +22 -0
- data/lib/core/object.rb +14 -0
- data/lib/core/string.rb +49 -0
- data/lib/core/time.rb +41 -0
- data/lib/modules/callback.rb +55 -0
- data/lib/modules/ec2_wrapper.rb +74 -0
- data/lib/modules/safe_instance.rb +31 -0
- data/lib/pool_party/application.rb +133 -0
- data/lib/pool_party/init.rb +4 -0
- data/lib/pool_party/master.rb +189 -0
- data/lib/pool_party/monitors/cpu.rb +18 -0
- data/lib/pool_party/monitors/memory.rb +21 -0
- data/lib/pool_party/monitors/web.rb +18 -0
- data/lib/pool_party/monitors.rb +13 -0
- data/lib/pool_party/optioner.rb +16 -0
- data/lib/pool_party/os/ubuntu.rb +78 -0
- data/lib/pool_party/os.rb +11 -0
- data/lib/pool_party/remote_instance.rb +180 -0
- data/lib/pool_party/remoting.rb +112 -0
- data/lib/pool_party/scheduler.rb +93 -0
- data/lib/pool_party/tasks.rb +220 -0
- data/lib/pool_party.rb +69 -0
- data/lib/s3/s3_object_store_folders.rb +44 -0
- data/poolparty.gemspec +55 -0
- data/spec/application_spec.rb +32 -0
- data/spec/callback_spec.rb +65 -0
- data/spec/helpers/ec2_mock.rb +56 -0
- data/spec/helpers/remote_instance_mock.rb +11 -0
- data/spec/kernel_spec.rb +11 -0
- data/spec/master_spec.rb +147 -0
- data/spec/monitor_spec.rb +16 -0
- data/spec/optioner_spec.rb +22 -0
- data/spec/poolparty_spec.rb +8 -0
- data/spec/remote_instance_spec.rb +29 -0
- data/spec/remoting_spec.rb +75 -0
- data/spec/spec_helper.rb +38 -0
- data/spec/string_spec.rb +28 -0
- data/test/test_pool_party.rb +0 -0
- metadata +171 -0
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
module PoolParty
  # Command-line helper that separates bare words from switches.
  class Optioner
    # Parse the command line for arguments that carry no switch.
    #
    # argv - the raw argument array; MUTATED in place: each switch's
    #        following value is deleted so later option parsing only
    #        sees the switches themselves.
    # safe - switches that take no value (their successor is kept).
    #
    # Returns the array of bare (switch-less) arguments.
    def self.parse(argv, safe=[])
      collected = []
      argv.each_with_index do |token, idx|
        if token.include?("-")
          # Drop the switch's value so it isn't mistaken for a bare
          # argument — unless this switch is known to be standalone.
          # Deleting the successor also makes the iteration skip it.
          argv.delete_at(idx + 1) unless safe.include?(token)
        else
          collected << token
        end
      end
      collected
    end
  end
end
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
=begin rdoc
Ubuntu specific install tasks for PoolParty.
Each install_* method ships a small shell script to the remote node via
the including class's ssh method (e.g. RemoteInstance#ssh).
NOTE(review): heredoc bodies keep their leading whitespace; String#runnable
(defined in core/string.rb, not visible here) presumably normalizes the
script before it is executed — confirm.
=end
module PoolParty
  module Os
    module Ubuntu
      # Install the whole serving stack: haproxy, heartbeat, monit, s3fs.
      def install_stack
        install_haproxy
        install_heartbeat
        install_monit
        install_s3fuse
      end
      # Install haproxy, enable it, and route its local0 syslog facility
      # into /var/log/haproxy.log (restarting syslogd to pick that up).
      def install_haproxy
        cmd=<<-EOC
          apt-get -y install haproxy
          sed -i 's/ENABLED=0/ENABLED=1/g' /etc/default/haproxy
          sed -i 's/SYSLOGD=\"\"/SYSLOGD=\"-r\"/g' /etc/default/syslogd
          echo 'local0.* /var/log/haproxy.log' >> /etc/syslog.conf && killall -9 syslogd && syslogd
        EOC
        ssh cmd.runnable
      end
      # Install monit, enable it in /etc/default/monit and start it.
      def install_monit
        cmd=<<-EOC
          apt-get -y install monit
          mkdir /etc/monit
          sed -i 's/startup=0/startup=1/g' /etc/default/monit
          /etc/init.d/monit start
        EOC
        ssh cmd.runnable
      end
      # Install nginx (not part of install_stack; available on demand).
      def install_nginx
        cmd=<<-EOC
          apt-get -y install nginx
        EOC
        ssh cmd.runnable
      end
      # Install heartbeat v2 from apt.
      def install_heartbeat
        cmd=<<-EOC
          apt-get -y install heartbeat-2
        EOC
        ssh cmd.runnable
      end
      # Build s3fs (r166) from source, install the binary, and create the
      # /data mountpoint. Build dependencies go up in a separate ssh call.
      def install_s3fuse
        cmd=<<-EOC
          cd /usr/local/src && wget http://s3fs.googlecode.com/files/s3fs-r166-source.tar.gz
          tar -zxf s3fs-r166-source.tar.gz
          cd s3fs/ && make
          mv s3fs /usr/bin
          mkdir /data
        EOC
        ssh("apt-get -y install build-essential libcurl4-openssl-dev libxml2-dev libfuse-dev")
        ssh(cmd.runnable)
      end
      # We want to slim this down...
      # Symlink the distro's ruby 1.8 tools, install rubygems 1.1.1 from
      # source, then pull in the gems the master needs to run poolparty.
      def install_ruby_and_rubygems
        cmd=<<-EOC
          echo 'Installing ruby...'
          ln -sf /usr/bin/ruby1.8 /usr/local/bin/ruby
          ln -sf /usr/bin/ri1.8 /usr/local/bin/ri
          ln -sf /usr/bin/rdoc1.8 /usr/local/bin/rdoc
          ln -sf /usr/bin/irb1.8 /usr/local/bin/irb
          echo '-- Installing Rubygems'
          cd /usr/local/src
          wget http://rubyforge.org/frs/download.php/35283/rubygems-1.1.1.tgz
          tar -xzf rubygems-1.1.1.tgz
          cd rubygems-1.1.1
          ruby setup.rb --no-rdoc --no-ri
          ln -sf /usr/bin/gem1.8 /usr/bin/gem
          gem1.8 update --system
          gem1.8 install aws-s3 amazon-ec2 aska rake poolparty --no-rdoc --no-ri --no-test
        EOC
        ssh("apt-get -y install build-essential ruby1.8-dev ruby1.8 ri1.8 rdoc1.8 irb1.8 libreadline-ruby1.8 libruby1.8")
        ssh(cmd.runnable)
      end
    end

  end
end
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
module PoolParty
  # A single remote node in the pool. Wraps the instance description hash
  # and knows how to configure itself over ssh/scp (hosts, haproxy,
  # heartbeat, monit, s3fs) and how to take basic monitor readings.
  class RemoteInstance
    include PoolParty # WTF -> why isn't message included
    include Callbacks

    # NOTE(review): attr_accessor :name redefines the reader generated by
    # attr_reader above, and both readers are shadowed by the custom
    # +name+ method below — effectively only the writer survives.
    attr_reader :ip, :instance_id, :name, :number, :status, :launching_time, :stack_installed
    attr_accessor :name

    # obj is an instance-description hash; keys other than :ip and
    # :instance_id fall back to defaults (number 0 == the master).
    def initialize(obj)
      @ip = obj[:ip]
      @instance_id = obj[:instance_id]
      @name = obj[:name] || "node"
      @number = obj[:number] || 0 # Defaults to the master
      @status = obj[:status] || "running"
      @launching_time = obj[:launching_time] || Time.now
    end

    # /etc/hosts entry for this instance (ip<TAB>name)
    def hosts_entry
      "#{@ip}\t#{name}"
    end
    # Internal (loopback) host entries pushed onto the instance itself
    def local_hosts_entry
      "127.0.0.1 #{name}\n127.0.0.1 localhost.localdomain localhost ubuntu"
    end
    # Node entry for the heartbeat configuration
    def node_entry
      "node #{name}"
    end
    # Internal naming scheme: base name plus node number, e.g. "node0"
    def name
      "#{@name}#{@number}"
    end
    # Entry for the heartbeat haresources file
    def heartbeat_entry
      "#{name} #{ip} #{Application.managed_services}"
    end
    # Backend server entry for the haproxy config
    def haproxy_entry
      "server #{name} #{@ip}:#{Application.client_port} weight 1 check"
    end
    # name/ip pair used when building the haproxy resources file
    def haproxy_resources_entry
      "#{name}\t#{@ip}"
    end
    # Is this the master? Node number 0 is always the master.
    def master?
      @number == 0
    end
    # Let's define some stuff for monit:
    # stop_with_monit / start_with_monit / restart_with_monit drive every
    # monitored service on the node through monit over ssh.
    %w(stop start restart).each do |cmd|
      define_method "#{cmd}_with_monit" do
        ssh("monit #{cmd} all")
      end
    end
    # Gets called everytime the cloud reloads itself
    # This is how the cloud reconfigures itself
    def configure
      configure_master if master?
      configure_linux
      configure_hosts
      configure_haproxy
      configure_heartbeat if Master.requires_heartbeat?
      configure_s3fuse
      configure_monit
    end
    # Setup the master tasks: install ruby + the poolparty gems, push the
    # config file up, then start the maintain loop on the master itself.
    def configure_master
      message "configuring master (#{name})"
      install_ruby_and_rubygems # Install ruby and the gems required to run the master
      scp(Application.config_file, "~/.config")
      ssh("pool maintain -C ~/.config") # Let's set it to maintain, ey?
    end
    # Change the hostname for the instance
    def configure_linux
      ssh("'hostname -v #{name}'") rescue message "error in setting hostname"
    end
    # Configure s3fs if the bucket is specified in the config.yml
    def configure_s3fuse
      message("Configuring s3fuse")

      unless Application.shared_bucket.empty?
        # Install s3fs only when `s3fs -v` doesn't already produce its
        # "missing bucket" usage output (i.e. the binary is absent).
        install_s3fuse unless ssh("s3fs -v") =~ /missing\ bucket/
        ssh("/usr/bin/s3fs #{Application.shared_bucket} -ouse_cache=/tmp -o accessKeyId=#{Application.access_key} -o secretAccessKey=#{Application.secret_access_key} -o nonempty /data")
      end
    end
    # Configure heartbeat only if there is enough servers
    def configure_heartbeat
      message "Configuring heartbeat"
      install_heartbeat unless has?("heartbeat")

      file = write_to_temp_file(open(Application.heartbeat_authkeys_config_file).read.strip)
      scp(file.path, "/etc/ha.d/authkeys")

      file = Master.build_heartbeat_config_file_for(self)
      scp(file.path, "/etc/ha.d/ha.cf")

      file = Master.build_heartbeat_resources_file_for(self)
      scp(file.path, "/etc/ha.d/haresources")
    end
    # Some configures for monit
    def configure_monit
      message "Configuring monit"
      # NOTE(review): has? appends " -v", so this runs "monit -V -v" on
      # the node — works only because monit still prints version output.
      install_monit unless has?("monit -V")

      scp(Application.monit_config_file, "/etc/monit/monitrc")
      ssh("mkdir /etc/monit.d")
      Dir["#{File.dirname(Application.monit_config_file)}/monit/*"].each do |f|
        scp(f, "/etc/monit.d/#{File.basename(f)}")
      end
    end
    # Configure haproxy
    def configure_haproxy
      message "Configuring haproxy"
      install_haproxy unless has?("haproxy")

      file = Master.new.build_haproxy_file
      scp(file.path, "/etc/haproxy.cfg")
    end
    # Configure the hosts for the linux file
    def configure_hosts
      message "Configuring hosts"
      file = Master.build_hosts_file_for(self)
      scp(file.path, "/etc/hosts") rescue message "Error in uploading new /etc/hosts file"
    end
    # Send a generic version command to test if the stdout contains
    # any information to test if the software is on the instance
    def has?(str)
      !ssh("#{str} -v").empty?
    end
    # MONITORS
    # Monitor the number of web requests that can be accepted at a time
    def web
      Monitors::Web.monitor_from_string ssh("httperf --server localhost --port #{Application.client_port} --num-conn 3 --timeout 5 | grep 'Request rate'") rescue 0.0
    end
    # Monitor the cpu status of the instance
    def cpu
      Monitors::Cpu.monitor_from_string ssh("uptime") rescue 0.0
    end
    # Monitor the memory
    def memory
      Monitors::Memory.monitor_from_string ssh("free -m | grep -i mem") rescue 0.0
    end
    # Scp src to dest on the instance (shell-out; no error checking)
    def scp(src="", dest="")
      `scp -i #{Application.keypair_path} #{src} #{Application.username}@#{@ip}:#{dest}`
    end
    # Ssh into the instance or run a command, if the cmd is set
    def ssh(cmd="")
      ssh = "ssh -i #{Application.keypair_path} #{Application.username}@#{@ip}"

      # Interactive shell when no cmd given; otherwise run it remotely.
      # NOTE(review): cmd.runnable comes from the String extension in
      # core/string.rb — confirm its behavior before changing this.
      cmd.empty? ? system("#{ssh}") : %x[#{ssh} '#{cmd.runnable}']
    end

    # Description in the rake task; returns nil for unknown states
    def description
      case @status
      when "running"
        "#{@number}: INSTANCE: #{name} - #{@ip} - #{@instance_id} - #{@launching_time}"
      when "shutting-down"
        "(terminating) INSTANCE: #{name} - #{@ip} - #{@instance_id} - #{@launching_time}"
      when "pending"
        "(booting) INSTANCE: #{name} - #{@ip} - #{@instance_id} - #{@launching_time}"
      end
    end
    # Has the software stack been marked installed on this node?
    def stack_installed?
      @stack_installed == true
    end
    # Flag the stack as installed (flipped by the before-:configure callback)
    def mark_installed
      @stack_installed = true
    end
    # Include the os specific tasks as specified in the application options (config.yml)
    instance_eval "include PoolParty::Os::#{Application.os.capitalize}"

    # CALLBACKS
    after :install_stack, :configure # After we install the stack, let's make sure we configure it too
    before :configure, :mark_installed # We want to make sure the node is marked installed before configuring
    after :configure, :restart_with_monit # Anytime we configure the server, we want the server to restart it's services
  end
end
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
module PoolParty
  extend self

  # Remote-instance bookkeeping for the pool: listing, launching and
  # terminating instances through the Ec2Wrapper mixin.
  class Remoting
    include PoolParty
    include Ec2Wrapper
    include Scheduler

    # == GENERAL METHODS
    # == LISTING
    # Instances currently up and serving.
    def list_of_running_instances
      select_by_status(/running/)
    end
    # Instances that have been requested but are still booting.
    def list_of_pending_instances
      select_by_status(/pending/)
    end
    # Instances on their way down.
    def list_of_terminating_instances
      select_by_status(/shutting/)
    end
    # Every instance that has not yet been terminated.
    def list_of_nonterminated_instances
      list_of_instances.reject { |desc| desc[:status] =~ /terminated/ }
    end
    # Every instance known to the account, whatever its state.
    def list_of_instances
      get_instances_description
    end
    # Count of instances still booting.
    def number_of_pending_instances
      list_of_pending_instances.size
    end
    # Count of instances up and running.
    def number_of_running_instances
      list_of_running_instances.size
    end
    # Combined count of booting and running instances.
    def number_of_pending_and_running_instances
      number_of_pending_instances + number_of_running_instances
    end
    # == LAUNCHING
    # Launch a fresh instance, provided the configured ceiling allows it.
    # Returns true when a launch was requested, false otherwise.
    def request_launch_new_instance
      return false unless can_start_a_new_instance?
      request_launch_one_instance_at_a_time
      true
    end
    # May another instance be started?
    def can_start_a_new_instance?
      maximum_number_of_instances_are_not_running?
    end
    # True while we are below Application.maximum_instances running nodes.
    def maximum_number_of_instances_are_not_running?
      list_of_running_instances.size < Application.maximum_instances
    end
    # Launch +num+ instances serially; returns one launch result per node.
    def request_launch_new_instances(num=1)
      Array.new(num) { request_launch_one_instance_at_a_time }
    end
    # Block (polling every "5.seconds") until nothing is pending, then
    # launch exactly one new instance.
    def request_launch_one_instance_at_a_time
      reset!
      until number_of_pending_instances.zero?
        wait "5.seconds"
        reset!
      end
      launch_new_instance!
    end
    # == SHUTDOWN
    # Terminate every currently-running instance.
    def request_termination_of_running_instances
      list_of_running_instances.each { |desc| terminate_instance!(desc[:instance_id]) }
    end
    # Terminate every instance regardless of state (includes pending ones).
    def request_termination_of_all_instances
      get_instances_description.each { |desc| terminate_instance!(desc[:instance_id]) }
    end
    # Terminate one instance by id, provided the configured floor allows
    # it. Returns true when a termination was issued, false otherwise.
    def request_termination_of_instance(id)
      return false unless can_shutdown_an_instance?
      terminate_instance! id
      true
    end
    # May an instance be shut down?
    def can_shutdown_an_instance?
      minimum_number_of_instances_are_running?
    end
    # True while we are above Application.minimum_instances running nodes.
    def minimum_number_of_instances_are_running?
      list_of_running_instances.size > Application.minimum_instances
    end
    # Memoized RemoteInstance wrappers for the running nodes.
    def running_instances
      @running_instances ||= update_instance_values
    end
    # Rebuild (and re-cache) the sorted RemoteInstance wrappers.
    def update_instance_values
      @running_instances = list_of_running_instances.map { |desc| RemoteInstance.new(desc) }.sort
    end

    private

    # Shared filter: nonterminated instances whose :status matches pattern.
    def select_by_status(pattern)
      list_of_nonterminated_instances.select { |desc| desc[:status] =~ pattern }
    end
  end

end
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
module PoolParty
|
|
2
|
+
extend self
|
|
3
|
+
# Schedule tasks container
|
|
4
|
+
class ScheduleTasks
|
|
5
|
+
attr_reader :tasks
|
|
6
|
+
include ThreadSafeInstance
|
|
7
|
+
# Initialize tasks array and run
|
|
8
|
+
def initialize
|
|
9
|
+
@tasks = []
|
|
10
|
+
run
|
|
11
|
+
end
|
|
12
|
+
# Synchronize the running threaded tasks
|
|
13
|
+
def run
|
|
14
|
+
unless @tasks.empty?
|
|
15
|
+
self.class.synchronize do
|
|
16
|
+
@tasks.reject!{|a|
|
|
17
|
+
begin
|
|
18
|
+
a.run;a.join
|
|
19
|
+
rescue Exception => e
|
|
20
|
+
puts "There was an error in the task: #{e} #{e.backtrace.join("\n")}"
|
|
21
|
+
end
|
|
22
|
+
true
|
|
23
|
+
}
|
|
24
|
+
end
|
|
25
|
+
end
|
|
26
|
+
end
|
|
27
|
+
# Add a task in a new thread
|
|
28
|
+
def <<(a)
|
|
29
|
+
@tasks.push( Thread.new {a.call} )
|
|
30
|
+
end
|
|
31
|
+
alias_method :push, :<<
|
|
32
|
+
# In the ThreadSafeInstance
|
|
33
|
+
make_safe :<<
|
|
34
|
+
end
|
|
35
|
+
# Scheduler class
|
|
36
|
+
module Scheduler
  attr_reader :tasks
  # Lazily-built container holding the scheduled task threads
  def tasks
    @tasks ||= ScheduleTasks.new
  end
  # Queue the given block to run in its own thread on the next drain
  def add_task(&blk)
    tasks.push proc{blk.call}
  end
  # Seconds to wait between polling runs (from Application.polling_time)
  def interval
    @interval ||= Application.polling_time
  end
  # Drain and run all queued task threads
  def run_threads
    tasks.run
  end
  # Fork into the background: ignore HUP, point stdio at /dev/null
  # (stdin is kept when @use_stdin is set), run the given block in the
  # child, then detach so the parent never waits on it.
  def daemonize
    puts "Daemonizing..."

    pid = fork do
      Signal.trap('HUP', 'IGNORE') # Don't die upon logout
      File.open("/dev/null", "r+") do |devnull|
        $stdout.reopen(devnull)
        $stderr.reopen(devnull)
        $stdin.reopen(devnull) unless @use_stdin
      end
      yield if block_given?
    end
    Process.detach(pid)
  end
  # Run the loop and wait the amount of time between running the tasks
  # You can send it daemonize => true and it will daemonize
  def run_thread_loop(opts={})
    block = lambda {
      loop do
        begin
          yield if block_given?
          run_threads
          wait interval
          reset!
        rescue Exception => e
          puts "There was an error in the run_thread_loop: #{e}"
        end
      end
    }
    # Run the tasks
    opts[:daemonize] ? daemonize(&block) : block.call
  end
  # Reset cached values between polling runs.
  # FIXME(review): assigning to the block parameter only rebinds a local
  # variable — it cannot nil out whatever +cached_variables+ yields, so
  # this reset is almost certainly a no-op. cached_variables is defined
  # elsewhere; confirm its contract (ivar names vs. values) before fixing,
  # e.g. with instance_variable_set if it yields ivar name symbols.
  def reset!
    cached_variables.each {|cached| cached = nil }
  end

end
|
|
93
|
+
end
|