eycloud-recipe-resque 1.0.2 → 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.DS_Store +0 -0
- data/Gemfile.lock +1 -1
- data/eycloud-recipe-resque.gemspec +1 -1
- data/files/default/resque +257 -0
- data/files/default/resque-web +68 -0
- data/metadata.json +1 -1
- data/metadata.rb +1 -1
- data/recipes/configure.rb +31 -8
- data/recipes/default.rb +1 -0
- data/recipes/install.rb +36 -0
- data/recipes/restart.rb +9 -6
- data/templates/default/log.erb +0 -0
- data/templates/default/monitrc.conf.erb +8 -0
- data/templates/default/resque.yml.erb +3 -0
- data/templates/default/resque_wildcard.conf.erb +1 -0
- metadata +15 -10
- data/templates/default/resque.rb.erb +0 -73
- data/templates/default/resque_scheduler.rb.erb +0 -71
data/.DS_Store
ADDED
Binary file
data/Gemfile.lock
CHANGED

data/eycloud-recipe-resque.gemspec
CHANGED

data/files/default/resque
ADDED

@@ -0,0 +1,257 @@
+#!/bin/sh
+#
+# This script starts and stops the Resque daemon
+# This script belongs in /engineyard/bin/resque
+#
+
+PATH=/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:$PATH
+CURDIR=`pwd`
+
+usage() {
+  echo "Usage: $0 <appname> {start|stop|term|quit|pause|cont|abort} <environment> <conf_file>"
+  echo -e "\nstop) is a synonym for quit"
+  echo "quit) issues -QUIT to request the worker to stop"
+  echo "term) issues -TERM to request the worker to stop"
+  echo "pause) issues -USR2 when the current job finishes, worker remains running, but doesn't start any new jobs"
+  echo "cont) issues -CONT to continue after pausing"
+  echo "abort) issues -USR1 kills current job, or shuts down if there is no job"
+  echo "see http://github.com/defunkt/resque for more details"
+  echo -e "\nSet the GRACE_TIME env variable to configure how long we should wait for after issuing quit, before asumming a problem and killing the worker and job"
+  exit 1
+}
+
+rm_lockfile(){
+  if [ -e $LOCK_FILE ]; then
+    logger -t "monit-resque[$$]" "removing $LOCK_FILE for `cat $LOCK_FILE`"
+    rm $LOCK_FILE
+  fi
+}
+
+exit_cleanly() {
+  cd $CURDIR
+  logger -t "monit-resque[$$]" "exiting wrapper cleanly with $RESULT"
+  exit $RESULT
+}
+
+unlock_and_exit_cleanly(){
+  rm_lockfile
+  exit_cleanly
+}
+
+set_pid_from_file(){
+  export PID=`cat $PID_FILE`
+}
+
+signal_worker() {
+  RESULT=0
+  if [ -f $PID_FILE ]; then
+    set_pid_from_file
+    logger -t "monit-resque[$$]" "Issuing kill with -$SIG $PID"
+    SLEEP_COUNT=0
+    kill -$SIG $PID
+  fi
+}
+
+signal_worker_fatally(){
+  signal_worker()
+  if [ -f $PID_FILE ]; then
+    if [ -n "$ALLOW_TIMEOUT" ]; then
+      while [ -e /proc/$PID ]; do
+        sleep .25
+        let "SLEEP_COUNT+=1"
+        let "REPORT_TIME = $SLEEP_COUNT%4"
+        if(( "$SLEEP_COUNT" > $GRACE_TIME )); then
+          logger -t "monit-resque[$$]" "Resque worker with pid $PID for $WORKER_REF still running, issuing -TERM"
+          kill -15 $PID 2>/dev/null; true
+        elif(( $REPORT_TIME == 0 )); then
+          let "RUNTIME = $SLEEP_COUNT/4"
+          logger -t "monit-resque[$$]" "waiting for $PID to die ( for $RUNTIME seconds now)"
+        fi
+      done
+    fi
+    sleep 1
+    if [ -d /proc/$PID ];then
+      for child in $(ps axo pid,ppid | awk "{ if ( \$2 == $PID ) { print \$1 }}");
+      do
+        kill -9 $child 2>/dev/null; true
+        logger -t "monit-resque[$$]" "Murdering Resque workers child with $child for $WORKER_REF"
+        break
+      done
+      while [ -d /proc/$PID ]; do
+        logger -t "monit-resque[$$]" "Murdering Resque worker with $PID for $WORKER_REF"
+        kill -9 $PID
+        sleep 1
+      done
+    fi
+    logger -t "monit-resque[$$]" "Removing pid file for $PID - $WORKER_REF"
+    [ -e "$PID_FILE" -a ! -d /proc/$PID ] && rm -f $PID_FILE
+  fi
+}
+
+lock(){
+  RESULT=0
+  if [ -e $LOCK_FILE ]; then
+    LAST_LOCK_PID=`cat $LOCK_FILE`
+    if [ -n $LAST_LOCK_PID -a -z "`ps axo pid|grep $LAST_LOCK_PID`" -a -f $LOCK_FILE ];then
+      sleep 1
+      logger -t "monit-resque[$$]" "Removing stale lock file for $WORKER_REF ($LAST_LOCK_PID)"
+      rm $LOCK_FILE 2>&1
+    else
+      logger -t "monit-resque[$$]" "Monit already messing with $WORKER_REF ($LAST_LOCK_PID)"
+      RESULT=1
+      exit_cleanly
+    fi
+  fi
+  echo $$ > $LOCK_FILE
+}
+
+legacy_fix() {
+  #In the transition from 0.18.2 to 0.18.3 of ey monit scripts the way
+  #the pid file is used to find the process to kill has changed.
+  #To avert problems being left behind after an upgrade of this package,
+  if [ -f $PID_FILE ]; then
+    set_pid_from_file
+    if [ -n "`ps axo pid,command|grep $PID|grep 'su -c'`" ];then
+      logger -t "monit-resque[$$]" "Monit Scripts have just been upgraded, killing old style workers"
+      for child in $(ps axo pid,ppid| awk "{ if ( \$2 == $PID ) { print \$1 }}");
+      do
+        kill -TERM $child 2> /dev/null
+        while [ -e /proc/$child ]; do
+          logger -t "monit-resque[$$]" "killing legacy worker: $child"
+          [ -e /proc/$child ] && kill -9 $child 2> /dev/null
+          sleep 1
+        done
+      done
+      [ -e /proc/$PID ] && kill -9 $PID 2> /dev/null
+      rm $PID_FILE
+      unlock_exit_cleanly
+    fi
+  fi
+}
+
+if [ $# -lt 4 ]; then usage; fi
+
+if [ "`whoami`" != "root" ]; then
+  logger -t `basename $0` -s "Must be run as root"
+  exit 1
+fi
+
+#Baisc Setup of default values
+APP=$1 ; ACTION=$2; RACK_ENV=$3; CONF_FILE=$4;
+
+APP_DIR="/data/${APP}"
+APP_ROOT="${APP_DIR}/current"
+APP_SHARED="${APP_DIR}/shared"
+APP_CONFIG="${APP_SHARED}/config"
+
+if [ -e "${APP_CONFIG}/${CONF_FILE}" ]; then
+  source "${APP_CONFIG}/${CONF_FILE}"
+  if [ -z "$QUEUE" ]; then
+    logger -t "resque_${APP}" -s "QUEUE not defined in ${CONF_FILE} not found for app: ${APP}"
+    exit 1
+  fi
+else
+  logger -t "resque_${APP}" -s "/data/${APP}/shared/config/${CONF_FILE} not found for app: ${APP}"
+  exit 1
+fi
+
+WORKER_REF=`echo $CONF_FILE | sed s/.conf//`
+LOG_FILE="$APP_ROOT/log/$WORKER_REF.log"
+LOCK_FILE="/tmp/$WORKER_REF.monit-lock"
+PID_FILE="/var/run/engineyard/resque/$APP/$WORKER_REF.pid"
+GEMFILE="$APP_ROOT/Gemfile"
+RAKE="rake"
+if [ -f $GEMFILE ];then
+  RAKE="bundle exec $APP_ROOT/ey_bundler_binstubs/rake"
+fi
+
+if [ -d $APP_ROOT ]; then
+  USER=$(stat -L -c"%U" $APP_ROOT)
+  export HOME="/home/$USER"
+
+  # Fix for SD-3786 - stop sending in VERBOSE= and VVERBOSE= by default
+  if declare -p VERBOSE >/dev/null 2>&1; then export V="VERBOSE=$VERBOSE"; fi
+  if declare -p VVERBOSE >/dev/null 2>&1; then export VV="VVERBOSE=$VVERBOSE"; fi
+
+  # Older versions of sudo need us to call env for the env vars to be set correctly
+  COMMAND="/usr/bin/env $V $VV APP_ROOT=${APP_ROOT} RACK_ENV=${RACK_ENV} RAILS_ENV=${RACK_ENV} MERB_ENV=${RACK_ENV} QUEUE=${QUEUE} $RAKE -f ${APP_ROOT}/Rakefile resque:work"
+
+  if [ ! -d /var/run/engineyard/resque/$APP ]; then
+    mkdir -p /var/run/engineyard/resque/$APP
+  fi
+
+  # handle the second param, don't start if already existing
+
+  logger -t "monit-resque[$$]" "${ACTION}ing Resque worker $WORKER_REF"
+  case "$ACTION" in
+    start)
+      lock
+      cd $APP_ROOT
+      if [ -f $PID_FILE ]; then
+        set_pid_from_file
+        if [ -d /proc/$PID ]; then
+          logger -t "monit-resque[$$]" "Resque worker $WORKER_REF is already running with $PID."
+          RESULT=1
+        else
+          rm -f $PID_FILE
+          logger -t "monit-resque[$$]" "Removing stale pid file ($PID_FILE) for pid $PID"
+        fi
+      fi
+      if [ ! -f $PID_FILE ]; then
+        sudo -u $USER -H $COMMAND >> $LOG_FILE 2>&1 &
+        RESULT=$?
+        logger -t "monit-resque[$$]" "Started with pid $! and exit $RESULT"
+        echo $! > $PID_FILE
+        sleep .1
+      fi
+      unlock_and_exit_cleanly
+      ;;
+    stop|quit)
+      legacy_fix
+      lock
+      SIG="QUIT"
+      [ -z "$GRACE_TIME" ] && GRACE_TIME=60
+      ALLOW_TIMEOUT=1
+      signal_worker
+      [ -e "$LOCK_FILE" ] && rm $LOCK_FILE
+      unlock_and_exit_cleanly
+      ;;
+    kill|term)
+      lock
+      legacy_fix
+      ALLOW_TIMEOUT=
+      SIG="TERM"
+      signal_worker_fatally
+      unlock_and_exit_cleanly
+      ;;
+    pause)
+      lock
+      ALLOW_TIMEOUT=
+      SIG="USR2"
+      signal_worker
+      unlock_and_exit_cleanly
+      ;;
+    cont)
+      lock
+      ALLOW_TIMEOUT=
+      SIG="CONT"
+      signal_worker
+      unlock_and_exit_cleanly
+      ;;
+    abort)
+      lock
+      ALLOW_TIMEOUT=
+      SIG="USR1"
+      signal_worker
+      unlock_and_exit_cleanly
+      ;;
+    *)
+      usage
+      exit_cleanly
+      ;;
+  esac
+else
+  echo "/data/$APP/current doesn't exist."
+  usage
+fi
+
data/files/default/resque-web
ADDED

@@ -0,0 +1,68 @@
+#!/bin/sh
+#
+# This script starts and stops the Resque Web front end
+# This script belongs in /engineyard/bin/resque-web
+#
+PATH=/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:$PATH
+
+usage() {
+  echo "Usage: $0 <appname> {start|stop}"
+  exit 1
+}
+
+if [ $# -lt 2 ]; then usage; fi
+
+if [ "`whoami`" != "root" ]; then
+  logger -t `basename $0` -s "Must be run as root"
+  exit 1
+fi
+
+USER=`stat -c"%U" /data/$1/current`
+HOME="/home/$USER" ; export HOME
+APP=$1
+APP_DIR="/data/${APP}"
+APP_ROOT="${APP_DIR}/current"
+APP_SHARED="${APP_DIR}/shared"
+APP_CONFIG="${APP_SHARED}/config"
+
+GEMFILE="$APP_ROOT/Gemfile"
+BUNDLER=""
+if [ -f $GEMFILE ];then
+  BUNDLER="bundle exec"
+fi
+
+COMMAND="$BUNDLER resque-web -p 8282"
+PID_FILE="/var/run/engineyard/resque/$1/resque-web.pid"
+
+#mkdir -p /var/run/engineyard/resque/
+
+# handle the second param, don't start if already existing
+case "$2" in
+  start)
+    echo "Starting Resque Web"
+    cd /data/$1/current
+    if [ -f $PID_FILE ]; then
+      PID=`cat $PID_FILE`
+      if [ -d /proc/$PID ]; then
+        echo "Resque Web is already running."
+        exit 1
+      fi
+      rm -f $PID_FILE
+    fi
+    echo $$ > $PID_FILE
+    logger -t "monit-resque-web[$$]" "Starting with: ${COMMAND}"
+    exec su -c"$COMMAND -f" $USER
+    ;;
+  stop)
+    echo "Stopping Resque Web"
+    if [ -f $PID_FILE ]; then
+      kill -15 `cat $PID_FILE` 2>/dev/null; true
+    fi
+
+    [ -e "$PID_FILE" ] && rm -f $PID_FILE
+    exit 0
+    ;;
+  *)
+    usage
+    ;;
+esac
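For reference, the usage strings above imply invocations like the following; the app name, environment, and conf file are hypothetical placeholders, and in practice monit issues these calls via the monitrc generated by recipes/configure.rb:

  # one worker for a hypothetical app "myapp" in the "production" environment
  /engineyard/bin/resque myapp start production resque_0.conf
  /engineyard/bin/resque myapp stop production resque_0.conf

  # the Resque web UI (listens on port 8282 per the script above)
  /engineyard/bin/resque-web myapp start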
data/metadata.json
CHANGED
data/metadata.rb
CHANGED
data/recipes/configure.rb
CHANGED
@@ -7,13 +7,6 @@ if node[:instance_role] == "solo" || node[:instance_role] == "eylocal" ||
     (node[:instance_role] == "util" && node[:name] =~ /^(resque)/)
   resque_workers_count = get_resque_worker_count()
 
-  remote_file "/usr/local/bin/resque_kill_stale" do
-    owner 'root'
-    group 'root'
-    mode 0755
-    source "resque_kill_stale"
-  end
-
   directory "/tmp/resque_ttls" do
     owner "root"
     group "root"
@@ -31,17 +24,47 @@ if node[:instance_role] == "solo" || node[:instance_role] == "eylocal" ||
     group node[:owner_name]
     mode 0755
   end
+
 
   node[:applications].each do |app_name, data|
+
+    template "/data/#{app_name}/shared/log/#{node[:environment][:framework_env]}.log" do
+      source "log.erb"
+      owner node[:owner_name]
+      group node[:owner_name]
+      mode 0666
+    end
+
     # app-server specific recipes usually take care of this
     link "/data/#{app_name}/shared/log" do
       to "/var/log/resque"
     end
+
+    template "/etc/monit.d/resque_#{app_name}.monitrc" do
+      owner 'root'
+      group 'root'
+      mode 0644
+      source "monitrc.conf.erb"
+      variables({
+        :num_workers => resque_workers_count,
+        :app_name => app_name,
+        :rails_env => node[:environment][:framework_env]
+      })
+    end
+
+    resque_workers_count.times do |count|
+      template "/data/#{app_name}/shared/config/resque_#{count}.conf" do
+        owner node[:owner_name]
+        group node[:owner_name]
+        mode 0644
+        source "resque_wildcard.conf.erb"
+      end
+    end
   end
 end
 
 if %w[solo app app_master util].include? node[:instance_role]
-  redis_instance = node[:engineyard][:environment][:
+  redis_instance = node[:engineyard][:environment][:instances].find {|x| x[:name] == "redis"} ||
     node[:engineyard][:environment][:db_master] ||
     node[:engineyard][:environment][:instances].find { |x| x[:role] == "solo" }
 
data/recipes/default.rb
CHANGED
data/recipes/install.rb
ADDED
@@ -0,0 +1,36 @@
+#
+# Cookbook Name:: resque
+# Recipe:: install
+#
+
+if node[:instance_role] == "solo" || node[:instance_role] == "eylocal" ||
+  (node[:instance_role] == "util" && node[:name] =~ /^(resque)/)
+
+  file "/usr/local/bin/resque_kill_stale" do
+    owner 'root'
+    group 'root'
+    mode 0755
+    if Chef::VERSION == '0.6.0.2'
+      source "resque_kill_stale"
+    end
+  end
+
+  file "/engineyard/bin/resque" do
+    owner 'root'
+    group 'root'
+    mode 0755
+    if Chef::VERSION == '0.6.0.2'
+      source "resque"
+    end
+  end
+
+  file "/engineyard/bin/resque-web" do
+    owner 'root'
+    group 'root'
+    mode 0755
+    if Chef::VERSION == '0.6.0.2'
+      source "resque-web"
+    end
+  end
+
+end
data/recipes/restart.rb
CHANGED
@@ -3,12 +3,15 @@
 # Recipe:: default
 #
 
-node[:
-
-
-
+if node[:instance_role] == "solo" || node[:instance_role] == "eylocal" ||
+  (node[:instance_role] == "util" && node[:name] =~ /^(resque)/)
+  node[:applications].each do |app_name, data|
+    execute "ensure-resque-is-setup-with-monit" do
+      command %Q{monit reload}
+    end
 
-
-
+    execute "restart-resque" do
+      command %Q{echo "sleep 20 && monit -g #{app_name}_resque restart all" | at now }
+    end
   end
 end
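In effect, the updated restart recipe shells out to monit: for a hypothetical app "myapp", the two execute resources above run roughly the following commands (a sketch only; the real recipe interpolates each app name in the environment):

  monit reload
  echo "sleep 20 && monit -g myapp_resque restart all" | at now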
data/templates/default/log.erb
ADDED

File without changes
data/templates/default/monitrc.conf.erb
ADDED

@@ -0,0 +1,8 @@
+<% (@num_workers || 1).times do |num| %>
+check process resque_<%= @app_name %>_<%= num %>
+  with pidfile /var/run/engineyard/resque/<%= @app_name %>/resque_<%= num %>.pid
+  start program = "/engineyard/bin/resque <%= @app_name %> start <%= @rails_env %> resque_<%= num %>.conf" with timeout 90 seconds
+  stop program = "/engineyard/bin/resque <%= @app_name %> stop <%= @rails_env %> resque_<%= num %>.conf" with timeout 90 seconds
+  if totalmem is greater than 300 MB for 2 cycles then restart # eating up memory?
+  group <%= @app_name %>_resque
+<% end %>
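For illustration only: rendered with hypothetical values (@app_name = "myapp", @rails_env = "production", @num_workers = 1), the template above would produce approximately this monit stanza:

  check process resque_myapp_0
    with pidfile /var/run/engineyard/resque/myapp/resque_0.pid
    start program = "/engineyard/bin/resque myapp start production resque_0.conf" with timeout 90 seconds
    stop program = "/engineyard/bin/resque myapp stop production resque_0.conf" with timeout 90 seconds
    if totalmem is greater than 300 MB for 2 cycles then restart
    group myapp_resque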
data/templates/default/resque_wildcard.conf.erb
ADDED

@@ -0,0 +1 @@
+QUEUE=*
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: eycloud-recipe-resque
 version: !ruby/object:Gem::Version
-  version: 1.0.2
+  version: 1.0.3
 prerelease:
 platform: ruby
 authors:
@@ -9,11 +9,11 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2012-03-
+date: 2012-03-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: eycloud-helper-cronjobs
-  requirement: &
+  requirement: &70200712055240 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>='
@@ -21,10 +21,10 @@ dependencies:
         version: '0'
   type: :runtime
   prerelease: false
-  version_requirements: *
+  version_requirements: *70200712055240
 - !ruby/object:Gem::Dependency
   name: rake
-  requirement: &
+  requirement: &70200712098120 !ruby/object:Gem::Requirement
     none: false
     requirements:
     - - ! '>='
@@ -32,7 +32,7 @@ dependencies:
         version: '0'
   type: :development
   prerelease: false
-  version_requirements: *
+  version_requirements: *70200712098120
 description: Resque for EY Cloud
 email:
 - drnicwilliams@gmail.com
@@ -40,6 +40,7 @@ executables: []
 extensions: []
 extra_rdoc_files: []
 files:
+- .DS_Store
 - .gitignore
 - Gemfile
 - Gemfile.lock
@@ -47,16 +48,20 @@ files:
 - Rakefile
 - attributes/default.rb
 - eycloud-recipe-resque.gemspec
+- files/default/resque
+- files/default/resque-web
 - files/default/resque_kill_stale
 - libraries/get_resque_worker_count.rb
 - metadata.json
 - metadata.rb
 - recipes/configure.rb
 - recipes/default.rb
+- recipes/install.rb
 - recipes/restart.rb
-- templates/default/
+- templates/default/log.erb
+- templates/default/monitrc.conf.erb
 - templates/default/resque.yml.erb
-- templates/default/
+- templates/default/resque_wildcard.conf.erb
 homepage: ''
 licenses: []
 post_install_message:
@@ -71,7 +76,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: '0'
     segments:
     - 0
-    hash:
+    hash: -116094860101360678
 required_rubygems_version: !ruby/object:Gem::Requirement
   none: false
   requirements:
@@ -80,7 +85,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: '0'
     segments:
     - 0
-    hash:
+    hash: -116094860101360678
 requirements: []
 rubyforge_project:
 rubygems_version: 1.8.17
data/templates/default/resque.rb.erb
REMOVED

@@ -1,73 +0,0 @@
-rack_env = "<%= @node[:environment][:framework_env] %>"
-app_root = "/data/awsm/current"
-owner = "<%= @node[:owner_name] %>"
-home = "/home/#{owner}"
-instance_id = "<%= @node.engineyard.id %>"
-
-<%= @resque_workers_count %>.times do |num|
-  inline = "#{home}/.ruby_inline/resque-#{num}"
-
-  God.watch do |w|
-    w.name = "resque-#{num}"
-    w.group = 'resque'
-    w.uid = owner
-    w.gid = owner
-    w.interval = 30.seconds
-    w.log = "#{app_root}/log/worker.#{num}.log"
-    w.dir = app_root
-    w.env = {
-      "VERBOSE" => "true",
-      "INSTANCE_ID" => instance_id,
-      "GOD_WATCH" => w.name,
-      "RACK_ENV" => rack_env,
-      "LANG" => "en_US.utf-8",
-      "MERB_ENV" => rack_env,
-      "HOME" => home,
-      "INLINEDIR" => inline
-    }
-
-    w.start = "bundle exec rake --trace resque:work"
-
-    w.behavior(:clean_pid_file)
-
-    w.start_grace = 2.minutes
-    w.restart_grace = 2.minutes
-
-    # retart if memory gets too high
-    w.transition(:up, :restart) do |on|
-      on.condition(:memory_usage) do |c|
-        c.above = 350.megabytes
-        c.times = 2
-      end
-    end
-
-    # determine the state on startup
-    w.transition(:init, { true => :up, false => :start }) do |on|
-      on.condition(:process_running) do |c|
-        c.running = true
-      end
-    end
-
-    # determine when process has finished starting
-    w.transition([:start, :restart], :up) do |on|
-      on.condition(:process_running) do |c|
-        c.running = true
-        c.interval = 5.seconds
-      end
-
-      # failsafe
-      on.condition(:tries) do |c|
-        c.times = 5
-        c.transition = :start
-        c.interval = 5.seconds
-      end
-    end
-
-    # start if process is not running
-    w.transition(:up, :start) do |on|
-      on.condition(:process_running) do |c|
-        c.running = false
-      end
-    end
-  end
-end
data/templates/default/resque_scheduler.rb.erb
REMOVED

@@ -1,71 +0,0 @@
-rack_env = "<%= @node[:environment][:framework_env] %>"
-app_root = "/data/awsm/current"
-owner = "<%= @node[:owner_name] %>"
-home = "/home/#{owner}"
-instance_id = "<%= @node.engineyard.id %>"
-
-inline = "#{home}/.ruby_inline/resque-scheduler"
-
-God.watch do |w|
-  w.name = "resque-scheduler"
-  w.group = 'resque'
-  w.uid = owner
-  w.gid = owner
-  w.interval = 30.seconds
-  w.log = "#{app_root}/log/resque_scheduler.log"
-  w.dir = app_root
-  w.env = {
-    "VERBOSE" => "true",
-    "INSTANCE_ID" => instance_id,
-    "GOD_WATCH" => w.name,
-    "RACK_ENV" => rack_env,
-    "MERB_ENV" => rack_env,
-    "LANG" => "en_US.utf-8",
-    "HOME" => home,
-    "INLINEDIR" => inline
-  }
-
-  w.start = "bundle exec rake --trace resque:scheduler"
-
-  w.behavior(:clean_pid_file)
-
-  w.start_grace = 2.minutes
-  w.restart_grace = 2.minutes
-
-  # retart if memory gets too high
-  w.transition(:up, :restart) do |on|
-    on.condition(:memory_usage) do |c|
-      c.above = 350.megabytes
-      c.times = 2
-    end
-  end
-
-  # determine the state on startup
-  w.transition(:init, { true => :up, false => :start }) do |on|
-    on.condition(:process_running) do |c|
-      c.running = true
-    end
-  end
-
-  # determine when process has finished starting
-  w.transition([:start, :restart], :up) do |on|
-    on.condition(:process_running) do |c|
-      c.running = true
-      c.interval = 5.seconds
-    end
-
-    # failsafe
-    on.condition(:tries) do |c|
-      c.times = 5
-      c.transition = :start
-      c.interval = 5.seconds
-    end
-  end
-
-  # start if process is not running
-  w.transition(:up, :start) do |on|
-    on.condition(:process_running) do |c|
-      c.running = false
-    end
-  end
-end