resque-scheduler 4.4.0 → 4.10.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/dependabot.yml +12 -0
- data/.github/funding.yml +4 -0
- data/.github/workflows/codeql-analysis.yml +59 -0
- data/.github/workflows/rubocop.yml +27 -0
- data/.github/workflows/ruby.yml +81 -0
- data/AUTHORS.md +8 -0
- data/CHANGELOG.md +65 -2
- data/Gemfile +20 -3
- data/README.md +88 -17
- data/Rakefile +2 -5
- data/lib/resque/scheduler/configuration.rb +38 -9
- data/lib/resque/scheduler/delaying_extensions.rb +75 -34
- data/lib/resque/scheduler/env.rb +1 -5
- data/lib/resque/scheduler/lock/base.rb +1 -1
- data/lib/resque/scheduler/locking.rb +1 -1
- data/lib/resque/scheduler/logger_builder.rb +17 -6
- data/lib/resque/scheduler/scheduling_extensions.rb +6 -5
- data/lib/resque/scheduler/server/views/delayed.erb +12 -11
- data/lib/resque/scheduler/server/views/delayed_schedules.erb +1 -1
- data/lib/resque/scheduler/server/views/delayed_timestamp.erb +1 -1
- data/lib/resque/scheduler/server/views/scheduler.erb +2 -2
- data/lib/resque/scheduler/server/views/search.erb +2 -5
- data/lib/resque/scheduler/server/views/search_form.erb +1 -5
- data/lib/resque/scheduler/server.rb +1 -1
- data/lib/resque/scheduler/signal_handling.rb +2 -2
- data/lib/resque/scheduler/util.rb +1 -1
- data/lib/resque/scheduler/version.rb +1 -1
- data/lib/resque/scheduler.rb +71 -7
- data/resque-scheduler.gemspec +15 -7
- metadata +25 -23
data/lib/resque/scheduler/delaying_extensions.rb
CHANGED
@@ -24,7 +24,7 @@ module Resque
       def enqueue_at_with_queue(queue, timestamp, klass, *args)
        return false unless plugin.run_before_schedule_hooks(klass, *args)

-        if Resque.inline? || timestamp.to_i
+        if Resque.inline? || timestamp.to_i <= Time.now.to_i
          # Just create the job and let resque perform it right away with
          # inline. If the class is a custom job class, call self#scheduled
          # on it. This allows you to do things like
@@ -33,7 +33,7 @@ module Resque
          if klass.respond_to?(:scheduled)
            klass.scheduled(queue, klass.to_s, *args)
          else
-            Resque
+            Resque.enqueue_to(queue, klass, *args)
          end
        else
          delayed_push(timestamp, job_to_hash_with_queue(queue, klass, args))
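When `Resque.inline?` is set or the timestamp is already due, the branch above calls `klass.scheduled` if the job class defines it. A minimal sketch of such a job class, purely illustrative (the class name, queue, and arguments below are not part of the gem):

```ruby
# Hypothetical job class showing the self.scheduled hook that
# enqueue_at_with_queue calls for jobs that are due immediately.
class ThumbnailJob
  @queue = :images

  # Invoked instead of a plain enqueue when the job is due right away
  # (inline mode, or a timestamp at or before Time.now).
  def self.scheduled(queue, klass, *args)
    Resque.enqueue_to(queue, self, *args)
  end

  def self.perform(photo_id)
    # generate the thumbnail here
  end
end

Resque.enqueue_at(Time.now, ThumbnailJob, 42) # goes through self.scheduled
```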
@@ -63,16 +63,34 @@ module Resque
                                klass, *args)
      end

+      # Update the delayed timestamp of any matching delayed jobs or enqueue a
+      # new job if no matching jobs are found. Returns the number of delayed or
+      # enqueued jobs.
+      def delay_or_enqueue_at(timestamp, klass, *args)
+        count = remove_delayed(klass, *args)
+        count = 1 if count == 0
+
+        count.times do
+          enqueue_at(timestamp, klass, *args)
+        end
+      end
+
+      # Identical to +delay_or_enqueue_at+, except it takes
+      # number_of_seconds_from_now instead of a timestamp
+      def delay_or_enqueue_in(number_of_seconds_from_now, klass, *args)
+        delay_or_enqueue_at(Time.now + number_of_seconds_from_now, klass, *args)
+      end
+
      # Used internally to stuff the item into the schedule sorted list.
-      # +timestamp+ can be either in seconds or a datetime object
-      #
-      # that time, else false
+      # +timestamp+ can be either in seconds or a datetime object. The
+      # insertion time complexity is O(log(n)). Returns true if it's
+      # the first job to be scheduled at that time, else false.
      def delayed_push(timestamp, item)
        # First add this item to the list for this timestamp
        redis.rpush("delayed:#{timestamp.to_i}", encode(item))

        # Store the timestamps at with this item occurs
-        redis.sadd("timestamps:#{encode(item)}", "delayed:#{timestamp.to_i}")
+        redis.sadd("timestamps:#{encode(item)}", ["delayed:#{timestamp.to_i}"])

        # Now, add this timestamp to the zsets. The score and the value are
        # the same since we'll be querying by timestamp, and we don't have
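The two methods added above become part of the `Resque` API once the extensions are mixed in. A short usage sketch; `SomeJob` is a placeholder class:

```ruby
one_day = 24 * 60 * 60

# Reschedule (or schedule, if absent) SomeJob with these arguments for
# tomorrow. Matching delayed entries are removed first, so the job is not
# queued twice; the return value is the number of jobs (re)delayed.
Resque.delay_or_enqueue_at(Time.now + one_day, SomeJob, user_id: 7)

# Same thing expressed as an offset in seconds from now.
Resque.delay_or_enqueue_in(one_day, SomeJob, user_id: 7)
```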
@@ -88,6 +106,7 @@ module Resque
      end

      # Returns the size of the delayed queue schedule
+      # this does not represent the number of items in the queue to be scheduled
      def delayed_queue_schedule_size
        redis.zcard :delayed_queue_schedule
      end
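The clarified comment deserves an example: the schedule size counts distinct timestamp buckets in the zset, not the delayed jobs themselves. A hypothetical illustration, assuming the gem's existing `delayed_timestamp_size` helper and a placeholder `SomeJob` class:

```ruby
t = Time.now + 60

Resque.enqueue_at(t, SomeJob, 1)
Resque.enqueue_at(t, SomeJob, 2)    # second job, same timestamp bucket

Resque.delayed_queue_schedule_size  # => 1, one timestamp in the zset
Resque.delayed_timestamp_size(t)    # => 2, two jobs in that bucket's list
```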
@@ -121,7 +140,7 @@ module Resque
        key = "delayed:#{timestamp.to_i}"

        encoded_item = redis.lpop(key)
-        redis.srem("timestamps:#{encoded_item}", key)
+        redis.srem("timestamps:#{encoded_item}", [key])
        item = decode(encoded_item)

        # If the list is empty, remove it.
@@ -134,10 +153,7 @@ module Resque
        Array(redis.zrange(:delayed_queue_schedule, 0, -1)).each do |item|
          key = "delayed:#{item}"
          items = redis.lrange(key, 0, -1)
-          redis.
-          items.each { |ts_item| redis.del("timestamps:#{ts_item}") }
-          end
-          redis.del key
+          redis.del(key, items.map { |ts_item| "timestamps:#{ts_item}" })
        end

        redis.del :delayed_queue_schedule
@@ -149,6 +165,11 @@ module Resque
        remove_delayed_job(search)
      end

+      def remove_delayed_in_queue(klass, queue, *args)
+        search = encode(job_to_hash_with_queue(queue, klass, args))
+        remove_delayed_job(search)
+      end
+
      # Given an encoded item, enqueue it now
      def enqueue_delayed(klass, *args)
        hash = job_to_hash(klass, args)
@@ -157,6 +178,13 @@ module Resque
        end
      end

+      def enqueue_delayed_with_queue(klass, queue, *args)
+        hash = job_to_hash_with_queue(queue, klass, args)
+        remove_delayed_in_queue(klass, queue, *args).times do
+          Resque::Scheduler.enqueue_from_config(hash)
+        end
+      end
+
      # Given a block, remove jobs that return true from a block
      #
      # This allows for removal of delayed jobs that have arguments matching
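A brief usage sketch for the queue-aware helpers added in the two hunks above; `SomeJob` and the `:critical` queue are placeholders:

```ruby
# Drop any delayed SomeJob entries that were scheduled onto :critical with
# exactly these arguments; returns the number of entries removed.
Resque.remove_delayed_in_queue(SomeJob, :critical, 42)

# Or push every matching delayed entry onto its queue right now instead of
# waiting for its timestamp.
Resque.enqueue_delayed_with_queue(SomeJob, :critical, 42)
```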
@@ -181,7 +209,15 @@ module Resque
        found_jobs.reduce(0) do |sum, encoded_job|
          decoded_job = decode(encoded_job)
          klass = Util.constantize(decoded_job['class'])
-
+          queue = decoded_job['queue']
+
+          if queue
+            jobs_queued = enqueue_delayed_with_queue(klass, queue, *decoded_job['args'])
+          else
+            jobs_queued = enqueue_delayed(klass, *decoded_job['args'])
+          end
+
+          jobs_queued + sum
        end
      end

@@ -196,9 +232,9 @@ module Resque

        # Beyond 100 there's almost no improvement in speed
        found = timestamps.each_slice(100).map do |ts_group|
-          jobs = redis.pipelined do
+          jobs = redis.pipelined do |pipeline|
            ts_group.each do |ts|
-              redis.lrange("delayed:#{ts}", 0, -1)
+              pipeline.lrange("delayed:#{ts}", 0, -1)
            end
          end

@@ -221,7 +257,7 @@ module Resque
        key = "delayed:#{timestamp.to_i}"
        encoded_job = encode(job_to_hash(klass, args))

-        redis.srem("timestamps:#{encoded_job}", key)
+        redis.srem("timestamps:#{encoded_job}", [key])
        count = redis.lrem(key, 0, encoded_job)
        clean_up_timestamp(key, timestamp)

@@ -261,6 +297,22 @@ module Resque
        redis.hget('delayed:last_enqueued_at', job_name)
      end

+      def clean_up_timestamp(key, timestamp)
+        # Use a watch here to ensure nobody adds jobs to this delayed
+        # queue while we're removing it.
+        redis.watch(key) do
+          if redis.llen(key).to_i == 0
+            # If the list is empty, remove it.
+            redis.multi do |transaction|
+              transaction.del(key)
+              transaction.zrem(:delayed_queue_schedule, timestamp.to_i)
+            end
+          else
+            redis.redis.unwatch
+          end
+        end
+      end
+
      private

      def job_to_hash(klass, args)
@@ -271,38 +323,27 @@ module Resque
        { class: klass.to_s, args: args, queue: queue }
      end

+      # Removes a job from the queue, but not modify the timestamp schedule. This method
+      # will not effect the output of `delayed_queue_schedule_size`
      def remove_delayed_job(encoded_job)
        return 0 if Resque.inline?

        timestamps = redis.smembers("timestamps:#{encoded_job}")

-        replies = redis.pipelined do
+        replies = redis.pipelined do |pipeline|
          timestamps.each do |key|
-            redis.lrem(key, 0, encoded_job)
-            redis.srem("timestamps:#{encoded_job}", key)
+            pipeline.lrem(key, 0, encoded_job)
+            pipeline.srem("timestamps:#{encoded_job}", [key])
          end
        end

+        # timestamp key is not removed from the schedule, this is done later
+        # by the scheduler loop
+
        return 0 if replies.nil? || replies.empty?
        replies.each_slice(2).map(&:first).inject(:+)
      end

-      def clean_up_timestamp(key, timestamp)
-        # Use a watch here to ensure nobody adds jobs to this delayed
-        # queue while we're removing it.
-        redis.watch(key) do
-          if redis.llen(key).to_i == 0
-            # If the list is empty, remove it.
-            redis.multi do
-              redis.del(key)
-              redis.zrem(:delayed_queue_schedule, timestamp.to_i)
-            end
-          else
-            redis.redis.unwatch
-          end
-        end
-      end
-
      def search_first_delayed_timestamp_in_range(start_at, stop_at)
        start_at = start_at.nil? ? '-inf' : start_at.to_i
        stop_at = stop_at.nil? ? '+inf' : stop_at.to_i
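A recurring theme in the hunks above: commands issued inside `redis.pipelined`/`redis.multi` blocks now go through the explicit block argument, and single set members are wrapped in arrays. This tracks newer redis-rb releases, which dropped the implicit-receiver pipelining style and changed how single-member `sadd`/`srem` calls are handled. A minimal sketch of the two styles, independent of resque-scheduler:

```ruby
require 'redis'

redis = Redis.new

# Older style (removed in redis-rb 5.x): commands were called on the client
# object itself inside the block.
#
#   redis.pipelined do
#     redis.sadd('timestamps:job', 'delayed:123')
#   end

# Current style: call commands on the object yielded to the block.
redis.pipelined do |pipeline|
  pipeline.sadd('timestamps:job', ['delayed:123'])
  pipeline.lrem('delayed:123', 0, 'job')
end
```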
data/lib/resque/scheduler/env.rb
CHANGED
@@ -38,12 +38,8 @@ module Resque
          true
        end

-        unless Process.respond_to?('daemon')
-          abort 'background option is set, which requires ruby >= 1.9'
-        end
-
        Process.daemon(true, !Resque::Scheduler.quiet)
-        Resque.redis.
+        Resque.redis.reconnect
      end

      def setup_pid_file
data/lib/resque/scheduler/locking.rb
CHANGED
@@ -2,7 +2,7 @@

 # ### Locking the scheduler process
 #
-# There are two places in resque-scheduler that need to be
+# There are two places in resque-scheduler that need to be synchronized in order
 # to be able to run redundant scheduler processes while ensuring jobs don't get
 # queued multiple times when the master process changes.
 #
data/lib/resque/scheduler/logger_builder.rb
CHANGED
@@ -15,7 +15,7 @@ module Resque
    # - :quiet if logger needs to be silent for all levels. Default - false
    # - :verbose if there is a need in debug messages. Default - false
    # - :log_dev to output logs into a desired file. Default - STDOUT
-    # - :format log format, either 'text' or '
+    # - :format log format, either 'text', 'json' or 'logfmt'. Default - 'text'
    #
    # Example:
    #
@@ -32,6 +32,7 @@ module Resque
      # Returns an instance of MonoLogger
      def build
        logger = MonoLogger.new(@log_dev)
+        logger.progname = 'resque-scheduler'.freeze
        logger.level = level
        logger.formatter = send(:"#{@format}_formatter")
        logger
@@ -50,21 +51,31 @@ module Resque
      end

      def text_formatter
-        proc do |severity, datetime,
-          "
+        proc do |severity, datetime, progname, msg|
+          "#{progname}: [#{severity}] #{datetime.iso8601}: #{msg}\n"
        end
      end

      def json_formatter
        proc do |severity, datetime, progname, msg|
          require 'json'
-
-          name:
+          log_data = {
+            name: progname,
            progname: progname,
            level: severity,
            timestamp: datetime.iso8601,
            msg: msg
-
+          }
+          JSON.dump(log_data) + "\n"
+        end
+      end
+
+      def logfmt_formatter
+        proc do |severity, datetime, progname, msg|
+          "timestamp=\"#{datetime.iso8601}\" " \
+            "level=\"#{severity}\" " \
+            "progname=\"#{progname}\" " \
+            "msg=\"#{msg}\"\n"
        end
      end
    end
data/lib/resque/scheduler/scheduling_extensions.rb
CHANGED
@@ -36,7 +36,7 @@ module Resque
      # :args can be any yaml which will be converted to a ruby literal and
      # passed in a params. (optional)
      #
-      # :
+      # :rails_env is the list of envs where the job gets loaded. Envs are
      # comma separated (optional)
      #
      # :description is just that, a description of the job (optional). If
@@ -91,7 +91,7 @@ module Resque
          non_persistent_schedules[name] = decode(encode(config))
        end

-        redis.sadd(:schedules_changed, name)
+        redis.sadd(:schedules_changed, [name])
        reload_schedule! if reload
      end

@@ -101,12 +101,13 @@ module Resque
      end

      # remove a given schedule by name
-      def remove_schedule(name)
+      # Preventing a reload is optional and available to batch operations
+      def remove_schedule(name, reload = true)
        non_persistent_schedules.delete(name)
        redis.hdel(:persistent_schedules, name)
-        redis.sadd(:schedules_changed, name)
+        redis.sadd(:schedules_changed, [name])

-        reload_schedule!
+        reload_schedule! if reload
      end

      private
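The new `reload` flag on `remove_schedule` mirrors the one already honored by the schedule setter (note the existing `reload_schedule! if reload` context line above), so batch removals can defer the reload. A hedged sketch with placeholder schedule names:

```ruby
# Remove several dynamic schedules without reloading after each one,
# then reload once at the end.
%w(nightly_report stale_session_sweep).each do |name|
  Resque.remove_schedule(name, false)
end

Resque.reload_schedule!
```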
data/lib/resque/scheduler/server/views/delayed.erb
CHANGED
@@ -1,9 +1,9 @@
 <h1>Delayed Jobs</h1>
-
+<% size = resque.delayed_queue_schedule_size %>

 <%= scheduler_view :search_form, layout: false %>

-<p style="
+<p style="color: red; font-weight: bold;">
  <%= @error_message %>
 </p>

@@ -16,6 +16,14 @@
  Showing <%= start = params[:start].to_i %> to <%= start + 20 %> of <b><%= size %></b> timestamps
 </p>

+<% if size > 0 %>
+  <div style="padding-bottom: 10px">
+    <form method="POST" action="<%= u 'delayed/clear' %>" class='clear-delayed confirmSubmission'>
+      <input type='submit' name='' value='Clear Delayed Jobs' />
+    </form>
+  </div>
+<% end %>
+
 <table>
  <tr>
    <th></th>
@@ -27,7 +35,7 @@
  </tr>
  <% resque.delayed_queue_peek(start, 20).each do |timestamp| %>
    <tr>
-      <td>
+      <td style="padding-top: 12px; padding-bottom: 2px; width: 10px">
        <form action="<%= u "/delayed/queue_now" %>" method="post">
          <input type="hidden" name="timestamp" value="<%= timestamp.to_i %>">
          <input type="submit" value="Queue now">
@@ -46,18 +54,11 @@
      <td><%= h(show_job_arguments(job['args'])) if job && delayed_timestamp_size == 1 %></td>
      <td>
        <% if job %>
-          <a href="<%=u URI("/delayed/jobs/#{
+          <a href="<%= u URI("/delayed/jobs/#{CGI.escape(job['class'])}?args=" + CGI.escape(job['args'].to_json)) %>">All schedules</a>
        <% end %>
      </td>
    </tr>
  <% end %>
 </table>

-<% if size > 0 %>
-  <br>
-  <form method="POST" action="<%=u 'delayed/clear'%>" class='clear-delayed'>
-    <input type='submit' name='' value='Clear Delayed Jobs' />
-  </form>
-<% end %>
-
 <%= partial :next_more, :start => start, :size => size %>
data/lib/resque/scheduler/server/views/scheduler.erb
CHANGED
@@ -8,7 +8,7 @@
  <br/> Current master: <%= Resque.redis.get(Resque::Scheduler.master_lock.key) %>
 </p>
 <p class='intro'>
-
+  The highlighted jobs are skipped for current environment.
 </p>
 <div style="overflow-y: auto; width:100%; padding: 0px 5px;">
  <table>
@@ -29,7 +29,7 @@
    <% Resque.schedule.keys.sort.each_with_index do |name, index| %>
      <% config = Resque.schedule[name] %>
      <tr style="<%= scheduled_in_this_env?(name) ? '' : 'color: #9F6000;background: #FEEFB3;' %>">
-
+        <td style="padding-left: 15px;"><%= index + 1 %>.</td>
        <% if Resque::Scheduler.dynamic %>
          <td style="padding-top: 12px; padding-bottom: 2px; width: 10px">
            <form action="<%= u "/schedule" %>" method="post" style="margin-left: 0">
data/lib/resque/scheduler/server/views/search.erb
CHANGED
@@ -13,13 +13,13 @@
  </tr>
  <% delayed.each do |job| %>
    <tr>
-      <td>
+      <td style="padding-top: 12px; padding-bottom: 2px; width: 10px">
        <form action="<%= u "/delayed/queue_now" %>" method="post">
          <input type="hidden" name="timestamp" value="<%= job['timestamp'].to_i %>">
          <input type="submit" value="Queue now">
        </form>
      </td>
-      <td>
+      <td style="padding-top: 12px; padding-bottom: 2px; width: 10px">
        <form action="<%= u "/delayed/cancel_now" %>" method="post">
          <input type="hidden" name="timestamp" value="<%= job['timestamp'].to_i %>">
          <input type="hidden" name="klass" value="<%= job['class'] %>">
@@ -33,7 +33,6 @@
    </tr>
  <% end %>
 </table>
-</h1>

 <% queued = @jobs.select { |j| j['where_at'] == 'queued' } %>
 <h1>Queued jobs</h1>
@@ -68,5 +67,3 @@
    </tr>
  <% end %>
 </table>
-
-
data/lib/resque/scheduler/server.rb
CHANGED
@@ -87,7 +87,7 @@ module Resque
      def delayed_jobs_klass
        begin
          klass = Resque::Scheduler::Util.constantize(params[:klass])
-          @args = JSON.load(
+          @args = JSON.load(CGI.unescape(params[:args]))
          @timestamps = Resque.scheduled_at(klass, *@args)
        rescue
          @timestamps = []
data/lib/resque/scheduler/signal_handling.rb
CHANGED
@@ -10,13 +10,13 @@ module Resque
      end

      # For all signals, set the shutdown flag and wait for current
-      # poll/
+      # poll/enqueuing to finish (should be almost instant). In the
      # case of sleeping, exit immediately.
      def register_signal_handlers
        (Signal.list.keys & %w(INT TERM USR1 USR2 QUIT)).each do |sig|
          trap(sig) do
            signal_queue << sig
-            # break sleep in the primary scheduler thread,
+            # break sleep in the primary scheduler thread, allowing
            # the signal queue to get processed as soon as possible.
            @th.wakeup if @th && @th.alive?
          end
data/lib/resque/scheduler/util.rb
CHANGED
@@ -4,7 +4,7 @@ module Resque
  module Scheduler
    class Util
      # In order to upgrade to resque(1.25) which has deprecated following
-      # methods, we just added these
+      # methods, we just added these useful helpers back to use in Resque
      # Scheduler. refer to:
      # https://github.com/resque/resque-scheduler/pull/273

data/lib/resque/scheduler.rb
CHANGED
@@ -1,5 +1,6 @@
 # vim:fileencoding=utf-8

+require 'redis/errors'
 require 'rufus/scheduler'
 require_relative 'scheduler/configuration'
 require_relative 'scheduler/locking'
@@ -205,16 +206,80 @@ module Resque

      # Enqueues all delayed jobs for a timestamp
      def enqueue_delayed_items_for_timestamp(timestamp)
-
+        count = 0
+        batch_size = delayed_requeue_batch_size
+        actual_batch_size = nil
+
+        log "Processing delayed items for timestamp #{timestamp}, in batches of #{batch_size}"
+
        loop do
          handle_shutdown do
            # Continually check that it is still the master
-
+            if am_master
+              actual_batch_size = enqueue_items_in_batch_for_timestamp(timestamp,
+                                                                       batch_size)
+            end
          end
-
-
-
+
+          count += actual_batch_size
+          log "queued #{count} jobs" if actual_batch_size != -1
+
+          # continue processing until there are no more items in this
+          # timestamp. If we don't have a full batch, this is the last one.
+          # This also breaks us in the event of a redis transaction failure
+          # i.e. enqueue_items_in_batch_for_timestamp returned -1
+          break if actual_batch_size < batch_size
        end
+
+        log "finished queueing #{count} total jobs for timestamp #{timestamp}" if count != -1
+      end
+
+      def timestamp_key(timestamp)
+        "delayed:#{timestamp.to_i}"
+      end
+
+      def enqueue_items_in_batch_for_timestamp(timestamp, batch_size)
+        timestamp_bucket_key = timestamp_key(timestamp)
+
+        encoded_jobs_to_requeue = Resque.redis.lrange(timestamp_bucket_key, 0, batch_size - 1)
+
+        # Watch is used to ensure that the timestamp bucket we are operating on
+        # is not altered by any other clients between the watch call and when we call exec
+        # (to execute the multi block). We should error catch on the redis.exec return value
+        # as that will indicate if the entire transaction was aborted or not. Though we should
+        # be safe as our ltrim is inside the multi block and therefore also would have been
+        # aborted. So nothing would have been queued, but also nothing lost from the bucket.
+        watch_result = Resque.redis.watch(timestamp_bucket_key) do
+          Resque.redis.multi do |pipeline|
+            encoded_jobs_to_requeue.each do |encoded_job|
+              pipeline.srem("timestamps:#{encoded_job}", timestamp_bucket_key)
+
+              decoded_job = Resque.decode(encoded_job)
+              enqueue(decoded_job)
+            end
+
+            pipeline.ltrim(timestamp_bucket_key, batch_size, -1)
+          end
+        end
+
+        # Did the multi block successfully remove from this timestamp and enqueue the jobs?
+        success = !watch_result.nil?
+
+        # If this was the last batch in this timestamp bucket, clean up
+        if success && encoded_jobs_to_requeue.count < batch_size
+          Resque.clean_up_timestamp(timestamp_bucket_key, timestamp)
+        end
+
+        unless success
+          # Our batched transaction failed in Redis due to the timestamp_bucket_key value
+          # being modified while we built our multi block. We return -1 to ensure we break
+          # out of the loop iterating on this timestamp so it can be re-processed via the
+          # loop in handle_delayed_items.
+          return -1
+        end
+
+        # will return 0 if none were left to batch
+        encoded_jobs_to_requeue.count
      end

      def enqueue(config)
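The batched requeue above relies on Redis optimistic locking: WATCH the timestamp bucket, queue the work and the LTRIM inside MULTI, and treat a nil EXEC result as "the bucket changed underneath us, retry". A stripped-down sketch of that pattern in redis-rb, separate from the scheduler itself (the key name is illustrative):

```ruby
require 'redis'

redis = Redis.new
key = 'delayed:1700000000' # illustrative timestamp bucket key
batch_size = 100

batch = redis.lrange(key, 0, batch_size - 1)

# If another client modifies `key` between WATCH and EXEC, the queued
# commands are discarded and the multi block returns nil.
result = redis.watch(key) do
  redis.multi do |tx|
    # The real scheduler enqueues each job in `batch` at this point; the
    # ltrim below then drops exactly those entries from the bucket.
    tx.ltrim(key, batch_size, -1)
  end
end

if result.nil?
  # Aborted: the bucket was left untouched, so the whole batch can be retried.
else
  # Committed: `result` holds one reply per command queued in the block.
end
```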
@@ -250,7 +315,7 @@ module Resque
        if job_klass && job_klass != 'Resque::Job'
          # The custom job class API must offer a static "scheduled" method. If
          # the custom job class can not be constantized (via a requeue call
-          # from the web perhaps), fall back to
+          # from the web perhaps), fall back to enqueuing normally via
          # Resque::Job.create.
          begin
            Resque::Scheduler::Util.constantize(job_klass).scheduled(
@@ -376,7 +441,6 @@ module Resque

      def stop_rufus_scheduler
        rufus_scheduler.shutdown(:wait)
-        rufus_scheduler.join
      end

      def before_shutdown