resque-mongo-scheduler 2.0.2
Sign up to get free protection for your applications and to get access to all the features.
- data/.gitignore +4 -0
- data/Gemfile +2 -0
- data/Gemfile.lock +47 -0
- data/HISTORY.md +88 -0
- data/LICENSE +21 -0
- data/README.markdown +316 -0
- data/Rakefile +13 -0
- data/lib/resque/scheduler.rb +244 -0
- data/lib/resque_scheduler/search_delayed.rb +49 -0
- data/lib/resque_scheduler/server/views/delayed.erb +64 -0
- data/lib/resque_scheduler/server/views/delayed_timestamp.erb +26 -0
- data/lib/resque_scheduler/server/views/scheduler.erb +39 -0
- data/lib/resque_scheduler/server.rb +58 -0
- data/lib/resque_scheduler/tasks.rb +25 -0
- data/lib/resque_scheduler/version.rb +3 -0
- data/lib/resque_scheduler.rb +238 -0
- data/resque-mongo-scheduler.gemspec +29 -0
- data/tasks/resque_scheduler.rake +2 -0
- data/test/delayed_queue_test.rb +234 -0
- data/test/redis-test.conf +115 -0
- data/test/resque-web_test.rb +31 -0
- data/test/scheduler_args_test.rb +83 -0
- data/test/scheduler_test.rb +241 -0
- data/test/test_helper.rb +92 -0
- metadata +150 -0
@@ -0,0 +1,238 @@
|
|
1
|
+
require 'rubygems'
|
2
|
+
gem 'resque-mongo'
|
3
|
+
require 'resque'
|
4
|
+
require 'resque/server'
|
5
|
+
require 'resque_scheduler/version'
|
6
|
+
require 'resque/scheduler'
|
7
|
+
require 'resque_scheduler/server'
|
8
|
+
require 'resque_scheduler/search_delayed'
|
9
|
+
|
10
|
+
# Mongo-backed implementation of the resque-scheduler API, mixed into the
# Resque module (see `Resque.extend ResqueScheduler` at the bottom of this
# file). Three collections are used:
#   * 'schedules'         - named cron-style schedule documents, keyed by '_id'
#   * 'schedules_changed' - a change log polled by the scheduler process
#   * 'delayed_queue'     - one document per integer timestamp, each holding
#                           an 'items' array of job payloads
module ResqueScheduler

  # The 'schedules' collection. Forces a mongo connection first; the @db
  # handle is presumably set by resque-mongo's `mongo=` writer — TODO confirm.
  def schedules
    self.mongo ||= ENV['MONGO'] || 'localhost:27017'
    @schedules ||= @db.collection('schedules')
  end

  # The 'schedules_changed' collection: a queue of schedule names whose
  # configuration changed and should be (re)loaded by the scheduler.
  def schedules_changed
    self.mongo ||= ENV['MONGO'] || 'localhost:27017'
    @schedules_changed ||= @db.collection('schedules_changed')
  end

  # The 'delayed_queue' collection: one document per timestamp (seconds
  # since epoch, stored in '_id') with the jobs scheduled for that second.
  def delayed_queue
    self.mongo ||= ENV['MONGO'] || 'localhost:27017'
    @delayed_queue ||= @db.collection('delayed_queue')
  end

  #
  # Accepts a new schedule configuration of the form:
  #
  #   {some_name => {"cron" => "5/* * * *",
  #                  "class" => DoSomeWork,
  #                  "args" => "work on this string",
  #                  "description" => "this thing works its butter off"},
  #    ...}
  #
  # :name can be anything and is used only to describe the scheduled job
  # :cron can be any cron scheduling string :job can be any resque job class
  # :every can be used in lieu of :cron. see rufus-scheduler's 'every' usage for
  # valid syntax. If :cron is present it will take precedence over :every.
  # :class must be a resque worker class
  # :args can be any yaml which will be converted to a ruby literal and passed
  # in a params. (optional)
  # :rails_envs is the list of envs where the job gets loaded. Envs are comma separated (optional)
  # :description is just that, a description of the job (optional). If params is
  # an array, each element in the array is passed as a separate param,
  # otherwise params is passed in as the only parameter to perform.
  def schedule=(schedule_hash)
    # In dynamic mode each entry is also persisted to mongo so other
    # processes pick the change up via the schedules_changed log.
    if Resque::Scheduler.dynamic
      schedule_hash.each do |name, job_spec|
        set_schedule(name, job_spec)
      end
    end
    @schedule = schedule_hash
  end

  # Returns the schedule hash
  def schedule
    @schedule ||= {}
  end

  # reloads the schedule from mongo
  def reload_schedule!
    @schedule = get_schedules
  end

  # gets the schedule as it exists in mongo; returns nil when the
  # collection is empty.
  def get_schedules
    if schedules.count > 0
      h = {}
      schedules.find.each do |a|
        # '_id' holds the schedule name; the rest of the doc is the config.
        h[a.delete('_id')] = a
      end
      h
    else
      nil
    end
  end

  # create or update a schedule with the provided name and configuration
  #
  # NOTE(review): when a schedule with this name already exists with a
  # different config, `insert` is issued with a duplicate '_id'. Depending
  # on the driver's write concern this either raises a duplicate-key error
  # or is silently dropped — an upsert/save may be intended here; confirm
  # against the mongo driver version in use. The same applies to the
  # `schedules_changed.insert` when the name is already queued.
  def set_schedule(name, config)
    existing_config = get_schedule(name)
    unless existing_config && existing_config == config
      schedules.insert(config.merge('_id' => name))
      schedules_changed.insert('_id' => name)
    end
    config
  end

  # retrieve the schedule configuration for the given name
  def get_schedule(name)
    schedule = schedules.find_one('_id' => name)
    schedule.delete('_id') if schedule
    schedule
  end

  # remove a given schedule by name
  def remove_schedule(name)
    schedules.remove('_id' => name)
    schedules_changed.insert('_id' => name)
  end

  # Yields each changed schedule name, atomically removing the entry from
  # the change log as it is consumed.
  def pop_schedules_changed
    while doc = schedules_changed.find_and_modify(:remove => true)
      yield doc['_id']
    end
  rescue Mongo::OperationFailure
    # "Database command 'findandmodify' failed: {"errmsg"=>"No matching object found", "ok"=>0.0}"
    # Sadly, the mongo driver raises (with a global exception class) instead of returning nil when
    # the collection is empty.
  end

  # This method is nearly identical to +enqueue+ only it also
  # takes a timestamp which will be used to schedule the job
  # for queueing. Until timestamp is in the past, the job will
  # sit in the schedule list.
  # @return the number of items for this timestamp
  def enqueue_at(timestamp, klass, *args)
    delayed_push(timestamp, job_to_hash(klass, args))
  end

  # Identical to enqueue_at but takes number_of_seconds_from_now
  # instead of a timestamp.
  # @return the number of items for this timestamp
  def enqueue_in(number_of_seconds_from_now, klass, *args)
    enqueue_at(Time.now + number_of_seconds_from_now, klass, *args)
  end

  # Used internally to stuff the item into the schedule sorted list.
  # +timestamp+ can be either in seconds or a datetime object
  # Insertion is O(log(n)).
  # Returns true if it's the first job to be scheduled at that time, else false
  # @return the number of items for this timestamp
  def delayed_push(timestamp, item)
    # Add this item to the list for this timestamp. :upsert creates the
    # per-timestamp document on first use; :new returns the post-update
    # document so the item count can be reported.
    doc = delayed_queue.find_and_modify(
      :query => {'_id' => timestamp.to_i},
      :update => {'$push' => {:items => item}},
      :upsert => true,
      :new => true
    )
    doc['items'].size
  end

  # Returns an array of timestamps based on start and count
  def delayed_queue_peek(start, count)
    delayed_queue.find({}, :skip => start, :limit => count, :fields => '_id', :sort => ['_id', 1]).map {|d| d['_id']}
  end

  # Returns the size of the delayed queue schedule
  def delayed_queue_schedule_size
    delayed_queue.count
  end

  # Returns the number of jobs for a given timestamp in the delayed queue schedule
  def delayed_timestamp_size(timestamp)
    document = delayed_queue.find_one('_id' => timestamp.to_i)
    document ? (document['items'] || []).size : 0
  end

  # Returns an array of delayed items for the given timestamp
  def delayed_timestamp_peek(timestamp, start, count)
    # $slice projects only the requested window of the items array.
    doc = delayed_queue.find_one(
      {'_id' => timestamp.to_i},
      :fields => {'items' => {'$slice' => [start, count]}}
    )
    doc ? doc['items'] || [] : []
  end

  # Returns the next delayed queue timestamp
  # (don't call directly)
  def next_delayed_timestamp(at_time=nil)
    doc = delayed_queue.find_one(
      {'_id' => {'$lte' => (at_time || Time.now).to_i}},
      :sort => ['_id', Mongo::ASCENDING]
    )
    doc ? doc['_id'] : nil
  end

  # Returns the next item to be processed for a given timestamp, nil if
  # done. (don't call directly)
  # +timestamp+ can either be in seconds or a datetime
  def next_item_for_timestamp(timestamp)
    # Returns the array of items before it was shifted
    doc = delayed_queue.find_and_modify(
      :query => {'_id' => timestamp.to_i},
      :update => {'$pop' => {'items' => -1}} # -1 means shift
    )
    item = doc['items'].first

    # If the list is empty, remove it.
    clean_up_timestamp(timestamp)

    item
  rescue Mongo::OperationFailure
    # Database command 'findandmodify' failed: {"errmsg"=>"No matching object found", "ok"=>0.0}
    nil
  end

  # Clears all jobs created with enqueue_at or enqueue_in
  def reset_delayed_queue
    delayed_queue.remove
  end

  # given an encoded item, remove it from the delayed_queue
  # does not clean like +next_item_for_timestamp+
  # TODO ? unlike resque-scheduler, it does not return the number of removed items,
  # can't use find_and_modify because it only updates one item.
  def remove_delayed(klass, *args)
    delayed_queue.update(
      {},
      {'$pull' => {'items' => job_to_hash(klass, args)}},
      :multi => true
    )
  end

  # Total number of jobs across every timestamp in the delayed queue.
  def count_all_scheduled_jobs
    total_jobs = 0
    delayed_queue.find.each do |doc|
      total_jobs += (doc['items'] || []).size
    end
    total_jobs
  end

  private

  # Canonical payload stored for a delayed job; mirrors Resque's own job
  # encoding so the scheduler can push it straight onto a queue later.
  def job_to_hash(klass, args)
    {:class => klass.to_s, :args => args, :queue => queue_from_class(klass).to_s}
  end

  # Drops the per-timestamp document once its 'items' array is empty.
  def clean_up_timestamp(timestamp)
    delayed_queue.remove('_id' => timestamp.to_i, :items => {'$size' => 0})
  end

end
|
234
|
+
|
235
|
+
# Mix the scheduler API into Resque itself (Resque.enqueue_at, Resque.schedule=, ...)
Resque.extend ResqueScheduler
# Register the scheduler's tabs/routes with the resque-web Sinatra app.
Resque::Server.class_eval do
  include ResqueScheduler::Server
end
|
@@ -0,0 +1,29 @@
|
|
1
|
+
# -*- encoding: utf-8 -*-
require File.expand_path("../lib/resque_scheduler/version", __FILE__)

# Gem packaging metadata for resque-mongo-scheduler.
Gem::Specification.new do |s|
  s.name = "resque-mongo-scheduler"
  s.version = ResqueScheduler::Version
  s.platform = Gem::Platform::RUBY
  s.authors = ['Ben VandenBos', 'Nicolas Fouché']
  s.email = ['bvandenbos@gmail.com', 'nicolas@silentale.com']
  s.homepage = "http://github.com/nfo/resque-mongo-scheduler"
  s.summary = "Light weight job scheduling on top of Resque Mongo"
  s.description = %q{Light weight job scheduling on top of Resque Mongo.
    Adds methods enqueue_at/enqueue_in to schedule jobs in the future.
    Also supports queueing jobs on a fixed, cron-like schedule.}

  s.required_rubygems_version = ">= 1.3.6"
  s.add_development_dependency "bundler", ">= 1.0.0"

  # Ship everything tracked by git; executables are whatever lives in bin/.
  s.files = `git ls-files`.split("\n")
  s.executables = `git ls-files`.split("\n").map{|f| f =~ /^bin\/(.*)/ ? $1 : nil}.compact
  s.require_path = 'lib'

  s.add_runtime_dependency(%q<mongo>, [">= 1.1"])
  s.add_runtime_dependency(%q<resque-mongo>, [">= 1.11.0"])
  s.add_runtime_dependency(%q<rufus-scheduler>, [">= 0"])
  s.add_development_dependency(%q<mocha>, [">= 0"])
  s.add_development_dependency(%q<rack-test>, [">= 0"])

end
|
@@ -0,0 +1,234 @@
|
|
1
|
+
require File.dirname(__FILE__) + '/test_helper'
|
2
|
+
|
3
|
+
# Exercises the delayed-queue half of ResqueScheduler against a live mongo
# instance: enqueue_at/enqueue_in, peeking, popping, removal and cleanup.
class Resque::DelayedQueueTest < Test::Unit::TestCase

  def setup
    # Silence scheduler logging and start each test from an empty datastore.
    Resque::Scheduler.mute = true
    Resque.flushall
  end

  def test_enqueue_at_adds_correct_list

    timestamp = Time.now - 1 # 1 second ago (in the past, should come out right away)

    assert_equal(0, Resque.delayed_queue.count, "delayed queue should be empty to start")

    assert_equal 1, Resque.enqueue_at(timestamp, SomeIvarJob, "path")

    # Confirm the correct keys were added
    assert_equal(1, Resque.delayed_queue.find_one('_id' => timestamp.to_i)['items'].size, "delayed queue should have one entry now")
    assert_equal(1, Resque.delayed_queue_schedule_size, "The delayed_queue_schedule should have 1 entry now")

    read_timestamp = Resque.next_delayed_timestamp

    # Confirm the timestamp came out correctly
    assert_equal(timestamp.to_i, read_timestamp, "The timestamp we pull out of redis should match the one we put in")
    item = Resque.next_item_for_timestamp(read_timestamp)

    # Confirm the item came out correctly
    assert_equal('SomeIvarJob', item['class'], "Should be the same class that we queued")
    assert_equal(["path"], item['args'], "Should have the same arguments that we queued")

    # And now confirm the keys are gone
    assert_equal(0, Resque.delayed_queue.find('_id' => timestamp.to_i).count) # tests +clean_up_timestamp+
    assert_equal(0, Resque.delayed_queue_schedule_size, "delayed queue should be empty")
  end

  def test_something_in_the_future_doesnt_come_out
    timestamp = Time.now + 600 # 10 minutes from now (in the future, shouldn't come out)

    assert_equal(0, Resque.delayed_queue.find('_id' => timestamp.to_i).count, "delayed queue should be empty to start")

    assert_equal 1, Resque.enqueue_at(timestamp, SomeIvarJob, "path")

    # Confirm the correct keys were added
    assert_equal(1, Resque.delayed_queue.find_one('_id' => timestamp.to_i)['items'].size, "delayed queue should have one entry now")
    assert_equal(1, Resque.delayed_queue_schedule_size, "The delayed_queue_schedule should have 1 entry now")

    read_timestamp = Resque.next_delayed_timestamp

    assert_nil(read_timestamp, "No timestamps should be ready for queueing")
  end

  def test_something_in_the_future_comes_out_if_you_want_it_to
    timestamp = Time.now + 600 # 10 minutes from now

    assert_equal 1, Resque.enqueue_at(timestamp, SomeIvarJob, "path")

    # Passing an explicit at_time lets the caller look ahead of Time.now.
    read_timestamp = Resque.next_delayed_timestamp(timestamp)

    assert_equal(timestamp.to_i, read_timestamp, "The timestamp we pull out of redis should match the one we put in")
  end

  def test_enqueue_at_and_enqueue_in_are_equivalent
    timestamp = Time.now + 60

    assert_equal 1, Resque.enqueue_at(timestamp, SomeIvarJob, "path")
    assert_equal 2, Resque.enqueue_in(timestamp - Time.now, SomeIvarJob, "path")

    assert_equal(1, Resque.delayed_queue_schedule_size, "should have one timestamp in the delayed queue")
    assert_equal(2, Resque.delayed_queue.find_one('_id' => timestamp.to_i)['items'].size, "should have 2 items in the timestamp queue")
  end

  def test_empty_delayed_queue_peek
    assert_equal([], Resque.delayed_queue_peek(0,20))
  end

  def test_delayed_queue_peek
    t = Time.now
    expected_timestamps = (1..5).to_a.map do |i|
      (t + 60 + i).to_i
    end

    expected_timestamps.each do |timestamp|
      Resque.delayed_push(timestamp, {:class => SomeIvarJob.to_s, :args => 'blah1'})
    end

    # Peek a window in the middle: skip 2, take 3.
    timestamps = Resque.delayed_queue_peek(2,3)

    assert_equal(expected_timestamps[2,3], timestamps)
  end

  def test_delayed_queue_schedule_size
    assert_equal(0, Resque.delayed_queue_schedule_size)
    assert_equal 1, Resque.enqueue_at(Time.now+60, SomeIvarJob)
    assert_equal(1, Resque.delayed_queue_schedule_size)
  end

  def test_delayed_timestamp_size
    t = Time.now + 60
    assert_equal(0, Resque.delayed_timestamp_size(t))
    assert_equal 1, Resque.enqueue_at(t, SomeIvarJob)
    assert_equal(1, Resque.delayed_timestamp_size(t))
    assert_equal(0, Resque.delayed_timestamp_size(t.to_i+1))
  end

  def test_delayed_timestamp_peek
    t = Time.now + 60
    assert_equal([], Resque.delayed_timestamp_peek(t, 0, 1), "make sure it's an empty array, not nil")
    assert_equal 1, Resque.enqueue_at(t, SomeIvarJob)
    assert_equal(1, Resque.delayed_timestamp_peek(t, 0, 1).length)
    assert_equal 2, Resque.enqueue_at(t, SomeIvarJob)
    assert_equal(1, Resque.delayed_timestamp_peek(t, 0, 1).length)
    assert_equal(2, Resque.delayed_timestamp_peek(t, 0, 3).length)

    assert_equal(
      {'args' => [], 'class' => 'SomeIvarJob', 'queue' => 'ivar'},
      Resque.delayed_timestamp_peek(t, 0, 1).first
    )
  end

  def test_handle_delayed_items_with_no_items
    Resque::Scheduler.expects(:enqueue).never
    Resque::Scheduler.handle_delayed_items
  end

  def test_handle_delayed_items_with_items
    t = Time.now - 60 # in the past
    assert_equal 1, Resque.enqueue_at(t, SomeIvarJob)
    assert_equal 2, Resque.enqueue_at(t, SomeIvarJob)

    # 2 SomeIvarJob jobs should be created in the "ivar" queue
    Resque::Job.expects(:create).twice.with('ivar', 'SomeIvarJob', nil)
    Resque.expects(:queue_from_class).never # Should NOT need to load the class
    Resque::Scheduler.handle_delayed_items
  end

  def test_handle_delayed_items_with_items_in_the_future
    t = Time.now + 60 # in the future
    assert_equal 1, Resque.enqueue_at(t, SomeIvarJob)
    assert_equal 2, Resque.enqueue_at(t, SomeIvarJob)

    # 2 SomeIvarJob jobs should be created in the "ivar" queue
    Resque::Job.expects(:create).twice.with('ivar', 'SomeIvarJob', nil)
    Resque.expects(:queue_from_class).never # Should NOT need to load the class
    Resque::Scheduler.handle_delayed_items(t)
  end

  def test_enqueue_delayed_items_for_timestamp
    t = Time.now + 60

    assert_equal 1, Resque.enqueue_at(t, SomeIvarJob)
    assert_equal 2, Resque.enqueue_at(t, SomeIvarJob)

    # 2 SomeIvarJob jobs should be created in the "ivar" queue
    Resque::Job.expects(:create).twice.with('ivar', 'SomeIvarJob', nil)
    Resque.expects(:queue_from_class).never # Should NOT need to load the class

    Resque::Scheduler.enqueue_delayed_items_for_timestamp(t)

    # delayed queue for timestamp should be empty
    assert_equal(0, Resque.delayed_timestamp_peek(t, 0, 3).length)
  end

  def test_works_with_out_specifying_queue__upgrade_case
    t = Time.now - 60
    Resque.delayed_push(t, :class => 'SomeIvarJob')

    # Since we didn't specify :queue when calling delayed_push, it will be forced
    # to load the class to figure out the queue. This is the upgrade case from 1.0.4
    # to 1.0.5.
    Resque::Job.expects(:create).once.with(:ivar, 'SomeIvarJob', nil)

    Resque::Scheduler.handle_delayed_items
  end

  def test_clearing_delayed_queue
    t = Time.now + 120
    4.times { |i| assert_equal i + 1, Resque.enqueue_at(t, SomeIvarJob) }
    # NOTE(review): rand(100) timestamps can collide, in which case the count
    # returned would be 2, not 1 — this assertion is potentially flaky.
    4.times { |i| assert_equal 1, Resque.enqueue_at(Time.now + rand(100), SomeIvarJob) }

    Resque.reset_delayed_queue
    assert_equal(0, Resque.delayed_queue_schedule_size)
  end

  def test_remove_specific_item
    t = Time.now + 120
    assert_equal 1, Resque.enqueue_at(t, SomeIvarJob)

    Resque.remove_delayed(SomeIvarJob)
    assert_equal [], Resque.delayed_queue.find_one('_id' => t.to_i)['items']
  end

  def test_remove_bogus_item_leaves_the_rest_alone
    t = Time.now + 120
    assert_equal 1, Resque.enqueue_at(t, SomeIvarJob, "foo")
    assert_equal 2, Resque.enqueue_at(t, SomeIvarJob, "bar")
    assert_equal 3, Resque.enqueue_at(t, SomeIvarJob, "bar")
    assert_equal 4, Resque.enqueue_at(t, SomeIvarJob, "baz")

    # No arg-less SomeIvarJob was queued, so nothing should match the $pull.
    Resque.remove_delayed(SomeIvarJob)

    items = Resque.delayed_queue.find_one('_id' => t.to_i)['items']
    assert_equal(4, items.size)
  end

  def test_remove_specific_item_in_group_of_other_items_at_same_timestamp
    t = Time.now + 120
    assert_equal 1, Resque.enqueue_at(t, SomeIvarJob, "foo")
    assert_equal 2, Resque.enqueue_at(t, SomeIvarJob, "bar")
    assert_equal 3, Resque.enqueue_at(t, SomeIvarJob, "bar")
    assert_equal 4, Resque.enqueue_at(t, SomeIvarJob, "baz")

    Resque.remove_delayed(SomeIvarJob, "bar")

    items = Resque.delayed_queue.find_one('_id' => t.to_i)['items']
    assert_equal(2, items.size)
    assert_equal(1, Resque.delayed_queue_schedule_size)
  end

  def test_remove_specific_item_in_group_of_other_items_at_different_timestamps
    t = Time.now + 120
    assert_equal 1, Resque.enqueue_at(t, SomeIvarJob, "foo")
    assert_equal 1, Resque.enqueue_at(t + 1, SomeIvarJob, "bar")
    assert_equal 1, Resque.enqueue_at(t + 2, SomeIvarJob, "bar")
    assert_equal 1, Resque.enqueue_at(t + 3, SomeIvarJob, "baz")

    # remove_delayed pulls matches from every timestamp document (:multi).
    Resque.remove_delayed(SomeIvarJob, "bar")
    assert_equal(1, Resque.delayed_queue.find_one('_id' => t.to_i)['items'].size)
    assert_equal(0, Resque.delayed_queue.find_one('_id' => t.to_i + 1)['items'].size)
    assert_equal(0, Resque.delayed_queue.find_one('_id' => t.to_i + 2)['items'].size)
    assert_equal(1, Resque.delayed_queue.find_one('_id' => t.to_i + 3)['items'].size)
    assert_equal(2, Resque.count_all_scheduled_jobs)
  end
end
|
@@ -0,0 +1,115 @@
|
|
1
|
+
# Redis configuration file example
|
2
|
+
|
3
|
+
# By default Redis does not run as a daemon. Use 'yes' if you need it.
|
4
|
+
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
|
5
|
+
daemonize yes
|
6
|
+
|
7
|
+
# When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
|
8
|
+
# You can specify a custom pid file location here.
|
9
|
+
pidfile ./test/redis-test.pid
|
10
|
+
|
11
|
+
# Accept connections on the specified port, default is 6379
|
12
|
+
port 9736
|
13
|
+
|
14
|
+
# If you want you can bind a single interface, if the bind option is not
|
15
|
+
# specified all the interfaces will listen for connections.
|
16
|
+
#
|
17
|
+
# bind 127.0.0.1
|
18
|
+
|
19
|
+
# Close the connection after a client is idle for N seconds (0 to disable)
|
20
|
+
timeout 300
|
21
|
+
|
22
|
+
# Save the DB on disk:
|
23
|
+
#
|
24
|
+
# save <seconds> <changes>
|
25
|
+
#
|
26
|
+
# Will save the DB if both the given number of seconds and the given
|
27
|
+
# number of write operations against the DB occurred.
|
28
|
+
#
|
29
|
+
# In the example below the behaviour will be to save:
|
30
|
+
# after 900 sec (15 min) if at least 1 key changed
|
31
|
+
# after 300 sec (5 min) if at least 10 keys changed
|
32
|
+
# after 60 sec if at least 10000 keys changed
|
33
|
+
save 900 1
|
34
|
+
save 300 10
|
35
|
+
save 60 10000
|
36
|
+
|
37
|
+
# The filename where to dump the DB
|
38
|
+
dbfilename dump.rdb
|
39
|
+
|
40
|
+
# For default save/load DB in/from the working directory
|
41
|
+
# Note that you must specify a directory not a file name.
|
42
|
+
dir ./test/
|
43
|
+
|
44
|
+
# Set server verbosity to 'debug'
|
45
|
+
# it can be one of:
|
46
|
+
# debug (a lot of information, useful for development/testing)
|
47
|
+
# notice (moderately verbose, what you want in production probably)
|
48
|
+
# warning (only very important / critical messages are logged)
|
49
|
+
loglevel debug
|
50
|
+
|
51
|
+
# Specify the log file name. Also 'stdout' can be used to force
|
52
|
+
# the daemon to log on the standard output. Note that if you use standard
|
53
|
+
# output for logging but daemonize, logs will be sent to /dev/null
|
54
|
+
logfile stdout
|
55
|
+
|
56
|
+
# Set the number of databases. The default database is DB 0, you can select
|
57
|
+
# a different one on a per-connection basis using SELECT <dbid> where
|
58
|
+
# dbid is a number between 0 and 'databases'-1
|
59
|
+
databases 16
|
60
|
+
|
61
|
+
################################# REPLICATION #################################
|
62
|
+
|
63
|
+
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
|
64
|
+
# another Redis server. Note that the configuration is local to the slave
|
65
|
+
# so for example it is possible to configure the slave to save the DB with a
|
66
|
+
# different interval, or to listen to another port, and so on.
|
67
|
+
|
68
|
+
# slaveof <masterip> <masterport>
|
69
|
+
|
70
|
+
################################## SECURITY ###################################
|
71
|
+
|
72
|
+
# Require clients to issue AUTH <PASSWORD> before processing any other
|
73
|
+
# commands. This might be useful in environments in which you do not trust
|
74
|
+
# others with access to the host running redis-server.
|
75
|
+
#
|
76
|
+
# This should stay commented out for backward compatibility and because most
|
77
|
+
# people do not need auth (e.g. they run their own servers).
|
78
|
+
|
79
|
+
# requirepass foobared
|
80
|
+
|
81
|
+
################################### LIMITS ####################################
|
82
|
+
|
83
|
+
# Set the max number of connected clients at the same time. By default there
|
84
|
+
# is no limit, and it's up to the number of file descriptors the Redis process
|
85
|
+
# is able to open. The special value '0' means no limits.
|
86
|
+
# Once the limit is reached Redis will close all the new connections sending
|
87
|
+
# an error 'max number of clients reached'.
|
88
|
+
|
89
|
+
# maxclients 128
|
90
|
+
|
91
|
+
# Don't use more memory than the specified amount of bytes.
|
92
|
+
# When the memory limit is reached Redis will try to remove keys with an
|
93
|
+
# EXPIRE set. It will try to start freeing keys that are going to expire
|
94
|
+
# in little time and preserve keys with a longer time to live.
|
95
|
+
# Redis will also try to remove objects from free lists if possible.
|
96
|
+
#
|
97
|
+
# If all this fails, Redis will start to reply with errors to commands
|
98
|
+
# that will use more memory, like SET, LPUSH, and so on, and will continue
|
99
|
+
# to reply to most read-only commands like GET.
|
100
|
+
#
|
101
|
+
# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
|
102
|
+
# 'state' server or cache, not as a real DB. When Redis is used as a real
|
103
|
+
# database the memory usage will grow over the weeks, it will be obvious if
|
104
|
+
# it is going to use too much memory in the long run, and you'll have the time
|
105
|
+
# to upgrade. With maxmemory after the limit is reached you'll start to get
|
106
|
+
# errors for write operations, and this may even lead to DB inconsistency.
|
107
|
+
|
108
|
+
# maxmemory <bytes>
|
109
|
+
|
110
|
+
############################### ADVANCED CONFIG ###############################
|
111
|
+
|
112
|
+
# Glue small output buffers together in order to send small replies in a
|
113
|
+
# single TCP packet. Uses a bit more CPU but most of the times it is a win
|
114
|
+
# in terms of number of queries per second. Use 'yes' if unsure.
|
115
|
+
glueoutputbuf yes
|
@@ -0,0 +1,31 @@
|
|
1
|
+
require File.dirname(__FILE__) + '/test_helper'
|
2
|
+
|
3
|
+
# Pull in the server test_helper from resque
|
4
|
+
require 'resque/server/test_helper.rb'
|
5
|
+
|
6
|
+
# Smoke tests for the resque-web scheduler UI routes (/schedule, /delayed).
context "on GET to /schedule" do
  setup { get "/schedule" }

  should_respond_with_success
end

context "on GET to /schedule with scheduled jobs" do
  setup do
    # Load a schedule entry scoped to the production env so it renders.
    ENV['rails_env'] = 'production'
    Resque.schedule = {:some_ivar_job => {'cron' => "* * * * *", 'class' => 'SomeIvarJob', 'args' => "/tmp", 'rails_env' => 'production'}}
    Resque::Scheduler.load_schedule!
    get "/schedule"
  end

  should_respond_with_success

  test 'see the scheduled job' do
    assert last_response.body.include?('SomeIvarJob')
  end
end

context "on GET to /delayed" do
  setup { get "/delayed" }

  should_respond_with_success
end
|