rufus-scheduler 3.6.0 → 3.8.2
- checksums.yaml +5 -5
- data/CHANGELOG.md +30 -0
- data/CREDITS.md +10 -0
- data/LICENSE.txt +1 -1
- data/Makefile +1 -1
- data/README.md +153 -62
- data/lib/rufus/scheduler/job_array.rb +37 -47
- data/lib/rufus/scheduler/jobs_core.rb +369 -0
- data/lib/rufus/scheduler/jobs_one_time.rb +53 -0
- data/lib/rufus/scheduler/jobs_repeat.rb +335 -0
- data/lib/rufus/scheduler/locks.rb +41 -44
- data/lib/rufus/scheduler/util.rb +166 -150
- data/lib/rufus/scheduler.rb +537 -431
- data/rufus-scheduler.gemspec +1 -2
- metadata +11 -10
- data/lib/rufus/scheduler/jobs.rb +0 -701
data/lib/rufus/scheduler.rb
CHANGED
@@ -1,629 +1,735 @@
 
-require 'set'
 require 'date' if RUBY_VERSION < '1.9.0'
-require 'time'
 require 'thread'
 
 require 'fugit'
 
 
-module Rufus
+module Rufus; end
 
-
+class Rufus::Scheduler
 
-
+  VERSION = '3.8.2'
 
-
+  EoTime = ::EtOrbi::EoTime
 
-
-
-
-
+  require 'rufus/scheduler/util'
+  require 'rufus/scheduler/jobs_core'
+  require 'rufus/scheduler/jobs_one_time'
+  require 'rufus/scheduler/jobs_repeat'
+  require 'rufus/scheduler/job_array'
+  require 'rufus/scheduler/locks'
 
-
-
-
-
+  #
+  # A common error class for rufus-scheduler
+  #
+  class Error < StandardError; end
 
-
-
-
-
+  #
+  # This error is thrown when the :timeout attribute triggers
+  #
+  class TimeoutError < Error; end
 
-
-
-
-
-
+  #
+  # For when the scheduler is not running
+  # (it got shut down or didn't start because of a lock)
+  #
+  class NotRunningError < Error; end
 
-
-
+  #MIN_WORK_THREADS = 3
+  MAX_WORK_THREADS = 28
 
-
-
-  attr_reader :thread
-  attr_reader :thread_key
-  attr_reader :mutexes
+  attr_accessor :frequency
+  attr_accessor :discard_past
 
-
-
+  attr_reader :started_at
+  attr_reader :paused_at
+  attr_reader :thread
+  attr_reader :thread_key
+  attr_reader :mutexes
 
-
+  #attr_accessor :min_work_threads
+  attr_accessor :max_work_threads
 
-
+  attr_accessor :stderr
 
-
+  attr_reader :work_queue
 
-
+  def initialize(opts={})
 
-
-    @paused = false
+    @opts = opts
 
-
+    @started_at = nil
+    @paused_at = nil
 
-
-    @mutexes = {}
+    @jobs = JobArray.new
 
-
+    @frequency = Rufus::Scheduler.parse(opts[:frequency] || 0.300)
+    @discard_past = opts.has_key?(:discard_past) ? opts[:discard_past] : true
 
-
-    @max_work_threads = opts[:max_work_threads] || MAX_WORK_THREADS
+    @mutexes = {}
 
-
+    @work_queue = Queue.new
+    @join_queue = Queue.new
 
-
+    #@min_work_threads =
+    #  opts[:min_work_threads] || opts[:min_worker_threads] ||
+    #  MIN_WORK_THREADS
+    @max_work_threads =
+      opts[:max_work_threads] || opts[:max_worker_threads] ||
+      MAX_WORK_THREADS
 
-
-      if lockfile = opts[:lockfile]
-        Rufus::Scheduler::FileLock.new(lockfile)
-      else
-        opts[:scheduler_lock] || Rufus::Scheduler::NullLock.new
-      end
+    @stderr = $stderr
 
-
+    @thread_key = "rufus_scheduler_#{self.object_id}"
 
-
-
+    @scheduler_lock =
+      if lockfile = opts[:lockfile]
+        Rufus::Scheduler::FileLock.new(lockfile)
+      else
+        opts[:scheduler_lock] || Rufus::Scheduler::NullLock.new
+      end
 
-
-  end
+    @trigger_lock = opts[:trigger_lock] || Rufus::Scheduler::NullLock.new
 
-  #
-
-  def self.singleton(opts={})
+    # If we can't grab the @scheduler_lock, don't run.
+    lock || return
 
-
-
+    start
+  end
 
-
-
-
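Note: for reference, a minimal sketch of how the constructor options handled above are typically passed; the values and the `scheduler` variable name are illustrative, not part of this diff.

```ruby
require 'rufus-scheduler'

scheduler = Rufus::Scheduler.new(
  frequency: '5s',                     # parsed via Rufus::Scheduler.parse
  discard_past: true,                  # default wired in above
  max_work_threads: 14,                # caps the work thread pool
  lockfile: '.rufus-scheduler.lock')   # only the process that flocks it runs
```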
+  # Returns a singleton Rufus::Scheduler instance
+  #
+  def self.singleton(opts={})
 
-
-
-  #
-  # For now, let's assume the people pointing at rufus-scheduler/master
-  # on GitHub know what they do...
-  #
-  def self.start_new
+    @singleton ||= Rufus::Scheduler.new(opts)
+  end
 
-
-
+  # Alias for Rufus::Scheduler.singleton
+  #
+  def self.s(opts={}); singleton(opts); end
 
-
+  # Releasing the gem would probably require redirecting .start_new to
+  # .new and emit a simple deprecation message.
+  #
+  # For now, let's assume the people pointing at rufus-scheduler/master
+  # on GitHub know what they do...
+  #
+  def self.start_new
 
-
+    fail 'this is rufus-scheduler 3.x, use .new instead of .start_new'
+  end
 
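Note: a small usage sketch of the singleton accessors defined above; variable names are illustrative.

```ruby
s1 = Rufus::Scheduler.singleton
s2 = Rufus::Scheduler.s   # shorthand alias

s1.equal?(s2)             # => true, the instance is memoized at class level
```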
-
-  #
-  # which provokes https://github.com/jmettraux/rufus-scheduler/issues/98
-  # using the following instead:
-  #
-    @jobs.array.each { |j| j.unschedule }
+  def uptime
 
-
+    @started_at ? EoTime.now - @started_at : nil
+  end
 
-
-      join_all_work_threads
-    elsif opt == :kill
-      kill_all_work_threads
-    end
+  def around_trigger(job)
 
-
-
+    yield
+  end
 
-
+  def uptime_s
 
-
+    uptime ? self.class.to_duration(uptime) : ''
+  end
 
-
-  end
+  def join(time_limit=nil)
 
-
+    fail NotRunningError.new('cannot join scheduler that is not running') \
+      unless @thread
+    fail ThreadError.new('scheduler thread cannot join itself') \
+      if @thread == Thread.current
 
-
+    if time_limit
+      time_limit_join(time_limit)
+    else
+      no_time_limit_join
     end
+  end
 
-
+  def down?
 
-
-
-    ) unless @thread
+    ! @started_at
+  end
 
-
-  end
+  def up?
 
-
+    !! @started_at
+  end
 
-
-  end
+  def paused?
 
-
+    !! @paused_at
+  end
 
-
-  end
+  def pause
 
-
+    @paused_at = EoTime.now
+  end
 
-
-  end
+  def resume(opts={})
 
-
+    dp = opts[:discard_past]
+    jobs.each { |job| job.resume_discard_past = dp }
 
-
-
+    @paused_at = nil
+  end
 
-
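Note: a sketch of the pause/resume cycle shown above; the :discard_past value is illustrative and is handed to each job via Job#resume_discard_past.

```ruby
scheduler = Rufus::Scheduler.new

scheduler.pause
scheduler.paused?                     # => true

scheduler.resume(discard_past: true)  # skip triggers missed while paused
scheduler.paused?                     # => false
```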
+  #--
+  # scheduling methods
+  #++
 
-
-  end
+  def at(time, callable=nil, opts={}, &block)
 
-
-
-  #++
+    do_schedule(:once, time, callable, opts, opts[:job], block)
+  end
 
-
+  def schedule_at(time, callable=nil, opts={}, &block)
 
-
-
+    do_schedule(:once, time, callable, opts, true, block)
+  end
 
-
+  def in(duration, callable=nil, opts={}, &block)
 
-
-
+    do_schedule(:once, duration, callable, opts, opts[:job], block)
+  end
 
-
+  def schedule_in(duration, callable=nil, opts={}, &block)
 
-
-
+    do_schedule(:once, duration, callable, opts, true, block)
+  end
 
-
+  def every(duration, callable=nil, opts={}, &block)
 
-
-
+    do_schedule(:every, duration, callable, opts, opts[:job], block)
+  end
 
-
+  def schedule_every(duration, callable=nil, opts={}, &block)
 
-
-
+    do_schedule(:every, duration, callable, opts, true, block)
+  end
 
-
+  def interval(duration, callable=nil, opts={}, &block)
 
-
-
+    do_schedule(:interval, duration, callable, opts, opts[:job], block)
+  end
 
-
+  def schedule_interval(duration, callable=nil, opts={}, &block)
 
-
-
+    do_schedule(:interval, duration, callable, opts, true, block)
+  end
 
-
+  def cron(cronline, callable=nil, opts={}, &block)
 
-
-
+    do_schedule(:cron, cronline, callable, opts, opts[:job], block)
+  end
 
-
+  def schedule_cron(cronline, callable=nil, opts={}, &block)
 
-
-
+    do_schedule(:cron, cronline, callable, opts, true, block)
+  end
+
+  def schedule(arg, callable=nil, opts={}, &block)
 
-
+    callable, opts = nil, callable if callable.is_a?(Hash)
+    opts = opts.dup
 
-
+    opts[:_t] = Rufus::Scheduler.parse(arg, opts)
+
+    case opts[:_t]
+    when ::Fugit::Cron then schedule_cron(arg, callable, opts, &block)
+    when ::EtOrbi::EoTime, Time then schedule_at(arg, callable, opts, &block)
+    else schedule_in(arg, callable, opts, &block)
     end
+  end
 
-
+  def repeat(arg, callable=nil, opts={}, &block)
 
-
-
+    callable, opts = nil, callable if callable.is_a?(Hash)
+    opts = opts.dup
 
-
+    opts[:_t] = Rufus::Scheduler.parse(arg, opts)
 
-
-
-
-    else schedule_in(arg, callable, opts, &block)
-    end
+    case opts[:_t]
+    when ::Fugit::Cron then schedule_cron(arg, callable, opts, &block)
+    else schedule_every(arg, callable, opts, &block)
     end
+  end
 
-
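Note: a minimal sketch of the scheduling entry points above; the blocks and time arguments are illustrative.

```ruby
scheduler = Rufus::Scheduler.new

scheduler.in('10m')              { puts 'one-shot, ten minutes from now' }
scheduler.at('2030-12-12 12:00') { puts 'one-shot, at a point in time' }
scheduler.every('1h')            { puts 'repeating, every hour' }
scheduler.cron('5 0 * * *')      { puts 'repeating, five past midnight' }

# #schedule and #repeat dispatch on the parsed argument, as coded above
scheduler.schedule('10m')         { }  # duration  -> behaves like #in
scheduler.schedule(Time.now + 60) { }  # time      -> behaves like #at
scheduler.repeat('0 9 * * 1-5')   { }  # cron line -> behaves like #cron
```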
+  def unschedule(job_or_job_id)
 
-
-    opts = opts.dup
+    job, job_id = fetch(job_or_job_id)
 
-
+    fail ArgumentError.new("no job found with id '#{job_id}'") unless job
 
-
-
-
-
-
+    job.unschedule if job
+  end
+
+  #--
+  # jobs methods
+  #++
 
-
+  # Returns all the scheduled jobs
+  # (even those right before re-schedule).
+  #
+  def jobs(opts={})
 
-
+    opts = { opts => true } if opts.is_a?(Symbol)
 
-
+    jobs = @jobs.to_a
 
-
+    if opts[:running]
+      jobs = jobs.select { |j| j.running? }
+    elsif ! opts[:all]
+      jobs = jobs.reject { |j| j.next_time.nil? || j.unscheduled_at }
     end
 
-
-
-  #++
+    tags = Array(opts[:tag] || opts[:tags]).collect(&:to_s)
+    jobs = jobs.reject { |j| tags.find { |t| ! j.tags.include?(t) } }
 
-
-
-  #
-  def jobs(opts={})
+    jobs
+  end
 
-
+  def at_jobs(opts={})
 
-
+    jobs(opts).select { |j| j.is_a?(Rufus::Scheduler::AtJob) }
+  end
 
-
-      jobs = jobs.select { |j| j.running? }
-    elsif ! opts[:all]
-      jobs = jobs.reject { |j| j.next_time.nil? || j.unscheduled_at }
-    end
+  def in_jobs(opts={})
 
-
-
+    jobs(opts).select { |j| j.is_a?(Rufus::Scheduler::InJob) }
+  end
 
-
-  end
+  def every_jobs(opts={})
 
-
+    jobs(opts).select { |j| j.is_a?(Rufus::Scheduler::EveryJob) }
+  end
 
-
-  end
+  def interval_jobs(opts={})
 
-
+    jobs(opts).select { |j| j.is_a?(Rufus::Scheduler::IntervalJob) }
+  end
 
-
-  end
+  def cron_jobs(opts={})
 
-
+    jobs(opts).select { |j| j.is_a?(Rufus::Scheduler::CronJob) }
+  end
 
-
-  end
+  def job(job_id)
 
-
+    @jobs[job_id]
+  end
 
-
-
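Note: a sketch of the job-listing helpers above; the tags and the captured job_id are illustrative.

```ruby
scheduler.every('10s', tag: 'backup') { }
job_id = scheduler.in('1h', tag: 'report') { }  # #in returns the job id

scheduler.jobs(tag: 'backup').size   # => 1
scheduler.every_jobs.size            # => 1 (EveryJob instances only)
scheduler.job(job_id)                # look a job up by its id
```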
+  # Returns true if the scheduler has acquired the [exclusive] lock and
+  # thus may run.
+  #
+  # Most of the time, a scheduler is run alone and this method should
+  # return true. It is useful in cases where among a group of applications
+  # only one of them should run the scheduler. For schedulers that should
+  # not run, the method should return false.
+  #
+  # Out of the box, rufus-scheduler proposes the
+  # :lockfile => 'path/to/lock/file' scheduler start option. It makes
+  # it easy for schedulers on the same machine to determine which should
+  # run (the first to write the lockfile and lock it). It uses "man 2 flock"
+  # so it probably won't work reliably on distributed file systems.
+  #
+  # If one needs to use a special/different locking mechanism, the scheduler
+  # accepts :scheduler_lock => lock_object. lock_object only needs to respond
+  # to #lock
+  # and #unlock, and both of these methods should be idempotent.
+  #
+  # Look at rufus/scheduler/locks.rb for an example.
+  #
+  def lock
+
+    @scheduler_lock.lock
+  end
 
-
+  # Sister method to #lock, is called when the scheduler shuts down.
+  #
+  def unlock
 
-
-
+    @trigger_lock.unlock
+    @scheduler_lock.unlock
+  end
 
-
+  # Callback called when a job is triggered. If the lock cannot be acquired,
+  # the job won't run (though it'll still be scheduled to run again if
+  # necessary).
+  #
+  def confirm_lock
 
-
-
+    @trigger_lock.lock
+  end
 
-
-
-
-
-
-
-  # not run, the method should return false.
-  #
-  # Out of the box, rufus-scheduler proposes the
-  # :lockfile => 'path/to/lock/file' scheduler start option. It makes
-  # it easy for schedulers on the same machine to determine which should
-  # run (the first to write the lockfile and lock it). It uses "man 2 flock"
-  # so it probably won't work reliably on distributed file systems.
-  #
-  # If one needs to use a special/different locking mechanism, the scheduler
-  # accepts :scheduler_lock => lock_object. lock_object only needs to respond
-  # to #lock
-  # and #unlock, and both of these methods should be idempotent.
-  #
-  # Look at rufus/scheduler/locks.rb for an example.
-  #
-  def lock
-
-    @scheduler_lock.lock
-  end
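Note: a sketch of plugging in a custom scheduler lock as documented above; AlwaysLock is a made-up example class, any object answering #lock and #unlock will do.

```ruby
class AlwaysLock
  def lock; true; end    # truthy return means "this scheduler may run"
  def unlock; true; end
end

Rufus::Scheduler.new(scheduler_lock: AlwaysLock.new)

# or, out of the box, the flock-based variant:
Rufus::Scheduler.new(lockfile: '/tmp/my_app.rufus-scheduler.lock')
```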
+  # Returns true if this job is currently scheduled.
+  #
+  # Takes extra care to answer true if the job is a repeat job
+  # currently firing.
+  #
+  def scheduled?(job_or_job_id)
 
-
-  #
-  def unlock
+    job, _ = fetch(job_or_job_id)
 
-
-
-
+    !! (job && job.unscheduled_at.nil? && job.next_time != nil)
+  end
+
+  # Lists all the threads associated with this scheduler.
+  #
+  def threads
 
-
-
-  # necessary).
-  #
-  def confirm_lock
+    Thread.list.select { |t| t[thread_key] }
+  end
 
-
+  # Lists all the work threads (the ones actually running the scheduled
+  # block code)
+  #
+  # Accepts a query option, which can be set to:
+  # * :all (default), returns all the threads that are work threads
+  # or are currently running a job
+  # * :active, returns all threads that are currently running a job
+  # * :vacant, returns the threads that are not running a job
+  #
+  # If, thanks to :blocking => true, a job is scheduled to monopolize the
+  # main scheduler thread, that thread will get returned when :active or
+  # :all.
+  #
+  def work_threads(query=:all)
+
+    ts = threads.select { |t| t[:rufus_scheduler_work_thread] }
+
+    case query
+    when :active then ts.select { |t| t[:rufus_scheduler_job] }
+    when :vacant then ts.reject { |t| t[:rufus_scheduler_job] }
+    else ts
     end
+  end
 
-
-  #
-  # Takes extra care to answer true if the job is a repeat job
-  # currently firing.
-  #
-  def scheduled?(job_or_job_id)
+  def running_jobs(opts={})
 
-
+    jobs(opts.merge(:running => true))
+  end
 
-
-  end
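Note: a quick sketch of inspecting a live scheduler with the thread and job queries above; the `scheduler` variable and `job_id` are assumed captured from earlier calls.

```ruby
scheduler.threads.size            # scheduler thread plus its work threads
scheduler.work_threads(:active)   # work threads currently running a job
scheduler.work_threads(:vacant)   # idle work threads
scheduler.running_jobs.size       # jobs being triggered right now
scheduler.scheduled?(job_id)      # is that job still on the schedule?
```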
+  def occurrences(time0, time1, format=:per_job)
 
-
-  #
-  def threads
+    h = {}
 
-
+    jobs.each do |j|
+      os = j.occurrences(time0, time1)
+      h[j] = os if os.any?
     end
 
-
-
-
-
-
-
-  # * :active, returns all threads that are currently running a job
-  # * :vacant, returns the threads that are not running a job
-  #
-  # If, thanks to :blocking => true, a job is scheduled to monopolize the
-  # main scheduler thread, that thread will get returned when :active or
-  # :all.
-  #
-  def work_threads(query=:all)
-
-    ts = threads.select { |t| t[:rufus_scheduler_work_thread] }
-
-    case query
-    when :active then ts.select { |t| t[:rufus_scheduler_job] }
-    when :vacant then ts.reject { |t| t[:rufus_scheduler_job] }
-    else ts
-    end
+    if format == :timeline
+      a = []
+      h.each { |j, ts| ts.each { |t| a << [ t, j ] } }
+      a.sort_by { |(t, _)| t }
+    else
+      h
     end
+  end
 
-
+  def timeline(time0, time1)
 
-
-
+    occurrences(time0, time1, :timeline)
+  end
 
-
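Note: a sketch of querying upcoming triggers with the two formats implemented above; the time window is illustrative.

```ruby
t0 = Time.now
t1 = t0 + 24 * 3600

scheduler.occurrences(t0, t1)  # { job => [ EoTime, ... ], ... } per job
scheduler.timeline(t0, t1)     # [ [ EoTime, job ], ... ] sorted by time
```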
+  def on_error(job, err)
 
-
+    pre = err.object_id.to_s
 
-
-      os = j.occurrences(time0, time1)
-      h[j] = os if os.any?
-    end
+    ms = {}; mutexes.each { |k, v| ms[k] = v.locked? }
 
-
-
-
-
-
-
+    stderr.puts("{ #{pre} rufus-scheduler intercepted an error:")
+    if job
+      stderr.puts(" #{pre} job:")
+      stderr.puts(" #{pre} #{job.class} #{job.original.inspect} #{job.opts.inspect}")
+      stderr.puts(" #{pre} #{job.source_location.inspect}")
+      # TODO: eventually use a Job#detail or something like that
+    else
+      stderr.puts(" #{pre} job: (error did not occur in a job)")
+    end
+    stderr.puts(" #{pre} error:")
+    stderr.puts(" #{pre} #{err.object_id}")
+    stderr.puts(" #{pre} #{err.class}")
+    stderr.puts(" #{pre} #{err}")
+    err.backtrace.each do |l|
+      stderr.puts(" #{pre} #{l}")
+    end
+    stderr.puts(" #{pre} tz:")
+    stderr.puts(" #{pre} ENV['TZ']: #{ENV['TZ']}")
+    stderr.puts(" #{pre} Time.now: #{Time.now}")
+    stderr.puts(" #{pre} local_tzone: #{EoTime.local_tzone.inspect}")
+    stderr.puts(" #{pre} et-orbi:")
+    stderr.puts(" #{pre} #{EoTime.platform_info}")
+    stderr.puts(" #{pre} scheduler:")
+    stderr.puts(" #{pre} object_id: #{object_id}")
+    stderr.puts(" #{pre} opts:")
+    stderr.puts(" #{pre} #{@opts.inspect}")
+    stderr.puts(" #{pre} frequency: #{self.frequency}")
+    stderr.puts(" #{pre} scheduler_lock: #{@scheduler_lock.inspect}")
+    stderr.puts(" #{pre} trigger_lock: #{@trigger_lock.inspect}")
+    stderr.puts(" #{pre} uptime: #{uptime} (#{uptime_s})")
+    stderr.puts(" #{pre} down?: #{down?}")
+    stderr.puts(" #{pre} frequency: #{frequency.inspect}")
+    stderr.puts(" #{pre} discard_past: #{discard_past.inspect}")
+    stderr.puts(" #{pre} started_at: #{started_at.inspect}")
+    stderr.puts(" #{pre} paused_at: #{paused_at.inspect}")
+    stderr.puts(" #{pre} threads: #{self.threads.size}")
+    stderr.puts(" #{pre} thread: #{self.thread}")
+    stderr.puts(" #{pre} thread_key: #{self.thread_key}")
+    stderr.puts(" #{pre} work_threads: #{work_threads.size}")
+    stderr.puts(" #{pre} active: #{work_threads(:active).size}")
+    stderr.puts(" #{pre} vacant: #{work_threads(:vacant).size}")
+    stderr.puts(" #{pre} max_work_threads: #{max_work_threads}")
+    stderr.puts(" #{pre} mutexes: #{ms.inspect}")
+    stderr.puts(" #{pre} jobs: #{jobs.size}")
+    stderr.puts(" #{pre} at_jobs: #{at_jobs.size}")
+    stderr.puts(" #{pre} in_jobs: #{in_jobs.size}")
+    stderr.puts(" #{pre} every_jobs: #{every_jobs.size}")
+    stderr.puts(" #{pre} interval_jobs: #{interval_jobs.size}")
+    stderr.puts(" #{pre} cron_jobs: #{cron_jobs.size}")
+    stderr.puts(" #{pre} running_jobs: #{running_jobs.size}")
+    stderr.puts(" #{pre} work_queue:")
+    stderr.puts(" #{pre} size: #{@work_queue.size}")
+    stderr.puts(" #{pre} num_waiting: #{@work_queue.num_waiting}")
+    stderr.puts(" #{pre} join_queue:")
+    stderr.puts(" #{pre} size: #{@join_queue.size}")
+    stderr.puts(" #{pre} num_waiting: #{@join_queue.num_waiting}")
+    stderr.puts("} #{pre} .")
+
+  rescue => e
+
+    stderr.puts("failure in #on_error itself:")
+    stderr.puts(e.inspect)
+    stderr.puts(e.backtrace)
+
+  ensure
+
+    stderr.flush
+  end
+
+  def shutdown(opt=nil)
+
+    opts =
+      case opt
+      when Symbol then { opt => true }
+      when Hash then opt
+      else {}
       end
-  end
 
-
+    @jobs.unschedule_all
 
-
+    if opts[:wait] || opts[:join]
+      join_shutdown(opts)
+    elsif opts[:kill]
+      kill_shutdown(opts)
+    else
+      regular_shutdown(opts)
     end
 
-
+    @work_queue.clear
 
-
+    unlock
 
-
+    @thread.join unless @thread == Thread.current
+  end
+  alias stop shutdown
 
-
-    stderr.puts(" #{pre} job:")
-    stderr.puts(" #{pre} #{job.class} #{job.original.inspect} #{job.opts.inspect}")
-    # TODO: eventually use a Job#detail or something like that
-    stderr.puts(" #{pre} error:")
-    stderr.puts(" #{pre} #{err.object_id}")
-    stderr.puts(" #{pre} #{err.class}")
-    stderr.puts(" #{pre} #{err}")
-    err.backtrace.each do |l|
-      stderr.puts(" #{pre} #{l}")
-    end
-    stderr.puts(" #{pre} tz:")
-    stderr.puts(" #{pre} ENV['TZ']: #{ENV['TZ']}")
-    stderr.puts(" #{pre} Time.now: #{Time.now}")
-    stderr.puts(" #{pre} local_tzone: #{EoTime.local_tzone.inspect}")
-    stderr.puts(" #{pre} et-orbi:")
-    stderr.puts(" #{pre} #{EoTime.platform_info}")
-    stderr.puts(" #{pre} scheduler:")
-    stderr.puts(" #{pre} object_id: #{object_id}")
-    stderr.puts(" #{pre} opts:")
-    stderr.puts(" #{pre} #{@opts.inspect}")
-    stderr.puts(" #{pre} frequency: #{self.frequency}")
-    stderr.puts(" #{pre} scheduler_lock: #{@scheduler_lock.inspect}")
-    stderr.puts(" #{pre} trigger_lock: #{@trigger_lock.inspect}")
-    stderr.puts(" #{pre} uptime: #{uptime} (#{uptime_s})")
-    stderr.puts(" #{pre} down?: #{down?}")
-    stderr.puts(" #{pre} threads: #{self.threads.size}")
-    stderr.puts(" #{pre} thread: #{self.thread}")
-    stderr.puts(" #{pre} thread_key: #{self.thread_key}")
-    stderr.puts(" #{pre} work_threads: #{work_threads.size}")
-    stderr.puts(" #{pre} active: #{work_threads(:active).size}")
-    stderr.puts(" #{pre} vacant: #{work_threads(:vacant).size}")
-    stderr.puts(" #{pre} max_work_threads: #{max_work_threads}")
-    stderr.puts(" #{pre} mutexes: #{ms.inspect}")
-    stderr.puts(" #{pre} jobs: #{jobs.size}")
-    stderr.puts(" #{pre} at_jobs: #{at_jobs.size}")
-    stderr.puts(" #{pre} in_jobs: #{in_jobs.size}")
-    stderr.puts(" #{pre} every_jobs: #{every_jobs.size}")
-    stderr.puts(" #{pre} interval_jobs: #{interval_jobs.size}")
-    stderr.puts(" #{pre} cron_jobs: #{cron_jobs.size}")
-    stderr.puts(" #{pre} running_jobs: #{running_jobs.size}")
-    stderr.puts(" #{pre} work_queue: #{work_queue.size}")
-    stderr.puts("} #{pre} .")
-
-  rescue => e
-
-    stderr.puts("failure in #on_error itself:")
-    stderr.puts(e.inspect)
-    stderr.puts(e.backtrace)
-
-  ensure
-
-    stderr.flush
-  end
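Note: per-scheduler error handling is customized by overriding #on_error; a minimal sketch, the logger setup is illustrative and not part of the gem.

```ruby
require 'logger'

def scheduler.on_error(job, error)
  Logger.new($stdout).error(
    "err#{error.object_id} intercepted in job #{job.inspect}: #{error.inspect}")
end
```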
+  protected
 
-
+  def join_shutdown(opts)
 
-
-
-  def fetch(job_or_job_id)
+    limit = opts[:wait] || opts[:join]
+    limit = limit.is_a?(Numeric) ? limit : nil
 
-
-
-
-
-
-
+    #@started_at = nil
+    #
+    # when @started_at is nil, the scheduler thread exits, here
+    # we want it to exit when all the work threads have been joined
+    # hence it's set to nil later on
+    #
+    @paused_at = EoTime.now
 
-
+    (work_threads.size * 2 + 1).times { @work_queue << :shutdown }
 
-
+    work_threads
+      .collect { |wt|
+        wt == Thread.current ? nil : Thread.new { wt.join(limit); wt.kill } }
+      .each { |st|
+        st.join if st }
 
-
-
+    @started_at = nil
+  end
 
-
+  def kill_shutdown(opts)
 
-
+    @started_at = nil
+    work_threads.each(&:kill)
+  end
+
+  def regular_shutdown(opts)
+
+    @started_at = nil
+  end
+
+  def time_limit_join(limit)
 
-
+    fail ArgumentError.new("limit #{limit.inspect} should be > 0") \
+      unless limit.is_a?(Numeric) && limit > 0
 
-
+    t0 = monow
+    f = [ limit.to_f / 20, 0.100 ].min
+
+    while monow - t0 < limit
+      r =
+        begin
+          @join_queue.pop(true)
+        rescue ThreadError
+          # #<ThreadError: queue empty>
+          false
+        end
+      return r if r
+      sleep(f)
     end
 
-
+    nil
+  end
+
+  def no_time_limit_join
 
-
+    @join_queue.pop
+  end
+
+  # Returns [ job, job_id ]
+  #
+  def fetch(job_or_job_id)
+
+    if job_or_job_id.respond_to?(:job_id)
+      [ job_or_job_id, job_or_job_id.job_id ]
+    else
+      [ job(job_or_job_id), job_or_job_id ]
     end
+  end
 
-
-  #
-  # work_threads.each { |t| t.raise(KillSignal) }
-  #end
+  def terminate_all_jobs
 
-
+    jobs.each { |j| j.unschedule }
 
-
+    sleep 0.01 while running_jobs.size > 0
+  end
 
-
-
+  #def free_all_work_threads
+  #
+  #  work_threads.each { |t| t.raise(KillSignal) }
+  #end
 
-
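Note: a sketch of the shutdown flavours dispatched above; the numeric wait limit is illustrative.

```ruby
scheduler.shutdown             # unschedule jobs, don't wait for running ones
scheduler.shutdown(:wait)      # join the work threads first
scheduler.shutdown(wait: 10)   # ...giving each join up to ~10 seconds
scheduler.shutdown(:kill)      # kill the work threads outright
scheduler.stop                 # alias declared above
```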
+  def start
+
+    @started_at = EoTime.now
+
+    @thread =
+      Thread.new do
+
+        while @started_at do
+          begin
 
             unschedule_jobs
-            trigger_jobs unless @paused
+            trigger_jobs unless @paused_at
             timeout_jobs
 
             sleep(@frequency)
+
+          rescue => err
+            #
+            # for `blocking: true` jobs mostly
+            #
+            on_error(nil, err)
           end
         end
 
-
-
-    @thread[:name] = @opts[:thread_name] || "#{@thread_key}_scheduler"
-      end
+        rejoin
+      end
 
-
+    @thread[@thread_key] = true
+    @thread[:rufus_scheduler] = self
+    @thread[:name] = @opts[:thread_name] || "#{@thread_key}_scheduler"
+  end
 
-
-  end
+  def unschedule_jobs
 
-
+    @jobs.delete_unscheduled
+  end
 
-
+  def trigger_jobs
 
-
+    now = EoTime.now
 
-
-
+    @jobs.each(now) do |job|
+
+      job.trigger(now)
     end
+  end
 
-
+  def timeout_jobs
 
-
+    work_threads(:active).each do |t|
 
-
-
-
+      job = t[:rufus_scheduler_job]
+      to = t[:rufus_scheduler_timeout]
+      ts = t[:rufus_scheduler_time]
 
-
-
+      next unless job && to && ts
+      # thread might just have become inactive (job -> nil)
 
-
+      to = ts + to unless to.is_a?(EoTime)
 
-
+      next if to > EoTime.now
 
-
-    end
+      t.raise(Rufus::Scheduler::TimeoutError)
     end
+  end
+
+  def rejoin
 
-
+    (@join_queue.num_waiting * 2 + 1).times { @join_queue << @thread }
+  end
 
-
-      'cannot schedule, scheduler is down or shutting down'
-    ) if @started_at.nil?
+  def do_schedule(job_type, t, callable, opts, return_job_instance, block)
 
-
-
+    fail NotRunningError.new(
+      'cannot schedule, scheduler is down or shutting down'
+    ) if @started_at.nil?
 
-
+    callable, opts = nil, callable if callable.is_a?(Hash)
+    opts = opts.dup unless opts.has_key?(:_t)
 
-
-      case job_type
-      when :once
-        opts[:_t] ||= Rufus::Scheduler.parse(t, opts)
-        opts[:_t].is_a?(Numeric) ? InJob : AtJob
-      when :every
-        EveryJob
-      when :interval
-        IntervalJob
-      when :cron
-        CronJob
-      end
+    return_job_instance ||= opts[:job]
 
-
-
+    job_class =
+      case job_type
+      when :once
+        opts[:_t] ||= Rufus::Scheduler.parse(t, opts)
+        opts[:_t].is_a?(Numeric) ? InJob : AtJob
+      when :every
+        EveryJob
+      when :interval
+        IntervalJob
+      when :cron
+        CronJob
+      end
 
-
+    job = job_class.new(self, t, opts, block || callable)
+    job.check_frequency
 
-
-
+    @jobs.push(job)
+
+    return_job_instance ? job : job.job_id
   end
+
+  def monow; self.class.monow; end
+  def ltstamp; self.class.ltstamp; end
 end
 
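Note: a closing sketch tying do_schedule and timeout_jobs together; the durations and block bodies are illustrative.

```ruby
job_id = scheduler.in('2h') { }              # plain #in returns a job id
job    = scheduler.schedule_in('2h') { }     # schedule_* returns the job itself
job    = scheduler.in('2h', job: true) { }   # or ask #in for the instance

# timeout_jobs raises Rufus::Scheduler::TimeoutError in the work thread
# once a job runs past its :timeout attribute
scheduler.in('1s', timeout: '5m') do
  # long-running work, interrupted after roughly five minutes
end
```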