rufus-scheduler 3.4.2 → 3.7.0
- checksums.yaml +5 -5
- data/CHANGELOG.md +393 -0
- data/CREDITS.md +137 -0
- data/LICENSE.txt +1 -1
- data/Makefile +5 -1
- data/README.md +175 -96
- data/lib/rufus/scheduler.rb +528 -440
- data/lib/rufus/scheduler/job_array.rb +37 -47
- data/lib/rufus/scheduler/jobs_core.rb +363 -0
- data/lib/rufus/scheduler/jobs_one_time.rb +53 -0
- data/lib/rufus/scheduler/jobs_repeat.rb +333 -0
- data/lib/rufus/scheduler/locks.rb +41 -44
- data/lib/rufus/scheduler/util.rb +75 -124
- data/rufus-scheduler.gemspec +17 -6
- metadata +39 -36
- data/CHANGELOG.txt +0 -353
- data/CREDITS.txt +0 -124
- data/TODO.txt +0 -151
- data/fail.txt +0 -2
- data/fail18.txt +0 -12
- data/lib/rufus/scheduler/cronline.rb +0 -498
- data/lib/rufus/scheduler/jobs.rb +0 -650
- data/log.txt +0 -285
- data/n.txt +0 -38
- data/pics.txt +0 -15
- data/sofia.md +0 -89
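
To pick up this release in an application, a Gemfile entry along these lines should do (the pessimistic version constraint is only an illustration):

  # Gemfile
  gem 'rufus-scheduler', '~> 3.7'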
data/lib/rufus/scheduler.rb
CHANGED
@@ -1,634 +1,722 @@

-require 'set'
 require 'date' if RUBY_VERSION < '1.9.0'
-require 'time'
 require 'thread'

-require '
+require 'fugit'


-module Rufus
+module Rufus; end

+class Rufus::Scheduler

+  VERSION = '3.7.0'

+  EoTime = ::EtOrbi::EoTime

+  require 'rufus/scheduler/util'
+  require 'rufus/scheduler/jobs_core'
+  require 'rufus/scheduler/jobs_one_time'
+  require 'rufus/scheduler/jobs_repeat'
+  require 'rufus/scheduler/job_array'
+  require 'rufus/scheduler/locks'

+  #
+  # A common error class for rufus-scheduler
+  #
+  class Error < StandardError; end

+  #
+  # This error is thrown when the :timeout attribute triggers
+  #
+  class TimeoutError < Error; end

+  #
+  # For when the scheduler is not running
+  # (it got shut down or didn't start because of a lock)
+  #
+  class NotRunningError < Error; end

+  #MIN_WORK_THREADS = 3
+  MAX_WORK_THREADS = 28

-    attr_reader :thread
-    attr_reader :thread_key
-    attr_reader :mutexes
+  attr_accessor :frequency
+  attr_accessor :discard_past

+  attr_reader :started_at
+  attr_reader :paused_at
+  attr_reader :thread
+  attr_reader :thread_key
+  attr_reader :mutexes

+  #attr_accessor :min_work_threads
+  attr_accessor :max_work_threads

+  attr_accessor :stderr

+  attr_reader :work_queue

+  def initialize(opts={})

-      @paused = false
+    @opts = opts

+    @started_at = nil
+    @paused_at = nil

-      @mutexes = {}
+    @jobs = JobArray.new

+    @frequency = Rufus::Scheduler.parse(opts[:frequency] || 0.300)
+    @discard_past = opts.has_key?(:discard_past) ? opts[:discard_past] : true

-      @max_work_threads = opts[:max_work_threads] || MAX_WORK_THREADS
+    @mutexes = {}

+    @work_queue = Queue.new
+    @join_queue = Queue.new

+    #@min_work_threads =
+    #  opts[:min_work_threads] || opts[:min_worker_threads] ||
+    #  MIN_WORK_THREADS
+    @max_work_threads =
+      opts[:max_work_threads] || opts[:max_worker_threads] ||
+      MAX_WORK_THREADS

-        if lockfile = opts[:lockfile]
-          Rufus::Scheduler::FileLock.new(lockfile)
-        else
-          opts[:scheduler_lock] || Rufus::Scheduler::NullLock.new
-        end
+    @stderr = $stderr

+    @thread_key = "rufus_scheduler_#{self.object_id}"

+    @scheduler_lock =
+      if lockfile = opts[:lockfile]
+        Rufus::Scheduler::FileLock.new(lockfile)
+      else
+        opts[:scheduler_lock] || Rufus::Scheduler::NullLock.new
+      end

-    end
+    @trigger_lock = opts[:trigger_lock] || Rufus::Scheduler::NullLock.new

-    #
-    def self.singleton(opts={})
+    # If we can't grab the @scheduler_lock, don't run.
+    lock || return

+    start
+  end
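
The rewritten constructor above drives everything from its opts hash. A minimal usage sketch, limited to the option keys visible in this initializer (all values are illustrative):

  require 'rufus-scheduler'

  scheduler = Rufus::Scheduler.new(
    :frequency => 0.3,                     # main loop granularity, in seconds
    :max_work_threads => 28,               # cap on concurrent work threads
    :discard_past => true,                 # skip trigger times already in the past
    :lockfile => '.rufus-scheduler.lock')  # only the process holding the flock runs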

+  # Returns a singleton Rufus::Scheduler instance
+  #
+  def self.singleton(opts={})

-    #
-    # For now, let's assume the people pointing at rufus-scheduler/master
-    # on GitHub know what they do...
-    #
-    def self.start_new
+    @singleton ||= Rufus::Scheduler.new(opts)
+  end

+  # Alias for Rufus::Scheduler.singleton
+  #
+  def self.s(opts={}); singleton(opts); end

+  # Releasing the gem would probably require redirecting .start_new to
+  # .new and emit a simple deprecation message.
+  #
+  # For now, let's assume the people pointing at rufus-scheduler/master
+  # on GitHub know what they do...
+  #
+  def self.start_new

+    fail 'this is rufus-scheduler 3.x, use .new instead of .start_new'
+  end

-      # provokes https://github.com/jmettraux/rufus-scheduler/issue/98
-      @jobs.array.each { |j| j.unschedule }
+  def uptime

+    @started_at ? EoTime.now - @started_at : nil
+  end

-        join_all_work_threads
-      elsif opt == :kill
-        kill_all_work_threads
-      end
+  def around_trigger(job)

+    yield
+  end

+  def uptime_s

+    uptime ? self.class.to_duration(uptime) : ''
+  end

-    end
+  def join(time_limit=nil)

+    fail NotRunningError.new('cannot join scheduler that is not running') \
+      unless @thread
+    fail ThreadError.new('scheduler thread cannot join itself') \
+      if @thread == Thread.current

+    if time_limit
+      time_limit_join(time_limit)
+    else
+      no_time_limit_join
    end
+  end

+  def down?

-      ) unless @thread
+    ! @started_at
+  end

-    end
+  def up?

+    !! @started_at
+  end

-    end
+  def paused?

+    !! @paused_at
+  end

-    end
+  def pause

+    @paused_at = EoTime.now
+  end

-    end
+  def resume(opts={})

+    dp = opts[:discard_past]
+    jobs.each { |job| job.resume_discard_past = dp }

+    @paused_at = nil
+  end
|
-
|
180
|
+
#--
|
181
|
+
# scheduling methods
|
182
|
+
#++
|
175
183
|
|
176
|
-
|
177
|
-
end
|
184
|
+
def at(time, callable=nil, opts={}, &block)
|
178
185
|
|
179
|
-
|
180
|
-
|
181
|
-
#++
|
186
|
+
do_schedule(:once, time, callable, opts, opts[:job], block)
|
187
|
+
end
|
182
188
|
|
183
|
-
|
189
|
+
def schedule_at(time, callable=nil, opts={}, &block)
|
184
190
|
|
185
|
-
|
186
|
-
|
191
|
+
do_schedule(:once, time, callable, opts, true, block)
|
192
|
+
end
|
187
193
|
|
188
|
-
|
194
|
+
def in(duration, callable=nil, opts={}, &block)
|
189
195
|
|
190
|
-
|
191
|
-
|
196
|
+
do_schedule(:once, duration, callable, opts, opts[:job], block)
|
197
|
+
end
|
192
198
|
|
193
|
-
|
199
|
+
def schedule_in(duration, callable=nil, opts={}, &block)
|
194
200
|
|
195
|
-
|
196
|
-
|
201
|
+
do_schedule(:once, duration, callable, opts, true, block)
|
202
|
+
end
|
197
203
|
|
198
|
-
|
204
|
+
def every(duration, callable=nil, opts={}, &block)
|
199
205
|
|
200
|
-
|
201
|
-
|
206
|
+
do_schedule(:every, duration, callable, opts, opts[:job], block)
|
207
|
+
end
|
202
208
|
|
203
|
-
|
209
|
+
def schedule_every(duration, callable=nil, opts={}, &block)
|
204
210
|
|
205
|
-
|
206
|
-
|
211
|
+
do_schedule(:every, duration, callable, opts, true, block)
|
212
|
+
end
|
207
213
|
|
208
|
-
|
214
|
+
def interval(duration, callable=nil, opts={}, &block)
|
209
215
|
|
210
|
-
|
211
|
-
|
216
|
+
do_schedule(:interval, duration, callable, opts, opts[:job], block)
|
217
|
+
end
|
212
218
|
|
213
|
-
|
219
|
+
def schedule_interval(duration, callable=nil, opts={}, &block)
|
214
220
|
|
215
|
-
|
216
|
-
|
221
|
+
do_schedule(:interval, duration, callable, opts, true, block)
|
222
|
+
end
|
217
223
|
|
218
|
-
|
224
|
+
def cron(cronline, callable=nil, opts={}, &block)
|
219
225
|
|
220
|
-
|
221
|
-
|
226
|
+
do_schedule(:cron, cronline, callable, opts, opts[:job], block)
|
227
|
+
end
|
222
228
|
|
223
|
-
|
229
|
+
def schedule_cron(cronline, callable=nil, opts={}, &block)
|
224
230
|
|
225
|
-
|
226
|
-
|
231
|
+
do_schedule(:cron, cronline, callable, opts, true, block)
|
232
|
+
end
|
233
|
+
|
234
|
+
def schedule(arg, callable=nil, opts={}, &block)
|
227
235
|
|
228
|
-
|
236
|
+
callable, opts = nil, callable if callable.is_a?(Hash)
|
237
|
+
opts = opts.dup
|
229
238
|
|
230
|
-
|
239
|
+
opts[:_t] = Rufus::Scheduler.parse(arg, opts)
|
240
|
+
|
241
|
+
case opts[:_t]
|
242
|
+
when ::Fugit::Cron then schedule_cron(arg, callable, opts, &block)
|
243
|
+
when ::EtOrbi::EoTime, Time then schedule_at(arg, callable, opts, &block)
|
244
|
+
else schedule_in(arg, callable, opts, &block)
|
231
245
|
end
|
246
|
+
end
|
232
247
|
|
233
|
-
|
248
|
+
def repeat(arg, callable=nil, opts={}, &block)
|
234
249
|
|
235
|
-
|
236
|
-
|
250
|
+
callable, opts = nil, callable if callable.is_a?(Hash)
|
251
|
+
opts = opts.dup
|
237
252
|
|
238
|
-
|
253
|
+
opts[:_t] = Rufus::Scheduler.parse(arg, opts)
|
239
254
|
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
else schedule_in(arg, callable, opts, &block)
|
244
|
-
end
|
255
|
+
case opts[:_t]
|
256
|
+
when ::Fugit::Cron then schedule_cron(arg, callable, opts, &block)
|
257
|
+
else schedule_every(arg, callable, opts, &block)
|
245
258
|
end
|
259
|
+
end
|
246
260
|
|
247
|
-
|
261
|
+
def unschedule(job_or_job_id)
|
248
262
|
|
249
|
-
|
250
|
-
opts = opts.dup
|
263
|
+
job, job_id = fetch(job_or_job_id)
|
251
264
|
|
252
|
-
|
265
|
+
fail ArgumentError.new("no job found with id '#{job_id}'") unless job
|
253
266
|
|
254
|
-
|
255
|
-
|
256
|
-
|
257
|
-
|
258
|
-
|
267
|
+
job.unschedule if job
|
268
|
+
end
|
269
|
+
|
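
Usage sketch for the scheduling entry points above. in/at/every/interval/cron return a job id, their schedule_ counterparts return the job instance, and #schedule/#repeat dispatch on what Rufus::Scheduler.parse makes of their first argument:

  scheduler.in('10s') { puts 'ten seconds later' }
  scheduler.at('2030/12/12 10:00:00') { puts 'once, at that point in time' }
  scheduler.every('5m') { puts 'every five minutes' }
  scheduler.interval('5m') { puts 'five minutes after the previous run ended' }
  scheduler.cron('0 22 * * 1-5') { puts 'week nights at ten' }

  job = scheduler.schedule('*/15 * * * *') { puts 'quarter-hourly' }
  job.class                  # => Rufus::Scheduler::CronJob (a Fugit::Cron was parsed)
  scheduler.unschedule(job)  # accepts a job instance or a job id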
+  #--
+  # jobs methods
+  #++

+  # Returns all the scheduled jobs
+  # (even those right before re-schedule).
+  #
+  def jobs(opts={})

+    opts = { opts => true } if opts.is_a?(Symbol)

+    jobs = @jobs.to_a

+    if opts[:running]
+      jobs = jobs.select { |j| j.running? }
+    elsif ! opts[:all]
+      jobs = jobs.reject { |j| j.next_time.nil? || j.unscheduled_at }
    end

-    #++
+    tags = Array(opts[:tag] || opts[:tags]).collect(&:to_s)
+    jobs = jobs.reject { |j| tags.find { |t| ! j.tags.include?(t) } }

-    #
-    def jobs(opts={})
+    jobs
+  end

+  def at_jobs(opts={})

+    jobs(opts).select { |j| j.is_a?(Rufus::Scheduler::AtJob) }
+  end

-        jobs = jobs.select { |j| j.running? }
-      elsif ! opts[:all]
-        jobs = jobs.reject { |j| j.next_time.nil? || j.unscheduled_at }
-      end
+  def in_jobs(opts={})

+    jobs(opts).select { |j| j.is_a?(Rufus::Scheduler::InJob) }
+  end

-    end
+  def every_jobs(opts={})

+    jobs(opts).select { |j| j.is_a?(Rufus::Scheduler::EveryJob) }
+  end

-    end
+  def interval_jobs(opts={})

+    jobs(opts).select { |j| j.is_a?(Rufus::Scheduler::IntervalJob) }
+  end

-    end
+  def cron_jobs(opts={})

+    jobs(opts).select { |j| j.is_a?(Rufus::Scheduler::CronJob) }
+  end

-    end
+  def job(job_id)

+    @jobs[job_id]
+  end

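A sketch of the job introspection methods above (the 'backup' tag is illustrative):

  jid = scheduler.every('1h', :tag => 'backup') { puts 'backing up' }

  scheduler.jobs                    # all scheduled jobs
  scheduler.jobs(:tag => 'backup')  # only the jobs carrying that tag
  scheduler.jobs(:running)          # jobs currently triggering
  scheduler.cron_jobs               # per-class accessors: at/in/every/interval/cron_jobs
  scheduler.job(jid)                # look a single job up by its id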
+  # Returns true if the scheduler has acquired the [exclusive] lock and
+  # thus may run.
+  #
+  # Most of the time, a scheduler is run alone and this method should
+  # return true. It is useful in cases where among a group of applications
+  # only one of them should run the scheduler. For schedulers that should
+  # not run, the method should return false.
+  #
+  # Out of the box, rufus-scheduler proposes the
+  # :lockfile => 'path/to/lock/file' scheduler start option. It makes
+  # it easy for schedulers on the same machine to determine which should
+  # run (the first to write the lockfile and lock it). It uses "man 2 flock"
+  # so it probably won't work reliably on distributed file systems.
+  #
+  # If one needs to use a special/different locking mechanism, the scheduler
+  # accepts :scheduler_lock => lock_object. lock_object only needs to respond
+  # to #lock
+  # and #unlock, and both of these methods should be idempotent.
+  #
+  # Look at rufus/scheduler/locks.rb for an example.
+  #
+  def lock
+
+    @scheduler_lock.lock
+  end

+  # Sister method to #lock, is called when the scheduler shuts down.
+  #
+  def unlock

+    @trigger_lock.unlock
+    @scheduler_lock.unlock
+  end

+  # Callback called when a job is triggered. If the lock cannot be acquired,
+  # the job won't run (though it'll still be scheduled to run again if
+  # necessary).
+  #
+  def confirm_lock

+    @trigger_lock.lock
+  end

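As documented above, :scheduler_lock only expects an object answering #lock and #unlock. A minimal sketch (class name and hostname are made up for illustration); when #lock returns false the constructor returns without starting:

  require 'socket'

  class OnlyOnHostLock
    # both methods are meant to be idempotent
    def lock; Socket.gethostname == 'scheduler-1'; end
    def unlock; true; end
  end

  scheduler = Rufus::Scheduler.new(:scheduler_lock => OnlyOnHostLock.new)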
-    # not run, the method should return false.
-    #
-    # Out of the box, rufus-scheduler proposes the
-    # :lockfile => 'path/to/lock/file' scheduler start option. It makes
-    # it easy for schedulers on the same machine to determine which should
-    # run (the first to write the lockfile and lock it). It uses "man 2 flock"
-    # so it probably won't work reliably on distributed file systems.
-    #
-    # If one needs to use a special/different locking mechanism, the scheduler
-    # accepts :scheduler_lock => lock_object. lock_object only needs to respond
-    # to #lock
-    # and #unlock, and both of these methods should be idempotent.
-    #
-    # Look at rufus/scheduler/locks.rb for an example.
-    #
-    def lock
-
-      @scheduler_lock.lock
-    end
+  # Returns true if this job is currently scheduled.
+  #
+  # Takes extra care to answer true if the job is a repeat job
+  # currently firing.
+  #
+  def scheduled?(job_or_job_id)

-    #
-    def unlock
+    job, _ = fetch(job_or_job_id)

+    !! (job && job.unscheduled_at.nil? && job.next_time != nil)
+  end
+
+  # Lists all the threads associated with this scheduler.
+  #
+  def threads

-    # necessary).
-    #
-    def confirm_lock
+    Thread.list.select { |t| t[thread_key] }
+  end

+  # Lists all the work threads (the ones actually running the scheduled
+  # block code)
+  #
+  # Accepts a query option, which can be set to:
+  # * :all (default), returns all the threads that are work threads
+  #   or are currently running a job
+  # * :active, returns all threads that are currently running a job
+  # * :vacant, returns the threads that are not running a job
+  #
+  # If, thanks to :blocking => true, a job is scheduled to monopolize the
+  # main scheduler thread, that thread will get returned when :active or
+  # :all.
+  #
+  def work_threads(query=:all)
+
+    ts = threads.select { |t| t[:rufus_scheduler_work_thread] }
+
+    case query
+    when :active then ts.select { |t| t[:rufus_scheduler_job] }
+    when :vacant then ts.reject { |t| t[:rufus_scheduler_job] }
+    else ts
    end
+  end

-    #
-    # Takes extra care to answer true if the job is a repeat job
-    # currently firing.
-    #
-    def scheduled?(job_or_job_id)
+  def running_jobs(opts={})

+    jobs(opts.merge(:running => true))
+  end

-    end
+  def occurrences(time0, time1, format=:per_job)

-    #
-    def threads
+    h = {}

+    jobs.each do |j|
+      os = j.occurrences(time0, time1)
+      h[j] = os if os.any?
    end

-    # * :active, returns all threads that are currently running a job
-    # * :vacant, returns the threads that are not running a job
-    #
-    # If, thanks to :blocking => true, a job is scheduled to monopolize the
-    # main scheduler thread, that thread will get returned when :active or
-    # :all.
-    #
-    def work_threads(query=:all)
-
-      ts =
-        threads.select { |t|
-          t[:rufus_scheduler_job] || t[:rufus_scheduler_work_thread]
-        }
-
-      case query
-      when :active then ts.select { |t| t[:rufus_scheduler_job] }
-      when :vacant then ts.reject { |t| t[:rufus_scheduler_job] }
-      else ts
-      end
+    if format == :timeline
+      a = []
+      h.each { |j, ts| ts.each { |t| a << [ t, j ] } }
+      a.sort_by { |(t, _)| t }
+    else
+      h
    end
+  end

+  def timeline(time0, time1)

+    occurrences(time0, time1, :timeline)
+  end

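Sketch of the projection helpers above: occurrences groups upcoming trigger times per job, timeline flattens them into a time-sorted list:

  from = Rufus::Scheduler::EoTime.now
  to = from + 24 * 3600

  scheduler.occurrences(from, to)  # => { job => [ EoTime, EoTime, ... ], ... }
  scheduler.timeline(from, to)     # => [ [ EoTime, job ], ... ], sorted by time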
+  def on_error(job, err)
+
+    pre = err.object_id.to_s
+
+    ms = {}; mutexes.each { |k, v| ms[k] = v.locked? }
+
+    stderr.puts("{ #{pre} rufus-scheduler intercepted an error:")
+    stderr.puts(" #{pre} job:")
+    stderr.puts(" #{pre} #{job.class} #{job.original.inspect} #{job.opts.inspect}")
+    # TODO: eventually use a Job#detail or something like that
+    stderr.puts(" #{pre} error:")
+    stderr.puts(" #{pre} #{err.object_id}")
+    stderr.puts(" #{pre} #{err.class}")
+    stderr.puts(" #{pre} #{err}")
+    err.backtrace.each do |l|
+      stderr.puts(" #{pre} #{l}")
+    end
+    stderr.puts(" #{pre} tz:")
+    stderr.puts(" #{pre} ENV['TZ']: #{ENV['TZ']}")
+    stderr.puts(" #{pre} Time.now: #{Time.now}")
+    stderr.puts(" #{pre} local_tzone: #{EoTime.local_tzone.inspect}")
+    stderr.puts(" #{pre} et-orbi:")
+    stderr.puts(" #{pre} #{EoTime.platform_info}")
+    stderr.puts(" #{pre} scheduler:")
+    stderr.puts(" #{pre} object_id: #{object_id}")
+    stderr.puts(" #{pre} opts:")
+    stderr.puts(" #{pre} #{@opts.inspect}")
+    stderr.puts(" #{pre} frequency: #{self.frequency}")
+    stderr.puts(" #{pre} scheduler_lock: #{@scheduler_lock.inspect}")
+    stderr.puts(" #{pre} trigger_lock: #{@trigger_lock.inspect}")
+    stderr.puts(" #{pre} uptime: #{uptime} (#{uptime_s})")
+    stderr.puts(" #{pre} down?: #{down?}")
+    stderr.puts(" #{pre} frequency: #{frequency.inspect}")
+    stderr.puts(" #{pre} discard_past: #{discard_past.inspect}")
+    stderr.puts(" #{pre} started_at: #{started_at.inspect}")
+    stderr.puts(" #{pre} paused_at: #{paused_at.inspect}")
+    stderr.puts(" #{pre} threads: #{self.threads.size}")
+    stderr.puts(" #{pre} thread: #{self.thread}")
+    stderr.puts(" #{pre} thread_key: #{self.thread_key}")
+    stderr.puts(" #{pre} work_threads: #{work_threads.size}")
+    stderr.puts(" #{pre} active: #{work_threads(:active).size}")
+    stderr.puts(" #{pre} vacant: #{work_threads(:vacant).size}")
+    stderr.puts(" #{pre} max_work_threads: #{max_work_threads}")
+    stderr.puts(" #{pre} mutexes: #{ms.inspect}")
+    stderr.puts(" #{pre} jobs: #{jobs.size}")
+    stderr.puts(" #{pre} at_jobs: #{at_jobs.size}")
+    stderr.puts(" #{pre} in_jobs: #{in_jobs.size}")
+    stderr.puts(" #{pre} every_jobs: #{every_jobs.size}")
+    stderr.puts(" #{pre} interval_jobs: #{interval_jobs.size}")
+    stderr.puts(" #{pre} cron_jobs: #{cron_jobs.size}")
+    stderr.puts(" #{pre} running_jobs: #{running_jobs.size}")
+    stderr.puts(" #{pre} work_queue:")
+    stderr.puts(" #{pre} size: #{@work_queue.size}")
+    stderr.puts(" #{pre} num_waiting: #{@work_queue.num_waiting}")
+    stderr.puts(" #{pre} join_queue:")
+    stderr.puts(" #{pre} size: #{@join_queue.size}")
+    stderr.puts(" #{pre} num_waiting: #{@join_queue.num_waiting}")
+    stderr.puts("} #{pre} .")
+
+  rescue => e
+
+    stderr.puts("failure in #on_error itself:")
+    stderr.puts(e.inspect)
+    stderr.puts(e.backtrace)
+
+  ensure
+
+    stderr.flush
+  end

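The on_error above is the default handler: it dumps a diagnostic block to stderr and never lets the failure escape. One common way to customize it is to redefine it on the scheduler instance; the LOGGER constant here is an assumption, standing in for whatever the application uses:

  def scheduler.on_error(job, error)

    LOGGER.error("rufus job #{job.job_id} failed: #{error.inspect}")
  rescue
    # never let the error handler itself take the scheduler down
  end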
+  def shutdown(opt=nil)

+    opts =
+      case opt
+      when Symbol then { opt => true }
+      when Hash then opt
+      else {}
      end

+    @jobs.unschedule_all
+
+    if opts[:wait] || opts[:join]
+      join_shutdown(opts)
+    elsif opts[:kill]
+      kill_shutdown(opts)
+    else
+      regular_shutdown(opts)
    end

+    @work_queue.clear

-    end
+    unlock

+    @thread.join
+  end
+  alias stop shutdown

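Shutdown sketch for the variants handled above; the calls are alternatives, not a sequence:

  scheduler.shutdown               # unschedule everything, don't wait for running jobs
  scheduler.shutdown(:wait)        # wait for each running work thread to finish
  scheduler.shutdown(:wait => 10)  # give each work thread up to 10 seconds, then kill it
  scheduler.shutdown(:kill)        # kill the work threads right away
  scheduler.stop                   # alias for #shutdown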
+  protected

+  def join_shutdown(opts)

-      stderr.puts(" #{pre} #{job.class} #{job.original.inspect} #{job.opts.inspect}")
-      # TODO: eventually use a Job#detail or something like that
-      stderr.puts(" #{pre} error:")
-      stderr.puts(" #{pre} #{err.object_id}")
-      stderr.puts(" #{pre} #{err.class}")
-      stderr.puts(" #{pre} #{err}")
-      err.backtrace.each do |l|
-        stderr.puts(" #{pre} #{l}")
-      end
-      stderr.puts(" #{pre} tz:")
-      stderr.puts(" #{pre} ENV['TZ']: #{ENV['TZ']}")
-      stderr.puts(" #{pre} Time.now: #{Time.now}")
-      stderr.puts(" #{pre} local_tzone: #{EoTime.local_tzone.inspect}")
-      stderr.puts(" #{pre} et-orbi:")
-      stderr.puts(" #{pre} #{EoTime.platform_info}")
-      stderr.puts(" #{pre} scheduler:")
-      stderr.puts(" #{pre} object_id: #{object_id}")
-      stderr.puts(" #{pre} opts:")
-      stderr.puts(" #{pre} #{@opts.inspect}")
-      stderr.puts(" #{pre} frequency: #{self.frequency}")
-      stderr.puts(" #{pre} scheduler_lock: #{@scheduler_lock.inspect}")
-      stderr.puts(" #{pre} trigger_lock: #{@trigger_lock.inspect}")
-      stderr.puts(" #{pre} uptime: #{uptime} (#{uptime_s})")
-      stderr.puts(" #{pre} down?: #{down?}")
-      stderr.puts(" #{pre} threads: #{self.threads.size}")
-      stderr.puts(" #{pre} thread: #{self.thread}")
-      stderr.puts(" #{pre} thread_key: #{self.thread_key}")
-      stderr.puts(" #{pre} work_threads: #{work_threads.size}")
-      stderr.puts(" #{pre} active: #{work_threads(:active).size}")
-      stderr.puts(" #{pre} vacant: #{work_threads(:vacant).size}")
-      stderr.puts(" #{pre} max_work_threads: #{max_work_threads}")
-      stderr.puts(" #{pre} mutexes: #{ms.inspect}")
-      stderr.puts(" #{pre} jobs: #{jobs.size}")
-      stderr.puts(" #{pre} at_jobs: #{at_jobs.size}")
-      stderr.puts(" #{pre} in_jobs: #{in_jobs.size}")
-      stderr.puts(" #{pre} every_jobs: #{every_jobs.size}")
-      stderr.puts(" #{pre} interval_jobs: #{interval_jobs.size}")
-      stderr.puts(" #{pre} cron_jobs: #{cron_jobs.size}")
-      stderr.puts(" #{pre} running_jobs: #{running_jobs.size}")
-      stderr.puts(" #{pre} work_queue: #{work_queue.size}")
-      stderr.puts("} #{pre} .")
-
-    rescue => e
-
-      stderr.puts("failure in #on_error itself:")
-      stderr.puts(e.inspect)
-      stderr.puts(e.backtrace)
-
-    ensure
-
-      stderr.flush
-    end
+    limit = opts[:wait] || opts[:join]
+    limit = limit.is_a?(Numeric) ? limit : nil

+    #@started_at = nil
+    #
+    # when @started_at is nil, the scheduler thread exits, here
+    # we want it to exit when all the work threads have been joined
+    # hence it's set to nil later on
+    #
+    @paused_at = EoTime.now

-    #
-    def fetch(job_or_job_id)
+    (work_threads.size * 2 + 1).times { @work_queue << :shutdown }

+    work_threads
+      .collect { |wt|
+        wt == Thread.current ? nil : Thread.new { wt.join(limit); wt.kill } }
+      .each { |st|
+        st.join if st }
+
+    @started_at = nil
+  end
+
+  def kill_shutdown(opts)

+    @started_at = nil
+    work_threads.each(&:kill)
+  end

+  def regular_shutdown(opts)

+    @started_at = nil
+  end

+  def time_limit_join(limit)

+    fail ArgumentError.new("limit #{limit.inspect} should be > 0") \
+      unless limit.is_a?(Numeric) && limit > 0

+    t0 = monow
+    f = [ limit.to_f / 20, 0.100 ].min

+    while monow - t0 < limit
+      r =
+        begin
+          @join_queue.pop(true)
+        rescue ThreadError => e
+          # #<ThreadError: queue empty>
+          false
+        end
+      return r if r
+      sleep(f)
    end

+    nil
+  end
+
+  def no_time_limit_join

+    @join_queue.pop
+  end
+
+  # Returns [ job, job_id ]
+  #
+  def fetch(job_or_job_id)
+
+    if job_or_job_id.respond_to?(:job_id)
+      [ job_or_job_id, job_or_job_id.job_id ]
+    else
+      [ job(job_or_job_id), job_or_job_id ]
    end
+  end
+
+  def terminate_all_jobs
+
+    jobs.each { |j| j.unschedule }
+
+    sleep 0.01 while running_jobs.size > 0
+  end

+  #def free_all_work_threads
+  #
+  #  work_threads.each { |t| t.raise(KillSignal) }
+  #end

+  def start

+    @started_at = EoTime.now

+    @thread =
+      Thread.new do

+        while @started_at do

+          unschedule_jobs
+          trigger_jobs unless @paused_at
+          timeout_jobs

-        end
+          sleep(@frequency)
        end

-      @thread[:name] = @opts[:thread_name] || "#{@thread_key}_scheduler"
-    end
+        rejoin
+      end

+    @thread[@thread_key] = true
+    @thread[:rufus_scheduler] = self
+    @thread[:name] = @opts[:thread_name] || "#{@thread_key}_scheduler"
+  end

-    end
+  def unschedule_jobs

+    @jobs.delete_unscheduled
+  end

+  def trigger_jobs

+    now = EoTime.now

+    @jobs.each(now) do |job|
+
+      job.trigger(now)
    end
+  end

+  def timeout_jobs

+    work_threads(:active).each do |t|

+      job = t[:rufus_scheduler_job]
+      to = t[:rufus_scheduler_timeout]
+      ts = t[:rufus_scheduler_time]

+      next unless job && to && ts
+        # thread might just have become inactive (job -> nil)

+      to = ts + to unless to.is_a?(EoTime)

+      next if to > EoTime.now

-      end
+      t.raise(Rufus::Scheduler::TimeoutError)
    end
+  end

+  def rejoin

-      ) if @started_at.nil?
+    (@join_queue.num_waiting * 2 + 1).times { @join_queue << @thread }
+  end

-      opts = opts.dup unless opts.has_key?(:_t)
+  def do_schedule(job_type, t, callable, opts, return_job_instance, block)

+    fail NotRunningError.new(
+      'cannot schedule, scheduler is down or shutting down'
+    ) if @started_at.nil?

-      when :once
-        opts[:_t] ||= Rufus::Scheduler.parse(t, opts)
-        opts[:_t].is_a?(Numeric) ? InJob : AtJob
-      when :every
-        EveryJob
-      when :interval
-        IntervalJob
-      when :cron
-        CronJob
-      end
+    callable, opts = nil, callable if callable.is_a?(Hash)
+    opts = opts.dup unless opts.has_key?(:_t)

+    return_job_instance ||= opts[:job]

+    job_class =
+      case job_type
+      when :once
+        opts[:_t] ||= Rufus::Scheduler.parse(t, opts)
+        opts[:_t].is_a?(Numeric) ? InJob : AtJob
+      when :every
+        EveryJob
+      when :interval
+        IntervalJob
+      when :cron
+        CronJob
+      end

+    job = job_class.new(self, t, opts, block || callable)
+    job.check_frequency

+    @jobs.push(job)
+
+    return_job_instance ? job : job.job_id
  end
+
+  def monow; self.class.monow; end
+  def ltstamp; self.class.ltstamp; end
end
