canvas-jobs 0.10.5 → 0.10.6

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 1a1dcbb68bb9d5945b79b3806d27b6a7a3c05688
- data.tar.gz: fc3aaa3d27c040360abbcf76f4a24db2390384ec
+ metadata.gz: c17b53cc20eeba1d6def3d6defed59ec2f4e6786
+ data.tar.gz: 462039f0631c4c8446e485ff09543c2430958fd9
  SHA512:
- metadata.gz: cdef99a4e43b2cde8ff73fdbb9b7e7f3148acac67f7016ae3fa6895dc9337ecb706afa0ace9fe92538465c3340a35ad2ed1be956c3b8e6d3fe7d69e11b4832f0
- data.tar.gz: 8c88f6fd42e35a134c1511e2137037766642a83fd2b700ac6253344e33520801654b42306af7e8d4c7982e34fc05fd727935f7cca89f0514cfa5c69a15d2159a
+ metadata.gz: 479c5177f7f37fa4c2f84caf850f214a316def2cc182f815bb59ef63f905490912e6b87396f57df6e87eee6af77b45047f4a2051ecf1076d467ebf963a0a01a7
+ data.tar.gz: 779faa887354c56f5dcba06e16e1bc52f20d0084c0b432d59c282afc0908a74d32eb51ca83e1067cdf833daa1d5461e44c1e78607bf0d9443aa16e02f5e27ccf
@@ -1,4 +1,4 @@
  #!/usr/bin/env ruby

  require File.expand_path('config/environment')
- Delayed::Pool.new.run()
+ Delayed::CLI.new.run()
lib/delayed/cli.rb ADDED
@@ -0,0 +1,104 @@
+ require 'optparse'
+
+ module Delayed
+ class CLI
+ def initialize(args = ARGV)
+ @args = args
+ # config that will be applied on Settings
+ @config = {}
+ # worker configs that will be passed to the created Pool
+ @worker_configs = []
+ # CLI options that will be kept to this class
+ @options = {
+ :config_file => Settings.default_worker_config_name,
+ :pid_folder => Settings.expand_rails_path("tmp/pids"),
+ :tail_logs => true, # only in FG mode
+ }
+ end
+
+ def run
+ parse_cli_options!
+ load_and_apply_config!
+
+ command = @args.shift
+ case command
+ when 'start'
+ exit 1 if daemon.status(print: :alive) == :running
+ daemon.daemonize!
+ start
+ when 'stop'
+ daemon.stop(kill: @options[:kill])
+ when 'run'
+ start
+ when 'status'
+ if daemon.status
+ exit 0
+ else
+ exit 1
+ end
+ when 'restart'
+ daemon.stop(kill: @options[:kill])
+ daemon.daemonize!
+ start
+ when nil
+ puts option_parser.to_s
+ else
+ raise("Unknown command: #{command.inspect}")
+ end
+ end
+
+ def parse_cli_options!
+ option_parser.parse!(@args)
+ @options
+ end
+
+ protected
+
+ def load_and_apply_config!
+ @config = Settings.worker_config(@options[:config_file])
+ @worker_configs = @config.delete(:workers)
+ Settings.apply_worker_config!(@config)
+ end
+
+ def option_parser
+ @option_parser ||= OptionParser.new do |opts|
+ opts.banner = "Usage #{$0} <command> <options>"
+ opts.separator %{\nWhere <command> is one of:
+ start start the jobs daemon
+ stop stop the jobs daemon
+ run start and run in the foreground
+ restart stop and then start the jobs daemon
+ status show daemon status
+ }
+
+ opts.separator "\n<options>"
+ opts.on("-c", "--config [CONFIG_PATH]", "Use alternate config file (default #{@options[:config_file]})") { |c| @options[:config_file] = c }
+ opts.on("-p", "--pid", "Use alternate folder for PID files (default #{@options[:pid_folder]})") { |p| @options[:pid_folder] = p }
+ opts.on("--no-tail", "Don't tail the logs (only affects non-daemon mode)") { @options[:tail_logs] = false }
+ opts.on("--with-prejudice", "When stopping, interrupt jobs in progress, instead of letting them drain") { @options[:kill] ||= true }
+ opts.on("--with-extreme-prejudice", "When stopping, immediately kill jobs in progress, instead of letting them drain") { @options[:kill] = 9 }
+ opts.on_tail("-h", "--help", "Show this message") { puts opts; exit }
+ end
+ end
+
+ def daemon
+ @daemon ||= Delayed::Daemon.new(@options[:pid_folder])
+ end
+
+ def start
+ load_rails
+ tail_rails_log unless daemon.daemonized?
+ Delayed::Pool.new(@worker_configs).start
+ end
+
+ def load_rails
+ require(Settings.expand_rails_path("config/environment.rb"))
+ Dir.chdir(Rails.root)
+ end
+
+ def tail_rails_log
+ return if !@options[:tail_logs]
+ Delayed::LogTailer.new.run
+ end
+ end
+ end
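The new Delayed::CLI class is now the single entry point for the jobs pool. As a quick orientation, a minimal sketch of driving it the same way the updated executable above does; everything here appears elsewhere in this diff, only the comments are added:

    # Load the Rails environment, then hand ARGV (e.g. `start`, `stop`,
    # `run --config path/to/file.yml`) to the CLI. A custom argument array
    # can also be passed, as the new cli_spec further down does.
    require File.expand_path('config/environment')
    Delayed::CLI.new.run()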
lib/delayed/daemon.rb ADDED
@@ -0,0 +1,103 @@
+ require 'fileutils'
+
+ module Delayed
+ # Daemon controls the parent process that runs the Pool and monitors the Worker processes.
+ class Daemon
+ attr_reader :pid_folder
+
+ def initialize(pid_folder)
+ @pid_folder = pid_folder
+ end
+
+ def status(print: true, pid: self.pid)
+ alive = pid && (Process.kill(0, pid) rescue false) && :running
+ alive ||= :draining if pid && Process.kill(0, -pid) rescue false
+ if alive
+ puts "Delayed jobs #{alive}, pool PID: #{pid}" if print
+ else
+ puts "No delayed jobs pool running" if print && print != :alive
+ end
+ alive
+ end
+
+ def daemonize!
+ FileUtils.mkdir_p(pid_folder)
+ puts "Daemonizing..."
+
+ exit if fork
+ Process.setsid
+ exit if fork
+ Process.setpgrp
+
+ @daemon = true
+ lock_file = File.open(pid_file, 'wb')
+ # someone else is already running; just exit
+ unless lock_file.flock(File::LOCK_EX | File::LOCK_NB)
+ exit
+ end
+ at_exit { lock_file.flock(File::LOCK_UN) }
+ lock_file.puts(Process.pid.to_s)
+ lock_file.flush
+
+ # if we blow up so badly that we can't syslog the error, try to send
+ # it somewhere useful
+ last_ditch_logfile = Settings.last_ditch_logfile || "log/delayed_job.log"
+ if last_ditch_logfile[0] != '|'
+ last_ditch_logfile = Settings.expand_rails_path(last_ditch_logfile)
+ end
+ STDIN.reopen("/dev/null")
+ STDOUT.reopen(open(last_ditch_logfile, 'a'))
+ STDERR.reopen(STDOUT)
+ STDOUT.sync = STDERR.sync = true
+ end
+
+ # stop the currently running daemon (not this current process, the one in the pid_file)
+ def stop(kill: false, pid: self.pid)
+ alive = status(pid: pid, print: false)
+ if alive == :running || (kill && alive == :draining)
+ puts "Stopping pool #{pid}..."
+ signal = 'INT'
+ if kill
+ pid = -pid # send to the whole group
+ if kill == 9
+ signal = 'KILL'
+ else
+ signal = 'TERM'
+ end
+ end
+ begin
+ Process.kill(signal, pid)
+ rescue Errno::ESRCH
+ # ignore if the pid no longer exists
+ end
+ wait(kill)
+ else
+ status
+ end
+ end
+
+ def wait(kill)
+ if kill
+ sleep(0.5) while status(pid: pid, print: false)
+ else
+ sleep(0.5) while status(pid: pid, print: false) == :running
+ end
+ end
+
+ def pid_file
+ File.join(pid_folder, 'delayed_jobs_pool.pid')
+ end
+
+ def pid
+ if File.file?(pid_file)
+ pid = File.read(pid_file).to_i
+ pid = nil unless pid > 0
+ end
+ pid
+ end
+
+ def daemonized?
+ !!@daemon
+ end
+ end
+ end
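The daemon can also be exercised directly from a console; a rough sketch based only on the methods above (the PID folder shown matches the CLI default, the rest is illustrative):

    daemon = Delayed::Daemon.new(Delayed::Settings.expand_rails_path("tmp/pids"))
    daemon.status        # prints and returns :running, :draining, or a falsy value
    daemon.stop          # sends INT and waits for the pool to finish draining
    daemon.stop(kill: 9) # sends KILL to the whole process group, as --with-extreme-prejudice does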
lib/delayed/lifecycle.rb CHANGED
@@ -3,11 +3,13 @@ module Delayed

  class Lifecycle
  EVENTS = {
- :perform => [:worker, :job],
- :pop => [:worker],
+ :error => [:worker, :job, :exception],
  :exceptional_exit => [:worker, :exception],
+ :execute => [:worker],
  :invoke_job => [:job],
- :error => [:worker, :job, :exception],
+ :loop => [:worker],
+ :perform => [:worker, :job],
+ :pop => [:worker],
  }

  def initialize
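The two new events can be hooked like the existing ones; a minimal sketch (the log messages are illustrative), matching how the worker specs at the end of this diff subscribe:

    # :execute fires once per worker process, wrapping the whole job loop;
    # :loop fires before each pop/perform iteration.
    Delayed::Worker.lifecycle.before(:execute) do |worker|
      Rails.logger.info("worker #{worker.name} entering its job loop")
    end
    Delayed::Worker.lifecycle.before(:loop) { |worker| Rails.logger.debug("polling for a job") }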
lib/delayed/log_tailer.rb ADDED
@@ -0,0 +1,22 @@
+ module Delayed
+ class LogTailer
+ def run
+ if Rails.logger.respond_to?(:log_path)
+ log_path = Rails.logger.log_path
+ elsif Rails.logger.instance_variable_get('@logdev').try(:instance_variable_get, '@dev').try(:path)
+ log_path = Rails.logger.instance_variable_get('@logdev').instance_variable_get('@dev').path
+ else
+ return
+ end
+ Rails.logger.auto_flushing = true if Rails.logger.respond_to?(:auto_flushing=)
+ Thread.new do
+ f = File.open(log_path, 'r')
+ f.seek(0, IO::SEEK_END)
+ loop do
+ content = f.read
+ content.present? ? STDOUT.print(content) : sleep(0.5)
+ end
+ end
+ end
+ end
+ end
lib/delayed/pool.rb CHANGED
@@ -1,103 +1,28 @@
- require 'erb'
- require 'optparse'
- require 'yaml'
- require 'fileutils'
-
  module Delayed
  class Pool
  mattr_accessor :on_fork
  self.on_fork = ->{ }

- attr_reader :options, :workers
+ attr_reader :workers

- def initialize(args = ARGV)
- @args = args
- @workers = {}
- @config = { :workers => [] }
- @options = {
- :config_file => Settings.default_worker_config_name,
- :pid_folder => Settings.expand_rails_path("tmp/pids"),
- :tail_logs => true, # only in FG mode
- }
- end
-
- def run
- parse_cli_options!
-
- read_config(options[:config_file])
-
- command = @args.shift
- case command
- when 'start'
- exit 1 if status(print: :alive) == :running
- daemonize
- start
- when 'stop'
- stop(kill: options[:kill])
- when 'run'
- start
- when 'status'
- if status
- exit 0
- else
- exit 1
- end
- when 'restart'
- pid = self.pid
- alive = status(pid: pid, print: false)
- if alive == :running || (options[:kill] && alive == :draining)
- stop(pid: pid, kill: options[:kill])
- if options[:kill]
- sleep(0.5) while status(pid: pid, print: false)
- else
- sleep(0.5) while status(pid: pid, print: false) == :running
- end
- end
- daemonize
- start
- when nil
- puts op
+ def initialize(*args)
+ if args.size == 1 && args.first.is_a?(Array)
+ worker_configs = args.first
  else
- raise("Unknown command: #{command.inspect}")
+ warn "Calling Delayed::Pool.new directly is deprecated. Use `Delayed::CLI.new.run()` instead."
  end
+ @workers = {}
+ @config = { workers: worker_configs }
  end

- def parse_cli_options!
- op = OptionParser.new do |opts|
- opts.banner = "Usage #{$0} <command> <options>"
- opts.separator %{\nWhere <command> is one of:
- start start the jobs daemon
- stop stop the jobs daemon
- run start and run in the foreground
- restart stop and then start the jobs daemon
- status show daemon status
- }
-
- opts.separator "\n<options>"
- opts.on("-c", "--config [CONFIG_PATH]", "Use alternate config file (default #{options[:config_file]})") { |c| options[:config_file] = c }
- opts.on("-p", "--pid", "Use alternate folder for PID files (default #{options[:pid_folder]})") { |p| options[:pid_folder] = p }
- opts.on("--no-tail", "Don't tail the logs (only affects non-daemon mode)") { options[:tail_logs] = false }
- opts.on("--with-prejudice", "When stopping, interrupt jobs in progress, instead of letting them drain") { options[:kill] ||= true }
- opts.on("--with-extreme-prejudice", "When stopping, immediately kill jobs in progress, instead of letting them drain") { options[:kill] = 9 }
- opts.on_tail("-h", "--help", "Show this message") { puts opts; exit }
- end
- op.parse!(@args)
- end
-
- protected
-
- def procname
- "delayed_jobs_pool#{Settings.pool_procname_suffix}"
+ def run
+ warn "Delayed::Pool#run is deprecated and will be removed. Use `Delayed::CLI.new.run()` instead."
+ Delayed::CLI.new.run()
  end

  def start
- load_rails
- tail_rails_log unless @daemon
-
  say "Started job master", :info
  $0 = procname
- apply_config
-
  # fork to handle unlocking (to prevent polluting the parent with worker objects)
  unlock_pid = fork_with_reconnects do
  unlock_orphaned_jobs
@@ -116,6 +41,12 @@ class Pool
  raise
  end

+ protected
+
+ def procname
+ "delayed_jobs_pool#{Settings.pool_procname_suffix}"
+ end
+
  def say(msg, level = :debug)
  if defined?(Rails.logger) && Rails.logger
  Rails.logger.send(level, "[#{Process.pid}]P #{msg}")
@@ -124,16 +55,8 @@ class Pool
  end
  end

- def load_rails
- require(Settings.expand_rails_path("config/environment.rb"))
- Dir.chdir(Rails.root)
- end
-
  def unlock_orphaned_jobs(worker = nil, pid = nil)
- # don't bother trying to unlock jobs by process name if the name is overridden
- return if @config.key?(:name)
- return if @config[:disable_automatic_orphan_unlocking]
- return if @config[:workers].any? { |worker_config| worker_config.key?(:name) || worker_config.key?('name') }
+ return if Settings.disable_automatic_orphan_unlocking

  unlocked_jobs = Delayed::Job.unlock_orphaned_jobs(pid)
  say "Unlocked #{unlocked_jobs} orphaned jobs" if unlocked_jobs > 0
@@ -144,8 +67,7 @@ class Pool
  ActiveRecord::Base.connection_handler.clear_all_connections!

  @config[:workers].each do |worker_config|
- worker_config = worker_config.with_indifferent_access
- (worker_config[:workers] || 1).times { spawn_worker(@config.merge(worker_config)) }
+ (worker_config[:workers] || 1).times { spawn_worker(worker_config) }
  end
  end

@@ -174,7 +96,7 @@ class Pool
  end

  def spawn_periodic_auditor
- return if @config[:disable_periodic_jobs]
+ return if Settings.disable_periodic_jobs

  @periodic_thread = Thread.new do
  # schedule the initial audit immediately on startup
@@ -217,122 +139,5 @@ class Pool
  end
  end
  end
-
- def tail_rails_log
- return if !@options[:tail_logs]
- if Rails.logger.respond_to?(:log_path)
- log_path = Rails.logger.log_path
- elsif Rails.logger.instance_variable_get('@logdev').try(:instance_variable_get, '@dev').try(:path)
- log_path = Rails.logger.instance_variable_get('@logdev').instance_variable_get('@dev').path
- else
- return
- end
- Rails.logger.auto_flushing = true if Rails.logger.respond_to?(:auto_flushing=)
- Thread.new do
- f = File.open(log_path, 'r')
- f.seek(0, IO::SEEK_END)
- loop do
- content = f.read
- content.present? ? STDOUT.print(content) : sleep(0.5)
- end
- end
- end
-
- def daemonize
- FileUtils.mkdir_p(pid_folder)
- puts "Daemonizing..."
-
- exit if fork
- Process.setsid
- exit if fork
- Process.setpgrp
-
- @daemon = true
- lock_file = File.open(pid_file, 'wb')
- # someone else is already running; just exit
- unless lock_file.flock(File::LOCK_EX | File::LOCK_NB)
- exit
- end
- at_exit { lock_file.flock(File::LOCK_UN) }
- lock_file.puts(Process.pid.to_s)
- lock_file.flush
-
- # if we blow up so badly that we can't syslog the error, try to send
- # it somewhere useful
- last_ditch_logfile = self.last_ditch_logfile || "log/delayed_job.log"
- if last_ditch_logfile[0] != '|'
- last_ditch_logfile = Settings.expand_rails_path(last_ditch_logfile)
- end
- STDIN.reopen("/dev/null")
- STDOUT.reopen(open(last_ditch_logfile, 'a'))
- STDERR.reopen(STDOUT)
- STDOUT.sync = STDERR.sync = true
- end
-
- def pid_folder
- options[:pid_folder]
- end
-
- def pid_file
- File.join(pid_folder, 'delayed_jobs_pool.pid')
- end
-
- def last_ditch_logfile
- @config['last_ditch_logfile']
- end
-
- def stop(options = {})
- kill = options[:kill]
- pid = options[:pid] || self.pid
- if pid && status(pid: pid, print: false)
- puts "Stopping pool #{pid}..."
- signal = 'INT'
- if kill
- pid = -pid # send to the whole group
- if kill == 9
- signal = 'KILL'
- else
- signal = 'TERM'
- end
- end
- begin
- Process.kill(signal, pid)
- rescue Errno::ESRCH
- # ignore if the pid no longer exists
- end
- else
- status
- end
- end
-
- def pid
- if File.file?(pid_file)
- pid = File.read(pid_file).to_i
- pid = nil unless pid > 0
- end
- pid
- end
-
- def status(options = { print: true })
- print = options[:print]
- pid = options[:pid] || self.pid
- alive = pid && (Process.kill(0, pid) rescue false) && :running
- alive ||= :draining if pid && Process.kill(0, -pid) rescue false
- if alive
- puts "Delayed jobs #{alive}, pool PID: #{pid}" if print
- else
- puts "No delayed jobs pool running" if print && print != :alive
- end
- alive
- end
-
- def read_config(config_filename)
- @config = Settings.worker_config(config_filename)
- end
-
- def apply_config
- Settings.apply_worker_config!(@config)
- end
-
  end
  end
lib/delayed/settings.rb CHANGED
@@ -1,3 +1,6 @@
+ require 'yaml'
+ require 'erb'
+
  module Delayed
  module Settings
  SETTINGS = [
@@ -11,6 +14,9 @@ module Delayed
  :pool_procname_suffix,
  :default_job_options,
  :silence_periodic_log,
+ :disable_periodic_jobs,
+ :disable_automatic_orphan_unlocking,
+ :last_ditch_logfile,
  ]
  SETTINGS_WITH_ARGS = [ :num_strands ]

@@ -52,7 +58,11 @@ module Delayed
  raise ArgumentError,
  "Invalid config file #{config_filename}"
  end
- config.with_indifferent_access
+ config = config.with_indifferent_access
+ config[:workers].map! do |worker_config|
+ config.except(:workers).merge(worker_config.with_indifferent_access)
+ end
+ config
  end

  def self.apply_worker_config!(config)
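To illustrate the new merging step in worker_config: each entry under workers now inherits the file's top-level keys. A sketch using the same illustrative file name and queue names as the new settings spec later in this diff:

    # Given a config file whose default section has two workers plus a
    # top-level max_attempts, the per-worker hashes come back pre-merged:
    Delayed::Settings.worker_config("fname")[:workers]
    # => [{"queue" => "myqueue", "workers" => 2, "max_attempts" => 1},
    #     {"queue" => "secondqueue", "max_priority" => 7, "max_attempts" => 1}]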
lib/delayed/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Delayed
- VERSION = "0.10.5"
+ VERSION = "0.10.6"
  end
lib/delayed/worker.rb CHANGED
@@ -55,10 +55,6 @@ class Worker
  plugins.each { |plugin| plugin.inject! }
  end

- def name=(name)
- @name = name
- end
-
  def name
  @name ||= "#{Socket.gethostname rescue "X"}:#{self.id}"
  end
@@ -80,9 +76,11 @@ class Worker

  trap('INT') { say 'Exiting'; @exit = true }

- loop do
- run
- break if exit?
+ self.class.lifecycle.run_callbacks(:execute, self) do
+ loop do
+ run
+ break if exit?
+ end
  end

  say "Stopping worker", :info
@@ -94,37 +92,39 @@ class Worker
  end

  def run
- job =
- self.class.lifecycle.run_callbacks(:pop, self) do
- Delayed::Job.get_and_lock_next_available(
- name,
- queue,
- min_priority,
- max_priority)
- end
-
- if job
- configure_for_job(job) do
- @job_count += perform(job)
+ self.class.lifecycle.run_callbacks(:loop, self) do
+ job =
+ self.class.lifecycle.run_callbacks(:pop, self) do
+ Delayed::Job.get_and_lock_next_available(
+ name,
+ queue,
+ min_priority,
+ max_priority)
+ end

- if @max_job_count > 0 && @job_count >= @max_job_count
- say "Max job count of #{@max_job_count} exceeded, dying"
- @exit = true
- end
+ if job
+ configure_for_job(job) do
+ @job_count += perform(job)

- if @max_memory_usage > 0
- memory = sample_memory
- if memory > @max_memory_usage
- say "Memory usage of #{memory} exceeds max of #{@max_memory_usage}, dying"
+ if @max_job_count > 0 && @job_count >= @max_job_count
+ say "Max job count of #{@max_job_count} exceeded, dying"
  @exit = true
- else
- say "Memory usage: #{memory}"
+ end
+
+ if @max_memory_usage > 0
+ memory = sample_memory
+ if memory > @max_memory_usage
+ say "Memory usage of #{memory} exceeds max of #{@max_memory_usage}, dying"
+ @exit = true
+ else
+ say "Memory usage: #{memory}"
+ end
  end
  end
+ else
+ set_process_name("wait:#{Settings.worker_procname_prefix}#{@queue}:#{min_priority || 0}:#{max_priority || 'max'}")
+ sleep(Settings.sleep_delay + (rand * Settings.sleep_delay_stagger))
  end
- else
- set_process_name("wait:#{Settings.worker_procname_prefix}#{@queue}:#{min_priority || 0}:#{max_priority || 'max'}")
- sleep(Settings.sleep_delay + (rand * Settings.sleep_delay_stagger))
  end
  end

lib/delayed_job.rb CHANGED
@@ -24,8 +24,11 @@ require 'delayed/backend/base'
  require 'delayed/backend/active_record'
  require 'delayed/backend/redis/job'
  require 'delayed/batch'
+ require 'delayed/cli'
+ require 'delayed/daemon'
  require 'delayed/job_tracking'
  require 'delayed/lifecycle'
+ require 'delayed/log_tailer'
  require 'delayed/message_sending'
  require 'delayed/performable_method'
  require 'delayed/periodic'
spec/delayed/cli_spec.rb ADDED
@@ -0,0 +1,23 @@
+ require 'spec_helper'
+
+ RSpec.describe Delayed::CLI do
+ describe '#parse_cli_options!' do
+ it 'correctly parses the --config option' do
+ cli = described_class.new(%w{run --config /path/to/some/file.yml})
+ options = cli.parse_cli_options!
+ expect(options).to include config_file: '/path/to/some/file.yml'
+ end
+ end
+
+ describe '#run' do
+ before do
+ expect(Delayed::Settings).to receive(:worker_config).and_return({})
+ end
+
+ it 'prints help when no command is given' do
+ cli = described_class.new([])
+ expect(cli).to receive(:puts).with(/Usage/)
+ cli.run
+ end
+ end
+ end
spec/delayed/daemon_spec.rb ADDED
@@ -0,0 +1,35 @@
+ require 'spec_helper'
+
+ RSpec.describe Delayed::Daemon do
+ let(:pid_folder) { "/test/pid/folder" }
+ let(:pid) { 9999 }
+ let(:subject) { described_class.new(pid_folder) }
+
+ before do
+ allow(subject).to receive(:pid).and_return(pid)
+ end
+
+ describe '#stop' do
+ it 'prints status if not running' do
+ expect(subject).to receive(:status).with(print: false, pid: pid).and_return(false)
+ expect(subject).to receive(:status).with(no_args)
+ expect(Process).to receive(:kill).never
+ subject.stop
+ end
+
+ it 'prints status if draining' do
+ expect(subject).to receive(:status).with(print: false, pid: pid).and_return(:draining)
+ expect(subject).to receive(:status).with(no_args)
+ expect(Process).to receive(:kill).never
+ subject.stop
+ end
+
+ it 'sends INT by default' do
+ expect(subject).to receive(:status).with(print: false, pid: pid).and_return(:running)
+ expect(subject).to receive(:puts).with(/Stopping pool/)
+ expect(Process).to receive(:kill).with('INT', pid)
+ expect(subject).to receive(:wait).with(false)
+ subject.stop
+ end
+ end
+ end
spec/delayed/settings_spec.rb ADDED
@@ -0,0 +1,32 @@
+ require 'spec_helper'
+
+ RSpec.describe Delayed::Settings do
+ let(:configfile) {<<-YAML
+ default:
+ workers:
+ - queue: myqueue
+ workers: 2
+ - queue: secondqueue
+ max_priority: 7
+ max_attempts: 1
+ YAML
+ }
+
+ describe '.worker_config' do
+ it 'merges each worker config with the top-level config' do
+ expect(File).to receive(:read).with("fname").and_return(configfile)
+ config = described_class.worker_config("fname")
+ expect(config[:workers]).to eq([
+ {'queue' => 'myqueue', 'workers' => 2, 'max_attempts' => 1},
+ {'queue' => 'secondqueue', 'max_priority' => 7, 'max_attempts' => 1},
+ ])
+ end
+ end
+
+ describe '.apply_worker_config!' do
+ it 'applies global settings from the given config' do
+ expect(described_class).to receive(:last_ditch_logfile=).with(true)
+ described_class.apply_worker_config!('last_ditch_logfile' => true)
+ end
+ end
+ end
spec/shared/worker.rb CHANGED
@@ -46,7 +46,7 @@ shared_examples_for 'Delayed::Worker' do
  @worker.perform(batch_job).should == 3
  expect(@runs).to eql 4 # batch, plus all jobs
  end
-
+
  it "should succeed regardless of the success/failure of its component jobs" do
  change_setting(Delayed::Settings, :max_attempts, 2) do
  batch = Delayed::Batch::PerformableBatch.new(:serial, [
@@ -66,7 +66,7 @@ shared_examples_for 'Delayed::Worker' do
  to_retry[0].attempts.should == 1
  end
  end
-
+
  it "should retry a failed individual job" do
  batch = Delayed::Batch::PerformableBatch.new(:serial, [
  { :payload_object => Delayed::PerformableMethod.new(1, :/, [0]) },
@@ -108,21 +108,17 @@ shared_examples_for 'Delayed::Worker' do
  end

  context "while running with locked jobs" do
- before(:each) do
- @worker.name = 'worker1'
- end
-
  it "should not run jobs locked by another worker" do
  job_create(:locked_by => 'other_worker', :locked_at => (Delayed::Job.db_time_now - 1.minutes))
  lambda { @worker.run }.should_not change { SimpleJob.runs }
  end
-
+
  it "should run open jobs" do
  job_create
  lambda { @worker.run }.should change { SimpleJob.runs }.from(0).to(1)
  end
  end
-
+
  describe "failed jobs" do
  before do
  # reset defaults
@@ -150,7 +146,7 @@ shared_examples_for 'Delayed::Worker' do

  Delayed::Job.find_available(100, @job.queue).should == []
  end
-
+
  it "should re-schedule jobs after failing" do
  @worker.perform(@job)
  @job = Delayed::Job.find(@job.id)
@@ -174,18 +170,18 @@ shared_examples_for 'Delayed::Worker' do
  ErrorJob.permanent_failure_runs.should == 1
  end
  end
-
+
  context "reschedule" do
  before do
  @job = Delayed::Job.create :payload_object => SimpleJob.new
  end
-
+
  context "and we want to destroy jobs" do
  it "should be destroyed if it failed more than Settings.max_attempts times" do
  expect(@job).to receive(:destroy)
  Delayed::Settings.max_attempts.times { @job.reschedule }
  end
-
+
  it "should not be destroyed if failed fewer than Settings.max_attempts times" do
  expect(@job).to receive(:destroy).never
  (Delayed::Settings.max_attempts - 1).times { @job.reschedule }
@@ -205,7 +201,7 @@ shared_examples_for 'Delayed::Worker' do
  job.reschedule
  end
  end
-
+
  context "and we don't want to destroy jobs" do
  before do
  Delayed::Worker.on_max_failures = proc { false }
@@ -268,7 +264,7 @@ shared_examples_for 'Delayed::Worker' do
  SimpleJob.runs.should == 0
  worker.run
  SimpleJob.runs.should == 1
-
+
  SimpleJob.runs = 0

  worker = worker_create(:queue=>'queue2')
@@ -291,7 +287,7 @@ shared_examples_for 'Delayed::Worker' do
  worker = worker_create(:queue=>nil)
  worker.queue.should == queue_name
  end
-
+
  it "should override default queue name if specified in initialize" do
  queue_name = "my_queue"
  Delayed::Settings.queue = "default_queue"
@@ -357,4 +353,26 @@ shared_examples_for 'Delayed::Worker' do
  expect(ErrorJob.last_error.to_s).to eq 'did not work'
  end
  end
+
+ describe "#start" do
+ it "fires off an execute callback on the processing jobs loop" do
+ fired = false
+ expect(@worker).to receive(:run)
+ expect(@worker).to receive(:exit?).and_return(true)
+ Delayed::Worker.lifecycle.before(:execute) { |w| w == @worker && fired = true }
+ @worker.start
+ expect(fired).to eq(true)
+ end
+ end
+
+ describe "#run" do
+ it "fires off a loop callback on each call to run" do
+ fired = 0
+ Delayed::Worker.lifecycle.before(:loop) { |w| w == @worker && fired += 1 }
+ expect(Delayed::Job).to receive(:get_and_lock_next_available).twice.and_return(nil)
+ @worker.run
+ @worker.run
+ expect(fired).to eq(2)
+ end
+ end
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: canvas-jobs
  version: !ruby/object:Gem::Version
- version: 0.10.5
+ version: 0.10.6
  platform: ruby
  authors:
  - Tobias Luetke
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-02-16 00:00:00.000000000 Z
+ date: 2016-03-04 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: after_transaction_commit
@@ -293,9 +293,12 @@ files:
  - lib/delayed/backend/redis/set_running.lua
  - lib/delayed/backend/redis/tickle_strand.lua
  - lib/delayed/batch.rb
+ - lib/delayed/cli.rb
+ - lib/delayed/daemon.rb
  - lib/delayed/engine.rb
  - lib/delayed/job_tracking.rb
  - lib/delayed/lifecycle.rb
+ - lib/delayed/log_tailer.rb
  - lib/delayed/message_sending.rb
  - lib/delayed/performable_method.rb
  - lib/delayed/periodic.rb
@@ -314,11 +317,12 @@ files:
  - lib/delayed/yaml_extensions.rb
  - lib/delayed_job.rb
  - spec/active_record_job_spec.rb
- - spec/delayed/pool_spec.rb
+ - spec/delayed/cli_spec.rb
+ - spec/delayed/daemon_spec.rb
  - spec/delayed/server_spec.rb
+ - spec/delayed/settings_spec.rb
  - spec/delayed/worker_spec.rb
  - spec/gemfiles/32.gemfile
- - spec/gemfiles/32.gemfile.lock
  - spec/gemfiles/40.gemfile
  - spec/gemfiles/41.gemfile
  - spec/gemfiles/42.gemfile
@@ -352,17 +356,18 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  version: '0'
  requirements: []
  rubyforge_project:
- rubygems_version: 2.4.7
+ rubygems_version: 2.5.1
  signing_key:
  specification_version: 4
  summary: Instructure-maintained fork of delayed_job
  test_files:
  - spec/active_record_job_spec.rb
- - spec/delayed/pool_spec.rb
+ - spec/delayed/cli_spec.rb
+ - spec/delayed/daemon_spec.rb
  - spec/delayed/server_spec.rb
+ - spec/delayed/settings_spec.rb
  - spec/delayed/worker_spec.rb
  - spec/gemfiles/32.gemfile
- - spec/gemfiles/32.gemfile.lock
  - spec/gemfiles/40.gemfile
  - spec/gemfiles/41.gemfile
  - spec/gemfiles/42.gemfile
@@ -377,4 +382,3 @@ test_files:
  - spec/shared/worker.rb
  - spec/shared_jobs_specs.rb
  - spec/spec_helper.rb
- has_rdoc:
spec/delayed/pool_spec.rb DELETED
@@ -1,11 +0,0 @@
- require 'spec_helper'
-
- RSpec.describe Delayed::Pool do
- describe '#parse_cli_options!' do
- it 'must correctly parse the --config option' do
- pool = Delayed::Pool.new(%w{run --config /path/to/some/file.yml})
- pool.parse_cli_options!
- expect(pool.options).to include config_file: '/path/to/some/file.yml'
- end
- end
- end
spec/gemfiles/32.gemfile.lock DELETED
@@ -1,155 +0,0 @@
- PATH
- remote: ../../
- specs:
- canvas-jobs (0.9.14)
- after_transaction_commit (= 1.0.1)
- rails (>= 3.2)
- redis (> 3.0)
- redis-scripting (~> 1.0.1)
- rufus-scheduler (~> 3.1.2)
-
- GEM
- remote: https://rubygems.org/
- specs:
- actionmailer (3.2.22)
- actionpack (= 3.2.22)
- mail (~> 2.5.4)
- actionpack (3.2.22)
- activemodel (= 3.2.22)
- activesupport (= 3.2.22)
- builder (~> 3.0.0)
- erubis (~> 2.7.0)
- journey (~> 1.0.4)
- rack (~> 1.4.5)
- rack-cache (~> 1.2)
- rack-test (~> 0.6.1)
- sprockets (~> 2.2.1)
- activemodel (3.2.22)
- activesupport (= 3.2.22)
- builder (~> 3.0.0)
- activerecord (3.2.22)
- activemodel (= 3.2.22)
- activesupport (= 3.2.22)
- arel (~> 3.0.2)
- tzinfo (~> 0.3.29)
- activeresource (3.2.22)
- activemodel (= 3.2.22)
- activesupport (= 3.2.22)
- activesupport (3.2.22)
- i18n (~> 0.6, >= 0.6.4)
- multi_json (~> 1.0)
- after_transaction_commit (1.0.1)
- activerecord (>= 3.2)
- arel (3.0.3)
- backports (3.6.6)
- builder (3.0.4)
- bump (0.5.2)
- coderay (1.1.0)
- database_cleaner (1.3.0)
- diff-lcs (1.2.5)
- erubis (2.7.0)
- hike (1.2.3)
- i18n (0.7.0)
- journey (1.0.4)
- json (1.8.3)
- mail (2.5.4)
- mime-types (~> 1.16)
- treetop (~> 1.4.8)
- method_source (0.8.2)
- mime-types (1.25.1)
- multi_json (1.11.2)
- pg (0.18.2)
- polyglot (0.3.5)
- pry (0.10.1)
- coderay (~> 1.1.0)
- method_source (~> 0.8.1)
- slop (~> 3.4)
- rack (1.4.7)
- rack-cache (1.2)
- rack (>= 0.4)
- rack-protection (1.5.3)
- rack
- rack-ssl (1.3.4)
- rack
- rack-test (0.6.3)
- rack (>= 1.0)
- rails (3.2.22)
- actionmailer (= 3.2.22)
- actionpack (= 3.2.22)
- activerecord (= 3.2.22)
- activeresource (= 3.2.22)
- activesupport (= 3.2.22)
- bundler (~> 1.0)
- railties (= 3.2.22)
- railties (3.2.22)
- actionpack (= 3.2.22)
- activesupport (= 3.2.22)
- rack-ssl (~> 1.3.2)
- rake (>= 0.8.7)
- rdoc (~> 3.4)
- thor (>= 0.14.6, < 2.0)
- rake (10.4.2)
- rdoc (3.12.2)
- json (~> 1.4)
- redis (3.2.1)
- redis-scripting (1.0.1)
- redis (>= 3.0)
- rspec (3.1.0)
- rspec-core (~> 3.1.0)
- rspec-expectations (~> 3.1.0)
- rspec-mocks (~> 3.1.0)
- rspec-core (3.1.7)
- rspec-support (~> 3.1.0)
- rspec-expectations (3.1.2)
- diff-lcs (>= 1.2.0, < 2.0)
- rspec-support (~> 3.1.0)
- rspec-mocks (3.1.3)
- rspec-support (~> 3.1.0)
- rspec-support (3.1.2)
- rufus-scheduler (3.1.3)
- sinatra (1.4.6)
- rack (~> 1.4)
- rack-protection (~> 1.4)
- tilt (>= 1.3, < 3)
- sinatra-contrib (1.4.6)
- backports (>= 2.0)
- multi_json
- rack-protection
- rack-test
- sinatra (~> 1.4.0)
- tilt (>= 1.3, < 3)
- slop (3.6.0)
- sprockets (2.2.3)
- hike (~> 1.2)
- multi_json (~> 1.0)
- rack (~> 1.0)
- tilt (~> 1.1, != 1.3.0)
- test_after_commit (0.4.1)
- activerecord (>= 3.2)
- thor (0.19.1)
- tilt (1.4.1)
- timecop (0.7.1)
- treetop (1.4.15)
- polyglot
- polyglot (>= 0.3.1)
- tzinfo (0.3.44)
- wwtd (0.7.0)
-
- PLATFORMS
- ruby
-
- DEPENDENCIES
- bump
- canvas-jobs!
- database_cleaner (= 1.3.0)
- pg
- pry
- rack-test
- rails (~> 3.2.19)
- rake
- rspec (= 3.1.0)
- sinatra
- sinatra-contrib
- test_after_commit (= 0.4.1)
- timecop (= 0.7.1)
- wwtd (= 0.7.0)