delayed_job_csi 2.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,129 @@
+module Delayed
+  module Backend
+    class DeserializationError < StandardError
+    end
+
+    module Base
+      def self.included(base)
+        base.extend ClassMethods
+      end
+
+      module ClassMethods
+        # Add a job to the queue
+        def enqueue(*args)
+          object = args.shift
+          unless object.respond_to?(:perform)
+            raise ArgumentError, 'Cannot enqueue items which do not respond to perform'
+          end
+
+          priority = args.first || Delayed::Worker.default_priority
+          run_at = args[1]
+          self.create(:payload_object => object, :priority => priority.to_i, :run_at => run_at)
+        end
+
+        def reserve(worker, max_run_time = Worker.max_run_time)
+          # We get up to 5 jobs from the db. In case we cannot get exclusive access to a job we try the next.
+          # this leads to a more even distribution of jobs across the worker processes
+          find_available(worker.name, 5, max_run_time).detect do |job|
+            job.lock_exclusively!(max_run_time, worker.name)
+          end
+        end
+
+        # Hook method that is called before a new worker is forked
+        def before_fork
+        end
+
+        # Hook method that is called after a new worker is forked
+        def after_fork
+        end
+
+        def work_off(num = 100)
+          warn "[DEPRECATION] `Delayed::Job.work_off` is deprecated. Use `Delayed::Worker.new.work_off` instead."
+          Delayed::Worker.new.work_off(num)
+        end
+      end
+
+      ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/
+
+      def failed?
+        failed_at
+      end
+      alias_method :failed, :failed?
+
+      def name
+        @name ||= begin
+          payload = payload_object
+          if payload.respond_to?(:display_name)
+            payload.display_name
+          else
+            payload.class.name
+          end
+        end
+      end
+
+      def payload_object=(object)
+        self['handler'] = object.to_yaml
+      end
+
+      def payload_object
+        @payload_object ||= deserialize(self['handler'])
+      end
+
+      # Moved into its own method so that new_relic can trace it.
+      def invoke_job
+        payload_object.perform
+      end
+
+      # Unlock this job (note: not saved to DB)
+      def unlock
+        self.locked_at = nil
+        self.locked_by = nil
+      end
+
+      def reschedule_at
+        payload_object.respond_to?(:reschedule_at) ?
+          payload_object.reschedule_at(self.class.db_time_now, attempts) :
+          self.class.db_time_now + (attempts ** 4) + 5
+      end
+
+      def max_attempts
+        payload_object.max_attempts if payload_object.respond_to?(:max_attempts)
+      end
+
+      private
+
+      def deserialize(source)
+        handler = YAML.load(source) rescue nil
+
+        unless handler.respond_to?(:perform)
+          if handler.nil? && source =~ ParseObjectFromYaml
+            handler_class = $1
+          end
+          attempt_to_load(handler_class || handler.class)
+          handler = YAML.load(source)
+        end
+
+        return handler if handler.respond_to?(:perform)
+
+        raise DeserializationError,
+          'Job failed to load: Unknown handler. Try to manually require the appropriate file.'
+      rescue TypeError, LoadError, NameError => e
+        raise DeserializationError,
+          "Job failed to load: #{e.message}. Try to manually require the required file."
+      end
+
+      # Constantize the object so that ActiveSupport can attempt
+      # its auto loading magic. Will raise LoadError if not successful.
+      def attempt_to_load(klass)
+        klass.constantize
+      end
+
+      protected
+
+      def set_default_run_at
+        self.run_at ||= self.class.db_time_now
+      end
+
+    end
+  end
+end
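
For orientation, this is the contract the Base module above enforces: enqueue accepts any object that responds to perform (plus optional priority and run_at positional arguments), and the job later probes the payload with respond_to? for the optional display_name, max_attempts, and reschedule_at hooks. A minimal sketch under the usual delayed_job setup; the NewsletterJob struct, NewsletterMailer, and Customer below are hypothetical and not part of this package:

    # Hypothetical payload object; only #perform is required, the other hooks are optional.
    class NewsletterJob < Struct.new(:text, :emails)
      def perform
        emails.each { |email| NewsletterMailer.deliver_text_to_email(text, email) }
      end

      def display_name                  # picked up by Job#name in place of the class name
        "newsletter-#{emails.size}"
      end

      def max_attempts                  # caps retries for this payload only
        3
      end

      def reschedule_at(time, attempts) # overrides the default time + attempts**4 + 5 backoff
        time + 300
      end
    end

    # priority (0) and run_at are the optional positional arguments read by enqueue.
    Delayed::Job.enqueue NewsletterJob.new('lorem ipsum...', Customer.all.map(&:email)), 0, Time.now + 3600
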
@@ -0,0 +1,125 @@
+require 'dm-core'
+require 'dm-observer'
+require 'dm-aggregates'
+
+module DataMapper
+  module Resource
+    module ClassMethods
+      def load_for_delayed_job(id)
+        find!(id)
+      end
+    end
+
+    module InstanceMethods
+      def dump_for_delayed_job
+        "#{self.class};#{id}"
+      end
+    end
+  end
+end
+
+module Delayed
+  module Backend
+    module DataMapper
+      class Job
+        include ::DataMapper::Resource
+        include Delayed::Backend::Base
+
+        storage_names[:default] = 'delayed_jobs'
+
+        property :id, Serial
+        property :priority, Integer, :default => 0, :index => :run_at_priority
+        property :attempts, Integer, :default => 0
+        property :handler, Text, :lazy => false
+        property :run_at, Time, :index => :run_at_priority
+        property :locked_at, Time, :index => true
+        property :locked_by, String
+        property :failed_at, Time
+        property :last_error, Text
+
+        def self.db_time_now
+          Time.now
+        end
+
+        def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time)
+
+          simple_conditions = { :run_at.lte => db_time_now, :limit => limit, :failed_at => nil, :order => [:priority.asc, :run_at.asc] }
+
+          # respect priorities
+          simple_conditions[:priority.gte] = Worker.min_priority if Worker.min_priority
+          simple_conditions[:priority.lte] = Worker.max_priority if Worker.max_priority
+
+          # lockable
+          lockable = (
+            # not locked or past the max time
+            ( all(:locked_at => nil ) | all(:locked_at.lt => db_time_now - max_run_time)) |
+
+            # OR locked by our worker
+            all(:locked_by => worker_name))
+
+          # plus some other boring junk
+          (lockable).all( simple_conditions )
+        end
+
+        # When a worker is exiting, make sure we don't have any locked jobs.
+        def self.clear_locks!(worker_name)
+          all(:locked_by => worker_name).update(:locked_at => nil, :locked_by => nil)
+        end
+
+        # Lock this job for this worker.
+        # Returns true if we have the lock, false otherwise.
+        def lock_exclusively!(max_run_time, worker = worker_name)
+
+          now = self.class.db_time_now
+          overtime = now - max_run_time
+
+          # FIXME - this is a bit gross
+          # DM doesn't give us the number of rows affected by a collection update
+          # so we have to circumvent some niceness in DM::Collection here
+          collection = locked_by != worker ?
+            (self.class.all(:id => id, :run_at.lte => now) & ( self.class.all(:locked_at => nil) | self.class.all(:locked_at.lt => overtime) ) ) :
+            self.class.all(:id => id, :locked_by => worker)
+
+          attributes = collection.model.new(:locked_at => now, :locked_by => worker).dirty_attributes
+          affected_rows = self.repository.update(attributes, collection)
+
+          if affected_rows == 1
+            self.locked_at = now
+            self.locked_by = worker
+            return true
+          else
+            return false
+          end
+        end
+
+        # these are common to the other backends, so we provide an implementation
+        def self.delete_all
+          Delayed::Job.auto_migrate!
+        end
+
+        def self.find id
+          get id
+        end
+
+        def update_attributes(attributes)
+          attributes.each do |k,v|
+            self[k] = v
+          end
+          self.save
+        end
+
+
+      end
+
+      class JobObserver
+        include ::DataMapper::Observer
+
+        observe Job
+
+        before :save do
+          self.run_at ||= self.class.db_time_now
+        end
+      end
+    end
+  end
+end
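
Presumably this backend is selected through delayed_job 2.0's Delayed::Worker.backend= hook, e.g. from a Rails initializer. A sketch under that assumption (the require path is inferred from the class names and may differ in the installed gem):

    # config/initializers/delayed_job.rb (hypothetical)
    require 'delayed/backend/data_mapper'             # assumed load path for the file shown above

    Delayed::Worker.backend = Delayed::Backend::DataMapper::Job
    Delayed::Backend::DataMapper::Job.auto_migrate!   # create the delayed_jobs storage (delete_all above reuses this)
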
@@ -0,0 +1,110 @@
+require 'mongo_mapper'
+
+module ::MongoMapper
+  module Document
+    module ClassMethods
+      def load_for_delayed_job(id)
+        find!(id)
+      end
+    end
+
+    module InstanceMethods
+      def dump_for_delayed_job
+        "#{self.class};#{id}"
+      end
+    end
+  end
+end
+
+module Delayed
+  module Backend
+    module MongoMapper
+      class Job
+        include ::MongoMapper::Document
+        include Delayed::Backend::Base
+        set_collection_name 'delayed_jobs'
+
+        key :priority, Integer, :default => 0
+        key :attempts, Integer, :default => 0
+        key :handler, String
+        key :run_at, Time
+        key :locked_at, Time
+        key :locked_by, String, :index => true
+        key :failed_at, Time
+        key :last_error, String
+        timestamps!
+
+        before_save :set_default_run_at
+
+        ensure_index [[:priority, 1], [:run_at, 1]]
+
+        def self.before_fork
+          ::MongoMapper.connection.close
+        end
+
+        def self.after_fork
+          ::MongoMapper.connect(RAILS_ENV)
+        end
+
+        def self.db_time_now
+          Time.now.utc
+        end
+
+        def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time)
+          right_now = db_time_now
+
+          conditions = {
+            :run_at => {"$lte" => right_now},
+            :limit => -limit, # In mongo, positive limits are 'soft' and negative are 'hard'
+            :failed_at => nil,
+            :sort => [['priority', 1], ['run_at', 1]]
+          }
+
+          where = "this.locked_at == null || this.locked_at < #{make_date(right_now - max_run_time)}"
+
+          (conditions[:priority] ||= {})['$gte'] = Worker.min_priority.to_i if Worker.min_priority
+          (conditions[:priority] ||= {})['$lte'] = Worker.max_priority.to_i if Worker.max_priority
+
+          results = all(conditions.merge(:locked_by => worker_name))
+          results += all(conditions.merge('$where' => where)) if results.size < limit
+          results
+        end
+
+        # When a worker is exiting, make sure we don't have any locked jobs.
+        def self.clear_locks!(worker_name)
+          collection.update({:locked_by => worker_name}, {"$set" => {:locked_at => nil, :locked_by => nil}}, :multi => true)
+        end
+
+        # Lock this job for this worker.
+        # Returns true if we have the lock, false otherwise.
+        def lock_exclusively!(max_run_time, worker = worker_name)
+          right_now = self.class.db_time_now
+          overtime = right_now - max_run_time.to_i
+
+          query = "this.locked_at == null || this.locked_at < #{make_date(overtime)} || this.locked_by == #{worker.to_json}"
+          conditions = {:_id => id, :run_at => {"$lte" => right_now}, "$where" => query}
+
+          collection.update(conditions, {"$set" => {:locked_at => right_now, :locked_by => worker}})
+          affected_rows = collection.find({:_id => id, :locked_by => worker}).count
+          if affected_rows == 1
+            self.locked_at = right_now
+            self.locked_by = worker
+            return true
+          else
+            return false
+          end
+        end
+
+        private
+
+        def self.make_date(date_or_seconds)
+          "new Date(#{date_or_seconds.to_f * 1000})"
+        end
+
+        def make_date(date)
+          self.class.make_date(date)
+        end
+      end
+    end
+  end
+end
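
To make the locking protocol concrete, here is a rough sketch of the reservation cycle these methods support, as Delayed::Worker drives it through Base.reserve; the real worker adds error handling, retry counting, and job cleanup that are omitted here:

    worker = Delayed::Worker.new(:quiet => true)

    # find_available returns candidate jobs ordered by priority and run_at;
    # lock_exclusively! then claims one atomically with a conditional update.
    if job = Delayed::Backend::MongoMapper::Job.reserve(worker)
      job.invoke_job   # runs payload_object.perform
      job.destroy      # a successful run removes the job
    end
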
@@ -0,0 +1,108 @@
+require 'rubygems'
+require 'daemons'
+require 'optparse'
+
+module Delayed
+  class Command
+    attr_accessor :worker_count
+
+    def initialize(args)
+      @files_to_reopen = []
+      @options = {
+        :quiet => true,
+        :pid_dir => "#{RAILS_ROOT}/tmp/pids"
+      }
+
+      @worker_count = 1
+      @monitor = false
+
+      opts = OptionParser.new do |opts|
+        opts.banner = "Usage: #{File.basename($0)} [options] start|stop|restart|run"
+
+        opts.on('-h', '--help', 'Show this message') do
+          puts opts
+          exit 1
+        end
+        opts.on('-e', '--environment=NAME', 'Specifies the environment to run this delayed jobs under (test/development/production).') do |e|
+          STDERR.puts "The -e/--environment option has been deprecated and has no effect. Use RAILS_ENV and see http://github.com/collectiveidea/delayed_job/issues/#issue/7"
+        end
+        opts.on('--min-priority N', 'Minimum priority of jobs to run.') do |n|
+          @options[:min_priority] = n
+        end
+        opts.on('--max-priority N', 'Maximum priority of jobs to run.') do |n|
+          @options[:max_priority] = n
+        end
+        opts.on('-n', '--number_of_workers=workers', "Number of unique workers to spawn") do |worker_count|
+          @worker_count = worker_count.to_i rescue 1
+        end
+        opts.on('--pid-dir=DIR', 'Specifies an alternate directory in which to store the process ids.') do |dir|
+          @options[:pid_dir] = dir
+        end
+        opts.on('-i', '--identifier=n', 'A numeric identifier for the worker.') do |n|
+          @options[:identifier] = n
+        end
+        opts.on('-m', '--monitor', 'Start monitor process.') do
+          @monitor = true
+        end
+        opts.on('--sleep-delay N', "Amount of time to sleep when no jobs are found") do |n|
+          @options[:sleep_delay] = n
+        end
+      end
+      @args = opts.parse!(args)
+    end
+
+    def daemonize
+      Delayed::Worker.backend.before_fork
+
+      ObjectSpace.each_object(File) do |file|
+        @files_to_reopen << file unless file.closed?
+      end
+
+      dir = @options[:pid_dir]
+      Dir.mkdir(dir) unless File.exists?(dir)
+
+      if @worker_count > 1 && @options[:identifier]
+        raise ArgumentError, 'Cannot specify both --number-of-workers and --identifier'
+      elsif @worker_count == 1 && @options[:identifier]
+        process_name = "delayed_job.#{@options[:identifier]}"
+        run_process(process_name, dir)
+      else
+        worker_count.times do |worker_index|
+          process_name = worker_count == 1 ? "delayed_job" : "delayed_job.#{worker_index}"
+          run_process(process_name, dir)
+        end
+      end
+    end
+
+    def run_process(process_name, dir)
+      Daemons.run_proc(process_name, :dir => dir, :dir_mode => :normal, :monitor => @monitor, :ARGV => @args) do |*args|
+        run process_name
+      end
+    end
+
+    def run(worker_name = nil)
+      Dir.chdir(RAILS_ROOT)
+
+      # Re-open file handles
+      @files_to_reopen.each do |file|
+        begin
+          file.reopen file.path, "a+"
+          file.sync = true
+        rescue ::Exception
+        end
+      end
+
+      Delayed::Worker.logger = Logger.new(File.join(RAILS_ROOT, 'log', 'delayed_job.log'))
+      Delayed::Worker.backend.after_fork
+
+      worker = Delayed::Worker.new(@options)
+      worker.name_prefix = "#{worker_name} "
+      worker.start
+    rescue => e
+      Rails.logger.fatal e
+      STDERR.puts e.message
+      exit 1
+    end
+
+  end
+end
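
The Command class is normally invoked from the small script/delayed_job executable that delayed_job generates into a Rails application; roughly:

    #!/usr/bin/env ruby
    # script/delayed_job (sketch): boot the Rails environment, then daemonize workers
    require File.expand_path(File.join(File.dirname(__FILE__), '..', 'config', 'environment'))
    require 'delayed/command'

    Delayed::Command.new(ARGV).daemonize

Run as, for example, RAILS_ENV=production script/delayed_job -n 2 start, which maps onto the option parsing and daemonize logic above.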