delayed_job 2.0.8 → 2.1.0.pre

@@ -0,0 +1,109 @@
+ require 'couchrest'
+
+ # extend couchrest to handle delayed_job serialization.
+ class CouchRest::ExtendedDocument
+   yaml_as "tag:ruby.yaml.org,2002:CouchRest"
+
+   def reload
+     job = self.class.get self['_id']
+     job.each {|k,v| self[k] = v}
+   end
+   def self.find(id)
+     get id
+   end
+   def self.yaml_new(klass, tag, val)
+     klass.get(val['_id'])
+   end
+   def ==(other)
+     if other.is_a? ::CouchRest::ExtendedDocument
+       self['_id'] == other['_id']
+     else
+       super
+     end
+   end
+ end
+
+ # couchrest adapter
+ module Delayed
+   module Backend
+     module CouchRest
+       class Job < ::CouchRest::ExtendedDocument
+         include Delayed::Backend::Base
+         use_database ::CouchRest::Server.new.database('delayed_job')
+
+         property :handler
+         property :last_error
+         property :locked_by
+         property :priority, :default => 0
+         property :attempts, :default => 0
+         property :run_at, :cast_as => 'Time'
+         property :locked_at, :cast_as => 'Time'
+         property :failed_at, :cast_as => 'Time'
+         timestamps!
+
+         set_callback :save, :before, :set_default_run_at
+
+         view_by(:failed_at, :locked_by, :run_at,
+                 :map => "function(doc){" +
+                         " if(doc['couchrest-type'] == 'Delayed::Backend::CouchRest::Job') {" +
+                         " emit([doc.failed_at || null, doc.locked_by || null, doc.run_at || null], null);}" +
+                         " }")
+         view_by(:failed_at, :locked_at, :run_at,
+                 :map => "function(doc){" +
+                         " if(doc['couchrest-type'] == 'Delayed::Backend::CouchRest::Job') {" +
+                         " emit([doc.failed_at || null, doc.locked_at || null, doc.run_at || null], null);}" +
+                         " }")
+
+         def self.db_time_now; Time.now; end
+         def self.find_available(worker_name, limit = 5, max_run_time = ::Delayed::Worker.max_run_time)
+           ready = ready_jobs
+           mine = my_jobs worker_name
+           expire = expired_jobs max_run_time
+           jobs = (ready + mine + expire)[0..limit-1].sort_by { |j| j.priority }
+           jobs = jobs.find_all { |j| j.priority >= Worker.min_priority } if Worker.min_priority
+           jobs = jobs.find_all { |j| j.priority <= Worker.max_priority } if Worker.max_priority
+           jobs
+         end
+         def self.clear_locks!(worker_name)
+           jobs = my_jobs worker_name
+           jobs.each { |j| j.locked_by, j.locked_at = nil, nil; }
+           database.bulk_save jobs
+         end
+         def self.delete_all
+           database.bulk_save all.each { |doc| doc['_deleted'] = true }
+         end
+
+         def lock_exclusively!(max_run_time, worker = worker_name)
+           return false if locked_by_other?(worker) and not expired?(max_run_time)
+           case
+           when locked_by_me?(worker)
+             self.locked_at = self.class.db_time_now
+           when (unlocked? or (locked_by_other?(worker) and expired?(max_run_time)))
+             self.locked_at, self.locked_by = self.class.db_time_now, worker
+           end
+           save
+         rescue RestClient::Conflict
+           false
+         end
+
+         private
+         def self.ready_jobs
+           options = {:startkey => [nil, nil], :endkey => [nil, nil, db_time_now]}
+           by_failed_at_and_locked_by_and_run_at options
+         end
+         def self.my_jobs(worker_name)
+           options = {:startkey => [nil, worker_name], :endkey => [nil, worker_name, {}]}
+           by_failed_at_and_locked_by_and_run_at options
+         end
+         def self.expired_jobs(max_run_time)
+           options = {:startkey => [nil,'0'], :endkey => [nil, db_time_now - max_run_time, db_time_now]}
+           by_failed_at_and_locked_at_and_run_at options
+         end
+         def unlocked?; locked_by.nil?; end
+         def expired?(time); locked_at < self.class.db_time_now - time; end
+         def locked_by_me?(worker); not locked_by.nil? and locked_by == worker; end
+         def locked_by_other?(worker); not locked_by.nil? and locked_by != worker; end
+       end
+     end
+   end
+ end
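
The new CouchDB backend is selected the same way as the other 2.x backends. A minimal initializer sketch, assuming the Delayed::Worker.backend= setter from the 2.x series (it accepts either a backend symbol or the Job class itself); the file name is hypothetical:

    # config/initializers/delayed_job.rb (hypothetical)
    require 'delayed_job'

    # the symbol form requires 'delayed/backend/couch_rest' and resolves the Job class;
    # assigning Delayed::Backend::CouchRest::Job directly should also work
    Delayed::Worker.backend = :couch_rest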
@@ -2,19 +2,15 @@ require 'dm-core'
  require 'dm-observer'
  require 'dm-aggregates'

- module DataMapper
-   module Resource
-     module ClassMethods
-       def load_for_delayed_job(id)
-         find!(id)
-       end
-     end
+ DataMapper::Resource.class_eval do
+   yaml_as "tag:ruby.yaml.org,2002:DataMapper"

-     module InstanceMethods
-       def dump_for_delayed_job
-         "#{self.class};#{id}"
-       end
-     end
+   def self.yaml_new(klass, tag, val)
+     klass.find(val['id'])
+   end
+
+   def to_yaml_properties
+     ['@id']
    end
  end

@@ -1,18 +1,14 @@
  require 'mongo_mapper'

- module ::MongoMapper
-   module Document
-     module ClassMethods
-       def load_for_delayed_job(id)
-         find!(id)
-       end
-     end
+ MongoMapper::Document.class_eval do
+   yaml_as "tag:ruby.yaml.org,2002:MongoMapper"
+
+   def self.yaml_new(klass, tag, val)
+     klass.find(val['_id'])
+   end

-     module InstanceMethods
-       def dump_for_delayed_job
-         "#{self.class};#{id}"
-       end
-     end
+   def to_yaml_properties
+     ['@_id']
    end
  end

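Both ORM patches above follow the same pattern: yaml_as registers a YAML domain tag for the class, to_yaml_properties restricts the dump to the record id, and yaml_new re-fetches the record when the job handler is loaded, so a job stores a reference to the record rather than a stale copy of its state. A self-contained sketch of that pattern on a hypothetical class (Ruby 1.8 / Syck YAML API assumed):

    require 'yaml'

    class Article                 # hypothetical model, illustration only
      STORE = {}                  # stand-in for the real datastore

      attr_reader :id

      def self.find(id)
        STORE.fetch(id)           # the real backends hit the database here
      end

      def initialize(id)
        @id = id
        STORE[id] = self
      end

      yaml_as "tag:ruby.yaml.org,2002:Article"

      def self.yaml_new(klass, tag, val)
        klass.find(val['id'])     # rebuild by lookup, not by restoring dumped state
      end

      def to_yaml_properties
        ['@id']                   # only the id ends up in the serialized handler
      end
    end

    article = Article.new(42)
    YAML.load(article.to_yaml).equal?(article)   # => true: the lookup returns the record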
@@ -44,12 +44,8 @@ module Delayed
        opts.on('-m', '--monitor', 'Start monitor process.') do
          @monitor = true
        end
-       opts.on('--sleep-delay N', "Amount of time to sleep when no jobs are found") do |n|
-         @options[:sleep_delay] = n
-       end
-       opts.on('-p', '--prefix NAME', "String to be prefixed to worker process names") do |prefix|
-         @options[:prefix] = prefix
-       end
+
+
      end
      @args = opts.parse!(args)
    end
@@ -79,7 +75,6 @@ module Delayed

    def run_process(process_name, dir)
      Daemons.run_proc(process_name, :dir => dir, :dir_mode => :normal, :monitor => @monitor, :ARGV => @args) do |*args|
-       $0 = File.join @options[:prefix], process_name if @options[:prefix]
        run process_name
      end
    end
@@ -90,7 +85,7 @@ module Delayed
      # Re-open file handles
      @files_to_reopen.each do |file|
        begin
-         file.reopen file.path, "a+"
+         file.reopen file.path
          file.sync = true
        rescue ::Exception
        end
@@ -1,18 +1,19 @@
+ require 'active_support/basic_object'
+
  module Delayed
    class DelayProxy < ActiveSupport::BasicObject
      def initialize(target, options)
        @target = target
        @options = options
      end
-
+
      def method_missing(method, *args)
-       Job.create({
-         :payload_object => PerformableMethod.new(@target, method.to_sym, args),
-         :priority => ::Delayed::Worker.default_priority
-       }.merge(@options))
+       Job.create @options.merge(
+         :payload_object => PerformableMethod.new(@target, method.to_sym, args)
+       )
      end
    end
-
+
    module MessageSending
      def delay(options = {})
        DelayProxy.new(self, options)
@@ -30,24 +31,14 @@ module Delayed
    end

    module ClassMethods
-     def handle_asynchronously(method, opts = {})
+     def handle_asynchronously(method)
        aliased_method, punctuation = method.to_s.sub(/([?!=])$/, ''), $1
        with_method, without_method = "#{aliased_method}_with_delay#{punctuation}", "#{aliased_method}_without_delay#{punctuation}"
        define_method(with_method) do |*args|
-         curr_opts = opts.clone
-         curr_opts.each_key do |key|
-           if (val = curr_opts[key]).is_a?(Proc)
-             curr_opts[key] = if val.arity == 1
-               val.call(self)
-             else
-               val.call
-             end
-           end
-         end
-         delay(curr_opts).__send__(without_method, *args)
+         delay.__send__(without_method, *args)
        end
        alias_method_chain method, :delay
      end
    end
  end
- end
+ end
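
Taken together, the two hunks above mean that delay still accepts per-call job options and merges them straight into Job.create, while handle_asynchronously loses its options argument (and with it the Proc-evaluating behaviour). A usage sketch with hypothetical classes:

    # handle_asynchronously: calling the method enqueues a job instead of running inline
    class Newsletter                      # hypothetical class, illustration only
      def deliver(address)
        # ... send the mail ...
      end
      handle_asynchronously :deliver      # 2.1.0.pre: no per-method options any more
    end
    Newsletter.new.deliver('user@example.com')

    # delay: explicit proxy on any object; per-call options go straight into Job.create
    user.delay(:priority => 10, :run_at => 1.hour.from_now).activate!   # user/activate! hypothetical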
@@ -1,62 +1,19 @@
- class Class
-   def load_for_delayed_job(arg)
-     self
-   end
-
-   def dump_for_delayed_job
-     name
-   end
- end
-
  module Delayed
    class PerformableMethod < Struct.new(:object, :method, :args)
-     STRING_FORMAT = /^LOAD\;([A-Z][\w\:]+)(?:\;(\w+))?$/
-
-     class LoadError < StandardError
-     end
-
      def initialize(object, method, args)
-       raise NoMethodError, "undefined method `#{method}' for #{object.inspect}" unless object.respond_to?(method, true)
+       raise NoMethodError, "undefined method `#{method}' for #{object.inspect}" unless object.respond_to?(method)

-       self.object = dump(object)
-       self.args = args.map { |a| dump(a) }
+       self.object = object
+       self.args = args
        self.method = method.to_sym
      end

      def display_name
-       if STRING_FORMAT === object
-         "#{$1}#{$2 ? '#' : '.'}#{method}"
-       else
-         "#{object.class}##{method}"
-       end
+       "#{object.class}##{method}"
      end

      def perform
-       load(object).send(method, *args.map{|a| load(a)})
-     rescue PerformableMethod::LoadError
-       # We cannot do anything about objects that can't be loaded
-       true
-     end
-
-     private
-
-     def load(obj)
-       if STRING_FORMAT === obj
-         $1.constantize.load_for_delayed_job($2)
-       else
-         obj
-       end
-     rescue => e
-       Delayed::Worker.logger.warn "Could not load object for job: #{e.message}"
-       raise PerformableMethod::LoadError
-     end
-
-     def dump(obj)
-       if obj.respond_to?(:dump_for_delayed_job)
-         "LOAD;#{obj.dump_for_delayed_job}"
-       else
-         obj
-       end
+       object.send(method, *args) if object
      end
    end
  end
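
With the LOAD;Class;id string format, the dump/load helpers and the Class monkey-patch gone, PerformableMethod simply keeps the target object and arguments; turning them into something storable is now left entirely to the job's YAML handler (and to the yaml_new hooks added for the ORM classes above). A quick check of the remaining behaviour, using a plain string as the target:

    pm = Delayed::PerformableMethod.new('a string', :upcase, [])
    pm.display_name   # => "String#upcase"
    pm.perform        # => "A STRING"

    # the respond_to? check lost its `true` flag, so private methods now raise:
    Delayed::PerformableMethod.new('a string', :puts, ['x'])   # => NoMethodError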
@@ -6,5 +6,9 @@ module Delayed
    initializer :after_initialize do
      Delayed::Worker.guess_backend
    end
+
+   rake_tasks do
+     load 'delayed/tasks.rb'
+   end
    end
  end
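
The railtie now exposes the gem's rake tasks to Rails 3 applications automatically. Outside Rails the equivalent is to load the same file from your own Rakefile; a sketch (relying on load searching the gem's lib directory via the load path):

    # Rakefile of a non-Rails project (sketch; Rails 3 apps get this via the railtie above)
    require 'delayed_job'
    load 'delayed/tasks.rb'

The tasks defined there (jobs:work and jobs:clear in the released gems) expect an :environment task to exist, which Rails provides.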
@@ -6,17 +6,6 @@
  # after "deploy:stop", "delayed_job:stop"
  # after "deploy:start", "delayed_job:start"
  # after "deploy:restart", "delayed_job:restart"
- #
- # If you want to use command line options, for example to start multiple workers,
- # define a Capistrano variable delayed_job_args:
- #
- #   set :delayed_jobs_args, "-n 2"
- #
- # If you've got delayed_job workers running on a servers, you can also specify
- # which servers have delayed_job running and should be restarted after deploy.
- #
- #   set :delayed_job_server_role, :worker
- #

  Capistrano::Configuration.instance.load do
    namespace :delayed_job do
@@ -24,27 +13,19 @@ Capistrano::Configuration.instance.load do
      fetch(:rails_env, false) ? "RAILS_ENV=#{fetch(:rails_env)}" : ''
    end

-   def args
-     fetch(:delayed_job_args, "")
-   end
-
-   def roles
-     fetch(:delayed_job_server_role, :app)
-   end
-
    desc "Stop the delayed_job process"
-   task :stop, :roles => lambda { roles } do
+   task :stop, :roles => :app do
      run "cd #{current_path};#{rails_env} script/delayed_job stop"
    end

    desc "Start the delayed_job process"
-   task :start, :roles => lambda { roles } do
-     run "cd #{current_path};#{rails_env} script/delayed_job start #{args}"
+   task :start, :roles => :app do
+     run "cd #{current_path};#{rails_env} script/delayed_job start"
    end

    desc "Restart the delayed_job process"
-   task :restart, :roles => lambda { roles } do
-     run "cd #{current_path};#{rails_env} script/delayed_job restart #{args}"
+   task :restart, :roles => :app do
+     run "cd #{current_path};#{rails_env} script/delayed_job restart"
    end
    end
  end
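
With the delayed_job_args and delayed_job_server_role hooks gone, the recipe is back to fixed :app-role tasks with no extra arguments. Wiring them into a deploy still looks like the comment block at the top of the file; a config/deploy.rb sketch:

    require "delayed/recipes"

    after "deploy:stop",    "delayed_job:stop"
    after "deploy:start",   "delayed_job:start"
    after "deploy:restart", "delayed_job:restart"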
@@ -1,13 +1,14 @@
  require 'timeout'
  require 'active_support/core_ext/numeric/time'
+ require 'active_support/core_ext/class/attribute_accessors'
+ require 'active_support/core_ext/kernel'

  module Delayed
    class Worker
-     cattr_accessor :min_priority, :max_priority, :max_attempts, :max_run_time, :default_priority, :sleep_delay, :logger
+     cattr_accessor :min_priority, :max_priority, :max_attempts, :max_run_time, :sleep_delay, :logger
      self.sleep_delay = 5
      self.max_attempts = 25
      self.max_run_time = 4.hours
-     self.default_priority = 0

      # By default failed jobs are destroyed after too many attempts. If you want to keep them around
      # (perhaps to inspect the reason for the failure), set this to false.
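
These class-level settings are tuned from application code (note that default_priority is gone in 2.1.0.pre, and sleep_delay can no longer be passed on the command line, see the command.rb hunk above). For example, in an initializer:

    Delayed::Worker.sleep_delay  = 10            # seconds to wait when the queue is empty
    Delayed::Worker.max_attempts = 3
    Delayed::Worker.max_run_time = 30.minutes
    Delayed::Worker.destroy_failed_jobs = false  # keep failed jobs around for inspection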
@@ -49,7 +50,6 @@ module Delayed
      @quiet = options[:quiet]
      self.class.min_priority = options[:min_priority] if options.has_key?(:min_priority)
      self.class.max_priority = options[:max_priority] if options.has_key?(:max_priority)
-     self.class.sleep_delay = options[:sleep_delay] if options.has_key?(:sleep_delay)
    end

    # Every worker has a unique name which by default is the pid of the process. There are some
@@ -85,7 +85,7 @@ module Delayed
        break if $exit

        if count.zero?
-         sleep(self.class.sleep_delay)
+         sleep(@@sleep_delay)
        else
          say "#{count} jobs processed at %.4f j/s, %d failed ..." % [count / realtime, result.last]
        end
@@ -124,38 +124,29 @@ module Delayed
      end
      say "#{job.name} completed after %.4f" % runtime
      return true # did work
-   rescue DeserializationError => error
-     job.last_error = "{#{error.message}\n#{error.backtrace.join('\n')}"
-     failed(job)
-   rescue Exception => error
-     handle_failed_job(job, error)
+   rescue Exception => e
+     handle_failed_job(job, e)
      return false # work failed
    end

    # Reschedule the job in the future (when a job fails).
    # Uses an exponential scale depending on the number of failed attempts.
    def reschedule(job, time = nil)
-     if (job.attempts += 1) < max_attempts(job)
-       job.run_at = time || job.reschedule_at
+     if (job.attempts += 1) < self.class.max_attempts
+       time ||= Job.db_time_now + (job.attempts ** 4) + 5
+       job.run_at = time
        job.unlock
        job.save!
      else
        say "PERMANENTLY removing #{job.name} because of #{job.attempts} consecutive failures.", Logger::INFO
-       failed(job)
-     end
-   end

-   def failed(job)
-     begin
        if job.payload_object.respond_to? :on_permanent_failure
-         say "Running on_permanent_failure hook"
-         job.payload_object.on_permanent_failure
+         say "Running on_permanent_failure hook"
+         job.payload_object.on_permanent_failure
        end
-     rescue DeserializationError
-       # do nothing
+
+       self.class.destroy_failed_jobs ? job.destroy : job.update_attributes(:failed_at => Delayed::Job.db_time_now)
      end
-
-     self.class.destroy_failed_jobs ? job.destroy : job.update_attributes(:failed_at => Delayed::Job.db_time_now)
      end
    end
    def say(text, level = Logger::INFO)
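
reschedule goes back to the inlined backoff (the overridable job.reschedule_at is dropped here, and the per-job max_attempts helper is removed in the hunk below), so every failed attempt is retried after attempts ** 4 + 5 seconds:

    (1..5).map { |attempts| attempts ** 4 + 5 }
    # => [6, 21, 86, 261, 630]   roughly 6s, 21s, 1.5min, 4.4min, 10.5min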
@@ -164,10 +155,6 @@ module Delayed
      logger.add level, "#{Time.now.strftime('%FT%T%z')}: #{text}" if logger
    end

-   def max_attempts(job)
-     job.max_attempts || self.class.max_attempts
-   end
-
    protected

    def handle_failed_job(job, error)
@@ -179,7 +166,19 @@ module Delayed
    # Run the next job we can get an exclusive lock on.
    # If no jobs are left we return nil
    def reserve_and_run_one_job
-     job = Delayed::Job.reserve(self)
+
+     # We get up to 5 jobs from the db. In case we cannot get exclusive access to a job we try the next.
+     # this leads to a more even distribution of jobs across the worker processes
+     job = Delayed::Job.find_available(name, 5, self.class.max_run_time).detect do |job|
+       if job.lock_exclusively!(self.class.max_run_time, name)
+         say "acquired lock on #{job.name}"
+         true
+       else
+         say "failed to acquire exclusive lock for #{job.name}", Logger::WARN
+         false
+       end
+     end
+
      run(job) if job
    end
  end
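
reserve_and_run_one_job reverts from the single Job.reserve call to the older fetch-five-and-try-to-lock loop (find_available plus lock_exclusively!, both of which the CouchDB backend at the top of this diff implements). Everything is driven from Worker#start; a minimal runner sketch:

    worker = Delayed::Worker.new(:min_priority => 0, :max_priority => 10, :quiet => false)
    worker.start   # loops reserve_and_run_one_job, sleeping sleep_delay seconds when the queue is empty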