inst-jobs 0.11.0

Files changed (86)
  1. checksums.yaml +7 -0
  2. data/bin/inst_job +4 -0
  3. data/db/migrate/20101216224513_create_delayed_jobs.rb +40 -0
  4. data/db/migrate/20110208031356_add_delayed_jobs_tag.rb +14 -0
  5. data/db/migrate/20110426161613_add_delayed_jobs_max_attempts.rb +13 -0
  6. data/db/migrate/20110516225834_add_delayed_jobs_strand.rb +14 -0
  7. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +26 -0
  8. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +40 -0
  9. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +52 -0
  10. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +31 -0
  11. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +15 -0
  12. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +80 -0
  13. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +15 -0
  14. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +15 -0
  15. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +13 -0
  16. data/db/migrate/20140505215131_add_failed_jobs_original_job_id.rb +13 -0
  17. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +13 -0
  18. data/db/migrate/20140505223637_drop_failed_jobs_original_id.rb +13 -0
  19. data/db/migrate/20140512213941_add_source_to_jobs.rb +15 -0
  20. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +70 -0
  21. data/db/migrate/20151123210429_add_expires_at_to_jobs.rb +15 -0
  22. data/db/migrate/20151210162949_improve_max_concurrent.rb +50 -0
  23. data/lib/delayed/backend/active_record.rb +340 -0
  24. data/lib/delayed/backend/base.rb +335 -0
  25. data/lib/delayed/backend/redis/bulk_update.lua +50 -0
  26. data/lib/delayed/backend/redis/destroy_job.lua +2 -0
  27. data/lib/delayed/backend/redis/enqueue.lua +29 -0
  28. data/lib/delayed/backend/redis/fail_job.lua +5 -0
  29. data/lib/delayed/backend/redis/find_available.lua +3 -0
  30. data/lib/delayed/backend/redis/functions.rb +57 -0
  31. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +17 -0
  32. data/lib/delayed/backend/redis/includes/jobs_common.lua +203 -0
  33. data/lib/delayed/backend/redis/job.rb +497 -0
  34. data/lib/delayed/backend/redis/set_running.lua +5 -0
  35. data/lib/delayed/backend/redis/tickle_strand.lua +2 -0
  36. data/lib/delayed/batch.rb +56 -0
  37. data/lib/delayed/cli.rb +101 -0
  38. data/lib/delayed/daemon.rb +103 -0
  39. data/lib/delayed/engine.rb +4 -0
  40. data/lib/delayed/job_tracking.rb +31 -0
  41. data/lib/delayed/lifecycle.rb +90 -0
  42. data/lib/delayed/log_tailer.rb +22 -0
  43. data/lib/delayed/message_sending.rb +134 -0
  44. data/lib/delayed/performable_method.rb +52 -0
  45. data/lib/delayed/periodic.rb +85 -0
  46. data/lib/delayed/plugin.rb +22 -0
  47. data/lib/delayed/pool.rb +161 -0
  48. data/lib/delayed/server/helpers.rb +28 -0
  49. data/lib/delayed/server/public/css/app.css +12 -0
  50. data/lib/delayed/server/public/js/app.js +132 -0
  51. data/lib/delayed/server/views/index.erb +90 -0
  52. data/lib/delayed/server/views/layout.erb +47 -0
  53. data/lib/delayed/server.rb +120 -0
  54. data/lib/delayed/settings.rb +90 -0
  55. data/lib/delayed/testing.rb +32 -0
  56. data/lib/delayed/version.rb +3 -0
  57. data/lib/delayed/work_queue/in_process.rb +13 -0
  58. data/lib/delayed/work_queue/parent_process.rb +180 -0
  59. data/lib/delayed/worker.rb +234 -0
  60. data/lib/delayed/yaml_extensions.rb +109 -0
  61. data/lib/delayed_job.rb +46 -0
  62. data/lib/inst-jobs.rb +1 -0
  63. data/spec/active_record_job_spec.rb +246 -0
  64. data/spec/delayed/cli_spec.rb +23 -0
  65. data/spec/delayed/daemon_spec.rb +35 -0
  66. data/spec/delayed/server_spec.rb +63 -0
  67. data/spec/delayed/settings_spec.rb +32 -0
  68. data/spec/delayed/work_queue/in_process_spec.rb +31 -0
  69. data/spec/delayed/work_queue/parent_process_spec.rb +159 -0
  70. data/spec/delayed/worker_spec.rb +16 -0
  71. data/spec/gemfiles/32.gemfile +6 -0
  72. data/spec/gemfiles/40.gemfile +5 -0
  73. data/spec/gemfiles/41.gemfile +5 -0
  74. data/spec/gemfiles/42.gemfile +5 -0
  75. data/spec/migrate/20140924140513_add_story_table.rb +7 -0
  76. data/spec/redis_job_spec.rb +140 -0
  77. data/spec/sample_jobs.rb +28 -0
  78. data/spec/shared/delayed_batch.rb +85 -0
  79. data/spec/shared/delayed_method.rb +419 -0
  80. data/spec/shared/performable_method.rb +66 -0
  81. data/spec/shared/shared_backend.rb +819 -0
  82. data/spec/shared/testing.rb +48 -0
  83. data/spec/shared/worker.rb +378 -0
  84. data/spec/shared_jobs_specs.rb +15 -0
  85. data/spec/spec_helper.rb +97 -0
  86. metadata +390 -0
data/lib/delayed/work_queue/parent_process.rb ADDED
@@ -0,0 +1,180 @@
+ require 'socket'
+ require 'tempfile'
+ require 'timeout'
+
+ module Delayed
+   module WorkQueue
+     # ParentProcess is a WorkQueue implementation that spawns a separate worker
+     # process for querying the queue. Each Worker child process sends requests to
+     # the ParentProcess via IPC, and receives responses. This centralized queue
+     # querying cuts down on db queries and lock contention, and allows the
+     # possibility for other centralized logic such as notifications when all workers
+     # are idle.
+     #
+     # The IPC implementation uses Unix stream sockets and Ruby's built-in Marshal
+     # functionality. The ParentProcess creates a Unix socket on the filesystem in
+     # the tmp directory, so that if a worker process dies and is restarted it can
+     # reconnect to the socket.
+     #
+     # While Unix and IP sockets are API compatible, we take a lot of shortcuts
+     # because we know it's just a local Unix socket. If we ever wanted to swap this
+     # out for a TCP/IP socket and have the WorkQueue running on another host, we'd
+     # want to be a lot more robust about partial reads/writes and timeouts.
+     class ParentProcess
+       class ProtocolError < RuntimeError
+       end
+
+       def initialize
+         @path = self.class.generate_socket_path
+       end
+
+       def self.generate_socket_path
+         # We utilize Tempfile as a convenient way to get a socket filename in the
+         # writeable temp directory. However, since we destroy the normal file and
+         # write a unix socket file to the same location, we lose the hard uniqueness
+         # guarantees of Tempfile. This is OK for this use case, we only generate one
+         # Tempfile with this prefix.
+         tmp = Tempfile.new("inst-jobs-#{Process.pid}-")
+         path = tmp.path
+         tmp.close!
+         path
+       end
+
+       def server(parent_pid: nil)
+         # The unix_server_socket method takes care of cleaning up any existing
+         # socket for us if the work queue process dies and is restarted.
+         listen_socket = Socket.unix_server_socket(@path)
+         Server.new(listen_socket, parent_pid: parent_pid)
+       end
+
+       def client
+         Client.new(Addrinfo.unix(@path))
+       end
+
+       class Client
+         attr_reader :addrinfo
+
+         def initialize(addrinfo)
+           @addrinfo = addrinfo
+         end
+
+         def get_and_lock_next_available(name, queue_name, min_priority, max_priority)
+           @socket ||= @addrinfo.connect
+           Marshal.dump([name, queue_name, min_priority, max_priority], @socket)
+           response = Marshal.load(@socket)
+           unless response.nil? || (response.is_a?(Delayed::Job) && response.locked_by == name)
+             raise(ProtocolError, "response is not a locked job: #{response.inspect}")
+           end
+           response
+         rescue SystemCallError, IOError
+           # The work queue process died. Return nil to signal the worker
+           # process should sleep as if no job was found, and then retry.
+           @socket = nil
+           nil
+         end
+       end
+
+       class Server
+         attr_reader :listen_socket
+
+         def initialize(listen_socket, parent_pid: nil)
+           @listen_socket = listen_socket
+           @parent_pid = parent_pid
+           @clients = {}
+         end
+
+         def connected_clients
+           @clients.size
+         end
+
+         def all_workers_idle?
+           !@clients.any? { |_, c| c.working }
+         end
+
+         def say(msg, level = :debug)
+           if defined?(Rails.logger) && Rails.logger
+             Rails.logger.send(level, "[#{Process.pid}]Q #{msg}")
+           else
+             puts(msg)
+           end
+         end
+
+         # run the server queue worker
+         # this method does not return, only exits or raises an exception
+         def run
+           say "Starting work queue process"
+
+           while !exit?
+             run_once
+           end
+
+         rescue => e
+           say "WorkQueue Server died: #{e.inspect}"
+           raise
+         end
+
+         def run_once
+           handles = @clients.keys + [@listen_socket]
+           readable, _, _ = IO.select(handles, nil, nil, 1)
+           if readable
+             readable.each { |s| handle_read(s) }
+           end
+         end
+
+         def handle_read(socket)
+           if socket == @listen_socket
+             handle_accept
+           else
+             handle_request(socket)
+           end
+         end
+
+         # Any error on the listen socket other than WaitReadable will bubble up
+         # and terminate the work queue process, to be restarted by the parent daemon.
+         def handle_accept
+           client, _addr = @listen_socket.accept_nonblock
+           if client
+             @clients[client] = ClientState.new(false)
+           end
+         rescue IO::WaitReadable
+           # ignore and just try accepting again next time through the loop
+         end
+
+         def handle_request(socket)
+           # There is an assumption here that the client will never send a partial
+           # request and then leave the socket open. Doing so would leave us hanging
+           # here forever. This is only a reasonable assumption because we control
+           # the client.
+           request = client_timeout { Marshal.load(socket) }
+           response = nil
+           Delayed::Worker.lifecycle.run_callbacks(:work_queue_pop, self) do
+             response = Delayed::Job.get_and_lock_next_available(*request)
+             @clients[socket].working = !response.nil?
+           end
+           client_timeout { Marshal.dump(response, socket) }
+         rescue SystemCallError, IOError, Timeout::Error
+           # this socket went away
+           begin
+             socket.close
+           rescue IOError
+           end
+           @clients.delete(socket)
+         end
+
+         def exit?
+           parent_exited?
+         end
+
+         def parent_exited?
+           @parent_pid && @parent_pid != Process.ppid
+         end
+
+         def client_timeout
+           Timeout.timeout(Settings.parent_process_client_timeout) { yield }
+         end
+
+         ClientState = Struct.new(:working)
+       end
+     end
+   end
+ end
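
Usage sketch (illustrative, not part of the diff): roughly how the two halves above fit together. The fork-based wiring, worker name, and queue name are assumptions; only ParentProcess#server, #client, Server#run, and Client#get_and_lock_next_available come from the code above, and they assume the rest of the gem (Delayed::Job, Settings) is already loaded.

    require 'inst-jobs'

    work_queue = Delayed::WorkQueue::ParentProcess.new

    if fork.nil?
      # Dedicated queue process: accept worker connections and serve
      # get_and_lock_next_available requests until the parent daemon exits.
      work_queue.server(parent_pid: Process.ppid).run
    else
      # A worker process: ask the queue process for one locked job over the
      # Unix socket; nil means no job was found (or the queue process died),
      # so the worker should sleep and retry.
      client = work_queue.client
      job = client.get_and_lock_next_available("host:1234", "default", nil, nil)
      puts(job ? "locked job #{job.id}" : "nothing to do")
    end
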
data/lib/delayed/worker.rb ADDED
@@ -0,0 +1,234 @@
+ module Delayed
+
+   class TimeoutError < RuntimeError; end
+
+   require 'tmpdir'
+   require 'set'
+
+   class Worker
+     attr_reader :config, :queue_name, :min_priority, :max_priority, :work_queue
+
+     # Callback to fire when a delayed job fails max_attempts times. If this
+     # callback is defined, then the value of destroy_failed_jobs is ignored, and
+     # the job is destroyed if this block returns true.
+     #
+     # This allows for destroying "uninteresting" failures, while keeping around
+     # interesting failures to be investigated later.
+     #
+     # The block is called with args(job, last_exception)
+     def self.on_max_failures=(block)
+       @@on_max_failures = block
+     end
+     cattr_reader :on_max_failures
+
+     cattr_accessor :plugins
+     self.plugins = Set.new
+
+     def self.lifecycle
+       @lifecycle ||= Delayed::Lifecycle.new
+     end
+
+     def initialize(options = {})
+       @exit = false
+       @config = options
+       @parent_pid = options[:parent_pid]
+       @queue_name = options[:queue] || Settings.queue
+       @min_priority = options[:min_priority]
+       @max_priority = options[:max_priority]
+       @max_job_count = options[:worker_max_job_count].to_i
+       @max_memory_usage = options[:worker_max_memory_usage].to_i
+       @work_queue = options[:work_queue] || WorkQueue::InProcess.new
+       @job_count = 0
+
+       app = Rails.application
+       if app && !app.config.cache_classes
+         Delayed::Worker.lifecycle.around(:perform) do |&block|
+           reload = app.config.reload_classes_only_on_change != true || app.reloaders.map(&:updated?).any?
+           ActionDispatch::Reloader.prepare! if reload
+           begin
+             block.call
+           ensure
+             ActionDispatch::Reloader.cleanup! if reload
+           end
+         end
+       end
+
+       plugins.each { |plugin| plugin.inject! }
+     end
+
+     def name
+       @name ||= "#{Socket.gethostname rescue "X"}:#{self.id}"
+     end
+
+     def set_process_name(new_name)
+       $0 = "delayed:#{new_name}"
+     end
+
+     def exit?
+       @exit || parent_exited?
+     end
+
+     def parent_exited?
+       @parent_pid && @parent_pid != Process.ppid
+     end
+
+     def start
+       say "Starting worker", :info
+
+       trap('INT') { say 'Exiting'; @exit = true }
+
+       self.class.lifecycle.run_callbacks(:execute, self) do
+         loop do
+           run
+           break if exit?
+         end
+       end
+
+       say "Stopping worker", :info
+     rescue => e
+       Rails.logger.fatal("Child process died: #{e.inspect}") rescue nil
+       self.class.lifecycle.run_callbacks(:exceptional_exit, self, e) { }
+     ensure
+       Delayed::Job.clear_locks!(name)
+     end
+
+     def run
+       self.class.lifecycle.run_callbacks(:loop, self) do
+         job = self.class.lifecycle.run_callbacks(:pop, self) do
+           work_queue.get_and_lock_next_available(name, queue_name, min_priority, max_priority)
+         end
+
+         if job
+           configure_for_job(job) do
+             @job_count += perform(job)
+
+             if @max_job_count > 0 && @job_count >= @max_job_count
+               say "Max job count of #{@max_job_count} exceeded, dying"
+               @exit = true
+             end
+
+             if @max_memory_usage > 0
+               memory = sample_memory
+               if memory > @max_memory_usage
+                 say "Memory usage of #{memory} exceeds max of #{@max_memory_usage}, dying"
+                 @exit = true
+               else
+                 say "Memory usage: #{memory}"
+               end
+             end
+           end
+         else
+           set_process_name("wait:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}")
+           sleep(Settings.sleep_delay + (rand * Settings.sleep_delay_stagger))
+         end
+       end
+     end
+
+     def perform(job)
+       count = 1
+       raise Delayed::Backend::JobExpired, "job expired at #{job.expires_at}" if job.expired?
+       self.class.lifecycle.run_callbacks(:perform, self, job) do
+         set_process_name("run:#{Settings.worker_procname_prefix}#{job.id}:#{job.name}")
+         say("Processing #{log_job(job, :long)}", :info)
+         runtime = Benchmark.realtime do
+           if job.batch?
+             # each job in the batch will have perform called on it, so we don't
+             # need a timeout around this
+             count = perform_batch(job)
+           else
+             job.invoke_job
+           end
+           job.destroy
+         end
+         say("Completed #{log_job(job)} #{"%.0fms" % (runtime * 1000)}", :info)
+       end
+       count
+     rescue Exception => e
+       self.class.lifecycle.run_callbacks(:error, self, job, e) do
+         handle_failed_job(job, e)
+       end
+       count
+     end
+
+     def perform_batch(parent_job)
+       batch = parent_job.payload_object
+       if batch.mode == :serial
+         batch.jobs.each do |job|
+           job.source = parent_job.source
+           job.create_and_lock!(name)
+           configure_for_job(job) do
+             perform(job)
+           end
+         end
+         batch.items.size
+       end
+     end
+
+     def handle_failed_job(job, error)
+       job.last_error = "#{error.message}\n#{error.backtrace.join("\n")}"
+       say("Failed with #{error.class} [#{error.message}] (#{job.attempts} attempts)", :error)
+       job.reschedule(error)
+     end
+
+     def id
+       Process.pid
+     end
+
+     def say(msg, level = :debug)
+       Rails.logger.send(level, msg)
+     end
+
+     def log_job(job, format = :short)
+       case format
+       when :long
+         "#{job.full_name} #{ job.to_json(:include_root => false, :only => %w(tag strand priority attempts created_at max_attempts source)) }"
+       else
+         job.full_name
+       end
+     end
+
+     # set up the session context information, so that it gets logged with the job log lines
+     # also set up a unique tmpdir, which will get removed at the end of the job.
+     def configure_for_job(job)
+       previous_tmpdir = ENV['TMPDIR']
+       Thread.current[:running_delayed_job] = job
+
+       dir = Dir.mktmpdir("job-#{job.id}-#{self.name.gsub(/[^\w\.]/, '.')}-")
+       begin
+         ENV['TMPDIR'] = dir
+         yield
+       ensure
+         FileUtils.remove_entry(dir, true)
+       end
+     ensure
+       ENV['TMPDIR'] = previous_tmpdir
+       Thread.current[:running_delayed_job] = nil
+     end
+
+     def self.current_job
+       Thread.current[:running_delayed_job]
+     end
+
+     # `sample` reports KB, not B
+     if File.directory?("/proc")
+       # linux w/ proc fs
+       LINUX_PAGE_SIZE = (size = `getconf PAGESIZE`.to_i; size > 0 ? size : 4096)
+       def sample_memory
+         s = File.read("/proc/#{Process.pid}/statm").to_i rescue 0
+         s * LINUX_PAGE_SIZE / 1024
+       end
+     else
+       # generic unix solution
+       def sample_memory
+         if Rails.env.test?
+           0
+         else
+           # hmm this is actually resident set size, doesn't include swapped-to-disk
+           # memory.
+           `ps -o rss= -p #{Process.pid}`.to_i
+         end
+       end
+     end
+
+   end
+ end
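
Usage sketch (illustrative, not part of the diff): constructing a single worker by hand. The option values are made up, but the keys are the ones read in Worker#initialize above, and the memory limit is compared against sample_memory, i.e. in kilobytes. Rails must be loaded, since initialize consults Rails.application.

    worker = Delayed::Worker.new(
      queue: "default",                  # falls back to Settings.queue when omitted
      min_priority: Delayed::MIN_PRIORITY,
      max_priority: Delayed::MAX_PRIORITY,
      worker_max_job_count: 100,         # exit after 100 jobs; 0 disables the check
      worker_max_memory_usage: 512_000,  # in KB (see sample_memory); 0 disables the check
      work_queue: Delayed::WorkQueue::InProcess.new
    )
    worker.start  # loops until SIGINT, parent exit, or one of the max-* limits trips
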
data/lib/delayed/yaml_extensions.rb ADDED
@@ -0,0 +1,109 @@
+ # New definitions for YAML to aid in serialization and deserialization of delayed jobs.
+
+ require 'yaml'
+
+ # First, tell YAML how to load a Module. This depends on Rails .constantize and autoloading.
+ YAML.add_ruby_type("object:Module") do |type, val|
+   val.constantize
+ end
+
+ Psych.add_domain_type("ruby/object", "Module") do |type, val|
+   val.constantize
+ end
+
+ Psych.add_domain_type("ruby/object", "Class") do |type, val|
+   val.constantize
+ end
+
+ class Module
+   def to_yaml(opts = {})
+     YAML.quick_emit(self.object_id, opts) do |out|
+       out.scalar(taguri, name)
+     end
+   end
+ end
+
+ # Now we have to do the same for Class.
+ YAML.add_ruby_type("object:Class") do |type, val|
+   val.constantize
+ end
+
+ class Class
+   def to_yaml(opts = {})
+     YAML.quick_emit(self.object_id, opts) do |out|
+       out.scalar(taguri, name)
+     end
+   end
+
+   def encode_with(coder)
+     coder.scalar("!ruby/object:Class", name)
+   end
+ end
+
+ # Now, tell YAML how to intelligently load ActiveRecord objects, using the
+ # database rather than just serializing their attributes to the YAML. This
+ # ensures the object is up to date when we use it in the job.
+ class ActiveRecord::Base
+   yaml_as "tag:ruby.yaml.org,2002:ActiveRecord"
+
+   def to_yaml(opts = {})
+     if id.nil?
+       raise("Can't serialize unsaved ActiveRecord object for delayed job: #{self.inspect}")
+     end
+     YAML.quick_emit(self.object_id, opts) do |out|
+       out.scalar(taguri, id.to_s)
+     end
+   end
+
+   def encode_with(coder)
+     if id.nil?
+       raise("Can't serialize unsaved ActiveRecord object for delayed job: #{self.inspect}")
+     end
+     coder.scalar("!ruby/ActiveRecord:#{self.class.name}", id.to_s)
+   end
+
+   def self.yaml_new(klass, tag, val)
+     klass.find(val)
+   rescue ActiveRecord::RecordNotFound
+     raise Delayed::Backend::RecordNotFound, "Couldn't find #{klass} with id #{val.inspect}"
+   end
+ end
+
+ module Delayed
+   module PsychExt
+     module ToRuby
+       def visit_Psych_Nodes_Scalar(object)
+         case object.tag
+         when %r{^!ruby/ActiveRecord:(.+)$}
+           begin
+             klass = resolve_class(Regexp.last_match[1])
+             klass.unscoped.find(object.value)
+           rescue ActiveRecord::RecordNotFound
+             raise Delayed::Backend::RecordNotFound, "Couldn't find #{klass} with id #{object.value.inspect}"
+           end
+         when "tag:ruby.yaml.org,2002:Delayed::Periodic", "!ruby/Delayed::Periodic"
+           Delayed::Periodic.scheduled[object.value] || raise(NameError, "job #{object.value} is no longer scheduled")
+         else
+           super
+         end
+       end
+
+       def resolve_class(klass_name)
+         return nil if !klass_name || klass_name.empty?
+         klass_name.constantize
+       rescue
+         super
+       end
+     end
+   end
+ end
+
+ Psych::Visitors::ToRuby.prepend(Delayed::PsychExt::ToRuby)
+
+ # Load Module/Class from yaml tag.
+ class Module
+   def yaml_tag_read_class(name)
+     name.constantize
+     name
+   end
+ end
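
Illustrative round-trip (not part of the diff) of the ActiveRecord extension above, assuming a hypothetical persisted User record with id 42: dumping emits only a tagged id scalar, and loading re-fetches the row via klass.unscoped.find, so the job sees current data rather than stale serialized attributes.

    user = User.find(42)
    yaml = YAML.dump(user)   # emits roughly: --- !ruby/ActiveRecord:User '42'
    YAML.load(yaml)          # re-runs User.unscoped.find("42")

    YAML.dump(User.new)      # raises: unsaved records can't be serialized for a job
    # and if row 42 is gone by load time, Delayed::Backend::RecordNotFound is raised
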
data/lib/delayed_job.rb ADDED
@@ -0,0 +1,46 @@
+ module Delayed
+   MIN_PRIORITY = 0
+   HIGH_PRIORITY = 0
+   NORMAL_PRIORITY = 10
+   LOW_PRIORITY = 20
+   LOWER_PRIORITY = 50
+   MAX_PRIORITY = 1_000_000
+
+   def self.select_backend(backend)
+     remove_const(:Job) if defined?(::Delayed::Job)
+     const_set(:Job, backend)
+   end
+ end
+
+ require 'rails'
+ require 'active_support/core_ext/module/attribute_accessors'
+ require 'active_record'
+ require 'after_transaction_commit'
+
+ require 'delayed/settings'
+ require 'delayed/yaml_extensions'
+
+ require 'delayed/backend/base'
+ require 'delayed/backend/active_record'
+ require 'delayed/backend/redis/job'
+ require 'delayed/batch'
+ require 'delayed/cli'
+ require 'delayed/daemon'
+ require 'delayed/job_tracking'
+ require 'delayed/lifecycle'
+ require 'delayed/log_tailer'
+ require 'delayed/message_sending'
+ require 'delayed/performable_method'
+ require 'delayed/periodic'
+ require 'delayed/plugin'
+ require 'delayed/pool'
+ require 'delayed/worker'
+ require 'delayed/work_queue/in_process'
+ require 'delayed/work_queue/parent_process'
+
+ require 'delayed/engine'
+
+ Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
+
+ Object.send(:include, Delayed::MessageSending)
+ Module.send(:include, Delayed::MessageSending::ClassMethods)
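
Usage sketch (illustrative, not part of the diff): the backend is swappable at runtime via select_backend, which re-points the Delayed::Job constant; ActiveRecord is the default selected above.

    Delayed::Job  # => Delayed::Backend::ActiveRecord::Job (the default set above)

    # Opt into the Redis backend instead; a configured Redis connection is assumed.
    Delayed.select_backend(Delayed::Backend::Redis::Job)
    Delayed::Job  # => Delayed::Backend::Redis::Job
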
data/lib/inst-jobs.rb ADDED
@@ -0,0 +1 @@
+ require 'delayed_job'