inst-jobs 2.0.0 → 3.0.0
This diff shows the changes between publicly released versions of the package as they appear in the public registry, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
- data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
- data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
- data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
- data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
- data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
- data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
- data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
- data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
- data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
- data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
- data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
- data/exe/inst_jobs +3 -2
- data/lib/delayed/backend/active_record.rb +211 -168
- data/lib/delayed/backend/base.rb +110 -72
- data/lib/delayed/batch.rb +11 -9
- data/lib/delayed/cli.rb +98 -84
- data/lib/delayed/core_ext/kernel.rb +4 -2
- data/lib/delayed/daemon.rb +70 -74
- data/lib/delayed/job_tracking.rb +26 -25
- data/lib/delayed/lifecycle.rb +27 -23
- data/lib/delayed/log_tailer.rb +17 -17
- data/lib/delayed/logging.rb +13 -16
- data/lib/delayed/message_sending.rb +43 -52
- data/lib/delayed/performable_method.rb +6 -8
- data/lib/delayed/periodic.rb +72 -68
- data/lib/delayed/plugin.rb +2 -4
- data/lib/delayed/pool.rb +205 -168
- data/lib/delayed/server/helpers.rb +6 -6
- data/lib/delayed/server.rb +51 -54
- data/lib/delayed/settings.rb +94 -81
- data/lib/delayed/testing.rb +21 -22
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/in_process.rb +21 -17
- data/lib/delayed/work_queue/parent_process/client.rb +55 -53
- data/lib/delayed/work_queue/parent_process/server.rb +245 -207
- data/lib/delayed/work_queue/parent_process.rb +52 -53
- data/lib/delayed/worker/consul_health_check.rb +32 -33
- data/lib/delayed/worker/health_check.rb +34 -26
- data/lib/delayed/worker/null_health_check.rb +3 -1
- data/lib/delayed/worker/process_helper.rb +8 -9
- data/lib/delayed/worker.rb +272 -241
- data/lib/delayed/yaml_extensions.rb +12 -10
- data/lib/delayed_job.rb +37 -37
- data/lib/inst-jobs.rb +1 -1
- data/spec/active_record_job_spec.rb +143 -139
- data/spec/delayed/cli_spec.rb +7 -7
- data/spec/delayed/daemon_spec.rb +10 -9
- data/spec/delayed/message_sending_spec.rb +16 -9
- data/spec/delayed/periodic_spec.rb +14 -21
- data/spec/delayed/server_spec.rb +38 -38
- data/spec/delayed/settings_spec.rb +26 -25
- data/spec/delayed/work_queue/in_process_spec.rb +7 -8
- data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
- data/spec/delayed/work_queue/parent_process/server_spec.rb +117 -41
- data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
- data/spec/delayed/worker/consul_health_check_spec.rb +37 -50
- data/spec/delayed/worker/health_check_spec.rb +60 -52
- data/spec/delayed/worker_spec.rb +44 -21
- data/spec/sample_jobs.rb +45 -15
- data/spec/shared/delayed_batch.rb +74 -67
- data/spec/shared/delayed_method.rb +143 -102
- data/spec/shared/performable_method.rb +39 -38
- data/spec/shared/shared_backend.rb +550 -437
- data/spec/shared/testing.rb +14 -14
- data/spec/shared/worker.rb +156 -148
- data/spec/shared_jobs_specs.rb +13 -13
- data/spec/spec_helper.rb +53 -55
- metadata +148 -82
- data/lib/delayed/backend/redis/bulk_update.lua +0 -50
- data/lib/delayed/backend/redis/destroy_job.lua +0 -2
- data/lib/delayed/backend/redis/enqueue.lua +0 -29
- data/lib/delayed/backend/redis/fail_job.lua +0 -5
- data/lib/delayed/backend/redis/find_available.lua +0 -3
- data/lib/delayed/backend/redis/functions.rb +0 -59
- data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
- data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
- data/lib/delayed/backend/redis/job.rb +0 -535
- data/lib/delayed/backend/redis/set_running.lua +0 -5
- data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
- data/spec/gemfiles/42.gemfile +0 -7
- data/spec/gemfiles/50.gemfile +0 -7
- data/spec/gemfiles/51.gemfile +0 -7
- data/spec/gemfiles/52.gemfile +0 -7
- data/spec/gemfiles/60.gemfile +0 -7
- data/spec/redis_job_spec.rb +0 -148
data/lib/delayed/worker.rb
CHANGED
@@ -1,236 +1,267 @@
 # frozen_string_literal: true
 
 module Delayed
-
-
-
-
-
-
-
-
-
-
-
-
-  # Callback to fire when a delayed job fails max_attempts times. If this
-  # callback is defined, then the value of destroy_failed_jobs is ignored, and
-  # the job is destroyed if this block returns true.
-  #
-  # This allows for destroying "uninteresting" failures, while keeping around
-  # interesting failures to be investigated later.
-  #
-  # The block is called with args(job, last_exception)
-  def self.on_max_failures=(block)
-    @@on_max_failures = block
+  class TimeoutError < RuntimeError; end
+
+  class RetriableError < RuntimeError
+    # this error is a special case. You _should_ raise
+    # it from inside the rescue block for another error,
+    # because it indicates: "something made this job fail
+    # but we're pretty sure it's transient and it's safe to try again".
+    # the workflow is still the same (retry will happen unless
+    # retries are exhausted), but it won't call the :error
+    # callback unless it can't retry anymore. It WILL call the
+    # separate ":retry" callback, which is ONLY activated
+    # for this kind of error.
   end
-  cattr_reader :on_max_failures
 
-
-
+  require "tmpdir"
+  require "set"
+
+  class Worker
+    include Delayed::Logging
+    SIGNALS = %i[INT TERM QUIT].freeze
+
+    attr_reader :config, :queue_name, :min_priority, :max_priority, :work_queue
+
+    class << self
+      # Callback to fire when a delayed job fails max_attempts times. If this
+      # callback is defined, then the value of destroy_failed_jobs is ignored, and
+      # the job is destroyed if this block returns true.
+      #
+      # This allows for destroying "uninteresting" failures, while keeping around
+      # interesting failures to be investigated later.
+      #
+      # The block is called with args(job, last_exception)
+      attr_accessor :on_max_failures
+    end
 
-
-
-  end
+    cattr_accessor :plugins
+    self.plugins = Set.new
 
-
-
-
+    def self.lifecycle
+      @lifecycle ||= Delayed::Lifecycle.new
+    end
 
-
-
-
-  ensure
-    Thread.current[:running_delayed_job] = nil
-  end
+    def self.current_job
+      Thread.current[:running_delayed_job]
+    end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def self.running_job(job)
+      Thread.current[:running_delayed_job] = job
+      yield
+    ensure
+      Thread.current[:running_delayed_job] = nil
+    end
+
+    def initialize(options = {})
+      @exit = false
+      @parent_pid = options[:parent_pid]
+      @queue_name = options[:queue] ||= Settings.queue
+      @min_priority = options[:min_priority]
+      @max_priority = options[:max_priority]
+      @max_job_count = options[:worker_max_job_count].to_i
+      @max_memory_usage = options[:worker_max_memory_usage].to_i
+      @work_queue = options.delete(:work_queue) || WorkQueue::InProcess.new
+      @health_check_type = Settings.worker_health_check_type
+      @health_check_config = Settings.worker_health_check_config
+      @config = options
+      @job_count = 0
+
+      @signal_queue = []
+
+      app = Rails.application
+      if app && !app.config.cache_classes
+        Delayed::Worker.lifecycle.around(:perform) do |worker, job, &block|
+          reload = app.config.reload_classes_only_on_change != true || app.reloaders.map(&:updated?).any?
+
+          if reload
+            if defined?(ActiveSupport::Reloader)
+              Rails.application.reloader.reload!
+            else
+              ActionDispatch::Reloader.prepare!
+            end
           end
-          end
 
-
-
-
-
+          begin
+            block.call(worker, job)
+          ensure
+            ActionDispatch::Reloader.cleanup! if reload && !defined?(ActiveSupport::Reloader)
+          end
        end
      end
-      end
-
-      plugins.each { |plugin| plugin.inject! }
-    end
 
-
-    end
+      plugins.each(&:inject!)
+    end
 
-
-
-
+    def name
+      @name ||= "#{Socket.gethostname rescue 'X'}:#{id}"
+    end
 
-
-
-
+    def process_name=(new_name)
+      $0 = "delayed:#{new_name}"
+    end
 
-
-
-
-    end
+    def exit?
+      !!@exit || parent_exited?
+    end
 
-
-
-
+    def parent_exited?
+      @parent_pid && @parent_pid != Process.ppid
+    end
 
-
-
-
+    def wake_up
+      @self_pipe[1].write_nonblock(".", exception: false)
+      work_queue.wake_up
    end
 
-
-
-
-
-
-
-
-
-
-
-
-
-          @exit = true
-        else
-          logger.error "Unknown signal '#{sig}' received"
+    def start
+      logger.info "Starting worker"
+      self.process_name =
+        "start:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}"
+      @self_pipe = IO.pipe
+      work_queue.init
+
+      work_thread = Thread.current
+      SIGNALS.each do |sig|
+        trap(sig) do
+          @signal_queue << sig
+          wake_up
        end
      end
-      end
 
-
-
-
+      raise "Could not register health_check" unless health_check.start
+
+      signal_processor = Thread.new do
+        loop do
+          @self_pipe[0].read(1)
+          case @signal_queue.pop
+          when :INT, :TERM
+            @exit = true # get the main thread to bail early if it's waiting for a job
+            work_thread.raise(SystemExit) # Force the main thread to bail out of the current job
+            cleanup! # we're going to get SIGKILL'd in a moment, so clean up asap
+            break
+          when :QUIT
+            @exit = true
+          else
+            logger.error "Unknown signal '#{sig}' received"
+          end
+        end
+      end
+
+      self.class.lifecycle.run_callbacks(:execute, self) do
+        run until exit?
      end
-      end
 
-
-
-
-
-
-
+      logger.info "Stopping worker"
+    rescue => e
+      Rails.logger.fatal("Child process died: #{e.inspect}") rescue nil
+      self.class.lifecycle.run_callbacks(:exceptional_exit, self, e) { nil }
+    ensure
+      cleanup!
 
-
-
-
+      if signal_processor
+        signal_processor.kill
+        signal_processor.join
+      end
+
+      @self_pipe&.each(&:close)
+      @self_pipe = nil
    end
-    end
 
-
-
+    def cleanup!
+      return if cleaned?
 
-
-
-
+      health_check.stop
+      work_queue.close
+      Delayed::Job.clear_locks!(name)
 
-
-
+      @cleaned = true
+    end
 
-
-
-
+    def cleaned?
+      @cleaned
+    end
 
-
-
-      self.class.lifecycle.run_callbacks(:loop, self) do
-        set_process_name("pop:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}")
-        job = self.class.lifecycle.run_callbacks(:pop, self) do
-          work_queue.get_and_lock_next_available(name, config)
-        end
+    def run
+      return if exit?
 
-
-
-      @
+      self.class.lifecycle.run_callbacks(:loop, self) do
+        self.process_name =
+          "pop:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}"
+        job = self.class.lifecycle.run_callbacks(:pop, self) do
+          work_queue.get_and_lock_next_available(name, config)
+        end
 
-
-
-      @
-      end
+        if job
+          configure_for_job(job) do
+            @job_count += perform(job)
 
-
-
-            if memory > @max_memory_usage
-              logger.debug "Memory usage of #{memory} exceeds max of #{@max_memory_usage}, dying"
+            if @max_job_count.positive? && @job_count >= @max_job_count
+              logger.debug "Max job count of #{@max_job_count} exceeded, dying"
              @exit = true
-
-
+            end
+
+            if @max_memory_usage.positive?
+              memory = sample_memory
+              if memory > @max_memory_usage
+                logger.debug "Memory usage of #{memory} exceeds max of #{@max_memory_usage}, dying"
+                @exit = true
+              else
+                logger.debug "Memory usage: #{memory}"
+              end
            end
          end
+        else
+          self.process_name =
+            "wait:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}"
+          sleep(Settings.sleep_delay + (rand * Settings.sleep_delay_stagger)) unless exit?
        end
-      else
-        set_process_name("wait:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}")
-        sleep(Settings.sleep_delay + (rand * Settings.sleep_delay_stagger)) unless exit?
      end
    end
-    end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def perform(job)
+      begin
+        count = 1
+        raise Delayed::Backend::JobExpired, "job expired at #{job.expires_at}" if job.expired?
+
+        self.class.lifecycle.run_callbacks(:perform, self, job) do
+          self.process_name = "run:#{Settings.worker_procname_prefix}#{job.id}:#{job.name}"
+          logger.info("Processing #{log_job(job, :long)}")
+          runtime = Benchmark.realtime do
+            if job.batch?
+              # each job in the batch will have perform called on it, so we don't
+              # need a timeout around this
+              count = perform_batch(job)
+            else
+              job.invoke_job
+            end
+            job.destroy
+          end
+          logger.info("Completed #{log_job(job)} #{format('%.0fms', (runtime * 1000))}")
+        end
+      rescue ::Delayed::RetriableError => e
+        can_retry = job.attempts + 1 < job.inferred_max_attempts
+        callback_type = can_retry ? :retry : :error
+        self.class.lifecycle.run_callbacks(callback_type, self, job, e) do
+          handle_failed_job(job, e)
+        end
+      rescue SystemExit => e
+        # There wasn't really a failure here so no callbacks and whatnot needed,
+        # still reschedule the job though.
+        job.reschedule(e)
+      rescue Exception => e # rubocop:disable Lint/RescueException
+        self.class.lifecycle.run_callbacks(:error, self, job, e) do
+          handle_failed_job(job, e)
        end
-          job.destroy
      end
-
-        end
-        count
-      rescue SystemExit => se
-        # There wasn't really a failure here so no callbacks and whatnot needed,
-        # still reschedule the job though.
-        job.reschedule(se)
-        count
-      rescue Exception => e
-        self.class.lifecycle.run_callbacks(:error, self, job, e) do
-          handle_failed_job(job, e)
+      count
    end
-      count
-    end
 
-
-
-
+    def perform_batch(parent_job)
+      batch = parent_job.payload_object
+      return unless batch.mode == :serial
+
      batch.jobs.each do |job|
        job.source = parent_job.source
        job.create_and_lock!(name)
@@ -240,72 +271,72 @@ class Worker
      end
      batch.items.size
    end
-    end
 
-
-
-
-
-
+    def handle_failed_job(job, error)
+      job.last_error = "#{error.message}\n#{error.backtrace.join("\n")}"
+      logger.error("Failed with #{error.class} [#{error.message}] (#{job.attempts} attempts)")
+      job.reschedule(error)
+    end
 
-
-
-
+    def id
+      Process.pid
+    end
 
-
-
-
-
-
-
+    def log_job(job, format = :short)
+      case format
+      when :long
+        "#{job.full_name} #{Settings.job_detailed_log_format.call(job)}"
+      else
+        job.full_name
+      end
    end
-    end
 
-
-
-
-
+    # set up the session context information, so that it gets logged with the job log lines
+    # also set up a unique tmpdir, which will get removed at the end of the job.
+    def configure_for_job(job)
+      previous_tmpdir = ENV["TMPDIR"]
 
-
-
-
-
-
-
-
+      self.class.running_job(job) do
+        dir = Dir.mktmpdir("job-#{job.id}-#{name.gsub(/[^\w.]/, '.')}-")
+        begin
+          ENV["TMPDIR"] = dir
+          yield
+        ensure
+          FileUtils.remove_entry(dir, true)
+        end
      end
+    ensure
+      ENV["TMPDIR"] = previous_tmpdir
    end
-    ensure
-      ENV['TMPDIR'] = previous_tmpdir
-    end
-
-    def health_check
-      @health_check ||= HealthCheck.build(
-        type: @health_check_type,
-        worker_name: name,
-        config: @health_check_config
-      )
-    end
 
-
-
-
-
-
-
-      s * LINUX_PAGE_SIZE / 1024
+    def health_check
+      @health_check ||= HealthCheck.build(
+        type: @health_check_type,
+        worker_name: name,
+        config: @health_check_config
+      )
    end
-
-    #
-
-
-
-
-
-
-
+
+    # `sample` reports KB, not B
+    if File.directory?("/proc")
+      # linux w/ proc fs
+      LINUX_PAGE_SIZE = (size = `getconf PAGESIZE`.to_i
+                         size.positive? ? size : 4096)
+      def sample_memory
+        s = File.read("/proc/#{Process.pid}/statm").to_i rescue 0
+        s * LINUX_PAGE_SIZE / 1024
+      end
+    else
+      # generic unix solution
+      def sample_memory
+        if Rails.env.test?
+          0
+        else
+          # hmm this is actually resident set size, doesn't include swapped-to-disk
+          # memory.
+          `ps -o rss= -p #{Process.pid}`.to_i
+        end
      end
    end
  end
end
-end
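The 3.0 worker above exposes two hooks that application code interacts with directly: Delayed::RetriableError for failures that are expected to be transient, and the on_max_failures accessor (now defined on the Worker class) for deciding whether a permanently failed job is destroyed. A minimal sketch of how an application might use them; GuardedJob, ExternalService, and TransientNetworkError are hypothetical names, not part of the gem:

    # Keep only "interesting" failures once max_attempts is exhausted; per the
    # callback's documentation, returning true destroys the failed job and
    # returning false keeps it around for later investigation.
    Delayed::Worker.on_max_failures = proc do |job, last_exception|
      last_exception.is_a?(TransientNetworkError)
    end

    class GuardedJob
      def perform
        ExternalService.call
      rescue TransientNetworkError => e
        # Raised from inside the rescue block for the real error: the job is
        # rescheduled as usual, but the :retry lifecycle callback fires instead
        # of :error until retries are exhausted.
        raise Delayed::RetriableError, e.message
      end
    end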
data/lib/delayed/yaml_extensions.rb
CHANGED
@@ -2,36 +2,37 @@
 
 # New definitions for YAML to aid in serialization and deserialization of delayed jobs.
 
-require
+require "yaml"
 
 # These two added domain types are for backwards compatibility with jobs created
 # using the old syck tags, as syck didn't have built-in module/class dumping. We
 # now use Psych's built-in tags, which are `!ruby/module` and `!ruby/class`. At
 # some point we can remove these, once there are no more jobs in any queues with
 # these tags.
-Psych.add_domain_type("ruby/object", "Module") do |
+Psych.add_domain_type("ruby/object", "Module") do |_type, val|
  val.constantize
end
-Psych.add_domain_type("ruby/object", "Class") do |
+Psych.add_domain_type("ruby/object", "Class") do |_type, val|
  val.constantize
end
 
 # Tell YAML how to intelligently load ActiveRecord objects, using the
 # database rather than just serializing their attributes to the YAML. This
 # ensures the object is up to date when we use it in the job.
-
-
-
-      raise("Can't serialize unsaved ActiveRecord object for delayed job: #{
+module ActiveRecord
+  class Base
+    def encode_with(coder)
+      raise("Can't serialize unsaved ActiveRecord object for delayed job: #{inspect}") if id.nil?
+
+      coder.scalar("!ruby/ActiveRecord:#{self.class.name}", id.to_s)
    end
-      coder.scalar("!ruby/ActiveRecord:#{self.class.name}", id.to_s)
  end
end
 
 module Delayed
  module PsychExt
    module ToRuby
-      def visit_Psych_Nodes_Scalar(object)
+      def visit_Psych_Nodes_Scalar(object) # rubocop:disable Naming/MethodName
        case object.tag
        when %r{^!ruby/ActiveRecord:(.+)$}
          begin
@@ -50,7 +51,8 @@ module Delayed
        end
 
      def resolve_class(klass_name)
-        return nil if
+        return nil if klass_name.blank?
+
        klass_name.constantize
      rescue
        super